diff --git a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality index 3bb187aa3de0..ba88d48269d6 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality +++ b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality @@ -2110,3 +2110,4 @@ TABLE rbt_table_gc_ttl ALTER TABLE rbt_table_gc_ttl CONFIGURE ZONE USING constraints = '{+region=ap-southeast-2: 1, +region=ca-central-1: 1, +region=us-east-1: 1}', voter_constraints = '[+region=ca-central-1]', lease_preferences = '[[+region=ca-central-1]]' + diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index 9c22cb7524da..6ae258cdb1df 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -1178,3 +1178,509 @@ crdb_region pk ap-southeast-2 1 ca-central-1 3 ca-central-1 6 + +statement ok +CREATE DATABASE add_regions WITH PRIMARY REGION "ca-central-1"; +USE add_regions + +statement ok +CREATE TABLE regional_by_row ( + pk INT PRIMARY KEY, + i INT, + INDEX(i), + FAMILY (pk, i) +) LOCALITY REGIONAL BY ROW + +statement ok +CREATE TABLE regional_by_row_as ( + pk INT PRIMARY KEY, + i INT, + cr crdb_internal_region NOT NULL DEFAULT 'ca-central-1', + INDEX(i), + FAMILY (cr, pk, i) +) LOCALITY REGIONAL BY ROW AS "cr"; + +query TT +SHOW CREATE TABLE regional_by_row +---- +regional_by_row CREATE TABLE public.regional_by_row ( + pk INT8 NOT NULL, + i INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_i_idx (i ASC), + FAMILY fam_0_pk_i_crdb_region (pk, i, crdb_region) +) LOCALITY REGIONAL BY ROW; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 3, + num_voters = 3, + constraints = '{+region=ca-central-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row] +---- +ca-central-1 regional_by_row@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row@regional_by_row_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' + +query TT +SHOW CREATE TABLE regional_by_row_as +---- +regional_by_row_as CREATE TABLE public.regional_by_row_as ( + pk INT8 NOT NULL, + i INT8 NULL, + cr public.crdb_internal_region NOT NULL DEFAULT 'ca-central-1':::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_as_i_idx (i ASC), + FAMILY fam_0_cr_pk_i (cr, pk, i) +) 
LOCALITY REGIONAL BY ROW AS cr; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row_as +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 3, + num_voters = 3, + constraints = '{+region=ca-central-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row_as] +---- +ca-central-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' + +# Next, add a region. We expect this thing to succeed and add a partition + +# zone config corresponding to the regions to both the regional by row tables. +statement ok +ALTER DATABASE add_regions ADD REGION "us-east-1" + +query TT +SHOW CREATE TABLE regional_by_row +---- +regional_by_row CREATE TABLE public.regional_by_row ( + pk INT8 NOT NULL, + i INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_i_idx (i ASC), + FAMILY fam_0_pk_i_crdb_region (pk, i, crdb_region) +) LOCALITY REGIONAL BY ROW; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 4, + num_voters = 3, + constraints = '{+region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TT +SHOW CREATE TABLE regional_by_row_as +---- +regional_by_row_as CREATE TABLE public.regional_by_row_as ( + pk INT8 NOT NULL, + i INT8 NULL, + cr public.crdb_internal_region NOT NULL 
DEFAULT 'ca-central-1':::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_as_i_idx (i ASC), + FAMILY fam_0_cr_pk_i (cr, pk, i) +) LOCALITY REGIONAL BY ROW AS cr; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row_as +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 4, + num_voters = 3, + constraints = '{+region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row_as] +---- +ca-central-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +us-east-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' +us-east-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' + +# Do the same thing as above, except with a different region. 
+statement ok +ALTER DATABASE add_regions ADD REGION "ap-southeast-2" + +query TT +SHOW CREATE TABLE regional_by_row +---- +regional_by_row CREATE TABLE public.regional_by_row ( + pk INT8 NOT NULL, + i INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_i_idx (i ASC), + FAMILY fam_0_pk_i_crdb_region (pk, i, crdb_region) +) LOCALITY REGIONAL BY ROW; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 5, + num_voters = 3, + constraints = '{+region=ap-southeast-2: 1, +region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TT +SHOW CREATE TABLE regional_by_row_as +---- +regional_by_row_as CREATE TABLE public.regional_by_row_as ( + pk INT8 NOT NULL, + i INT8 NULL, + cr public.crdb_internal_region NOT NULL DEFAULT 'ca-central-1':::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_as_i_idx (i ASC), + FAMILY fam_0_cr_pk_i (cr, pk, i) +) LOCALITY REGIONAL BY ROW AS cr; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX 
add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row_as +---- +DATABASE add_regions ALTER DATABASE add_regions CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 5, + num_voters = 3, + constraints = '{+region=ap-southeast-2: 1, +region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row_as] +---- +ca-central-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +us-east-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' +us-east-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' +ap-southeast-2 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' +ap-southeast-2 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' + +statement ok +CREATE DATABASE add_regions_in_txn WITH PRIMARY REGION "ca-central-1"; +USE add_regions_in_txn + +statement ok +CREATE TABLE regional_by_row ( + pk INT PRIMARY KEY, + i INT, + INDEX(i), + FAMILY (pk, i) +) LOCALITY REGIONAL BY ROW + +statement ok +CREATE TABLE regional_by_row_as ( + pk INT PRIMARY KEY, + i INT, + cr crdb_internal_region NOT NULL DEFAULT 'ca-central-1', + INDEX(i), + FAMILY (cr, pk, i) +) LOCALITY REGIONAL BY ROW AS "cr"; + +statement ok +BEGIN; +ALTER DATABASE add_regions_in_txn ADD REGION "us-east-1"; +ALTER DATABASE add_regions_in_txn ADD REGION "ap-southeast-2"; +COMMIT; + + +query TT +SHOW CREATE TABLE regional_by_row +---- +regional_by_row CREATE TABLE public.regional_by_row ( + pk INT8 NOT NULL, + i INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_i_idx (i ASC), + FAMILY fam_0_pk_i_crdb_region (pk, i, crdb_region) +) LOCALITY REGIONAL BY ROW; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions_in_txn.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ap-southeast-2" OF INDEX 
add_regions_in_txn.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions_in_txn.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions_in_txn.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions_in_txn.public.regional_by_row@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions_in_txn.public.regional_by_row@regional_by_row_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row +---- +DATABASE add_regions_in_txn ALTER DATABASE add_regions_in_txn CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 5, + num_voters = 3, + constraints = '{+region=ap-southeast-2: 1, +region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row] +---- +ca-central-1 regional_by_row@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row@regional_by_row_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ap-southeast-2 regional_by_row@primary num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' +ap-southeast-2 regional_by_row@regional_by_row_i_idx num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' +us-east-1 regional_by_row@primary num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' +us-east-1 regional_by_row@regional_by_row_i_idx num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' + +query TT +SHOW CREATE TABLE regional_by_row_as +---- +regional_by_row_as CREATE TABLE public.regional_by_row_as ( + pk INT8 NOT NULL, + i INT8 NULL, + cr public.crdb_internal_region NOT NULL DEFAULT 'ca-central-1':::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (pk ASC), + INDEX regional_by_row_as_i_idx (i ASC), + FAMILY fam_0_cr_pk_i (cr, pk, i) +) LOCALITY REGIONAL BY ROW AS cr; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions_in_txn.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION "ap-southeast-2" OF INDEX add_regions_in_txn.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ap-southeast-2]', + lease_preferences = '[[+region=ap-southeast-2]]'; +ALTER PARTITION 
"ca-central-1" OF INDEX add_regions_in_txn.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "ca-central-1" OF INDEX add_regions_in_txn.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions_in_txn.public.regional_by_row_as@primary CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]'; +ALTER PARTITION "us-east-1" OF INDEX add_regions_in_txn.public.regional_by_row_as@regional_by_row_as_i_idx CONFIGURE ZONE USING + num_voters = 3, + voter_constraints = '[+region=us-east-1]', + lease_preferences = '[[+region=us-east-1]]' + +query TT +SHOW ZONE CONFIGURATION FOR TABLE regional_by_row_as +---- +DATABASE add_regions_in_txn ALTER DATABASE add_regions_in_txn CONFIGURE ZONE USING + range_min_bytes = 134217728, + range_max_bytes = 536870912, + gc.ttlseconds = 90000, + num_replicas = 5, + num_voters = 3, + constraints = '{+region=ap-southeast-2: 1, +region=ca-central-1: 1, +region=us-east-1: 1}', + voter_constraints = '[+region=ca-central-1]', + lease_preferences = '[[+region=ca-central-1]]' + +query TTT +SELECT partition_name, index_name, zone_config FROM [SHOW PARTITIONS FROM TABLE regional_by_row_as] +---- +ca-central-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ca-central-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ca-central-1]', +lease_preferences = '[[+region=ca-central-1]]' +ap-southeast-2 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' +ap-southeast-2 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=ap-southeast-2]', +lease_preferences = '[[+region=ap-southeast-2]]' +us-east-1 regional_by_row_as@primary num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' +us-east-1 regional_by_row_as@regional_by_row_as_i_idx num_voters = 3, +voter_constraints = '[+region=us-east-1]', +lease_preferences = '[[+region=us-east-1]]' diff --git a/pkg/ccl/multiregionccl/BUILD.bazel b/pkg/ccl/multiregionccl/BUILD.bazel index f6d4006e8373..89e961013a6c 100644 --- a/pkg/ccl/multiregionccl/BUILD.bazel +++ b/pkg/ccl/multiregionccl/BUILD.bazel @@ -36,6 +36,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", + "//pkg/util/syncutil", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], diff --git a/pkg/ccl/multiregionccl/regional_by_row_test.go b/pkg/ccl/multiregionccl/regional_by_row_test.go index 0fdf86aafd63..1400f74ac29f 100644 --- a/pkg/ccl/multiregionccl/regional_by_row_test.go +++ b/pkg/ccl/multiregionccl/regional_by_row_test.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -35,6 +36,42 @@ import ( // REGIONAL BY ROW tests are defined in multiregionccl as REGIONAL BY ROW // requires CCL 
to operate.
 
+// createTestMultiRegionCluster creates a test cluster with numServers number of
+// nodes with the provided testing knobs applied to each of the nodes. Every
+// node is placed in its own locality, named "us-east1", "us-east2", and so on.
+func createTestMultiRegionCluster(
+	t *testing.T, numServers int, knobs base.TestingKnobs,
+) (serverutils.TestClusterInterface, *gosql.DB, func()) {
+	serverArgs := make(map[int]base.TestServerArgs)
+	regionNames := make([]string, numServers)
+	for i := 0; i < numServers; i++ {
+		// "us-east1", "us-east2"...
+		regionNames[i] = fmt.Sprintf("us-east%d", i+1)
+	}
+
+	for i := 0; i < numServers; i++ {
+		serverArgs[i] = base.TestServerArgs{
+			Knobs: knobs,
+			Locality: roachpb.Locality{
+				Tiers: []roachpb.Tier{{Key: "region", Value: regionNames[i]}},
+			},
+		}
+	}
+
+	tc := serverutils.StartNewTestCluster(t, numServers, base.TestClusterArgs{
+		ServerArgsPerNode: serverArgs,
+	})
+
+	ctx := context.Background()
+	cleanup := func() {
+		tc.Stopper().Stop(ctx)
+	}
+
+	sqlDB := tc.ServerConn(0)
+
+	return tc, sqlDB, cleanup
+}
+
 // TestAlterTableLocalityRegionalByRowError tests an alteration involving
 // REGIONAL BY ROW which gets its async job interrupted by some sort of
 // error or cancellation. After this, we expect the table to retain
@@ -350,3 +387,61 @@ USE t;
 		})
 	}
 }
+
+// TestRepartitionFailureRollback adds and removes a region from a multi-region
+// database, but injects a non-retryable error before regional by row tables
+// can be repartitioned. The expectation is that we should roll back changes to
+// the multi-region enum, reverting to the state before the region add/remove
+// transaction was executed.
+func TestRepartitionFailureRollback(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	// Decrease the adopt loop interval so that retries happen quickly.
+	defer sqltestutils.SetTestJobsAdoptInterval()()
+
+	numServers := 3
+
+	var mu syncutil.Mutex
+	errorReturned := false
+	knobs := base.TestingKnobs{
+		SQLTypeSchemaChanger: &sql.TypeSchemaChangerTestingKnobs{
+			RunBeforeMultiRegionUpdates: func() error {
+				mu.Lock()
+				defer mu.Unlock()
+				if !errorReturned {
+					errorReturned = true
+					return errors.New("boom")
+				}
+				return nil
+			},
+		},
+	}
+	_, sqlDB, cleanup := createTestMultiRegionCluster(t, numServers, knobs)
+	defer cleanup()
+
+	_, err := sqlDB.Exec(
+		`CREATE DATABASE db WITH PRIMARY REGION "us-east1" REGIONS "us-east2";
+CREATE TABLE db.t(k INT PRIMARY KEY) LOCALITY REGIONAL BY ROW`)
+	require.NoError(t, err)
+
+	// The first attempt should abort with the error injected by the testing
+	// knob.
+	_, err = sqlDB.Exec(`BEGIN;
+ALTER DATABASE db ADD REGION "us-east3";
+ALTER DATABASE db DROP REGION "us-east2";
+COMMIT;`)
+	require.Error(t, err, "boom")
+
+	// The cleanup job should kick in and revert the changes that happened to the
+	// type descriptor in the user txn. We should eventually be able to add
+	// "us-east3" and remove "us-east2".
+	testutils.SucceedsSoon(t, func() error {
+		_, err = sqlDB.Exec(`BEGIN;
+	ALTER DATABASE db ADD REGION "us-east3";
+	ALTER DATABASE db DROP REGION "us-east2";
+	COMMIT;`)
+		return err
+	})
+}
diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go
index 046d5d28236c..64c7cd9263b3 100644
--- a/pkg/server/server_sql.go
+++ b/pkg/server/server_sql.go
@@ -873,7 +873,14 @@ func (s *SQLServer) preStart(
 			PlanHookMaker: func(opName string, txn *kv.Txn, user security.SQLUsername) (interface{}, func()) {
 				// This is a hack to get around a Go package dependency cycle. See comment
 				// in sql/jobs/registry.go on planHookMaker.
-				return sql.NewInternalPlanner(opName, txn, user, &sql.MemoryMetrics{}, s.execCfg, sessiondatapb.SessionData{})
+				return sql.NewInternalPlanner(
+					opName,
+					txn,
+					user,
+					&sql.MemoryMetrics{},
+					s.execCfg,
+					sessiondatapb.SessionData{},
+				)
 			},
 		},
 		scheduledjobs.ProdJobSchedulerEnv,
diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go
index 7678d47935c9..dccc8a1e6568 100644
--- a/pkg/sql/catalog/descs/collection.go
+++ b/pkg/sql/catalog/descs/collection.go
@@ -1313,6 +1313,21 @@ func (tc *Collection) AddUncommittedDescriptor(desc catalog.MutableDescriptor) e
 	return err
 }
 
+// maybeRefreshCachedFieldsOnTypeDescriptor refreshes the cached fields on a
+// Mutable if the given descriptor is a type descriptor, and acts as a
+// pass-through for all other descriptors. Mutable type descriptors are
+// refreshed to reconstruct enumMetadata. This ensures that table hydration
+// following a type descriptor update (in the same txn) happens using the
+// modified fields.
+func maybeRefreshCachedFieldsOnTypeDescriptor(
+	desc catalog.MutableDescriptor,
+) (catalog.MutableDescriptor, error) {
+	typeDesc, ok := desc.(catalog.TypeDescriptor)
+	if ok {
+		return typedesc.UpdateCachedFieldsOnModifiedMutable(typeDesc)
+	}
+	return desc, nil
+}
+
 func (tc *Collection) addUncommittedDescriptor(
 	desc catalog.MutableDescriptor,
 ) (*uncommittedDescriptor, error) {
@@ -1324,8 +1339,13 @@ func (tc *Collection) addUncommittedDescriptor(
 		desc.GetID(), version, origVersion)
 	}
 
+	mutable, err := maybeRefreshCachedFieldsOnTypeDescriptor(desc)
+	if err != nil {
+		return nil, err
+	}
+
 	ud := &uncommittedDescriptor{
-		mutable:   desc,
+		mutable:   mutable,
 		immutable: desc.ImmutableCopy(),
 	}
diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go
index f312fd02a2f8..152a2191d474 100644
--- a/pkg/sql/catalog/tabledesc/structured.go
+++ b/pkg/sql/catalog/tabledesc/structured.go
@@ -4048,7 +4048,7 @@ func (desc *wrapper) GetRegionalByTableRegion() (descpb.RegionName, error) {
 // REGIONAL BY ROW table.
 func (desc *wrapper) GetRegionalByRowTableRegionColumnName() (tree.Name, error) {
 	if !desc.IsLocalityRegionalByRow() {
-		return "", errors.AssertionFailedf("%s is not REGIONAL BY ROW", desc.Name)
+		return "", errors.AssertionFailedf("%q is not a REGIONAL BY ROW table", desc.Name)
 	}
 	colName := desc.LocalityConfig.GetRegionalByRow().As
 	if colName == nil {
diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go
index ce3da9a2aeb4..afcffdd9cc48 100644
--- a/pkg/sql/catalog/typedesc/type_desc.go
+++ b/pkg/sql/catalog/typedesc/type_desc.go
@@ -113,6 +113,23 @@ func NewExistingMutable(desc descpb.TypeDescriptor) *Mutable {
 	}
 }
 
+// UpdateCachedFieldsOnModifiedMutable refreshes the Immutable field by
+// reconstructing it. This means that the fields used to fill enumMetadata
+// (readOnly, logicalReps, physicalReps) are reconstructed to reflect the
+// modified Mutable's state. This allows us to hydrate tables correctly even
+// when preceded by a type descriptor modification in the same transaction.
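+//
+// A minimal usage sketch (illustrative only; descs.Collection performs exactly
+// this step when a modified type descriptor is re-added as an uncommitted
+// descriptor):
+//
+//   refreshed, err := typedesc.UpdateCachedFieldsOnModifiedMutable(typeDesc)
+//   if err != nil {
+//       return err
+//   }
+//   // refreshed.Immutable now reflects typeDesc's in-txn modifications.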
+func UpdateCachedFieldsOnModifiedMutable(desc catalog.TypeDescriptor) (*Mutable, error) {
+	imm := makeImmutable(*protoutil.Clone(desc.TypeDesc()).(*descpb.TypeDescriptor))
+	imm.isUncommittedVersion = desc.IsUncommittedVersion()
+
+	mutable, ok := desc.(*Mutable)
+	if !ok {
+		return nil, errors.AssertionFailedf("type descriptor was not mutable")
+	}
+	mutable.Immutable = imm
+	return mutable, nil
+}
+
 // NewImmutable returns an Immutable from the given TypeDescriptor.
 func NewImmutable(desc descpb.TypeDescriptor) *Immutable {
 	m := makeImmutable(desc)
diff --git a/pkg/sql/job_exec_context.go b/pkg/sql/job_exec_context.go
index e8a94efd6377..91ce4efc1c5c 100644
--- a/pkg/sql/job_exec_context.go
+++ b/pkg/sql/job_exec_context.go
@@ -31,7 +31,15 @@ type plannerJobExecContext struct {
 func MakeJobExecContext(
 	opName string, user security.SQLUsername, memMetrics *MemoryMetrics, execCfg *ExecutorConfig,
 ) (JobExecContext, func()) {
-	p, close := newInternalPlanner(opName, nil /*txn*/, user, memMetrics, execCfg, sessiondatapb.SessionData{})
+	plannerInterface, close := NewInternalPlanner(
+		opName,
+		nil, /*txn*/
+		user,
+		memMetrics,
+		execCfg,
+		sessiondatapb.SessionData{},
+	)
+	p := plannerInterface.(*planner)
 	return &plannerJobExecContext{p: p}, close
 }
diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go
index d25170cf36f1..e5369ee367ec 100644
--- a/pkg/sql/planner.go
+++ b/pkg/sql/planner.go
@@ -234,6 +234,24 @@ func (evalCtx *extendedEvalContext) setSessionID(sessionID ClusterWideID) {
 // growth in the log.
 var noteworthyInternalMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_INTERNAL_MEMORY_USAGE", 1<<20 /* 1 MB */)
 
+// internalPlannerParams encapsulates configurable planner fields. The defaults
+// are set in newInternalPlanner.
+type internalPlannerParams struct {
+	collection *descs.Collection
+}
+
+// InternalPlannerParamsOption is an option that can be passed to
+// NewInternalPlanner.
+type InternalPlannerParamsOption func(*internalPlannerParams)
+
+// WithDescCollection configures the planner with the provided collection
+// instead of the default (creating a new one from scratch).
+func WithDescCollection(collection *descs.Collection) InternalPlannerParamsOption {
+	return func(params *internalPlannerParams) {
+		params.collection = collection
+	}
+}
+
 // NewInternalPlanner is an exported version of newInternalPlanner. It
 // returns an interface{} so it can be used outside of the sql package.
 func NewInternalPlanner(
@@ -243,8 +261,9 @@ func NewInternalPlanner(
 	memMetrics *MemoryMetrics,
 	execCfg *ExecutorConfig,
 	sessionData sessiondatapb.SessionData,
+	opts ...InternalPlannerParamsOption,
 ) (interface{}, func()) {
-	return newInternalPlanner(opName, txn, user, memMetrics, execCfg, sessionData)
+	return newInternalPlanner(opName, txn, user, memMetrics, execCfg, sessionData, opts...)
 }
 
 // newInternalPlanner creates a new planner instance for internal usage. This
@@ -262,7 +281,21 @@ func newInternalPlanner(
 	memMetrics *MemoryMetrics,
 	execCfg *ExecutorConfig,
 	sessionData sessiondatapb.SessionData,
+	opts ...InternalPlannerParamsOption,
 ) (*planner, func()) {
+	// Default parameters which may be overridden by the supplied options.
+	params := &internalPlannerParams{
+		// The table collection used by the internal planner does not rely on the
+		// deprecatedDatabaseCache and there are no subscribers to the
+		// deprecatedDatabaseCache, so we can leave it uninitialized.
+ // Furthermore, we're not concerned about the efficiency of querying tables + // with user-defined types, hence the nil hydratedTables. + collection: descs.NewCollection(execCfg.Settings, execCfg.LeaseManager, nil /* hydratedTables */), + } + for _, opt := range opts { + opt(params) + } + // We need a context that outlives all the uses of the planner (since the // planner captures it in the EvalCtx, and so does the cleanup function that // we're going to return. We just create one here instead of asking the caller @@ -280,12 +313,6 @@ func newInternalPlanner( } sd.SessionData.Database = "system" sd.SessionData.UserProto = user.EncodeProto() - // The table collection used by the internal planner does not rely on the - // deprecatedDatabaseCache and there are no subscribers to the - // deprecatedDatabaseCache, so we can leave it uninitialized. - // Furthermore, we're not concerned about the efficiency of querying tables - // with user-defined types, hence the nil hydratedTables. - tables := descs.NewCollection(execCfg.Settings, execCfg.LeaseManager, nil /* hydratedTables */) dataMutator := &sessionDataMutator{ data: sd, defaults: SessionDefaults(map[string]string{ @@ -324,7 +351,7 @@ func newInternalPlanner( noteworthyInternalMemoryUsageBytes, execCfg.Settings) p.extendedEvalCtx = internalExtendedEvalCtx( - ctx, sd, dataMutator, tables, txn, ts, ts, execCfg, plannerMon, + ctx, sd, dataMutator, params.collection, txn, ts, ts, execCfg, plannerMon, ) p.extendedEvalCtx.Planner = p p.extendedEvalCtx.PrivilegedAccessor = p @@ -344,7 +371,7 @@ func newInternalPlanner( p.extendedEvalCtx.ExecCfg = execCfg p.extendedEvalCtx.Placeholders = &p.semaCtx.Placeholders p.extendedEvalCtx.Annotations = &p.semaCtx.Annotations - p.extendedEvalCtx.Descs = tables + p.extendedEvalCtx.Descs = params.collection p.queryCacheSession.Init() p.optPlanningCtx.init(p) diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 9b9f69983c46..a3ae3922f4cb 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -248,7 +248,15 @@ func (sc *SchemaChanger) backfillQueryIntoTable( // Create an internal planner as the planner used to serve the user query // would have committed by this point. - p, cleanup := NewInternalPlanner(desc, txn, security.RootUserName(), &MemoryMetrics{}, sc.execCfg, sessiondatapb.SessionData{}) + p, cleanup := NewInternalPlanner( + desc, + txn, + security.RootUserName(), + &MemoryMetrics{}, + sc.execCfg, + sessiondatapb.SessionData{}, + ) + defer cleanup() localPlanner := p.(*planner) stmt, err := parser.ParseOne(query) diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index 3bb56a4e26d9..f08c80e55d91 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/errors" @@ -165,6 +166,10 @@ type TypeSchemaChangerTestingKnobs struct { // RunAfterOnFailOrCancel runs after OnFailOrCancel completes, if // OnFailOrCancel is triggered. RunAfterOnFailOrCancel func() error + // RunBeforeMultiRegionUpdates is a multi-region specific testing knob which + // runs after enum promotion and before multi-region updates (such as + // repartitioning tables, applying zone configs etc.) 
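+	//
+	// An illustrative wiring in a test, mirroring TestRepartitionFailureRollback
+	// (a sketch only):
+	//
+	//   knobs := base.TestingKnobs{
+	//       SQLTypeSchemaChanger: &sql.TypeSchemaChangerTestingKnobs{
+	//           RunBeforeMultiRegionUpdates: func() error {
+	//               return errors.New("boom") // force the txn to roll back
+	//           },
+	//       },
+	//   }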
+	RunBeforeMultiRegionUpdates func() error
 }
 
 // ModuleTestingKnobs implements the ModuleTestingKnobs interface.
@@ -254,6 +259,11 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error {
 		return err
 	}
 
+	// A list of multi-region tables that were repartitioned as a result of
+	// promotion/demotion of enum values. This is used to track tables whose
+	// leases need to be invalidated.
+	var repartitionedTables []descpb.ID
+
 	// Now that we've ascertained that the enum values can be removed, we can
 	// actually go about modifying the type descriptor.
@@ -283,6 +293,31 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error {
 		); err != nil {
 			return err
 		}
+
+		// Additional work must be performed once the promotion/demotion of enum
+		// members has been taken care of. In particular, index partitions for
+		// REGIONAL BY ROW tables must be updated to reflect the new region values
+		// available.
+		if typeDesc.Kind == descpb.TypeDescriptor_MULTIREGION_ENUM {
+			immut, err := descsCol.GetImmutableTypeByID(ctx, txn, t.typeID, tree.ObjectLookupFlags{})
+			if err != nil {
+				return err
+			}
+			if fn := t.execCfg.TypeSchemaChangerTestingKnobs.RunBeforeMultiRegionUpdates; fn != nil {
+				// Only propagate a non-nil error; a nil return from the knob must
+				// not short-circuit the repartitioning work below.
+				if err := fn(); err != nil {
+					return err
+				}
+			}
+			repartitionedTables, err = repartitionRegionalByRowTables(
+				ctx,
+				immut,
+				txn,
+				t.execCfg,
+				descsCol,
+			)
+			if err != nil {
+				return err
+			}
+		}
+
 		return txn.Run(ctx, b)
 	}
 	if err := descs.Txn(
@@ -291,14 +326,31 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error {
 		); err != nil {
 			return err
 		}
-	}
 
-	// Finally, make sure all of the leases are updated.
-	if err := WaitToUpdateLeases(ctx, leaseMgr, t.typeID); err != nil {
-		if errors.Is(err, catalog.ErrDescriptorNotFound) {
-			return nil
+		// If any tables were repartitioned, make sure their leases are updated as
+		// well.
+		for _, tbID := range repartitionedTables {
+			if err := WaitToUpdateLeases(ctx, leaseMgr, tbID); err != nil {
+				if errors.Is(err, catalog.ErrDescriptorNotFound) {
+					// Swallow the error and move on to the next table: the
+					// descriptor was presumably dropped.
+					log.Infof(ctx,
+						"could not find table %d to be repartitioned when adding/removing regions on "+
+							"enum %d, assuming it was dropped and moving on",
+						tbID,
+						t.typeID,
+					)
+					continue
+				}
+				return err
+			}
+		}
+
+		// Finally, make sure all of the type descriptor leases are updated.
+		if err := WaitToUpdateLeases(ctx, leaseMgr, t.typeID); err != nil {
+			if errors.Is(err, catalog.ErrDescriptorNotFound) {
+				return nil
+			}
+			return err
 		}
-		return err
 	}
 
 	// If the type is being dropped, remove the descriptor here.
@@ -314,6 +366,122 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error {
 	return nil
 }
 
+// repartitionRegionalByRowTables takes a multi-region enum and re-partitions
+// all REGIONAL BY ROW tables in the enclosing database such that there is a
+// partition and corresponding zone configuration for every PUBLIC enum member
+// (region).
+//
+// This currently doesn't work too well if there are READ ONLY members on the
+// type. This is because we create the partitioning clause based on the regions
+// on the database descriptor, which may include READ ONLY members, and
+// partitioning on a READ ONLY member doesn't work. This will go away once
+// https://github.com/cockroachdb/cockroach/issues/60620 is fixed.
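+//
+// For illustration: for a database with PUBLIC regions "us-east1" and
+// "us-east2", every index of a REGIONAL BY ROW table ends up implicitly
+// partitioned as if by the following clause (a sketch; the partitioning is
+// actually built via partitionByForRegionalByRow and CreatePartitioning, not
+// by parsing SQL):
+//
+//   PARTITION ALL BY LIST (crdb_region) (
+//       PARTITION "us-east1" VALUES IN ('us-east1'),
+//       PARTITION "us-east2" VALUES IN ('us-east2')
+//   )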
+func repartitionRegionalByRowTables( + ctx context.Context, + typeDesc *typedesc.Immutable, + txn *kv.Txn, + execCfg *ExecutorConfig, + descsCol *descs.Collection, +) ([]descpb.ID, error) { + var repartitionedTableIDs []descpb.ID + if typeDesc.GetKind() != descpb.TypeDescriptor_MULTIREGION_ENUM { + return repartitionedTableIDs, errors.AssertionFailedf( + "expected multi-region enum, but found type descriptor of kind: %v", typeDesc.GetKind(), + ) + } + p, cleanup := NewInternalPlanner( + "repartition-regional-by-row-tables", + txn, + security.RootUserName(), + &MemoryMetrics{}, + execCfg, + sessiondatapb.SessionData{}, + WithDescCollection(descsCol), + ) + defer cleanup() + localPlanner := p.(*planner) + + _, dbDesc, err := localPlanner.Descriptors().GetImmutableDatabaseByID( + ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{Required: true}) + if err != nil { + return nil, err + } + allDescs, err := localPlanner.Descriptors().GetAllDescriptors(ctx, txn) + if err != nil { + return nil, err + } + lCtx := newInternalLookupCtx(ctx, allDescs, dbDesc, nil /* fallback */) + + b := txn.NewBatch() + for _, tbID := range lCtx.tbIDs { + tableDesc, err := localPlanner.Descriptors().GetMutableTableByID( + ctx, txn, tbID, tree.ObjectLookupFlags{ + CommonLookupFlags: tree.CommonLookupFlags{ + Required: true, + IncludeDropped: true, + }, + }) + if err != nil { + return nil, err + } + + if !tableDesc.IsLocalityRegionalByRow() || tableDesc.Dropped() { + // We only need to re-partition REGIONAL BY ROW tables. Even then, we + // don't need to (can't) repartition a REGIONAL BY ROW table if it has + // been dropped. + continue + } + + colName, err := tableDesc.GetRegionalByRowTableRegionColumnName() + if err != nil { + return nil, err + } + partitionAllBy := partitionByForRegionalByRow(*dbDesc.RegionConfig, colName) + + // Update the partitioning on all indexes of the table that aren't being + // dropped. + for _, index := range tableDesc.NonDropIndexes() { + newIdx, err := CreatePartitioning( + ctx, + localPlanner.extendedEvalCtx.Settings, + localPlanner.EvalContext(), + tableDesc, + *index.IndexDesc(), + partitionAllBy, + nil, /* allowedNewColumnName*/ + true, /* allowImplicitPartitioning */ + ) + if err != nil { + return nil, err + } + // Update the index descriptor proto's partitioning. + index.IndexDesc().Partitioning = newIdx.Partitioning + } + + // Update the zone configurations now that the partition's been added. + if err := ApplyZoneConfigForMultiRegionTable( + ctx, + txn, + localPlanner.ExecCfg(), + *dbDesc.RegionConfig, + tableDesc, + ApplyZoneConfigForMultiRegionTableOptionTableAndIndexes, + ); err != nil { + return nil, err + } + + if err := localPlanner.Descriptors().WriteDescToBatch(ctx, false /* kvTrace */, tableDesc, b); err != nil { + return nil, err + } + + repartitionedTableIDs = append(repartitionedTableIDs, tbID) + } + if err := txn.Run(ctx, b); err != nil { + return nil, err + } + + return repartitionedTableIDs, nil +} + // isTransitioningInCurrentJob returns true if the given member is either being // added or removed in the current job. func (t *typeSchemaChanger) isTransitioningInCurrentJob( diff --git a/pkg/sql/values_test.go b/pkg/sql/values_test.go index 5ce075d0c17e..77db29ef4987 100644 --- a/pkg/sql/values_test.go +++ b/pkg/sql/values_test.go @@ -48,10 +48,15 @@ func makeTestPlanner() *planner { } // TODO(andrei): pass the cleanup along to the caller. 
- p, _ /* cleanup */ := newInternalPlanner( - "test", nil /* txn */, security.RootUserName(), &MemoryMetrics{}, &execCfg, sessiondatapb.SessionData{}, + p, _ /* cleanup */ := NewInternalPlanner( + "test", + nil, /* txn */ + security.RootUserName(), + &MemoryMetrics{}, + &execCfg, + sessiondatapb.SessionData{}, ) - return p + return p.(*planner) } func TestValues(t *testing.T) {