diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index 15116dde32a9..36a45dc53804 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -1652,6 +1652,9 @@ ALTER TABLE regional_by_row_table ADD CONSTRAINT unique_b_a UNIQUE(b, a) # We should plan uniqueness checks for all unique indexes in # REGIONAL BY ROW tables. +# TODO(treilly): The constraint check for uniq_idx should use uniq_idx but due +# to stats issues w/ empty stats, partial indexes and multicol stats its not. +# Hopefully fixing #67583 (and possibly #67479) will resolve this. query T SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table (pk, pk2, a, b) VALUES (1, 1, 1, 1)] OFFSET 2 ---- @@ -1698,9 +1701,9 @@ SELECT * FROM [EXPLAIN INSERT INTO regional_by_row_table (pk, pk2, a, b) VALUES │ └── • error if rows │ │ │ └── • lookup join (semi) -│ │ table: regional_by_row_table@uniq_idx (partial index) -│ │ lookup condition: (column3 = a) AND (crdb_region = 'ap-southeast-2') -│ │ remote lookup condition: (column3 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1')) +│ │ table: regional_by_row_table@new_idx +│ │ lookup condition: ((column3 = a) AND (crdb_region = 'ap-southeast-2')) AND (b > 0) +│ │ remote lookup condition: ((column3 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1'))) AND (b > 0) │ │ pred: (column1 != pk) OR (crdb_region_default != crdb_region) │ │ │ └── • filter @@ -1728,6 +1731,9 @@ INSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east- # The conflict columns in an upsert should only include the primary key, # not the region column. +# TODO(treilly): The constraint check for uniq_idx should use uniq_idx but due +# to stats issues w/ empty stats, partial indexes and multicol stats its not. +# Hopefully fixing #67583 (and possibly #67479) will resolve this. 
query T SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east-1', 2, 3, 2, 3)] OFFSET 2 ---- @@ -1779,9 +1785,9 @@ SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, │ └── • error if rows │ │ │ └── • lookup join (semi) -│ │ table: regional_by_row_table@uniq_idx (partial index) -│ │ lookup condition: (column4 = a) AND (crdb_region = 'ap-southeast-2') -│ │ remote lookup condition: (column4 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1')) +│ │ table: regional_by_row_table@new_idx +│ │ lookup condition: ((column4 = a) AND (crdb_region = 'ap-southeast-2')) AND (b > 0) +│ │ remote lookup condition: ((column4 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1'))) AND (b > 0) │ │ pred: (upsert_pk != pk) OR (column1 != crdb_region) │ │ │ └── • filter @@ -1803,6 +1809,9 @@ SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, └── • scan buffer label: buffer 1 +# TODO(treilly): The constraint check for uniq_idx should use uniq_idx but due +# to stats issues w/ empty stats, partial indexes and multicol stats its not. +# Hopefully fixing #67583 (and possibly #67479) will resolve this. 
query T SELECT * FROM [EXPLAIN UPSERT INTO regional_by_row_table (crdb_region, pk, pk2, a, b) VALUES ('us-east-1', 23, 24, 25, 26), ('ca-central-1', 30, 30, 31, 32)] OFFSET 2 @@ -1850,9 +1859,9 @@ VALUES ('us-east-1', 23, 24, 25, 26), ('ca-central-1', 30, 30, 31, 32)] OFFSET 2 │ └── • error if rows │ │ │ └── • lookup join (semi) -│ │ table: regional_by_row_table@uniq_idx (partial index) -│ │ lookup condition: (column4 = a) AND (crdb_region = 'ap-southeast-2') -│ │ remote lookup condition: (column4 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1')) +│ │ table: regional_by_row_table@new_idx +│ │ lookup condition: ((column4 = a) AND (crdb_region = 'ap-southeast-2')) AND (b > 0) +│ │ remote lookup condition: ((column4 = a) AND (crdb_region IN ('ca-central-1', 'us-east-1'))) AND (b > 0) │ │ pred: (upsert_pk != pk) OR (column1 != crdb_region) │ │ │ └── • filter diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index 0caa0453e205..a01b4df3a4df 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -2244,6 +2244,18 @@ func (s Span) ContainsKey(key Key) bool { return bytes.Compare(key, s.Key) >= 0 && bytes.Compare(key, s.EndKey) < 0 } +// CompareKey returns -1 if the key precedes the span start, 0 if its contained +// by the span and 1 if its after the end of the span. +func (s Span) CompareKey(key Key) int { + if bytes.Compare(key, s.Key) >= 0 { + if bytes.Compare(key, s.EndKey) < 0 { + return 0 + } + return 1 + } + return -1 +} + // ProperlyContainsKey returns whether the span properly contains the given key. 
func (s Span) ProperlyContainsKey(key Key) bool { return bytes.Compare(key, s.Key) > 0 && bytes.Compare(key, s.EndKey) < 0 diff --git a/pkg/sql/execinfrapb/processors_sql.pb.go b/pkg/sql/execinfrapb/processors_sql.pb.go index 665f04cc73c9..6b1f18893486 100644 --- a/pkg/sql/execinfrapb/processors_sql.pb.go +++ b/pkg/sql/execinfrapb/processors_sql.pb.go @@ -845,12 +845,13 @@ type JoinReaderSpec struct { // more complicated than a simple equality between input columns and index // columns. In this case, LookupExpr specifies the expression that will be // used to construct the spans for each lookup. Currently, the only - // expressions supported are conjunctions (AND expressions) of equality and - // IN expressions, specifically: + // expressions supported are conjunctions (AND expressions) of equality, IN + // expressions, and simple inequalities, specifically: // 1. equalities between two variables (one from the input and one from the // index) representing the equi-join condition(s), // 2. equalities between an index column and a constant, and // 3. IN expressions between an index column and a tuple of constants. + // 4. LT,GT,GE,LE between an index var and a constant. // // Variables in this expression are assigned in the same way as the ON // condition below. Assuming that the left stream has N columns and the right diff --git a/pkg/sql/execinfrapb/processors_sql.proto b/pkg/sql/execinfrapb/processors_sql.proto index c6faafb2ca8d..e348c922bd31 100644 --- a/pkg/sql/execinfrapb/processors_sql.proto +++ b/pkg/sql/execinfrapb/processors_sql.proto @@ -280,12 +280,13 @@ message JoinReaderSpec { // more complicated than a simple equality between input columns and index // columns. In this case, LookupExpr specifies the expression that will be // used to construct the spans for each lookup. 
Currently, the only - // expressions supported are conjunctions (AND expressions) of equality and - // IN expressions, specifically: + // expressions supported are conjunctions (AND expressions) of equality, IN + // expressions, and simple inequalities, specifically: // 1. equalities between two variables (one from the input and one from the // index) representing the equi-join condition(s), // 2. equalities between an index column and a constant, and // 3. IN expressions between an index column and a tuple of constants. + // 4. LT,GT,GE,LE between an index var and a constant. // // Variables in this expression are assigned in the same way as the ON // condition below. Assuming that the left stream has N columns and the right diff --git a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join index 2b683114a0ba..160f05ce5d77 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join @@ -75,8 +75,7 @@ vectorized: true │ columns: (a, b, c, d, e, f) │ estimated row count: 33 │ table: def@primary -│ equality: (b) = (f) -│ pred: e > 1 +│ lookup condition: (f = b) AND (e > 1) │ └── • scan columns: (a, b, c) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join_spans b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join_spans new file mode 100644 index 000000000000..5acfa1590fa7 --- /dev/null +++ b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join_spans @@ -0,0 +1,849 @@ +# LogicTest: local + +statement ok +CREATE TABLE metrics ( + id SERIAL PRIMARY KEY, + nullable INT, + name STRING, + INDEX name_index (name) +) + +statement ok +insert into metrics (id,nullable,name) values (1,NULL,'cpu'), (2,1,'cpu'), (3,NULL,'mem'), (4,2,'disk') + +statement ok +CREATE TABLE metric_values ( + metric_id INT8, + time TIMESTAMPTZ, + nullable INT, + value INT8, + PRIMARY KEY (metric_id, time), + INDEX secondary (metric_id, nullable, time) +) + +statement ok +insert into 
metric_values (metric_id, time, nullable, value) values + (1,'2020-01-01 00:00:00+00:00',NULL,0), + (1,'2020-01-01 00:00:01+00:00',1,1), + (2,'2020-01-01 00:00:00+00:00',NULL,2), + (2,'2020-01-01 00:00:01+00:00',2,3), + (2,'2020-01-01 00:01:01+00:00',-11,4), + (2,'2020-01-01 00:01:02+00:00',-10,5), + (3,'2020-01-01 00:01:00+00:00',NULL,6), + (3,'2020-01-01 00:01:01+00:00',3,7) + +# metric_values_desc is a descending time version of metric_values. +statement ok +CREATE TABLE metric_values_desc ( + metric_id INT8, + time TIMESTAMPTZ, + nullable INT, + value INT8, + PRIMARY KEY (metric_id, time DESC), + INDEX secondary (metric_id, nullable, time DESC) +) + +statement ok +insert into metric_values_desc select * from metric_values + +# The final statements below need some stats to chose the lookup join. +statement ok +ALTER TABLE metric_values INJECT STATISTICS +'[ + { + "columns": ["metric_id"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 1000, + "distinct_count": 10 + }, + { + "columns": ["time"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 1000, + "distinct_count": 1000 + }, + { + "columns": ["nullable"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 1000, + "distinct_count": 10, + "histo_buckets": [ + {"num_eq": 0, "num_range": 0, "distinct_range": 0, "upper_bound": "-10"}, + {"num_eq": 0, "num_range": 1000, "distinct_range": 10, "upper_bound": "0"} + ], + "histo_col_type": "INT" + }, + { + "columns": ["value"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 1000, + "distinct_count": 1000 + } +]' + +statement ok +ALTER TABLE metrics INJECT STATISTICS +'[ + { + "columns": ["id"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 10, + "distinct_count": 10 + }, + { + "columns": ["nullable"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 10, + "distinct_count": 10 + }, + { + "columns": ["name"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + 
"row_count": 10, + "distinct_count": 10 + } +]' + +query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + time > '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 33 +│ order: +value +│ +└── • lookup join + │ estimated row count: 33 + │ table: metric_values@primary + │ lookup condition: (metric_id = id) AND ("time" > '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc +INNER JOIN metrics +ON metric_id=id +WHERE + time > '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ order: +value +│ +└── • lookup join + │ table: metric_values_desc@primary + │ lookup condition: (metric_id = id) AND ("time" > '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time >= '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 33 +│ order: +value +│ +└── • lookup join + │ estimated row count: 33 + │ table: metric_values@primary + │ lookup condition: (metric_id = id) AND ("time" >= '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc 
+INNER JOIN metrics +ON metric_id=id +WHERE + time >= '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ order: +value +│ +└── • lookup join + │ table: metric_values_desc@primary + │ lookup condition: (metric_id = id) AND ("time" >= '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time < '2020-01-01 00:00:00+00:00' AND + name='cpu' +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 33 +│ table: metric_values@primary +│ lookup condition: (metric_id = id) AND ("time" < '2020-01-01 00:00:00+00:00') +│ +└── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc +INNER JOIN metrics +ON metric_id=id +WHERE + time < '2020-01-01 00:00:00+00:00' AND + name='cpu' +---- +distribution: local +vectorized: true +· +• lookup join +│ table: metric_values_desc@primary +│ lookup condition: (metric_id = id) AND ("time" < '2020-01-01 00:00:00+00:00') +│ +└── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time <= '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 33 +│ order: +value +│ +└── • lookup join + │ estimated row count: 33 + │ table: 
metric_values@primary + │ lookup condition: (metric_id = id) AND ("time" <= '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc +INNER JOIN metrics +ON metric_id=id +WHERE + time <= '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ order: +value +│ +└── • lookup join + │ table: metric_values_desc@primary + │ lookup condition: (metric_id = id) AND ("time" <= '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time < '2020-01-01 00:00:10+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 33 +│ order: +value +│ +└── • lookup join + │ estimated row count: 33 + │ table: metric_values@primary + │ lookup condition: (metric_id = id) AND ("time" < '2020-01-01 00:00:10+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc +INNER JOIN metrics +ON metric_id=id +WHERE + time < '2020-01-01 00:00:10+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ order: +value +│ +└── • lookup join + │ table: metric_values_desc@primary + │ lookup condition: (metric_id = id) AND ("time" < '2020-01-01 00:00:10+00:00') + │ + └── • index join + │ estimated row count: 1 + │ 
table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 11 +│ order: +value +│ +└── • lookup join + │ estimated row count: 11 + │ table: metric_values@primary + │ lookup condition: (metric_id = id) AND (("time" >= '2020-01-01 00:00:00+00:00') AND ("time" <= '2020-01-01 00:10:00+00:00')) + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +query T +EXPLAIN +SELECT * +FROM metric_values_desc +INNER JOIN metrics +ON metric_id=id +WHERE + time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ order: +value +│ +└── • lookup join + │ table: metric_values_desc@primary + │ lookup condition: (metric_id = id) AND (("time" >= '2020-01-01 00:00:00+00:00') AND ("time" <= '2020-01-01 00:10:00+00:00')) + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test lookup conditions w/ a left join. 
+query T +EXPLAIN +SELECT * +FROM metrics +LEFT JOIN metric_values +ON metric_id=id +AND time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' +AND name='cpu' +ORDER BY value, id +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 11 +│ order: +value,+id +│ +└── • lookup join (left outer) + │ estimated row count: 11 + │ table: metric_values@primary + │ lookup condition: (metric_id = id) AND (("time" >= '2020-01-01 00:00:00+00:00') AND ("time" <= '2020-01-01 00:10:00+00:00')) + │ pred: name = 'cpu' + │ + └── • scan + estimated row count: 10 (100% of the table; stats collected ago) + table: metrics@primary + spans: FULL SCAN + +# Test lookup conditions w/ a semi join. +query T +EXPLAIN +SELECT * +FROM metrics m +WHERE EXISTS (SELECT * FROM metric_values mv WHERE mv.metric_id = m.id AND time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00') +ORDER BY m.id +---- +distribution: local +vectorized: true +· +• lookup join (semi) +│ estimated row count: 10 +│ table: metric_values@primary +│ lookup condition: (metric_id = id) AND (("time" >= '2020-01-01 00:00:00+00:00') AND ("time" <= '2020-01-01 00:10:00+00:00')) +│ +└── • scan + estimated row count: 10 (100% of the table; stats collected ago) + table: metrics@primary + spans: FULL SCAN + +# Test NULL values in pre-join where conditions. 
+query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +AND v.nullable = m.nullable +WHERE + time > '2020-01-01 00:00:00+00:00' AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 3 +│ order: +value +│ +└── • lookup join + │ estimated row count: 3 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 3 + │ table: metric_values@secondary + │ lookup condition: ((metric_id = id) AND (nullable = nullable)) AND ("time" > '2020-01-01 00:00:00+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test NULL values in bounded lookup span. +query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + v.nullable BETWEEN -20 AND -10 AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@secondary + │ lookup condition: (metric_id = id) AND ((nullable >= -20) AND (nullable <= -10)) + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test NULL values in > unbounded lookup span. 
+query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + v.nullable > 1 AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@secondary + │ lookup condition: (metric_id = id) AND (nullable > 1) + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test NULL values in >= unbounded lookup span. +query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + v.nullable >= 1 AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@secondary + │ lookup condition: (metric_id = id) AND (nullable >= 1) + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + + +# Test NULL values in < unbounded lookup span. 
+query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + v.nullable < -10 AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@secondary + │ lookup condition: (metric_id = id) AND (nullable < -10) + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test NULL values in <= unbounded lookup span. +query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + v.nullable <= -10 AND + name='cpu' +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@secondary + │ lookup condition: (metric_id = id) AND (nullable <= -10) + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + +# Test NULL values in WHERE equality conditions. 
+query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + time < '2020-01-01 00:00:10+00:00' AND + name='cpu' AND + v.nullable = m.nullable +ORDER BY value +---- +distribution: local +vectorized: true +· +• sort +│ estimated row count: 3 +│ order: +value +│ +└── • lookup join + │ estimated row count: 3 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ estimated row count: 3 + │ table: metric_values@secondary + │ lookup condition: ((metric_id = id) AND (nullable = nullable)) AND ("time" < '2020-01-01 00:00:10+00:00') + │ + └── • index join + │ estimated row count: 1 + │ table: metrics@primary + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] + + +# Test NULL values in simple equality condition. +query T +EXPLAIN +SELECT * +FROM metric_values as v +INNER JOIN metrics as m +ON metric_id=id +WHERE + time < '2020-01-01 00:00:10+00:00' AND + name='cpu' AND + v.nullable = 1 +ORDER BY value +---- +distribution: local +vectorized: true +· +• lookup join +│ estimated row count: 0 +│ table: metrics@primary +│ equality: (id) = (id) +│ equality cols are key +│ +└── • sort + │ estimated row count: 0 + │ order: +value + │ + └── • lookup join + │ estimated row count: 0 + │ table: metric_values@primary + │ equality: (metric_id, time) = (metric_id,time) + │ equality cols are key + │ + └── • lookup join + │ table: metric_values@secondary + │ lookup condition: ((metric_id = id) AND (nullable = 1)) AND ("time" < '2020-01-01 00:00:10+00:00') + │ + └── • render + │ estimated row count: 1 + │ + └── • scan + estimated row count: 1 (10% of the table; stats collected ago) + table: metrics@name_index + spans: [/'cpu' - /'cpu'] diff --git a/pkg/sql/opt/memo/testdata/logprops/lookup-join b/pkg/sql/opt/memo/testdata/logprops/lookup-join index 61df384a6fe0..c152872db68c 100644 
--- a/pkg/sql/opt/memo/testdata/logprops/lookup-join +++ b/pkg/sql/opt/memo/testdata/logprops/lookup-join @@ -85,16 +85,20 @@ inner-join (lookup abcd) ├── interesting orderings: (+6,+7) ├── inner-join (lookup abcd@secondary) │ ├── columns: m:1(int!null) n:2(int) a:6(int!null) b:7(int!null) abcd.rowid:9(int!null) - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── eq [type=bool, outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ │ ├── variable: a:6 [type=int] + │ │ │ └── variable: m:1 [type=int] + │ │ └── gt [type=bool, outer=(7), constraints=(/7: [/3 - ]; tight)] + │ │ ├── variable: b:7 [type=int] + │ │ └── const: 2 [type=int] │ ├── fd: (9)-->(6,7), (1)==(6), (6)==(1) │ ├── scan small │ │ ├── columns: m:1(int) n:2(int) │ │ ├── prune: (1,2) │ │ └── unfiltered-cols: (1-5) - │ └── filters - │ └── gt [type=bool, outer=(7), constraints=(/7: [/3 - ]; tight)] - │ ├── variable: b:7 [type=int] - │ └── const: 2 [type=int] + │ └── filters (true) └── filters (true) # Filter that can only be applied after the primary index join. 
diff --git a/pkg/sql/opt/memo/testdata/stats/lookup-join b/pkg/sql/opt/memo/testdata/stats/lookup-join index 189677947c32..2e344e5d008d 100644 --- a/pkg/sql/opt/memo/testdata/stats/lookup-join +++ b/pkg/sql/opt/memo/testdata/stats/lookup-join @@ -81,14 +81,16 @@ inner-join (lookup abcd) ├── fd: (1)==(6), (6)==(1) ├── inner-join (lookup abcd@secondary) │ ├── columns: m:1(int!null) n:2(int) a:6(int!null) b:7(int!null) abcd.rowid:9(int!null) - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [type=bool, outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 2 [type=bool, outer=(7), constraints=(/7: [/3 - ]; tight)] │ ├── stats: [rows=33, distinct(1)=10, null(1)=0, distinct(6)=10, null(6)=0, distinct(7)=33, null(7)=0] │ ├── fd: (9)-->(6,7), (1)==(6), (6)==(1) │ ├── scan small │ │ ├── columns: m:1(int) n:2(int) │ │ └── stats: [rows=10, distinct(1)=10, null(1)=0] - │ └── filters - │ └── b:7 > 2 [type=bool, outer=(7), constraints=(/7: [/3 - ]; tight)] + │ └── filters (true) └── filters (true) # Filter that can only be applied after the primary index join. 
diff --git a/pkg/sql/opt/xform/BUILD.bazel b/pkg/sql/opt/xform/BUILD.bazel index 2e4d65fbe2cc..0b30f1591a04 100644 --- a/pkg/sql/opt/xform/BUILD.bazel +++ b/pkg/sql/opt/xform/BUILD.bazel @@ -56,6 +56,8 @@ go_test( srcs = [ "coster_test.go", "general_funcs_test.go", + "join_funcs_export_test.go", + "join_funcs_test.go", "join_order_builder_test.go", "main_test.go", "optimizer_test.go", @@ -73,6 +75,7 @@ go_test( "//pkg/security/securitytest", "//pkg/settings/cluster", "//pkg/sql/opt", + "//pkg/sql/opt/constraint", "//pkg/sql/opt/memo", "//pkg/sql/opt/norm", "//pkg/sql/opt/testutils", diff --git a/pkg/sql/opt/xform/coster.go b/pkg/sql/opt/xform/coster.go index 6344ae9c8d33..084d02337e53 100644 --- a/pkg/sql/opt/xform/coster.go +++ b/pkg/sql/opt/xform/coster.go @@ -1474,7 +1474,11 @@ func lookupJoinInputLimitHint(inputRowCount, outputRowCount, outputLimitHint flo func lookupExprCost(join memo.RelExpr) memo.Cost { lookupExpr, ok := join.(*memo.LookupJoinExpr) if ok { - return cpuCostFactor * memo.Cost(len(lookupExpr.LookupExpr)) + // 1.1 is a fudge factor that pushes some plans over the edge when choosing + // between a partial index vs full index plus lookup expr in the + // regional_by_row. 
+ // TODO(treilly): do some empirical analysis and model this better + return cpuCostFactor * memo.Cost(len(lookupExpr.LookupExpr)) * 1.1 } return 0 } diff --git a/pkg/sql/opt/xform/join_funcs.go b/pkg/sql/opt/xform/join_funcs.go index 084c17e38304..5765361c248a 100644 --- a/pkg/sql/opt/xform/join_funcs.go +++ b/pkg/sql/opt/xform/join_funcs.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" @@ -297,8 +298,13 @@ func (c *CustomFuncs) GenerateLookupJoins( // join implements logic equivalent to simple equality between // columns (where NULL never equals anything). foundVals, allIdx, ok := c.findJoinFilterConstants(allFilters, idxCol) + var foundRange bool if !ok { - break + // Also allow a limited form of range condition filters. + allIdx, foundRange = c.findJoinFilterRange(allFilters, idxCol) + if !foundRange { + break + } } if len(foundVals) > 1 { @@ -321,6 +327,11 @@ func (c *CustomFuncs) GenerateLookupJoins( } } + if foundRange { + shouldBuildMultiSpanLookupJoin = true + break + } + // We will join these constant values with the input to make // equality columns for the lookup join. if constFilters == nil { @@ -343,11 +354,12 @@ func (c *CustomFuncs) GenerateLookupJoins( } if shouldBuildMultiSpanLookupJoin { - // Some of the index columns were constrained to multiple constant values, - // and we did not use the method constructJoinWithConstants to create a - // cross join as the input (either because it would have been incorrect or - // because it would have eliminated the opportunity to apply other - // optimizations such as locality optimized search; see above). 
+ // Some of the index columns were constrained to multiple constant values + // or a range expression, and we did not use the method + // constructJoinWithConstants to create a cross join as the input (either + // because it would have been incorrect or because it would have + // eliminated the opportunity to apply other optimizations such as + // locality optimized search; see above). // // As an alternative, we store all the filters needed for the lookup in // LookupExpr, which will be used to construct spans at execution time. @@ -566,27 +578,41 @@ func (c *CustomFuncs) findFiltersForIndexLookup( continue } + var foundRange bool // Try to find a filter that constrains this column to non-NULL // constant values. We cannot use a NULL value because the lookup // join implements logic equivalent to simple equality between // columns (where NULL never equals anything). values, allIdx, ok := c.findJoinFilterConstants(filters, idxCol) if !ok { - break + // If there's no const filters look for an inequality range. + allIdx, foundRange = c.findJoinFilterRange(filters, idxCol) + if !foundRange { + break + } } if constFilters == nil { constFilters = make(memo.FiltersExpr, 0, numIndexKeyCols-j) } - // Ensure that the constant filter is either an equality or an IN expression. - // These are the only two types of expressions currently supported by the - // lookupJoiner for building lookup spans. + // Ensure that the constant filter is an equality, IN or inequality + // expression. These are the only types of expressions currently supported + // by the lookupJoiner for building lookup spans. 
constFilter := filters[allIdx] - if !c.isCanonicalConstFilter(constFilter) { - constFilter = c.makeConstFilter(idxCol, values) + if !c.isCanonicalLookupJoinFilter(constFilter) { + if len(values) > 0 { + constFilter = c.makeConstFilter(idxCol, values) + } else if foundRange { + constFilter = c.makeRangeFilter(idxCol, constFilter) + } } constFilters = append(constFilters, constFilter) + + // Generating additional columns after a range isn't helpful so stop here. + if foundRange { + break + } } if len(eqFilters) == 0 { @@ -597,24 +623,34 @@ func (c *CustomFuncs) findFiltersForIndexLookup( return eqFilters, constFilters, rightSideCols } -// isCanonicalConstFilter checks that the given filter is a constant filter in -// one of two possible canonical formats: -// 1. It is an equality between a variable and a constant. -// 2. It is an IN expression between a variable and a tuple of constants. -// Returns true if the filter matches one of these two formats. Otherwise -// returns false. -func (c *CustomFuncs) isCanonicalConstFilter(filter memo.FiltersItem) bool { - switch t := filter.Condition.(type) { - case *memo.EqExpr: - if t.Left.Op() == opt.VariableOp && opt.IsConstValueOp(t.Right) { - return true - } - case *memo.InExpr: - if t.Left.Op() == opt.VariableOp && memo.CanExtractConstTuple(t.Right) { +// isCanonicalLookupJoinFilter returns true for the limited set of expr's that are +// supported by the lookup joiner at execution time. 
+func (c *CustomFuncs) isCanonicalLookupJoinFilter(filter memo.FiltersItem) bool { + var checkExpr func(expr opt.Expr) bool + checkExpr = func(expr opt.Expr) bool { + switch t := expr.(type) { + case *memo.RangeExpr: + return checkExpr(t.And) + case *memo.AndExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.GeExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.GtExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.LeExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.LtExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.VariableExpr: return true + case *memo.EqExpr: + return checkExpr(t.Left) && checkExpr(t.Right) + case *memo.InExpr: + return checkExpr(t.Left) && memo.CanExtractConstTuple(t.Right) } + return opt.IsConstValueOp(expr) } - return false + return checkExpr(filter.Condition) } // makeConstFilter builds a filter that constrains the given column to the given @@ -640,6 +676,59 @@ func (c *CustomFuncs) makeConstFilter(col opt.ColumnID, values tree.Datums) memo )) } +// makeRangeFilter builds a filter from a constrained column, we assume the +// column is constrained by at least 1 tight constraint. This code doesn't +// handle descending columns. +func (c *CustomFuncs) makeRangeFilter(col opt.ColumnID, filter memo.FiltersItem) memo.FiltersItem { + props := filter.ScalarProps() + if props.Constraints.Length() == 0 || + props.Constraints.Constraint(0).Spans.Count() != 1 || + props.Constraints.Constraint(0).Columns.Get(0).Descending() { + panic(errors.AssertionFailedf("makeRangeFilter needs at least one ascending constraint with one span")) + } + span := props.Constraints.Constraint(0).Spans.Get(0) + return c.makeRangeFilterFromSpan(col, span) +} + +// makeRangeFilterFromSpan constructs a filter from a constraint.Span. 
+func (c *CustomFuncs) makeRangeFilterFromSpan( + col opt.ColumnID, span *constraint.Span, +) memo.FiltersItem { + variable := c.e.f.ConstructVariable(col) + var left, right opt.ScalarExpr + + // Here and below we need to check for IsEmpty and IsNull because sometimes + // Null is used for unbounded spans. Found empirically by forcing + // findFiltersForIndexLookup to always wrap the filters with makeRangeFilter. + if !span.StartKey().IsEmpty() && !span.StartKey().IsNull() { + val := span.StartKey().Value(0) + if span.StartBoundary() == constraint.IncludeBoundary { + left = c.e.f.ConstructGe(variable, c.e.f.ConstructConstVal(val, val.ResolvedType())) + } else { + left = c.e.f.ConstructGt(variable, c.e.f.ConstructConstVal(val, val.ResolvedType())) + } + } + + if !span.EndKey().IsEmpty() && !span.EndKey().IsNull() { + val := span.EndKey().Value(0) + if span.EndBoundary() == constraint.IncludeBoundary { + right = c.e.f.ConstructLe(variable, c.e.f.ConstructConstVal(val, val.ResolvedType())) + } else { + right = c.e.f.ConstructLt(variable, c.e.f.ConstructConstVal(val, val.ResolvedType())) + } + } + + if left != nil && right != nil { + return c.e.f.ConstructFiltersItem(c.e.f.ConstructRange(c.e.f.ConstructAnd(right, left))) + } else if left != nil { + return c.e.f.ConstructFiltersItem(left) + } else if right != nil { + return c.e.f.ConstructFiltersItem(right) + } + + panic(errors.AssertionFailedf("Constraint needs a valid start or end key")) +} + // constructContinuationColumnForPairedJoin constructs a continuation column // ID for the paired-joiners used for left outer/semi/anti joins when the // first join generates false positives (due to an inverted index or @@ -975,6 +1064,28 @@ func (c *CustomFuncs) findJoinFilterConstants( return bestValues, bestFilterIdx, true } +// findJoinFilterRange tries to find an inequality range for this column. 
+func (c *CustomFuncs) findJoinFilterRange( + filters memo.FiltersExpr, col opt.ColumnID, +) (filterIdx int, ok bool) { + for filterIdx := range filters { + props := filters[filterIdx].ScalarProps() + if props.TightConstraints && !props.Constraints.IsUnconstrained() { + constraint := props.Constraints.Constraint(0) + constraintCol := constraint.Columns.Get(0).ID() + // See comment in findFiltersForIndexLookup for why we check filter here. + // We only support 1 span in the execution engine so check that. + if constraintCol != col || + constraint.Spans.Count() != 1 || + !c.isCanonicalLookupJoinFilter(filters[filterIdx]) { + continue + } + return filterIdx, true + } + } + return 0, false +} + // constructJoinWithConstants constructs a cross join that joins every row in // the input with every value in vals. The cross join will be converted into a // projection by inlining normalization rules if vals contains only a single diff --git a/pkg/sql/opt/xform/join_funcs_export_test.go b/pkg/sql/opt/xform/join_funcs_export_test.go new file mode 100644 index 000000000000..5abd356b36b8 --- /dev/null +++ b/pkg/sql/opt/xform/join_funcs_export_test.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package xform + +var TestingMakeRangeFilterFromSpan = (*CustomFuncs).makeRangeFilterFromSpan +var TestingIsCanonicalLookupJoinFilter = (*CustomFuncs).isCanonicalLookupJoinFilter diff --git a/pkg/sql/opt/xform/join_funcs_test.go b/pkg/sql/opt/xform/join_funcs_test.go new file mode 100644 index 000000000000..fccace1b1d38 --- /dev/null +++ b/pkg/sql/opt/xform/join_funcs_test.go @@ -0,0 +1,191 @@ +// Copyright 2021 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package xform_test + +import ( + "reflect" + "testing" + + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" + "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils" + "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/xform" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" +) + +func TestCustomFuncs_makeRangeFilter(t *testing.T) { + defer leaktest.AfterTest(t)() + fb := makeFilterBuilder(t) + col := fb.tbl.ColumnID(0) + intLow := tree.NewDInt(0) + intHigh := tree.NewDInt(1) + nullKey := constraint.MakeKey(tree.DNull) + + tests := []struct { + name string + filter string + start constraint.Key + startBoundary constraint.SpanBoundary + end constraint.Key + endBoundary constraint.SpanBoundary + }{ + {"lt", "@1 < 1", + constraint.EmptyKey, constraint.IncludeBoundary, + constraint.MakeKey(intHigh), constraint.ExcludeBoundary, + }, + {"le", "@1 <= 1", + constraint.EmptyKey, constraint.IncludeBoundary, + constraint.MakeKey(intHigh), constraint.IncludeBoundary, + }, + {"gt", "@1 > 0", + constraint.MakeKey(intLow), constraint.ExcludeBoundary, + constraint.EmptyKey, constraint.IncludeBoundary, + }, + {"ge", "@1 >= 0", + constraint.MakeKey(intLow), constraint.IncludeBoundary, + constraint.EmptyKey, constraint.IncludeBoundary, + }, + {"lt-null", "@1 < 1", + nullKey, constraint.ExcludeBoundary, + 
constraint.MakeKey(intHigh), constraint.ExcludeBoundary, + }, + {"le-null", "@1 <= 1", + nullKey, constraint.ExcludeBoundary, + constraint.MakeKey(intHigh), constraint.IncludeBoundary, + }, + {"gt-null", "@1 > 0", + constraint.MakeKey(intLow), constraint.ExcludeBoundary, + nullKey, constraint.IncludeBoundary, + }, + {"ge-null", "@1 >= 0", + constraint.MakeKey(intLow), constraint.IncludeBoundary, + nullKey, constraint.IncludeBoundary, + }, + {"ge<", "@1 >= 0 AND @1 < 1", + constraint.MakeKey(intLow), constraint.IncludeBoundary, + constraint.MakeKey(intHigh), constraint.ExcludeBoundary, + }, + {"ge&le", "@1 >= 0 AND @1 <= 1", + constraint.MakeKey(intLow), constraint.IncludeBoundary, + constraint.MakeKey(intHigh), constraint.IncludeBoundary, + }, + {"gt<", "@1 > 0 AND @1 < 1", + constraint.MakeKey(intLow), constraint.ExcludeBoundary, + constraint.MakeKey(intHigh), constraint.ExcludeBoundary, + }, + {"gt&le", "@1 > 0 AND @1 <= 1", + constraint.MakeKey(intLow), constraint.ExcludeBoundary, + constraint.MakeKey(intHigh), constraint.IncludeBoundary, + }, + } + fut := xform.TestingMakeRangeFilterFromSpan + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fb.o.CustomFuncs() + var sp constraint.Span + sp.Init(tt.start, tt.startBoundary, tt.end, tt.endBoundary) + want := fb.buildFilter(tt.filter) + if got := fut(c, col, &sp); !reflect.DeepEqual(got, want) { + t.Errorf("makeRangeFilter() = %v, want %v", got, want) + } + }) + } +} + +type testFilterBuilder struct { + t *testing.T + semaCtx *tree.SemaContext + ctx *tree.EvalContext + o *xform.Optimizer + f *norm.Factory + tbl opt.TableID +} + +func makeFilterBuilder(t *testing.T) testFilterBuilder { + var o xform.Optimizer + ctx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) + o.Init(&ctx, nil) + f := o.Factory() + cat := testcat.New() + if _, err := cat.ExecuteDDL("CREATE TABLE a (i INT PRIMARY KEY, b BOOL)"); err != nil { + t.Fatal(err) + } + tn := tree.NewTableNameWithSchema("t", 
tree.PublicSchemaName, "a") + tbl := f.Metadata().AddTable(cat.Table(tn), tn) + return testFilterBuilder{ + t: t, + semaCtx: &tree.SemaContext{}, + ctx: &ctx, + o: &o, + f: f, + tbl: tbl, + } +} + +func (fb *testFilterBuilder) buildFilter(str string) memo.FiltersItem { + return testutils.BuildFilters(fb.t, fb.f, fb.semaCtx, fb.ctx, str)[0] +} + +func TestCustomFuncs_isCanonicalFilter(t *testing.T) { + defer leaktest.AfterTest(t)() + fb := makeFilterBuilder(t) + + tests := []struct { + name string + filter string + want bool + }{ + // Test that True, False, Null values are hit as const. + {name: "eq-int", + filter: "i = 10", + want: true, + }, + {name: "neq-int", + filter: "i != 10", + want: false, + }, + {name: "eq-null", + filter: "i = NULL", + want: true, + }, + {name: "eq-true", + filter: "b = TRUE", + want: true, + }, + {name: "in-tuple", + filter: "i IN (1,2)", + want: true, + }, + {name: "and-eq-lt", + filter: "i = 10 AND i < 10", + want: true, + }, + {name: "or-eq-lt", + filter: "i = 10 OR i < 10", + want: false, + }, + } + fut := xform.TestingIsCanonicalLookupJoinFilter + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fb.o.CustomFuncs() + filter := fb.buildFilter(tt.filter) + if got := fut(c, filter); got != tt.want { + t.Errorf("isCanonicalLookupJoinFilter() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/sql/opt/xform/optimizer.go b/pkg/sql/opt/xform/optimizer.go index 72c3ce2a0bc4..74f4f9c8f50d 100644 --- a/pkg/sql/opt/xform/optimizer.go +++ b/pkg/sql/opt/xform/optimizer.go @@ -977,3 +977,8 @@ func (o *Optimizer) recomputeCostImpl( func (o *Optimizer) FormatExpr(e opt.Expr, flags memo.ExprFmtFlags) string { return memo.FormatExpr(e, flags, o.mem, o.catalog) } + +// CustomFuncs exports the xform.CustomFuncs for testing purposes. 
+func (o *Optimizer) CustomFuncs() *CustomFuncs { + return &o.explorer.funcs +} diff --git a/pkg/sql/opt/xform/testdata/coster/zone b/pkg/sql/opt/xform/testdata/coster/zone index ab3a37c64783..88d18eb1000e 100644 --- a/pkg/sql/opt/xform/testdata/coster/zone +++ b/pkg/sql/opt/xform/testdata/coster/zone @@ -752,7 +752,7 @@ anti-join (lookup abc_part@bc_idx [as=a2]) │ └── a2.r:7 = 'west' [outer=(7), constraints=(/7: [/'west' - /'west']; tight), fd=()-->(7)] ├── cardinality: [0 - 1] ├── stats: [rows=1e-10] - ├── cost: 18.153533 + ├── cost: 18.1549817 ├── key: () ├── fd: ()-->(1-4) ├── anti-join (lookup abc_part@bc_idx [as=a2]) @@ -763,7 +763,7 @@ anti-join (lookup abc_part@bc_idx [as=a2]) │ │ └── a2.r:7 = 'east' [outer=(7), constraints=(/7: [/'east' - /'east']; tight), fd=()-->(7)] │ ├── cardinality: [0 - 1] │ ├── stats: [rows=0.900900001, distinct(1)=0.89738934, null(1)=0, distinct(2)=0.900900001, null(2)=0, distinct(3)=0.900900001, null(3)=0, distinct(4)=0.900900001, null(4)=0] - │ ├── cost: 10.8531367 + │ ├── cost: 10.8538647 │ ├── key: () │ ├── fd: ()-->(1-4) │ ├── locality-optimized-search diff --git a/pkg/sql/opt/xform/testdata/external/tpce b/pkg/sql/opt/xform/testdata/external/tpce index 8e19660c2ff5..dace24b450df 100644 --- a/pkg/sql/opt/xform/testdata/external/tpce +++ b/pkg/sql/opt/xform/testdata/external/tpce @@ -4113,7 +4113,10 @@ limit │ │ ├── ordering: +20 opt(24) [actual: +20] │ │ └── inner-join (lookup watch_item) │ │ ├── columns: wi_wl_id:19!null wi_s_symb:20!null wl_id:23!null wl_c_id:24!null - │ │ ├── key columns: [23] = [19] + │ │ ├── lookup expression + │ │ │ └── filters + │ │ │ ├── wi_wl_id:19 = wl_id:23 [outer=(19,23), constraints=(/19: (/NULL - ]; /23: (/NULL - ]), fd=(19)==(23), (23)==(19)] + │ │ │ └── wi_s_symb:20 > 'SYMB' [outer=(20), constraints=(/20: [/e'SYMB\x00' - ]; tight)] │ │ ├── key: (20,23) │ │ ├── fd: ()-->(24), (19)==(23), (23)==(19) │ │ ├── select @@ -4126,8 +4129,7 @@ limit │ │ │ │ └── fd: (23)-->(24) │ │ │ └── filters │ │ │ └── 
wl_c_id:24 = 0 [outer=(24), constraints=(/24: [/0 - /0]; tight), fd=()-->(24)] - │ │ └── filters - │ │ └── wi_s_symb:20 > 'SYMB' [outer=(20), constraints=(/20: [/e'SYMB\x00' - ]; tight)] + │ │ └── filters (true) │ └── filters (true) └── 1 diff --git a/pkg/sql/opt/xform/testdata/external/tpce-no-stats b/pkg/sql/opt/xform/testdata/external/tpce-no-stats index b53395de1c80..bbd0fc1b1eb2 100644 --- a/pkg/sql/opt/xform/testdata/external/tpce-no-stats +++ b/pkg/sql/opt/xform/testdata/external/tpce-no-stats @@ -4133,7 +4133,10 @@ limit │ │ ├── ordering: +20 opt(24) [actual: +20] │ │ └── inner-join (lookup watch_item) │ │ ├── columns: wi_wl_id:19!null wi_s_symb:20!null wl_id:23!null wl_c_id:24!null - │ │ ├── key columns: [23] = [19] + │ │ ├── lookup expression + │ │ │ └── filters + │ │ │ ├── wi_wl_id:19 = wl_id:23 [outer=(19,23), constraints=(/19: (/NULL - ]; /23: (/NULL - ]), fd=(19)==(23), (23)==(19)] + │ │ │ └── wi_s_symb:20 > 'SYMB' [outer=(20), constraints=(/20: [/e'SYMB\x00' - ]; tight)] │ │ ├── key: (20,23) │ │ ├── fd: ()-->(24), (19)==(23), (23)==(19) │ │ ├── select @@ -4146,8 +4149,7 @@ limit │ │ │ │ └── fd: (23)-->(24) │ │ │ └── filters │ │ │ └── wl_c_id:24 = 0 [outer=(24), constraints=(/24: [/0 - /0]; tight), fd=()-->(24)] - │ │ └── filters - │ │ └── wi_s_symb:20 > 'SYMB' [outer=(20), constraints=(/20: [/e'SYMB\x00' - ]; tight)] + │ │ └── filters (true) │ └── filters (true) └── 1 diff --git a/pkg/sql/opt/xform/testdata/external/trading b/pkg/sql/opt/xform/testdata/external/trading index 47823f8b24b0..0076de7353d5 100644 --- a/pkg/sql/opt/xform/testdata/external/trading +++ b/pkg/sql/opt/xform/testdata/external/trading @@ -845,7 +845,12 @@ project │ │ │ │ └── ordering: +10 opt(9) [actual: +10] │ │ │ ├── left-join (lookup transactiondetails@detailscardidindex) │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null transactiondetails.dealerid:20 isbuy:21 transactiondate:22 transactiondetails.cardid:23 quantity:24 - │ │ │ │ ├── key 
columns: [42 43 1] = [20 21 23] + │ │ │ │ ├── lookup expression + │ │ │ │ │ └── filters + │ │ │ │ │ ├── transactiondetails.cardid:23 = id:1 [outer=(1,23), constraints=(/1: (/NULL - ]; /23: (/NULL - ]), fd=(1)==(23), (23)==(1)] + │ │ │ │ │ ├── transactiondetails.dealerid:20 = 1 [outer=(20), constraints=(/20: [/1 - /1]; tight), fd=()-->(20)] + │ │ │ │ │ ├── NOT isbuy:21 [outer=(21), constraints=(/21: [/false - /false]; tight), fd=()-->(21)] + │ │ │ │ │ └── (transactiondate:22 >= '2020-02-28 00:00:00+00:00') AND (transactiondate:22 <= '2020-03-01 00:00:00+00:00') [outer=(22), constraints=(/22: [/'2020-02-28 00:00:00+00:00' - /'2020-03-01 00:00:00+00:00']; tight)] │ │ │ │ ├── immutable │ │ │ │ ├── stats: [rows=3543333.33, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0, distinct(23)=19000, null(23)=0] │ │ │ │ ├── key: (1,22-24) @@ -854,7 +859,7 @@ project │ │ │ │ ├── project │ │ │ │ │ ├── columns: "lookup_join_const_col_@21":43!null "lookup_join_const_col_@20":42!null id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null │ │ │ │ │ ├── immutable - │ │ │ │ │ ├── stats: [rows=19000, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0, distinct(42)=1, null(42)=0, distinct(43)=1, null(43)=0] + │ │ │ │ │ ├── stats: [rows=19000] │ │ │ │ │ ├── key: (1) │ │ │ │ │ ├── fd: ()-->(42,43), (1)-->(2-6), (2,4,5)~~>(1,3,6) │ │ │ │ │ ├── ordering: +1 @@ -876,8 +881,7 @@ project │ │ │ │ │ └── projections │ │ │ │ │ ├── false [as="lookup_join_const_col_@21":43] │ │ │ │ │ └── 1 [as="lookup_join_const_col_@20":42] - │ │ │ │ └── filters - │ │ │ │ └── (transactiondate:22 >= '2020-02-28 00:00:00+00:00') AND (transactiondate:22 <= '2020-03-01 00:00:00+00:00') [outer=(22), constraints=(/22: [/'2020-02-28 00:00:00+00:00' - /'2020-03-01 00:00:00+00:00']; tight)] + │ │ │ │ └── filters (true) │ │ │ └── filters (true) │ │ └── aggregations │ │ 
├── sum [as=sum:30, outer=(24)] diff --git a/pkg/sql/opt/xform/testdata/external/trading-mutation b/pkg/sql/opt/xform/testdata/external/trading-mutation index 264a3cd8fcd2..bba7a2fb1290 100644 --- a/pkg/sql/opt/xform/testdata/external/trading-mutation +++ b/pkg/sql/opt/xform/testdata/external/trading-mutation @@ -849,7 +849,12 @@ project │ │ │ │ └── ordering: +10 opt(9) [actual: +10] │ │ │ ├── left-join (lookup transactiondetails@detailscardidindex) │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null transactiondetails.dealerid:24 isbuy:25 transactiondate:26 transactiondetails.cardid:27 quantity:28 - │ │ │ │ ├── key columns: [48 49 1] = [24 25 27] + │ │ │ │ ├── lookup expression + │ │ │ │ │ └── filters + │ │ │ │ │ ├── transactiondetails.cardid:27 = id:1 [outer=(1,27), constraints=(/1: (/NULL - ]; /27: (/NULL - ]), fd=(1)==(27), (27)==(1)] + │ │ │ │ │ ├── transactiondetails.dealerid:24 = 1 [outer=(24), constraints=(/24: [/1 - /1]; tight), fd=()-->(24)] + │ │ │ │ │ ├── NOT isbuy:25 [outer=(25), constraints=(/25: [/false - /false]; tight), fd=()-->(25)] + │ │ │ │ │ └── (transactiondate:26 >= '2020-02-28 00:00:00+00:00') AND (transactiondate:26 <= '2020-03-01 00:00:00+00:00') [outer=(26), constraints=(/26: [/'2020-02-28 00:00:00+00:00' - /'2020-03-01 00:00:00+00:00']; tight)] │ │ │ │ ├── immutable │ │ │ │ ├── stats: [rows=3543333.33, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0, distinct(27)=19000, null(27)=0] │ │ │ │ ├── key: (1,26-28) @@ -858,7 +863,7 @@ project │ │ │ │ ├── project │ │ │ │ │ ├── columns: "lookup_join_const_col_@25":49!null "lookup_join_const_col_@24":48!null id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null │ │ │ │ │ ├── immutable - │ │ │ │ │ ├── stats: [rows=19000, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0, distinct(48)=1, null(48)=0, 
distinct(49)=1, null(49)=0] + │ │ │ │ │ ├── stats: [rows=19000] │ │ │ │ │ ├── key: (1) │ │ │ │ │ ├── fd: ()-->(48,49), (1)-->(2-6), (2,4,5)~~>(1,3,6) │ │ │ │ │ ├── ordering: +1 @@ -880,8 +885,7 @@ project │ │ │ │ │ └── projections │ │ │ │ │ ├── false [as="lookup_join_const_col_@25":49] │ │ │ │ │ └── 1 [as="lookup_join_const_col_@24":48] - │ │ │ │ └── filters - │ │ │ │ └── (transactiondate:26 >= '2020-02-28 00:00:00+00:00') AND (transactiondate:26 <= '2020-03-01 00:00:00+00:00') [outer=(26), constraints=(/26: [/'2020-02-28 00:00:00+00:00' - /'2020-03-01 00:00:00+00:00']; tight)] + │ │ │ │ └── filters (true) │ │ │ └── filters (true) │ │ └── aggregations │ │ ├── sum [as=sum:36, outer=(28)] diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join index 18706790196e..17aa2b23c551 100644 --- a/pkg/sql/opt/xform/testdata/rules/join +++ b/pkg/sql/opt/xform/testdata/rules/join @@ -3078,12 +3078,14 @@ SELECT a,b,n,m FROM small JOIN abcd ON a=m AND b>1 ---- inner-join (lookup abcd@secondary) ├── columns: a:6!null b:7!null n:2 m:1!null - ├── key columns: [1] = [6] + ├── lookup expression + │ └── filters + │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] ├── fd: (1)==(6), (6)==(1) ├── scan small │ └── columns: m:1 n:2 - └── filters - └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + └── filters (true) # Covering case, left-join. 
opt expect=GenerateLookupJoinsWithFilter @@ -3091,11 +3093,13 @@ SELECT a,b,n,m FROM small LEFT JOIN abcd ON a=m AND b>1 ---- left-join (lookup abcd@secondary) ├── columns: a:6 b:7 n:2 m:1 - ├── key columns: [1] = [6] + ├── lookup expression + │ └── filters + │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] ├── scan small │ └── columns: m:1 n:2 - └── filters - └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + └── filters (true) # Non-covering case. opt expect=GenerateLookupJoinsWithFilter @@ -3108,12 +3112,14 @@ inner-join (lookup abcd) ├── fd: (1)==(6), (6)==(1) ├── inner-join (lookup abcd@secondary) │ ├── columns: m:1!null n:2 a:6!null b:7!null abcd.rowid:9!null - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] │ ├── fd: (9)-->(6,7), (1)==(6), (6)==(1) │ ├── scan small │ │ └── columns: m:1 n:2 - │ └── filters - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ └── filters (true) └── filters (true) # Non-covering case, left join. @@ -3126,12 +3132,14 @@ left-join (lookup abcd) ├── lookup columns are key ├── left-join (lookup abcd@secondary) │ ├── columns: m:1 n:2 a:6 b:7 abcd.rowid:9 - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] │ ├── fd: (9)-->(6,7) │ ├── scan small │ │ └── columns: m:1 n:2 - │ └── filters - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ └── filters (true) └── filters (true) # Non-covering case, extra filter bound by index. 
@@ -3145,13 +3153,15 @@ inner-join (lookup abcd) ├── fd: (1)==(6), (6)==(1) ├── inner-join (lookup abcd@secondary) │ ├── columns: m:1!null n:2!null a:6!null b:7!null abcd.rowid:9!null - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] │ ├── fd: (9)-->(6,7), (1)==(6), (6)==(1) │ ├── scan small │ │ └── columns: m:1 n:2 │ └── filters - │ ├── b:7 > n:2 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ])] - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ └── b:7 > n:2 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ])] └── filters (true) # Non-covering case, extra filter bound by index, left join. @@ -3164,13 +3174,15 @@ left-join (lookup abcd) ├── lookup columns are key ├── left-join (lookup abcd@secondary) │ ├── columns: m:1 n:2 a:6 b:7 abcd.rowid:9 - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] │ ├── fd: (9)-->(6,7) │ ├── scan small │ │ └── columns: m:1 n:2 │ └── filters - │ ├── b:7 > n:2 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ])] - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ └── b:7 > n:2 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ])] └── filters (true) # Non-covering case, extra filter not bound by index. 
@@ -3184,12 +3196,14 @@ inner-join (lookup abcd) ├── fd: (1)==(6), (6)==(1) ├── inner-join (lookup abcd@secondary) │ ├── columns: m:1!null n:2 a:6!null b:7!null abcd.rowid:9!null - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] │ ├── fd: (9)-->(6,7), (1)==(6), (6)==(1) │ ├── scan small │ │ └── columns: m:1 n:2 - │ └── filters - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ └── filters (true) └── filters └── c:8 > n:2 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ])] @@ -3444,12 +3458,14 @@ inner-join (lookup abcde) ├── fd: (1)==(6), (6)==(1) ├── inner-join (lookup abcde@secondary) │ ├── columns: m:1!null n:2 a:6!null b:7!null c:8 abcde.rowid:11!null - │ ├── key columns: [1] = [6] + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 < 10 [outer=(7), constraints=(/7: (/NULL - /9]; tight)] │ ├── fd: (11)-->(6-8), (1)==(6), (6)==(1) │ ├── scan small │ │ └── columns: m:1 n:2 - │ └── filters - │ └── b:7 < 10 [outer=(7), constraints=(/7: (/NULL - /9]; tight)] + │ └── filters (true) └── filters (true) # Lookup Joiner uses the constant equality columns at the same time as the explicit @@ -8466,3 +8482,162 @@ anti-join (lookup abc_part) │ │ └── fd: ()-->(19-22) │ └── filters (true) └── filters (true) + +# illustrative examples from GH #51576 +exec-ddl +CREATE TABLE metrics ( + id SERIAL PRIMARY KEY, + name STRING, + INDEX name_index (name) +) +---- + +exec-ddl +CREATE TABLE metric_values ( + metric_id INT8, + time TIMESTAMPTZ, + value INT8, + PRIMARY KEY (metric_id, time) +) +---- + +# Add some metrics to force lookup join to be chosen. 
+exec-ddl +ALTER TABLE metric_values INJECT STATISTICS +'[ + { + "columns": ["metric_id"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 1000, + "distinct_count": 10 + }, + { + "columns": ["time"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 1000, + "distinct_count": 1000 + }, + { + "columns": ["value"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 1000, + "distinct_count": 1000 + } +]' +---- + +exec-ddl +ALTER TABLE metrics INJECT STATISTICS +'[ + { + "columns": ["id"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 10, + "distinct_count": 10 + }, + { + "columns": ["name"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 10, + "distinct_count": 10 + } +]' +---- + +opt expect=GenerateLookupJoinsWithFilter +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' AND + name='cpu' +---- +inner-join (lookup metric_values) + ├── columns: metric_id:1!null time:2!null value:3 id:6!null name:7!null + ├── lookup expression + │ └── filters + │ ├── metric_id:1 = id:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ └── (time:2 >= '2020-01-01 00:00:00+00:00') AND (time:2 <= '2020-01-01 00:10:00+00:00') [outer=(2), constraints=(/2: [/'2020-01-01 00:00:00+00:00' - /'2020-01-01 00:10:00+00:00']; tight)] + ├── key: (2,6) + ├── fd: ()-->(7), (1,2)-->(3), (1)==(6), (6)==(1) + ├── scan metrics@name_index + │ ├── columns: id:6!null name:7!null + │ ├── constraint: /7/6: [/'cpu' - /'cpu'] + │ ├── key: (6) + │ └── fd: ()-->(7) + └── filters (true) + +opt expect=GenerateLookupJoinsWithFilter +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' AND + name IN ('cpu','mem') +---- +inner-join (lookup metric_values) + ├── columns: metric_id:1!null time:2!null value:3 id:6!null 
name:7!null + ├── lookup expression + │ └── filters + │ ├── metric_id:1 = id:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ └── (time:2 >= '2020-01-01 00:00:00+00:00') AND (time:2 <= '2020-01-01 00:10:00+00:00') [outer=(2), constraints=(/2: [/'2020-01-01 00:00:00+00:00' - /'2020-01-01 00:10:00+00:00']; tight)] + ├── key: (2,6) + ├── fd: (1,2)-->(3), (6)-->(7), (1)==(6), (6)==(1) + ├── scan metrics@name_index + │ ├── columns: id:6!null name:7!null + │ ├── constraint: /7/6 + │ │ ├── [/'cpu' - /'cpu'] + │ │ └── [/'mem' - /'mem'] + │ ├── key: (6) + │ └── fd: (6)-->(7) + └── filters (true) + +# We don't support turning LIKE into scans yet, test that we fall back to a +# filter. +opt expect-not=GenerateLookupJoins +SELECT * +FROM metric_values +INNER JOIN metrics +ON metric_id=id +WHERE + time::STRING LIKE '202%' AND + name='cpu' +---- +inner-join (lookup metric_values) + ├── columns: metric_id:1!null time:2!null value:3 id:6!null name:7!null + ├── key columns: [6] = [1] + ├── stable + ├── key: (2,6) + ├── fd: ()-->(7), (1,2)-->(3), (1)==(6), (6)==(1) + ├── scan metrics@name_index + │ ├── columns: id:6!null name:7!null + │ ├── constraint: /7/6: [/'cpu' - /'cpu'] + │ ├── key: (6) + │ └── fd: ()-->(7) + └── filters + └── time:2::STRING LIKE '202%' [outer=(2), stable] + +opt expect=GenerateLookupJoinsWithFilter +SELECT * +FROM metrics +LEFT JOIN metric_values +ON metric_id=id +AND time BETWEEN '2020-01-01 00:00:00+00:00' AND '2020-01-01 00:10:00+00:00' +AND name='cpu' +---- +left-join (lookup metric_values) + ├── columns: id:1!null name:2 metric_id:5 time:6 value:7 + ├── lookup expression + │ └── filters + │ ├── metric_id:5 = id:1 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] + │ └── (time:6 >= '2020-01-01 00:00:00+00:00') AND (time:6 <= '2020-01-01 00:10:00+00:00') [outer=(6), constraints=(/6: [/'2020-01-01 00:00:00+00:00' - /'2020-01-01 00:10:00+00:00']; tight)] + ├── key: (1,5,6) + ├── fd: 
(1)-->(2), (5,6)-->(7) + ├── scan metrics + │ ├── columns: id:1!null name:2 + │ ├── key: (1) + │ └── fd: (1)-->(2) + └── filters + └── name:2 = 'cpu' [outer=(2), constraints=(/2: [/'cpu' - /'cpu']; tight), fd=()-->(2)] diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index f03d52086e83..c4714b3fbe8f 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -382,14 +382,13 @@ func (jr *joinReader) initJoinReaderStrategy( spanBuilder.SetNeededColumns(neededRightCols) var generator joinReaderSpanGenerator - var keyToInputRowIndices map[string][]int - if readerType != indexJoinReaderType { - keyToInputRowIndices = make(map[string][]int) - } - // Else: see the comment in defaultSpanGenerator on why we don't need - // this map for index joins. - if jr.lookupExpr.Expr == nil { + var keyToInputRowIndices map[string][]int + // See the comment in defaultSpanGenerator on why we don't need + // this map for index joins. + if readerType != indexJoinReaderType { + keyToInputRowIndices = make(map[string][]int) + } generator = &defaultSpanGenerator{ spanBuilder: spanBuilder, keyToInputRowIndices: keyToInputRowIndices, @@ -415,7 +414,6 @@ func (jr *joinReader) initJoinReaderStrategy( spanBuilder, numKeyCols, len(jr.input.OutputTypes()), - keyToInputRowIndices, &jr.lookupExpr, tableOrdToIndexOrd, ); err != nil { @@ -428,7 +426,6 @@ func (jr *joinReader) initJoinReaderStrategy( spanBuilder, numKeyCols, len(jr.input.OutputTypes()), - keyToInputRowIndices, &jr.lookupExpr, &jr.remoteLookupExpr, tableOrdToIndexOrd, diff --git a/pkg/sql/rowexec/joinreader_span_generator.go b/pkg/sql/rowexec/joinreader_span_generator.go index f019c8b8a3b1..ba5db29c0aa2 100644 --- a/pkg/sql/rowexec/joinreader_span_generator.go +++ b/pkg/sql/rowexec/joinreader_span_generator.go @@ -12,6 +12,7 @@ package rowexec import ( "fmt" + "sort" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -137,6 +138,19 @@ 
func (g *defaultSpanGenerator) maxLookupCols() int { return len(g.lookupCols) } +type spanRowIndex struct { + span roachpb.Span + rowIndices []int +} + +type spanRowIndices []spanRowIndex + +func (s spanRowIndices) Len() int { return len(s) } +func (s spanRowIndices) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s spanRowIndices) Less(i, j int) bool { return s[i].span.Key.Compare(s[j].span.Key) < 0 } + +var _ sort.Interface = &spanRowIndices{} + // multiSpanGenerator is the joinReaderSpanGenerator used when each lookup will // scan multiple spans in the index. This is the case when some of the index // columns can take on multiple constant values. For example, the @@ -152,8 +166,8 @@ type multiSpanGenerator struct { // indexColInfos stores info about the values that each index column can // take on in the spans produced by the multiSpanGenerator. See the comment - // above multiSpanGeneratorIndexColInfo for more details. - indexColInfos []multiSpanGeneratorIndexColInfo + // above multiSpanGeneratorColInfo for more details. + indexColInfos []multiSpanGeneratorColInfo // indexKeyRows and indexKeySpans are used to generate the spans for a single // input row. They are allocated once in init(), and then reused for every row. @@ -162,9 +176,18 @@ type multiSpanGenerator struct { // keyToInputRowIndices maps a lookup span key to the input row indices that // desire that span. This is used for de-duping spans, and to map the fetched - // rows to the input rows that need to join with them. + // rows to the input rows that need to join with them. If we have inequality + // exprs we can't use this from getMatchingRowIndices because the spans are + // ranges and not point spans so we build this map using the start keys and + // then convert it into a spanToInputRowIndices. keyToInputRowIndices map[string][]int + // spanToInputRowIndices maps a lookup span to the input row indices that + // desire that span. 
This is a range based equivalent of the + // keyToInputRowIndices that is only used when there are range based, i.e. + // inequality conditions. This is a sorted set we do binary searches on. + spanToInputRowIndices spanRowIndices + // spansCount is the number of spans generated for each input row. spansCount int @@ -180,32 +203,74 @@ type multiSpanGenerator struct { // numInputCols is the number of columns in the input to the joinReader. numInputCols int + // inequalityColIdx is the index of inequality colinfo (there can be only one), + // -1 otherwise. + inequalityColIdx int + scratchSpans roachpb.Spans } -// multiSpanGeneratorIndexColInfo contains info about the values that a specific +// multiSpanGeneratorColInfo contains info about the values that a specific // index column can take on in the spans produced by the multiSpanGenerator. The // column ordinal is not contained in this struct, but depends on the location // of this struct in the indexColInfos slice; the position in the slice // corresponds to the position in the index. -// - If len(constVals) > 0, the index column can equal any of the given -// constant values. This is the case when there is a join filter such as -// c IN ('a', 'b', 'c'), where c is a key column in the index. -// - If constVals is empty, then inputRowIdx corresponds to an index into the -// input row. This is the case for join filters such as c = a, where c is a -// column in the index and a is a column in the input. -type multiSpanGeneratorIndexColInfo struct { - constVals tree.Datums +type multiSpanGeneratorColInfo interface { + String() string +} + +// multiSpanGeneratorValuesColInfo is used to represent a column constrained +// by a set of constants (i.e. '=' or 'in' expressions). 
+type multiSpanGeneratorValuesColInfo struct { + constVals tree.Datums +} + +func (i multiSpanGeneratorValuesColInfo) String() string { + return fmt.Sprintf("[constVals: %s]", i.constVals.String()) +} + +// multiSpanGeneratorIndexVarColInfo represents a column that matches a column +// in the input row. inputRowIdx corresponds to an index into the input row. +// This is the case for join filters such as c = a, where c is a column in the +// index and a is a column in the input. +type multiSpanGeneratorIndexVarColInfo struct { inputRowIdx int } -func (i multiSpanGeneratorIndexColInfo) String() string { - if len(i.constVals) > 0 { - return fmt.Sprintf("[constVals: %s]", i.constVals.String()) - } +func (i multiSpanGeneratorIndexVarColInfo) String() string { return fmt.Sprintf("[inputRowIdx: %d]", i.inputRowIdx) } +// multiSpanGeneratorInequalityColInfo represents a column that is bound by a +// range expression. If there are <,>, >= or <= inequalities we distill them +// into a start and end datum. +type multiSpanGeneratorInequalityColInfo struct { + start tree.Datum + startInclusive bool + end tree.Datum + endInclusive bool +} + +func (i multiSpanGeneratorInequalityColInfo) String() string { + var startBoundary byte + if i.startInclusive { + startBoundary = '[' + } else { + startBoundary = '(' + } + var endBoundary rune + if i.endInclusive { + endBoundary = ']' + } else { + endBoundary = ')' + } + return fmt.Sprintf("%c%v - %v%c", startBoundary, i.start, i.end, endBoundary) +} + +var _ multiSpanGeneratorColInfo = &multiSpanGeneratorValuesColInfo{} +var _ multiSpanGeneratorColInfo = &multiSpanGeneratorIndexVarColInfo{} +var _ multiSpanGeneratorColInfo = &multiSpanGeneratorInequalityColInfo{} + // maxLookupCols is part of the joinReaderSpanGenerator interface. 
func (g *multiSpanGenerator) maxLookupCols() int { return len(g.indexColInfos) @@ -217,14 +282,14 @@ func (g *multiSpanGenerator) init( spanBuilder *span.Builder, numKeyCols int, numInputCols int, - keyToInputRowIndices map[string][]int, exprHelper *execinfrapb.ExprHelper, tableOrdToIndexOrd util.FastIntMap, ) error { g.spanBuilder = spanBuilder g.numInputCols = numInputCols - g.keyToInputRowIndices = keyToInputRowIndices + g.keyToInputRowIndices = make(map[string][]int) g.tableOrdToIndexOrd = tableOrdToIndexOrd + g.inequalityColIdx = -1 // Initialize the spansCount to 1, since we'll always have at least one span. // This number may increase when we call fillInIndexColInfos() below. @@ -232,7 +297,7 @@ func (g *multiSpanGenerator) init( // Process the given expression to fill in g.indexColInfos with info from the // join conditions. This info will be used later to generate the spans. - g.indexColInfos = make([]multiSpanGeneratorIndexColInfo, 0, numKeyCols) + g.indexColInfos = make([]multiSpanGeneratorColInfo, 0, numKeyCols) if err := g.fillInIndexColInfos(exprHelper.Expr); err != nil { return err } @@ -272,19 +337,21 @@ func (g *multiSpanGenerator) init( // [ 'east' - 2 - ] // [ 'west' - 2 - ] // + + // Make first pass fleshing out the structure with const values. 
g.indexKeyRows = make([]rowenc.EncDatumRow, 1, g.spansCount) g.indexKeyRows[0] = make(rowenc.EncDatumRow, 0, lookupColsCount) for _, info := range g.indexColInfos { - if len(info.constVals) > 0 { + if valuesInfo, ok := info.(multiSpanGeneratorValuesColInfo); ok { for i, n := 0, len(g.indexKeyRows); i < n; i++ { indexKeyRow := g.indexKeyRows[i] - for j := 1; j < len(info.constVals); j++ { + for j := 1; j < len(valuesInfo.constVals); j++ { newIndexKeyRow := make(rowenc.EncDatumRow, len(indexKeyRow), lookupColsCount) copy(newIndexKeyRow, indexKeyRow) - newIndexKeyRow = append(newIndexKeyRow, rowenc.EncDatum{Datum: info.constVals[j]}) + newIndexKeyRow = append(newIndexKeyRow, rowenc.EncDatum{Datum: valuesInfo.constVals[j]}) g.indexKeyRows = append(g.indexKeyRows, newIndexKeyRow) } - g.indexKeyRows[i] = append(indexKeyRow, rowenc.EncDatum{Datum: info.constVals[0]}) + g.indexKeyRows[i] = append(indexKeyRow, rowenc.EncDatum{Datum: valuesInfo.constVals[0]}) } } else { for i := 0; i < len(g.indexKeyRows); i++ { @@ -307,8 +374,11 @@ func (g *multiSpanGenerator) init( // 1. Equalities between input columns and index columns, such as c1 = c2. // 2. Equalities or IN conditions between index columns and constants, such // as c = 5 or c IN ('a', 'b', 'c'). +// 3. Inequalities from (possibly AND'd) <,>,<=,>= exprs. +// // The optimizer should have ensured that all conditions fall into one of -// these two categories. Any other expression types will return an error. +// these categories. Any other expression types will return an error. 
+// TODO(treilly): We should probably be doing this at compile time, see #65773 func (g *multiSpanGenerator) fillInIndexColInfos(expr tree.TypedExpr) error { switch t := expr.(type) { case *tree.AndExpr: @@ -318,16 +388,26 @@ func (g *multiSpanGenerator) fillInIndexColInfos(expr tree.TypedExpr) error { return g.fillInIndexColInfos(t.Right.(tree.TypedExpr)) case *tree.ComparisonExpr: - if t.Operator.Symbol != tree.EQ && t.Operator.Symbol != tree.In { - return errors.AssertionFailedf("comparison operator must be EQ or In. Found %s", t.Operator) + setOfVals := false + inequality := false + switch t.Operator.Symbol { + case tree.EQ, tree.In: + setOfVals = true + case tree.GE, tree.LE, tree.GT, tree.LT: + inequality = true + default: + // This should never happen because of enforcement at opt time. + return errors.AssertionFailedf("comparison operator not supported. Found %s", t.Operator) } tabOrd := -1 - info := multiSpanGeneratorIndexColInfo{inputRowIdx: -1} - // Since we only support EQ and In, we don't need to check anything other - // than the types of the arguments in order to extract the info. - getInfo := func(typedExpr tree.TypedExpr) error { + var info multiSpanGeneratorColInfo + + // For EQ and In, we just need to check the types of the arguments in order + // to extract the info. For inequalities we return the const datums that + // will form the span boundaries. + getInfo := func(typedExpr tree.TypedExpr) (tree.Datum, error) { switch t := typedExpr.(type) { case *tree.IndexedVar: // IndexedVars can either be from the input or the index. 
If the @@ -336,38 +416,71 @@ func (g *multiSpanGenerator) fillInIndexColInfos(expr tree.TypedExpr) error { if t.Idx >= g.numInputCols { tabOrd = t.Idx - g.numInputCols } else { - info.inputRowIdx = t.Idx + info = multiSpanGeneratorIndexVarColInfo{inputRowIdx: t.Idx} } case tree.Datum: - switch t.ResolvedType().Family() { - case types.TupleFamily: - info.constVals = t.(*tree.DTuple).D - default: - info.constVals = tree.Datums{t} + if setOfVals { + var values tree.Datums + switch t.ResolvedType().Family() { + case types.TupleFamily: + values = t.(*tree.DTuple).D + default: + values = tree.Datums{t} + } + // Every time there are multiple possible values, we multiply the + // spansCount by the number of possibilities. We will need to create + // spans representing the cartesian product of possible values for + // each column. + info = multiSpanGeneratorValuesColInfo{constVals: values} + g.spansCount *= len(values) + } else { + return t, nil } - // Every time there are multiple possible values, we multiply the - // spansCount by the number of possibilities. We will need to create - // spans representing the cartesian product of possible values for - // each column. - g.spansCount *= len(info.constVals) default: - return errors.AssertionFailedf("unhandled comparison argument type %T", t) + return nil, errors.AssertionFailedf("unhandled comparison argument type %T", t) } - return nil + return nil, nil } - if err := getInfo(t.Left.(tree.TypedExpr)); err != nil { + + // NB: we make no attempt to deal with column direction here, that is sorted + // out later in the span builder. 
+ var inequalityInfo multiSpanGeneratorInequalityColInfo + if lval, err := getInfo(t.Left.(tree.TypedExpr)); err != nil { return err + } else if lval != nil { + if t.Operator.Symbol == tree.LT || t.Operator.Symbol == tree.LE { + inequalityInfo.start = lval + inequalityInfo.startInclusive = t.Operator.Symbol == tree.LE + } else { + inequalityInfo.end = lval + inequalityInfo.endInclusive = t.Operator.Symbol == tree.GE + } } - if err := getInfo(t.Right.(tree.TypedExpr)); err != nil { + + if rval, err := getInfo(t.Right.(tree.TypedExpr)); err != nil { return err + } else if rval != nil { + if t.Operator.Symbol == tree.LT || t.Operator.Symbol == tree.LE { + inequalityInfo.end = rval + inequalityInfo.endInclusive = t.Operator.Symbol == tree.LE + } else { + inequalityInfo.start = rval + inequalityInfo.startInclusive = t.Operator.Symbol == tree.GE + } } idxOrd, ok := g.tableOrdToIndexOrd.Get(tabOrd) if !ok { return errors.AssertionFailedf("table column %d not found in index", tabOrd) } + + if inequality { + info = inequalityInfo + g.inequalityColIdx = idxOrd + } + if len(g.indexColInfos) <= idxOrd { g.indexColInfos = g.indexColInfos[:idxOrd+1] } @@ -383,36 +496,76 @@ func (g *multiSpanGenerator) fillInIndexColInfos(expr tree.TypedExpr) error { // generateNonNullSpans generates spans for a given row. It does not include // null values, since those values would not match the lookup condition anyway. func (g *multiSpanGenerator) generateNonNullSpans(row rowenc.EncDatumRow) (roachpb.Spans, error) { - // Fill in the holes in g.indexKeyRows that correspond to input row - // values. - for j, info := range g.indexColInfos { - if len(info.constVals) == 0 { - for i := 0; i < len(g.indexKeyRows); i++ { - g.indexKeyRows[i][j] = row[info.inputRowIdx] + // Fill in the holes in g.indexKeyRows that correspond to input row values. 
+ for i := 0; i < len(g.indexKeyRows); i++ { + for j, info := range g.indexColInfos { + if inf, ok := info.(multiSpanGeneratorIndexVarColInfo); ok { + g.indexKeyRows[i][j] = row[inf.inputRowIdx] } } } // Convert the index key rows to spans. g.indexKeySpans = g.indexKeySpans[:0] + + // Hoist inequality lookup out of loop if we have one. + var inequalityInfo multiSpanGeneratorInequalityColInfo + if g.inequalityColIdx != -1 { + inequalityInfo = g.indexColInfos[g.inequalityColIdx].(multiSpanGeneratorInequalityColInfo) + } + + // Build spans for each row. for _, indexKeyRow := range g.indexKeyRows { - span, containsNull, err := g.spanBuilder.SpanFromEncDatums(indexKeyRow, len(g.indexColInfos)) + var s roachpb.Span + var err error + var containsNull bool + if g.inequalityColIdx == -1 { + s, containsNull, err = g.spanBuilder.SpanFromEncDatums(indexKeyRow, len(g.indexColInfos)) + } else { + s, containsNull, err = g.spanBuilder.SpanFromEncDatumsWithRange(indexKeyRow, len(g.indexColInfos), + inequalityInfo.start, inequalityInfo.startInclusive, inequalityInfo.end, inequalityInfo.endInclusive) + } + if err != nil { return roachpb.Spans{}, err } + if !containsNull { - g.indexKeySpans = append(g.indexKeySpans, span) + g.indexKeySpans = append(g.indexKeySpans, s) } } + return g.indexKeySpans, nil } +// findInputRowIndicesByKey does a binary search to find the span that contains +// the given key. +func (s *spanRowIndices) findInputRowIndicesByKey(key roachpb.Key) []int { + i, j := 0, s.Len() + for i < j { + h := (i + j) >> 1 + sp := (*s)[h] + switch sp.span.CompareKey(key) { + case 0: + return sp.rowIndices + case -1: + j = h + case 1: + i = h + 1 + } + } + + return nil +} + // generateSpans is part of the joinReaderSpanGenerator interface. func (g *multiSpanGenerator) generateSpans(rows []rowenc.EncDatumRow) (roachpb.Spans, error) { // This loop gets optimized to a runtime.mapclear call. 
for k := range g.keyToInputRowIndices { delete(g.keyToInputRowIndices, k) } + g.spanToInputRowIndices = g.spanToInputRowIndices[:0] + // We maintain a map from index key to the corresponding input rows so we can // join the index results to the inputs. g.scratchSpans = g.scratchSpans[:0] @@ -425,18 +578,43 @@ func (g *multiSpanGenerator) generateSpans(rows []rowenc.EncDatumRow) (roachpb.S generatedSpan := &generatedSpans[j] inputRowIndices := g.keyToInputRowIndices[string(generatedSpan.Key)] if inputRowIndices == nil { - g.scratchSpans = g.spanBuilder.MaybeSplitSpanIntoSeparateFamilies( - g.scratchSpans, *generatedSpan, len(g.indexColInfos), false /* containsNull */) + // MaybeSplitSpanIntoSeparateFamilies is an optimization for doing more + // efficient point lookups when the span hits multiple column families. + // It doesn't work with inequality ranges because the prefixLen we pass + // in here is wrong and possibly other reasons. + if g.inequalityColIdx != -1 { + g.scratchSpans = append(g.scratchSpans, *generatedSpan) + } else { + g.scratchSpans = g.spanBuilder.MaybeSplitSpanIntoSeparateFamilies( + g.scratchSpans, *generatedSpan, len(g.indexColInfos), false /* containsNull */) + } } + g.keyToInputRowIndices[string(generatedSpan.Key)] = append(inputRowIndices, i) } } + // If we need to map against range spans instead of point spans convert the + // map into a sorted set of spans we can binary search against. + if g.inequalityColIdx != -1 { + for _, s := range g.scratchSpans { + g.spanToInputRowIndices = append(g.spanToInputRowIndices, spanRowIndex{span: s, rowIndices: g.keyToInputRowIndices[string(s.Key)]}) + } + sort.Sort(g.spanToInputRowIndices) + // We don't need this anymore. + for k := range g.keyToInputRowIndices { + delete(g.keyToInputRowIndices, k) + } + } + return g.scratchSpans, nil } // getMatchingRowIndices is part of the joinReaderSpanGenerator interface. 
func (g *multiSpanGenerator) getMatchingRowIndices(key roachpb.Key) []int { + if g.inequalityColIdx != -1 { + return g.spanToInputRowIndices.findInputRowIndicesByKey(key) + } return g.keyToInputRowIndices[string(key)] } @@ -455,18 +633,17 @@ func (g *localityOptimizedSpanGenerator) init( spanBuilder *span.Builder, numKeyCols int, numInputCols int, - keyToInputRowIndices map[string][]int, localExprHelper *execinfrapb.ExprHelper, remoteExprHelper *execinfrapb.ExprHelper, tableOrdToIndexOrd util.FastIntMap, ) error { if err := g.localSpanGen.init( - spanBuilder, numKeyCols, numInputCols, keyToInputRowIndices, localExprHelper, tableOrdToIndexOrd, + spanBuilder, numKeyCols, numInputCols, localExprHelper, tableOrdToIndexOrd, ); err != nil { return err } if err := g.remoteSpanGen.init( - spanBuilder, numKeyCols, numInputCols, keyToInputRowIndices, remoteExprHelper, tableOrdToIndexOrd, + spanBuilder, numKeyCols, numInputCols, remoteExprHelper, tableOrdToIndexOrd, ); err != nil { return err } diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index e4619bd3e4b4..96f9100034f4 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -145,6 +145,68 @@ func (s *Builder) SpanFromEncDatums( values[:prefixLen], s.indexColTypes[:prefixLen], s.indexColDirs[:prefixLen], s.table, s.index, &s.alloc, s.KeyPrefix) } +// SpanFromEncDatumsWithRange encodes a range span. The inequality is assumed to +// be the end of the span and the start/end keys are generated by putting them +// in the values row at the prefixLen - 1 position. Only one of start or end +// need be non-nil; omitting one causes an open ended range span to be +// generated. Since the exec code knows nothing about index column sorting +// direction we assume ascending; if they are descending we deal with that here. 
+func (s *Builder) SpanFromEncDatumsWithRange( + values rowenc.EncDatumRow, + prefixLen int, + startDatum tree.Datum, + startInclusive bool, + endDatum tree.Datum, + endInclusive bool, +) (_ roachpb.Span, containsNull bool, err error) { + + if s.indexColDirs[prefixLen-1] == descpb.IndexDescriptor_DESC { + startDatum, endDatum = endDatum, startDatum + startInclusive, endInclusive = endInclusive, startInclusive + } + + makeKeyFromRow := func(r rowenc.EncDatumRow, l int) (k roachpb.Key, cn bool, e error) { + k, _, cn, e = rowenc.MakeKeyFromEncDatums(r[:l], s.indexColTypes[:l], s.indexColDirs[:l], + s.table, s.index, &s.alloc, s.KeyPrefix) + return + } + + var startKey, endKey roachpb.Key + var startContainsNull, endContainsNull bool + if startDatum != nil { + values[prefixLen-1] = rowenc.EncDatum{Datum: startDatum} + startKey, startContainsNull, err = makeKeyFromRow(values, prefixLen) + if !startInclusive { + startKey = startKey.Next() + } + } else { + startKey, startContainsNull, err = makeKeyFromRow(values, prefixLen-1) + startKey = encoding.EncodeNullAscending(startKey) + startKey = startKey.Next() + } + + if err != nil { + return roachpb.Span{}, false, err + } + + if endDatum != nil { + values[prefixLen-1] = rowenc.EncDatum{Datum: endDatum} + endKey, endContainsNull, err = makeKeyFromRow(values, prefixLen) + if endInclusive { + endKey = endKey.PrefixEnd() + } + } else { + endKey, endContainsNull, err = makeKeyFromRow(values, prefixLen-1) + endKey = endKey.PrefixEnd() + } + + if err != nil { + return roachpb.Span{}, false, err + } + + return roachpb.Span{Key: startKey, EndKey: endKey}, startContainsNull || endContainsNull, nil +} + // SpanFromDatumRow generates an index span with prefixLen constraint columns from the index. // SpanFromDatumRow assumes that values is a valid table row for the Builder's table. // It also returns whether or not the input values contain a null value or not, which can be