Extract sharding key from conditions
PR #181 introduced support for DDL sharding keys. However, if the sharding
key did not have a separate index in the schema, a select with equality
conditions covering all sharding key fields still led to a map-reduce
instead of a single storage call. This patch improves sharding key
extraction from conditions and fixes the issue.

Closes #213
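
For illustration, a minimal sketch of the behavior this patch enables (the space name, sharding key and data follow the integration tests added in this commit; treat the snippet as a sketch rather than exact router output):

```lua
-- The space is sharded by the DDL sharding key {'name', 'age'}, and no
-- single index covers both fields at once.
-- Before this patch the router could not derive the sharding key from the
-- conditions, so the request fell back to a map-reduce over all storages.
-- With this patch the equality conditions are combined into a full sharding
-- key, bucket_id is computed, and only one storage is called.
local result, err = crud.select('customers_name_age_key_different_indexes', {
    {'==', 'name', 'Viktor Pelevin'},
    {'==', 'age', 58},
})
```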
DifferentialOrange committed Nov 30, 2021
1 parent cc2e0e8 commit 1bd6e85
Showing 7 changed files with 398 additions and 58 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -20,6 +20,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.

* Use tuple-merger backed select implementation on tarantool 2.10+ (it gives
less pressure on Lua GC).
* DDL sharding key can now be extracted from select conditions even if
there is no separate index for it.

## [0.9.0] - 20-10-21

2 changes: 0 additions & 2 deletions README.md
@@ -101,8 +101,6 @@ Current limitations for using custom sharding key:
updated on storages, see
[#212](https://github.com/tarantool/crud/issues/212). However, it is possible
to do it manually with `require('crud.sharding_key').update_cache()`.
- CRUD select may lead to a map-reduce in some cases, see
[#213](https://github.com/tarantool/crud/issues/213).
- No support of JSON path for sharding key, see
[#219](https://github.com/tarantool/crud/issues/219).
- `primary_index_fieldno_map` is not cached, see
106 changes: 70 additions & 36 deletions crud/select/plan.lua
@@ -12,7 +12,9 @@ if has_keydef then
keydef_lib = compat.require('tuple.keydef', 'key_def')
end

local select_plan = {}
local select_plan = {
_internal = {},
}

local IndexTypeError = errors.new_class('IndexTypeError', {capture_stack = false})
local FilterFieldsError = errors.new_class('FilterFieldsError', {capture_stack = false})
@@ -48,49 +50,83 @@ local function get_index_for_condition(space_indexes, space_format, condition)
end
end

local function extract_sharding_key_from_scan_value(scan_value, scan_index, sharding_index)
if #scan_value < #sharding_index.parts then
return nil
end
function select_plan._internal.extract_sharding_key_from_conditions(conditions, sharding_index,
space_indexes, fieldno_map)
dev_checks('table', 'table', 'table', 'table')

if scan_index.id == sharding_index.id then
return scan_value
end
-- If a name is both a valid index name and a field name,
-- it is interpreted as an index name.
local filled_fields = {}
for _, condition in ipairs(conditions) do
if condition.operator ~= compare_conditions.operators.EQ then
goto continue
end

local scan_value_fields_values = {}
for i, scan_index_part in ipairs(scan_index.parts) do
scan_value_fields_values[scan_index_part.fieldno] = scan_value[i]
end
local index = space_indexes[condition.operand]
if index ~= nil then
for i, part in ipairs(index.parts) do
if filled_fields[part.fieldno] == nil then
filled_fields[part.fieldno] = condition.values[i]
end
end

-- check that sharding key is included in the scan index fields
local sharding_key = {}
for _, sharding_key_part in ipairs(sharding_index.parts) do
local fieldno = sharding_key_part.fieldno
goto continue
end

-- sharding key isn't included in scan key
if scan_value_fields_values[fieldno] == nil then
return nil
local fieldno = fieldno_map[condition.operand]
if fieldno == nil then
goto continue
end
filled_fields[fieldno] = condition.values[1]

local field_value = scan_value_fields_values[fieldno]
::continue::
end

-- sharding key contains nil values
if field_value == nil then
local sharding_key = {}
for i, v in ipairs(sharding_index.parts) do
if filled_fields[v.fieldno] == nil then
return nil
end

table.insert(sharding_key, field_value)
sharding_key[i] = filled_fields[v.fieldno]
end

return sharding_key
end

function select_plan._internal.get_sharding_key_from_scan_value(scan_value, scan_index, scan_iter, sharding_index)
dev_checks('?', 'table', 'number', 'table')

if scan_value == nil then
return nil
end

if scan_iter ~= box.index.EQ and scan_iter ~= box.index.REQ then
return nil
end

if scan_index.id == sharding_index.id then
if type(scan_value) ~= 'table' then
return scan_value
end

for i, _ in ipairs(sharding_index.parts) do
if scan_value[i] == nil then return nil end
end
return scan_value
end

return nil
end

-- We need to construct after_tuple by field_names
-- because if `fields` option is specified we have after_tuple with partial fields
-- and these fields are ordered by field_names + primary key + scan key
-- this order can differ from the order in the space format
-- so we need to cast after_tuple to space format for scrolling tuples on storage
local function construct_after_tuple_by_fields(space_format, field_names, tuple)
local function construct_after_tuple_by_fields(fieldno_map, field_names, tuple)
dev_checks('?table', '?table', '?table|cdata')

if tuple == nil then
return nil
end
@@ -99,15 +135,10 @@ local function construct_after_tuple_by_fields(space_format, field_names, tuple)
return tuple
end

local positions = {}
local transformed_tuple = {}

for i, field in ipairs(space_format) do
positions[field.name] = i
end

for i, field_name in ipairs(field_names) do
local fieldno = positions[field_name]
local fieldno = fieldno_map[field_name]
if fieldno == nil then
return nil, FilterFieldsError:new(
'Space format doesn\'t contain field named %q', field_name
@@ -145,6 +176,8 @@ function select_plan.new(space, conditions, opts)
local scan_value
local scan_condition_num

local fieldno_map = utils.get_format_fieldno_map(space_format)

-- search index to iterate over
for i, condition in ipairs(conditions) do
scan_index = get_index_for_condition(space_indexes, space_format, condition)
@@ -176,9 +209,7 @@

-- handle opts.first
local total_tuples_count
local scan_after_tuple, err = construct_after_tuple_by_fields(
space_format, field_names, opts.after_tuple
)
local scan_after_tuple, err = construct_after_tuple_by_fields(fieldno_map, field_names, opts.after_tuple)
if err ~= nil then
return nil, err
end
@@ -230,9 +261,12 @@
local sharding_index = opts.sharding_key_as_index_obj or primary_index

-- get sharding key value
local sharding_key
if scan_value ~= nil and (scan_iter == box.index.EQ or scan_iter == box.index.REQ) then
sharding_key = extract_sharding_key_from_scan_value(scan_value, scan_index, sharding_index)
local sharding_key = select_plan._internal.get_sharding_key_from_scan_value(scan_value, scan_index,
scan_iter, sharding_index)

if sharding_key == nil then
sharding_key = select_plan._internal.extract_sharding_key_from_conditions(conditions, sharding_index,
space_indexes, fieldno_map)
end

local plan = {
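To make the extraction above concrete, here is a hedged, self-contained sketch of the same idea. It mirrors the logic of `select_plan._internal.extract_sharding_key_from_conditions`, but the condition and index tables below are illustrative stand-ins, not crud's real parsed condition objects:

```lua
-- Illustrative shapes only: real conditions are parsed by crud's
-- compare_conditions module and carry operator/operand/values fields.
local sharding_index = {parts = {{fieldno = 3}, {fieldno = 4}}} -- {'name', 'age'}
local fieldno_map    = {id = 1, bucket_id = 2, name = 3, age = 4}
local space_indexes  = {age = {parts = {{fieldno = 4}}}}

local conditions = {
    {operator = '==', operand = 'age', values = {58}},
    {operator = '==', operand = 'name', values = {'Viktor Pelevin'}},
}

-- Collect field values from equality conditions. If an operand names an
-- index, it may fill several fields at once; otherwise it is treated as
-- a field name (an operand that is both is interpreted as an index name).
local filled_fields = {}
for _, cond in ipairs(conditions) do
    if cond.operator == '==' then
        local index = space_indexes[cond.operand]
        if index ~= nil then
            for i, part in ipairs(index.parts) do
                if filled_fields[part.fieldno] == nil then
                    filled_fields[part.fieldno] = cond.values[i]
                end
            end
        else
            local fieldno = fieldno_map[cond.operand]
            if fieldno ~= nil then
                filled_fields[fieldno] = cond.values[1]
            end
        end
    end
end

-- The sharding key can be used only if every one of its parts is filled.
local sharding_key = {}
for i, part in ipairs(sharding_index.parts) do
    if filled_fields[part.fieldno] == nil then
        sharding_key = nil
        break
    end
    sharding_key[i] = filled_fields[part.fieldno]
end
-- sharding_key is {'Viktor Pelevin', 58}: a single storage call is possible.
```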
33 changes: 33 additions & 0 deletions test/entrypoint/srv_ddl.lua
@@ -61,6 +61,14 @@ package.preload['customers-storage'] = function()
{path = 'name', is_nullable = false, type = 'string'},
},
}
local age_index = {
name = 'age',
type = 'TREE',
unique = false,
parts = {
{path = 'age', is_nullable = false, type = 'number'},
},
}
local secondary_index = {
name = 'secondary',
type = 'TREE',
@@ -71,6 +79,17 @@
},
}

local three_fields_index = {
name = 'three_fields',
type = 'TREE',
unique = false,
parts = {
{path = 'age', is_nullable = false, type = 'number'},
{path = 'name', is_nullable = false, type = 'string'},
{path = 'id', is_nullable = false, type = 'unsigned'},
},
}

local customers_name_key_schema = table.deepcopy(customers_schema)
customers_name_key_schema.sharding_key = {'name'}
table.insert(customers_name_key_schema.indexes, primary_index)
@@ -100,13 +119,27 @@ package.preload['customers-storage'] = function()
table.insert(customers_age_key_schema.indexes, primary_index)
table.insert(customers_age_key_schema.indexes, bucket_id_index)

local customers_name_age_key_different_indexes_schema = table.deepcopy(customers_schema)
customers_name_age_key_different_indexes_schema.sharding_key = {'name', 'age'}
table.insert(customers_name_age_key_different_indexes_schema.indexes, primary_index)
table.insert(customers_name_age_key_different_indexes_schema.indexes, bucket_id_index)
table.insert(customers_name_age_key_different_indexes_schema.indexes, age_index)

local customers_name_age_key_three_fields_index_schema = table.deepcopy(customers_schema)
customers_name_age_key_three_fields_index_schema.sharding_key = {'name', 'age'}
table.insert(customers_name_age_key_three_fields_index_schema.indexes, primary_index_id)
table.insert(customers_name_age_key_three_fields_index_schema.indexes, bucket_id_index)
table.insert(customers_name_age_key_three_fields_index_schema.indexes, three_fields_index)

local schema = {
spaces = {
customers_name_key = customers_name_key_schema,
customers_name_key_uniq_index = customers_name_key_uniq_index_schema,
customers_name_key_non_uniq_index = customers_name_key_non_uniq_index_schema,
customers_secondary_idx_name_key = customers_secondary_idx_name_key_schema,
customers_age_key = customers_age_key_schema,
customers_name_age_key_different_indexes = customers_name_age_key_different_indexes_schema,
customers_name_age_key_three_fields_index = customers_name_age_key_three_fields_index_schema,
}
}

12 changes: 12 additions & 0 deletions test/helpers/storage_stat.lua
@@ -95,4 +95,16 @@ function storage_stat.diff(a, b)
return diff
end

-- Accepts the return value of collect() (or diff()) and returns
-- the total number of select requests across all storages.
function storage_stat.total(stats)
local total = 0

for _, stat in pairs(stats) do
total = total + (stat.select_requests or 0)
end

return total
end

return storage_stat
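
A brief usage note for the new helper, mirroring how the integration tests below use it (the require path and luatest fixtures are assumptions based on the existing test helpers):

```lua
-- Sketch of a test body: compare storage-side select() counters taken
-- before and after a router call to detect an unwanted map-reduce.
local t = require('luatest')
local storage_stat = require('test.helpers.storage_stat')

local before = storage_stat.collect(g.cluster)
-- ... call crud.select() on the router here ...
local after = storage_stat.collect(g.cluster)

-- A total of one select() across all storages means no map-reduce happened.
t.assert_equals(storage_stat.total(storage_stat.diff(after, before)), 1)
```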
101 changes: 82 additions & 19 deletions test/integration/ddl_sharding_key_test.lua
@@ -51,6 +51,8 @@ pgroup.before_each(function(g)
helpers.truncate_space_on_cluster(g.cluster, 'customers_name_key_non_uniq_index')
helpers.truncate_space_on_cluster(g.cluster, 'customers_secondary_idx_name_key')
helpers.truncate_space_on_cluster(g.cluster, 'customers_age_key')
helpers.truncate_space_on_cluster(g.cluster, 'customers_name_age_key_different_indexes')
helpers.truncate_space_on_cluster(g.cluster, 'customers_name_age_key_three_fields_index')
end)

pgroup.test_insert_object = function(g)
@@ -279,13 +281,7 @@ pgroup.test_select = function(g)
t.assert_equals(result.rows[1], tuple)
end

-- TODO: After enabling support of sharding keys that are not equal to primary
-- keys, we should handle it differently: it is not enough to look just on scan
-- value, we should traverse all conditions. Now missed cases lead to
-- map-reduce. Will be resolved in #213.
pgroup.test_select_wont_lead_map_reduce = function(g)
local space_name = 'customers_name_key_uniq_index'

local prepare_data_name_sharding_key = function(g, space_name)
local conn_s1 = g.cluster:server('s1-master').net_box
local conn_s2 = g.cluster:server('s2-master').net_box

@@ -301,12 +297,85 @@ pgroup.test_select_wont_lead_map_reduce = function(g)
-- bucket_id is 1161, storage is s-2
local result = conn_s2.space[space_name]:insert({4, 1161, 'James Joyce', 59})
t.assert_not_equals(result, nil)
end

local prepare_data_name_age_sharding_key = function(g, space_name)
local conn_s1 = g.cluster:server('s1-master').net_box
local conn_s2 = g.cluster:server('s2-master').net_box

-- bucket_id is 2310, storage is s-1
local result = conn_s1.space[space_name]:insert({1, 2310, 'Viktor Pelevin', 58})
t.assert_not_equals(result, nil)
-- bucket_id is 63, storage is s-2
local result = conn_s2.space[space_name]:insert({2, 63, 'Isaac Asimov', 72})
t.assert_not_equals(result, nil)
-- bucket_id is 2901, storage is s-1
local result = conn_s1.space[space_name]:insert({3, 2901, 'Aleksandr Solzhenitsyn', 89})
t.assert_not_equals(result, nil)
-- bucket_id is 1365, storage is s-2
local result = conn_s2.space[space_name]:insert({4, 1365, 'James Joyce', 59})
t.assert_not_equals(result, nil)
end

local cases = {
select_for_indexed_sharding_key = {
space_name = 'customers_name_key_uniq_index',
prepare_data = prepare_data_name_sharding_key,
conditions = {{'==', 'name', 'Viktor Pelevin'}},
},
select_for_sharding_key_as_index_part = {
space_name = 'customers_name_key',
prepare_data = prepare_data_name_sharding_key,
conditions = {{'==', 'name', 'Viktor Pelevin'}},
},
select_for_sharding_key_as_several_indexes_parts = {
space_name = 'customers_name_age_key_different_indexes',
prepare_data = prepare_data_name_age_sharding_key,
conditions = {{'==', 'name', 'Viktor Pelevin'}, {'==', 'age', 58}},
},
select_by_index_cond_for_sharding_key_as_several_indexes_parts = {
space_name = 'customers_name_age_key_different_indexes',
prepare_data = prepare_data_name_age_sharding_key,
conditions = {{'==', 'id', {1, 'Viktor Pelevin'}}, {'==', 'age', 58}},
},
select_by_partial_index_cond_for_sharding_key_included = {
space_name = 'customers_name_age_key_three_fields_index',
prepare_data = prepare_data_name_age_sharding_key,
conditions = {{'==', 'three_fields', {58, 'Viktor Pelevin', nil}}},
},
}

for name, case in pairs(cases) do
pgroup[('test_%s_wont_lead_to_map_reduce'):format(name)] = function(g)
case.prepare_data(g, case.space_name)

local stat_a = storage_stat.collect(g.cluster)

local result, err = g.cluster.main_server.net_box:call('crud.select', {
case.space_name, case.conditions
})
t.assert_equals(err, nil)
t.assert_not_equals(result, nil)
t.assert_equals(#result.rows, 1)

local stat_b = storage_stat.collect(g.cluster)

-- Check the number of select() requests made by CRUD on the cluster's storages
-- after calling select() on a router. Make sure only a single storage received
-- a single select() request; otherwise the select has led to a map-reduce.
local stats = storage_stat.diff(stat_b, stat_a)
t.assert_equals(storage_stat.total(stats), 1, 'Select request was not a map reduce')
end
end

pgroup.test_select_for_part_of_sharding_key_will_lead_to_map_reduce = function(g)
local space_name = 'customers_name_age_key_different_indexes'
prepare_data_name_age_sharding_key(g, space_name)

local stat_a = storage_stat.collect(g.cluster)

-- Select a tuple with name 'Viktor Pelevin'.
local result, err = g.cluster.main_server.net_box:call('crud.select', {
space_name, {{'==', 'name', 'Viktor Pelevin'}}
space_name, {{'==', 'age', 58}},
})
t.assert_equals(err, nil)
t.assert_not_equals(result, nil)
@@ -315,16 +384,10 @@ pgroup.test_select_wont_lead_map_reduce = function(g)
local stat_b = storage_stat.collect(g.cluster)

-- Check the number of select() requests made by CRUD on the cluster's storages
-- after calling select() on a router. Make sure only a single storage has
-- a single select() request. Otherwise we lead map-reduce.
t.assert_equals(storage_stat.diff(stat_b, stat_a), {
['s-1'] = {
select_requests = 0,
},
['s-2'] = {
select_requests = 1,
},
})
-- after calling select() on a router. Make sure it was a map-reduce,
-- since the conditions do not cover all sharding key fields.
local stats = storage_stat.diff(stat_b, stat_a)
t.assert_equals(storage_stat.total(stats), 2, 'Select request was a map reduce')
end

pgroup.test_select_secondary_idx = function(g)
