diff --git a/test/luatest_helpers/server.lua b/test/luatest_helpers/server.lua
index 714c5372..eb839dc9 100644
--- a/test/luatest_helpers/server.lua
+++ b/test/luatest_helpers/server.lua
@@ -160,6 +160,21 @@ function Server:instance_uuid()
     return uuid
 end
 
+function Server:replicaset_uuid()
+    -- Cache the value when it is found the first time.
+    if self.replicaset_uuid_value then
+        return self.replicaset_uuid_value
+    end
+    local uuid = self:exec(function() return box.info.cluster.uuid end)
+    if uuid == nil then
+        -- Probably didn't bootstrap yet. Listen starts before the replicaset
+        -- UUID is assigned.
+        return nil
+    end
+    self.replicaset_uuid_value = uuid
+    return uuid
+end
+
 -- TODO: Add the 'wait_for_readiness' parameter for the restart()
 -- method.
 
@@ -192,6 +207,7 @@ function Server:cleanup()
     end
     self.instance_id_value = nil
     self.instance_uuid_value = nil
+    self.replicaset_uuid_value = nil
 end
 
 function Server:drop()
diff --git a/test/luatest_helpers/vtest.lua b/test/luatest_helpers/vtest.lua
index 4f55cec2..e5da876b 100644
--- a/test/luatest_helpers/vtest.lua
+++ b/test/luatest_helpers/vtest.lua
@@ -21,13 +21,9 @@ end
 local function config_new(templ)
     local res = table.deepcopy(templ)
     local sharding = {}
-    local meta = {replicasets = {}}
     res.sharding = sharding
     for i, replicaset_templ in pairs(templ.sharding) do
         local replicaset_uuid = uuid_next()
-        meta.replicasets[i] = {
-            uuid = replicaset_uuid
-        }
         local replicas = {}
         local replicaset = table.deepcopy(replicaset_templ)
         replicaset.replicas = replicas
@@ -40,7 +36,7 @@ local function config_new(templ)
         end
         sharding[replicaset_uuid] = replicaset
     end
-    return res, meta
+    return res
 end
 
 --
diff --git a/test/router-luatest/router_test.lua b/test/router-luatest/router_test.lua
index 187a43b3..988a80c1 100644
--- a/test/router-luatest/router_test.lua
+++ b/test/router-luatest/router_test.lua
@@ -4,7 +4,7 @@ local vutil = require('vshard.util')
 
 local wait_timeout = 120
 local g = t.group('router')
-local cluster_cfg, cfg_meta = vtest.config_new({
+local cluster_cfg = vtest.config_new({
     sharding = {
         {
             replicas = {
@@ -206,9 +206,11 @@
             err = err,
         }
     end, {wait_timeout})
+    local rs1_uuid = g.replica_1_a:replicaset_uuid()
+    local rs2_uuid = g.replica_2_a:replicaset_uuid()
     local expected = {
-        [cfg_meta.replicasets[1].uuid] = {{1, 3}},
-        [cfg_meta.replicasets[2].uuid] = {{2, 3}},
+        [rs1_uuid] = {{1, 3}},
+        [rs2_uuid] = {{2, 3}},
     }
     t.assert_equals(res.val, expected, 'map callrw success')
     t.assert_equals(res.map_type, 'userdata', 'values are msgpacks')
@@ -226,7 +228,7 @@
                                         return_raw = true})
     end, {wait_timeout})
     expected = {
-        [cfg_meta.replicasets[1].uuid] = {{1}},
+        [rs1_uuid] = {{1}},
     }
     t.assert_equals(res, expected, 'map callrw without one value success')
     --
@@ -248,7 +250,7 @@
         type = 'ClientError',
         message = 'map_err'
     }, 'error object')
-    t.assert_equals(err_uuid, cfg_meta.replicasets[2].uuid, 'error uuid')
+    t.assert_equals(err_uuid, rs2_uuid, 'error uuid')
     --
     -- Cleanup.
     --