PBM. Add test to check user restoration during PITR restore from selective / full backup #204
GitHub Actions / JUnit Test Report
failed
Dec 12, 2024 in 0s
49 tests run, 40 passed, 8 skipped, 1 failed.
Annotations
Check failure on line 74 in pbm-functional/pytest/test_user_roles.py
github-actions / JUnit Test Report
test_user_roles.test_logical_PBM_T216[full_bck]
pymongo.errors.ServerSelectionTimeoutError: No replica set members match selector "Primary()", Timeout: 30s, Topology Description: <TopologyDescription id: 675a43ad3ebd0c5033f46620, topology_type: ReplicaSetNoPrimary, servers: [<ServerDescription ('rs101', 27017) server_type: Unknown, rtt: None>, <ServerDescription ('rs102', 27017) server_type: RSSecondary, rtt: 0.0005587551916809172>, <ServerDescription ('rs103', 27017) server_type: RSSecondary, rtt: 0.00045299083186773356>]>
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f47cd90bc10>
newcluster = <cluster.Cluster object at 0x7f47cd908a90>
restore_type = 'full_bck'
    @pytest.mark.parametrize('restore_type',['part_bck','full_bck_part_rst_wo_user','full_bck_part_rst_user','full_bck'])
    @pytest.mark.timeout(350, func_only=True)
    def test_logical_PBM_T216(start_cluster, cluster, newcluster, restore_type):
        cluster.check_pbm_status()
        client = pymongo.MongoClient(cluster.connection)
        client_shard = pymongo.MongoClient("mongodb://root:root@rs101:27017/")
        client.admin.command({"enableSharding": "test_db1", "primaryShard": "rs1"})
        client.admin.command({"enableSharding": "test_db2", "primaryShard": "rs2"})
        client.admin.command("shardCollection", "test_db1.test_coll11", key={"_id": "hashed"})
        client.admin.command('updateUser', 'pbm_test', pwd='pbmpass_test2')
        client.admin.command('createUser', 'admin_random_user1', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
        client_shard.admin.command('createUser', 'admin_random_user2', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
        client.test_db1.command('createUser', 'test_random_user1', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
        client_shard.test_db1.command('createUser', 'test_random_user2', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
        for i in range(10):
            client["test_db1"]["test_coll11"].insert_one({"key": i, "data": i})
            client["test_db2"]["test_coll21"].insert_one({"key": i, "data": i})
        backup_full = cluster.make_backup("logical")
        backup_partial = cluster.make_backup("logical --ns=test_db1.*,test_db2.*")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        client.admin.command('createUser', 'admin_random_user3', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
        client_shard.admin.command('createUser', 'admin_random_user4', pwd='test123', roles=[{'role':'readWrite','db':'admin'}, 'userAdminAnyDatabase', 'clusterAdmin'])
        client.test_db1.command('createUser', 'test_random_user3', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
        client_shard.test_db1.command('createUser', 'test_random_user4', pwd='test123', roles=[{'role':'readWrite','db':'test_db1'}, {'role':'clusterManager','db':'admin'}])
        for i in range(10):
            client["test_db1"]["test_coll11"].insert_one({"key": i+10, "data": i+10})
            client["test_db2"]["test_coll21"].insert_one({"key": i+10, "data": i+10})
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        cluster.disable_pitr(pitr)
        pitr = " --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        client.drop_database("test_db1")
        client.drop_database("test_db2")
        client.admin.command("dropUser", "admin_random_user1")
        client_shard.admin.command("dropUser", "admin_random_user2")
        client.admin.command("dropUser", "admin_random_user3")
        client_shard.admin.command("dropUser", "admin_random_user4")
        client.test_db1.command("dropUser", "test_random_user1")
        client_shard.test_db1.command("dropUser", "test_random_user2")
        client.test_db1.command("dropUser", "test_random_user3")
        client_shard.test_db1.command("dropUser", "test_random_user4")
        # restoring users and roles from selective backup is not supported
        restore_commands = {
            'part_bck': " --base-snapshot=" + backup_partial + pitr,
            'full_bck_part_rst_wo_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.*",
            'full_bck_part_rst_user': " --base-snapshot=" + backup_full + pitr + " --ns=test_db1.*,test_db2.* --with-users-and-roles",
            'full_bck': " --base-snapshot=" + backup_full + pitr
        }
        # re-create cluster with a new PBM user for the connection to check that restore and connection to DB are OK
        # despite the same user with a different password being present in the backup
        if restore_type == 'full_bck':
            cluster.destroy()
            newcluster.create()
            newcluster.setup_pbm()
            newcluster.check_pbm_status()
            newcluster.make_restore(restore_commands.get(restore_type), check_pbm_status=True)
        else:
            cluster.make_restore(restore_commands.get(restore_type), check_pbm_status=True)
        assert client["test_db1"]["test_coll11"].count_documents({}) == 20
        assert client["test_db1"].command("collstats", "test_coll11").get("sharded", False)
        assert client["test_db2"]["test_coll21"].count_documents({}) == 20
        assert client["test_db2"].command("collstats", "test_coll21").get("sharded", True) is False
        assert check_user(client, "admin", "admin_random_user1", {'readWrite', 'userAdminAnyDatabase', 'clusterAdmin'}) == \
            (restore_type == 'full_bck'), \
            f"Failed for {restore_type}: admin_random_user1 role mismatch"
>       assert check_user(client_shard, "admin", "admin_random_user2", {'readWrite', 'userAdminAnyDatabase', 'clusterAdmin'}) == \
            (restore_type == 'full_bck'), \
            f"Failed for {restore_type}: admin_random_user2 role mismatch"
test_user_roles.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
test_user_roles.py:74: in check_user
    db_query = client.db.command({"usersInfo": {"user": username, "db": db_name}})
/usr/local/lib/python3.11/site-packages/pymongo/_csot.py:119: in csot_wrapper
    return func(self, *args, **kwargs)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/database.py:926: in command
    with self._client._conn_for_reads(read_preference, session, operation=command_name) as (
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:1701: in _conn_for_reads
    server = self._select_server(read_preference, session, operation)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:1649: in _select_server
    server = topology.select_server(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/topology.py:398: in select_server
    server = self._select_server(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/topology.py:376: in _select_server
    servers = self.select_servers(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/topology.py:283: in select_servers
    server_descriptions = self._select_servers_loop(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <Topology <TopologyDescription id: 675a43ad3ebd0c5033f46620, topology_type: ReplicaSetNoPrimary, servers: [<ServerDesc...: 0.0005587551916809172>, <ServerDescription ('rs103', 27017) server_type: RSSecondary, rtt: 0.00045299083186773356>]>>
selector = Primary(), timeout = 30, operation = 'usersInfo', operation_id = None
address = None
    def _select_servers_loop(
        self,
        selector: Callable[[Selection], Selection],
        timeout: float,
        operation: str,
        operation_id: Optional[int],
        address: Optional[_Address],
    ) -> list[ServerDescription]:
        """select_servers() guts. Hold the lock when calling this."""
        now = time.monotonic()
        end_time = now + timeout
        logged_waiting = False
        if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG):
            _debug_log(
                _SERVER_SELECTION_LOGGER,
                message=_ServerSelectionStatusMessage.STARTED,
                selector=selector,
                operation=operation,
                operationId=operation_id,
                topologyDescription=self.description,
                clientId=self.description._topology_settings._topology_id,
            )
        server_descriptions = self._description.apply_selector(
            selector, address, custom_selector=self._settings.server_selector
        )
        while not server_descriptions:
            # No suitable servers.
            if timeout == 0 or now > end_time:
                if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG):
                    _debug_log(
                        _SERVER_SELECTION_LOGGER,
                        message=_ServerSelectionStatusMessage.FAILED,
                        selector=selector,
                        operation=operation,
                        operationId=operation_id,
                        topologyDescription=self.description,
                        clientId=self.description._topology_settings._topology_id,
                        failure=self._error_message(selector),
                    )
>               raise ServerSelectionTimeoutError(
                    f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}"
                )
E               pymongo.errors.ServerSelectionTimeoutError: No replica set members match selector "Primary()", Timeout: 30s, Topology Description: <TopologyDescription id: 675a43ad3ebd0c5033f46620, topology_type: ReplicaSetNoPrimary, servers: [<ServerDescription ('rs101', 27017) server_type: Unknown, rtt: None>, <ServerDescription ('rs102', 27017) server_type: RSSecondary, rtt: 0.0005587551916809172>, <ServerDescription ('rs103', 27017) server_type: RSSecondary, rtt: 0.00045299083186773356>]>
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/topology.py:333: ServerSelectionTimeoutError
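Note on the failure: in the full_bck case the cluster is destroyed and re-created before the restore, and the topology description in the error shows rs101 as Unknown with no primary elected (ReplicaSetNoPrimary). The usersInfo command issued by check_user through client_shard therefore cannot select a primary within pymongo's 30s server-selection timeout. Below is a minimal sketch of a guard that could run before the role assertions; the helper name, URI, timeout, and poll interval are illustrative assumptions, not part of the existing test.

import time

import pymongo
from pymongo.errors import PyMongoError

def wait_for_primary(uri, timeout=60):
    """Poll a replica set until a writable primary is reported or the timeout expires.

    Hypothetical helper: the URI, timeout, and poll interval are assumptions,
    not values taken from the failing test.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            client = pymongo.MongoClient(uri, serverSelectionTimeoutMS=5000)
            # "hello" reports isWritablePrimary once an election has completed
            hello = client.admin.command("hello")
            if hello.get("isWritablePrimary") or hello.get("ismaster"):
                return client
        except PyMongoError:
            pass  # no primary reachable yet; keep polling
        time.sleep(1)
    raise TimeoutError(f"no primary elected within {timeout}s for {uri}")

# Possible usage before the role checks (connection string taken from the test):
# client_shard = wait_for_primary("mongodb://root:root@rs101:27017/")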