
PBM-1422 test selective restore with name remapping #182

GitHub Actions / JUnit Test Report failed Nov 13, 2024 in 0s

40 tests run, 28 passed, 4 skipped, 8 failed.

Annotations

Check failure on line 71 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_crud_PBM_T270[inserts]

AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
 +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'inserts'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_crud_PBM_T270(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            client["test"]["inserts"].insert_one({"key": i+10, "data": i+10})
            client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10})
            client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }})
            client["test"]["deletes"].delete_one({"key": i})
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
>       assert "cloning namespace (--ns-to) is already in use" in result.stderr
E       AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
E        +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr

test_rename_replicaset.py:71: AssertionError
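
The test expects the restore to be rejected with "cloning namespace (--ns-to) is already in use", but PBM 2.7.0 rejects the combination earlier, while parsing the options, with "cloning with restore to the point-in-time is not possible". A minimal sketch of how the final assertions could accept either message, assuming the surrounding test context (cluster, pitr, collection) and that the intent is only that a PITR restore combined with namespace cloning must fail; this is illustrative, not the repository's actual fix:

    backup_to_fail = pitr + " --ns-from=test." + collection + " --ns-to=test.test"
    result = cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
    assert result.rc != 0
    # Accept both the namespace-validation error and the newer parse-time error
    # that PBM 2.7.0 emits for PITR restores combined with --ns-from/--ns-to.
    expected = (
        "cloning namespace (--ns-to) is already in use",
        "cloning with restore to the point-in-time is not possible",
    )
    assert any(msg in result.stderr for msg in expected), result.stderr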

Check failure on line 71 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_crud_PBM_T270[replaces]

AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
 +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'replaces'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_crud_PBM_T270(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            client["test"]["inserts"].insert_one({"key": i+10, "data": i+10})
            client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10})
            client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }})
            client["test"]["deletes"].delete_one({"key": i})
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
>       assert "cloning namespace (--ns-to) is already in use" in result.stderr
E       AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
E        +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr

test_rename_replicaset.py:71: AssertionError

Check failure on line 71 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_crud_PBM_T270[updates]

AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
 +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'updates'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_crud_PBM_T270(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            client["test"]["inserts"].insert_one({"key": i+10, "data": i+10})
            client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10})
            client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }})
            client["test"]["deletes"].delete_one({"key": i})
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
>       assert "cloning namespace (--ns-to) is already in use" in result.stderr
E       AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
E        +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr

test_rename_replicaset.py:71: AssertionError

Check failure on line 52 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_crud_PBM_T270[deletes]

AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}

2024-11-13T10:56:26Z I [rs1/rs101:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:56:26Z I [rs1/rs101:27017] starting PITR routine
2024-11-13T10:56:26Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:26Z I [rs1/rs102:27017] starting PITR routine
2024-11-13T10:56:26Z I [rs1/rs101:27017] node: rs1/rs101:27017
2024-11-13T10:56:26Z I [rs1/rs102:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:56:26Z I [rs1/rs103:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:56:26Z I [rs1/rs103:27017] starting PITR routine
2024-11-13T10:56:26Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:56:26Z I [rs1/rs103:27017] node: rs1/rs103:27017
2024-11-13T10:56:26Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:26Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:26Z I [rs1/rs102:27017] node: rs1/rs102:27017
2024-11-13T10:56:26Z I [rs1/rs101:27017] listening for the commands
2024-11-13T10:56:26Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:56:26Z I [rs1/rs103:27017] listening for the commands
2024-11-13T10:56:26Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:56:26Z I [rs1/rs102:27017] listening for the commands
2024-11-13T10:56:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:56:33Z I [rs1/rs101:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
2024-11-13T10:56:33Z I [rs1/rs103:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
2024-11-13T10:56:33Z I [rs1/rs101:27017] got epoch {1731495391 6}
2024-11-13T10:56:33Z I [rs1/rs103:27017] got epoch {1731495391 6}
2024-11-13T10:56:33Z I [rs1/rs102:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
2024-11-13T10:56:33Z D [rs1/rs103:27017] [resync] lock not acquired
2024-11-13T10:56:33Z I [rs1/rs102:27017] got epoch {1731495391 6}
2024-11-13T10:56:33Z I [rs1/rs101:27017] [resync] started
2024-11-13T10:56:33Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] got backups list: 0
2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] got physical restores list: 0
2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] epoch set to {1731495393 14}
2024-11-13T10:56:33Z I [rs1/rs101:27017] [resync] succeed
2024-11-13T10:56:38Z I [rs1/rs101:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
2024-11-13T10:56:38Z I [rs1/rs103:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
2024-11-13T10:56:38Z I [rs1/rs101:27017] got epoch {1731495393 14}
2024-11-13T10:56:38Z I [rs1/rs103:27017] got epoch {1731495393 14}
2024-11-13T10:56:38Z I [rs1/rs102:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
2024-11-13T10:56:38Z I [rs1/rs102:27017] got epoch {1731495393 14}
2024-11-13T10:56:38Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T10:56:38Z I [rs1/rs103:27017] [resync] started
2024-11-13T10:56:38Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
2024-11-13T10:56:38Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T10:56:38Z I [rs1/rs101:27017] got epoch {1731495393 14}
2024-11-13T10:56:38Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] got backups list: 0
2024-11-13T10:56:38Z I [rs1/rs102:27017] got epoch {1731495393 14}
2024-11-13T10:56:38Z E [rs1/rs101:27017] [backup/2024-11-13T10:56:38Z] unable to proceed with the backup, active lock is present
2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] got physical restores list: 0
2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] epoch set to {1731495398 67}
2024-11-13T10:56:38Z I [rs1/rs103:27017] [resync] succeed
2024-11-13T10:56:38Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
2024-11-13T10:56:38Z I [rs1/rs103:27017] got epoch {1731495398 67}
2024-11-13T10:56:53Z D [rs1/rs102:27017] [backup/2024-11-13T10:56:38Z] nomination timeout
2024-11-13T10:56:53Z D [rs1/rs102:27017] [backup/2024-11-13T10:56:38Z] skip after nomination, probably started by another node
2024-11-13T10:56:53Z D [rs1/rs103:27017] [backup/2024-11-13T10:56:38Z] nomination timeout
2024-11-13T10:56:53Z D [rs1/rs103:27017] [backup/2024-11-13T10:56:38Z] skip after nomination, probably started by another node
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'deletes'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_crud_PBM_T270(start_cluster,cluster,collection):
        cluster.check_pbm_status()
>       cluster.make_backup("logical")

test_rename_replicaset.py:52: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7f6447bba290>, type = 'logical'

    def make_backup(self, type):
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 120
        while True:
            running = self.get_status()['running']
            Cluster.log("Current operation: " + str(running))
            if not running:
                if type:
                    start = n.run(
                        'pbm backup --out=json --type=' + type)
                else:
                    start = n.run('pbm backup --out=json')
                if start.rc == 0:
                    name = json.loads(start.stdout)['name']
                    Cluster.log("Backup started")
                    break
                elif "resync" in start.stdout:
                    Cluster.log("Resync in progress, retrying: " + start.stdout)
                else:
                    logs = n.check_output("pbm logs -sD -t0")
>                   assert False, "Backup failed" + start.stdout + start.stderr + '\n' + logs
E                   AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}
E                   
E                   2024-11-13T10:56:26Z I [rs1/rs101:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:56:26Z I [rs1/rs101:27017] starting PITR routine
E                   2024-11-13T10:56:26Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:26Z I [rs1/rs102:27017] starting PITR routine
E                   2024-11-13T10:56:26Z I [rs1/rs101:27017] node: rs1/rs101:27017
E                   2024-11-13T10:56:26Z I [rs1/rs102:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:56:26Z I [rs1/rs103:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:56:26Z I [rs1/rs103:27017] starting PITR routine
E                   2024-11-13T10:56:26Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:56:26Z I [rs1/rs103:27017] node: rs1/rs103:27017
E                   2024-11-13T10:56:26Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:26Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:26Z I [rs1/rs102:27017] node: rs1/rs102:27017
E                   2024-11-13T10:56:26Z I [rs1/rs101:27017] listening for the commands
E                   2024-11-13T10:56:26Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:56:26Z I [rs1/rs103:27017] listening for the commands
E                   2024-11-13T10:56:26Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:56:26Z I [rs1/rs102:27017] listening for the commands
E                   2024-11-13T10:56:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:56:33Z I [rs1/rs101:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
E                   2024-11-13T10:56:33Z I [rs1/rs103:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
E                   2024-11-13T10:56:33Z I [rs1/rs101:27017] got epoch {1731495391 6}
E                   2024-11-13T10:56:33Z I [rs1/rs103:27017] got epoch {1731495391 6}
E                   2024-11-13T10:56:33Z I [rs1/rs102:27017] got command resync <ts: 1731495392>, opid: 673485e0a55d4255536d667d
E                   2024-11-13T10:56:33Z D [rs1/rs103:27017] [resync] lock not acquired
E                   2024-11-13T10:56:33Z I [rs1/rs102:27017] got epoch {1731495391 6}
E                   2024-11-13T10:56:33Z I [rs1/rs101:27017] [resync] started
E                   2024-11-13T10:56:33Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
E                   2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] got backups list: 0
E                   2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] got physical restores list: 0
E                   2024-11-13T10:56:33Z D [rs1/rs101:27017] [resync] epoch set to {1731495393 14}
E                   2024-11-13T10:56:33Z I [rs1/rs101:27017] [resync] succeed
E                   2024-11-13T10:56:38Z I [rs1/rs101:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
E                   2024-11-13T10:56:38Z I [rs1/rs101:27017] got epoch {1731495393 14}
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] got epoch {1731495393 14}
E                   2024-11-13T10:56:38Z I [rs1/rs102:27017] got command resync <ts: 1731495397>, opid: 673485e5036baaaf52c19cae
E                   2024-11-13T10:56:38Z I [rs1/rs102:27017] got epoch {1731495393 14}
E                   2024-11-13T10:56:38Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] [resync] started
E                   2024-11-13T10:56:38Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
E                   2024-11-13T10:56:38Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T10:56:38Z I [rs1/rs101:27017] got epoch {1731495393 14}
E                   2024-11-13T10:56:38Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
E                   2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] got backups list: 0
E                   2024-11-13T10:56:38Z I [rs1/rs102:27017] got epoch {1731495393 14}
E                   2024-11-13T10:56:38Z E [rs1/rs101:27017] [backup/2024-11-13T10:56:38Z] unable to proceed with the backup, active lock is present
E                   2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] got physical restores list: 0
E                   2024-11-13T10:56:38Z D [rs1/rs103:27017] [resync] epoch set to {1731495398 67}
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] [resync] succeed
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:56:38Z, compression: none (level: default)] <ts: 1731495398>, opid: 673485e6cfad5d68b982cf79
E                   2024-11-13T10:56:38Z I [rs1/rs103:27017] got epoch {1731495398 67}
E                   2024-11-13T10:56:53Z D [rs1/rs102:27017] [backup/2024-11-13T10:56:38Z] nomination timeout
E                   2024-11-13T10:56:53Z D [rs1/rs102:27017] [backup/2024-11-13T10:56:38Z] skip after nomination, probably started by another node
E                   2024-11-13T10:56:53Z D [rs1/rs103:27017] [backup/2024-11-13T10:56:38Z] nomination timeout
E                   2024-11-13T10:56:53Z D [rs1/rs103:27017] [backup/2024-11-13T10:56:38Z] skip after nomination, probably started by another node

cluster.py:393: AssertionError
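
The deletes run failed one step earlier: pbm backup itself returned {"Error":"get backup metadata: get: context deadline exceeded"} while the agent log shows the nominated node refusing the backup because a resync still held the lock ("unable to proceed with the backup, active lock is present"). A minimal retry sketch for the make_backup helper shown above, assuming these lock/metadata-timeout errors are transient and that n, type, timeout, and Cluster.log are in scope exactly as in cluster.py; this is illustrative, not the repository's actual fix:

    # Hypothetical: also treat lock/metadata-timeout errors as transient and retry
    # until the existing 120-second deadline, instead of failing immediately.
    transient = ("resync", "active lock is present", "context deadline exceeded")
    while True:
        start = n.run('pbm backup --out=json --type=' + type)
        if start.rc == 0:
            name = json.loads(start.stdout)['name']
            Cluster.log("Backup started")
            break
        elif any(marker in start.stdout + start.stderr for marker in transient):
            assert time.time() < timeout, "Backup retries timed out: " + start.stdout
            Cluster.log("Transient error, retrying: " + start.stdout)
            time.sleep(1)
        else:
            logs = n.check_output("pbm logs -sD -t0")
            assert False, "Backup failed" + start.stdout + start.stderr + '\n' + logs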

Check failure on line 99 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_with_txn_PBM_T271[inserts]

AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}

2024-11-13T10:57:31Z I [rs1/rs102:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:57:31Z I [rs1/rs102:27017] starting PITR routine
2024-11-13T10:57:31Z I [rs1/rs103:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:57:31Z I [rs1/rs103:27017] starting PITR routine
2024-11-13T10:57:31Z I [rs1/rs103:27017] node: rs1/rs103:27017
2024-11-13T10:57:31Z I [rs1/rs102:27017] node: rs1/rs102:27017
2024-11-13T10:57:31Z I [rs1/rs101:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T10:57:31Z I [rs1/rs101:27017] starting PITR routine
2024-11-13T10:57:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:31Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:57:31Z I [rs1/rs101:27017] node: rs1/rs101:27017
2024-11-13T10:57:31Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:57:31Z I [rs1/rs102:27017] listening for the commands
2024-11-13T10:57:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:31Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T10:57:31Z I [rs1/rs103:27017] listening for the commands
2024-11-13T10:57:31Z I [rs1/rs101:27017] listening for the commands
2024-11-13T10:57:36Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:36Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:36Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T10:57:38Z I [rs1/rs102:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
2024-11-13T10:57:38Z I [rs1/rs103:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
2024-11-13T10:57:38Z I [rs1/rs103:27017] got epoch {1731495456 6}
2024-11-13T10:57:38Z I [rs1/rs102:27017] got epoch {1731495456 6}
2024-11-13T10:57:38Z I [rs1/rs101:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
2024-11-13T10:57:38Z I [rs1/rs101:27017] got epoch {1731495456 6}
2024-11-13T10:57:38Z I [rs1/rs103:27017] [resync] started
2024-11-13T10:57:38Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T10:57:38Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] got backups list: 0
2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] got physical restores list: 0
2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] epoch set to {1731495458 14}
2024-11-13T10:57:38Z I [rs1/rs103:27017] [resync] succeed
2024-11-13T10:57:43Z I [rs1/rs103:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:43Z I [rs1/rs102:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:43Z I [rs1/rs103:27017] got epoch {1731495458 14}
2024-11-13T10:57:43Z I [rs1/rs102:27017] got epoch {1731495458 14}
2024-11-13T10:57:43Z I [rs1/rs101:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:43Z I [rs1/rs101:27017] got epoch {1731495458 14}
2024-11-13T10:57:43Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T10:57:43Z I [rs1/rs103:27017] [resync] started
2024-11-13T10:57:43Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:43Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T10:57:43Z I [rs1/rs102:27017] got epoch {1731495458 14}
2024-11-13T10:57:43Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] got backups list: 0
2024-11-13T10:57:43Z I [rs1/rs101:27017] got epoch {1731495458 14}
2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] got physical restores list: 0
2024-11-13T10:57:43Z E [rs1/rs101:27017] [backup/2024-11-13T10:57:43Z] unable to proceed with the backup, active lock is present
2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] epoch set to {1731495463 78}
2024-11-13T10:57:43Z I [rs1/rs103:27017] [resync] succeed
2024-11-13T10:57:43Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:43Z I [rs1/rs103:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z I [rs1/rs102:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:44Z I [rs1/rs102:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z I [rs1/rs101:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:44Z I [rs1/rs101:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z D [rs1/rs102:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
2024-11-13T10:57:44Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T10:57:44Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:44Z I [rs1/rs102:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z D [rs1/rs101:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
2024-11-13T10:57:44Z I [rs1/rs103:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
2024-11-13T10:57:44Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T10:57:44Z I [rs1/rs103:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:44Z I [rs1/rs101:27017] got epoch {1731495463 78}
2024-11-13T10:57:44Z E [rs1/rs101:27017] [backup/2024-11-13T10:57:43Z] unable to proceed with the backup, active lock is present
2024-11-13T10:57:44Z D [rs1/rs103:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
2024-11-13T10:57:44Z D [rs1/rs103:27017] [resync] lock not acquired
2024-11-13T10:57:44Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
2024-11-13T10:57:44Z I [rs1/rs103:27017] got epoch {1731495463 78}
2024-11-13T10:57:58Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
2024-11-13T10:57:58Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
2024-11-13T10:57:58Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
2024-11-13T10:57:58Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
2024-11-13T10:57:59Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
2024-11-13T10:57:59Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
2024-11-13T10:57:59Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
2024-11-13T10:57:59Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'inserts'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_with_txn_PBM_T271(start_cluster,cluster,collection):
        cluster.check_pbm_status()
>       cluster.make_backup("logical")

test_rename_replicaset.py:99: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7f6447bba290>, type = 'logical'

    def make_backup(self, type):
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 120
        while True:
            running = self.get_status()['running']
            Cluster.log("Current operation: " + str(running))
            if not running:
                if type:
                    start = n.run(
                        'pbm backup --out=json --type=' + type)
                else:
                    start = n.run('pbm backup --out=json')
                if start.rc == 0:
                    name = json.loads(start.stdout)['name']
                    Cluster.log("Backup started")
                    break
                elif "resync" in start.stdout:
                    Cluster.log("Resync in progress, retrying: " + start.stdout)
                else:
                    logs = n.check_output("pbm logs -sD -t0")
>                   assert False, "Backup failed" + start.stdout + start.stderr + '\n' + logs
E                   AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}
E                   
E                   2024-11-13T10:57:31Z I [rs1/rs102:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:57:31Z I [rs1/rs102:27017] starting PITR routine
E                   2024-11-13T10:57:31Z I [rs1/rs103:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:57:31Z I [rs1/rs103:27017] starting PITR routine
E                   2024-11-13T10:57:31Z I [rs1/rs103:27017] node: rs1/rs103:27017
E                   2024-11-13T10:57:31Z I [rs1/rs102:27017] node: rs1/rs102:27017
E                   2024-11-13T10:57:31Z I [rs1/rs101:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T10:57:31Z I [rs1/rs101:27017] starting PITR routine
E                   2024-11-13T10:57:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:31Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:57:31Z I [rs1/rs101:27017] node: rs1/rs101:27017
E                   2024-11-13T10:57:31Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:57:31Z I [rs1/rs102:27017] listening for the commands
E                   2024-11-13T10:57:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:31Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T10:57:31Z I [rs1/rs103:27017] listening for the commands
E                   2024-11-13T10:57:31Z I [rs1/rs101:27017] listening for the commands
E                   2024-11-13T10:57:36Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:36Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:36Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T10:57:38Z I [rs1/rs102:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
E                   2024-11-13T10:57:38Z I [rs1/rs103:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
E                   2024-11-13T10:57:38Z I [rs1/rs103:27017] got epoch {1731495456 6}
E                   2024-11-13T10:57:38Z I [rs1/rs102:27017] got epoch {1731495456 6}
E                   2024-11-13T10:57:38Z I [rs1/rs101:27017] got command resync <ts: 1731495457>, opid: 673486213897f3b75671514d
E                   2024-11-13T10:57:38Z I [rs1/rs101:27017] got epoch {1731495456 6}
E                   2024-11-13T10:57:38Z I [rs1/rs103:27017] [resync] started
E                   2024-11-13T10:57:38Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T10:57:38Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
E                   2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] got backups list: 0
E                   2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] got physical restores list: 0
E                   2024-11-13T10:57:38Z D [rs1/rs103:27017] [resync] epoch set to {1731495458 14}
E                   2024-11-13T10:57:38Z I [rs1/rs103:27017] [resync] succeed
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:43Z I [rs1/rs102:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] got epoch {1731495458 14}
E                   2024-11-13T10:57:43Z I [rs1/rs102:27017] got epoch {1731495458 14}
E                   2024-11-13T10:57:43Z I [rs1/rs101:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:43Z I [rs1/rs101:27017] got epoch {1731495458 14}
E                   2024-11-13T10:57:43Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] [resync] started
E                   2024-11-13T10:57:43Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:43Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T10:57:43Z I [rs1/rs102:27017] got epoch {1731495458 14}
E                   2024-11-13T10:57:43Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] got backups list: 0
E                   2024-11-13T10:57:43Z I [rs1/rs101:27017] got epoch {1731495458 14}
E                   2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] got physical restores list: 0
E                   2024-11-13T10:57:43Z E [rs1/rs101:27017] [backup/2024-11-13T10:57:43Z] unable to proceed with the backup, active lock is present
E                   2024-11-13T10:57:43Z D [rs1/rs103:27017] [resync] epoch set to {1731495463 78}
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] [resync] succeed
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:43Z I [rs1/rs103:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z I [rs1/rs102:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:44Z I [rs1/rs102:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z I [rs1/rs101:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:44Z I [rs1/rs101:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z D [rs1/rs102:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
E                   2024-11-13T10:57:44Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T10:57:44Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:44Z I [rs1/rs102:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z D [rs1/rs101:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
E                   2024-11-13T10:57:44Z I [rs1/rs103:27017] got command resync <ts: 1731495463>, opid: 67348627e04fc2e871b058c1
E                   2024-11-13T10:57:44Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T10:57:44Z I [rs1/rs103:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:44Z I [rs1/rs101:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:44Z E [rs1/rs101:27017] [backup/2024-11-13T10:57:43Z] unable to proceed with the backup, active lock is present
E                   2024-11-13T10:57:44Z D [rs1/rs103:27017] [resync] get lock: duplicate operation: 67348627e04fc2e871b058c1 [Resync storage]
E                   2024-11-13T10:57:44Z D [rs1/rs103:27017] [resync] lock not acquired
E                   2024-11-13T10:57:44Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T10:57:43Z, compression: none (level: default)] <ts: 1731495463>, opid: 673486274d1ba56fb1eab354
E                   2024-11-13T10:57:44Z I [rs1/rs103:27017] got epoch {1731495463 78}
E                   2024-11-13T10:57:58Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
E                   2024-11-13T10:57:58Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
E                   2024-11-13T10:57:58Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
E                   2024-11-13T10:57:58Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
E                   2024-11-13T10:57:59Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
E                   2024-11-13T10:57:59Z D [rs1/rs102:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node
E                   2024-11-13T10:57:59Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] nomination timeout
E                   2024-11-13T10:57:59Z D [rs1/rs103:27017] [backup/2024-11-13T10:57:43Z] skip after nomination, probably started by another node

cluster.py:393: AssertionError
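
test_logical_pitr_with_txn_PBM_T271[inserts] hit the same race: the backup command (opid 673486274d1ba56fb1eab354) arrived while a second resync (opid 67348627e04fc2e871b058c1) still held the lock. An alternative sketch is to wait until PBM reports no running operation before issuing the backup, assuming cluster.get_status()['running'] behaves as it does in make_backup above; hypothetical placement, not the actual fix:

    # Hypothetical pre-backup wait: poll PBM status until no operation is running.
    deadline = time.time() + 120
    while True:
        running = self.get_status()['running']
        if not running:
            break
        Cluster.log("Waiting for PBM operation to finish: " + str(running))
        assert time.time() < deadline, "PBM is still busy: " + str(running)
        time.sleep(1)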

Check failure on line 121 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_with_txn_PBM_T271[replaces]

AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
 +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'replaces'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_with_txn_PBM_T271(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            with client.start_session() as session:
                with session.start_transaction():
                    client["test"]["inserts"].insert_one({"key": i+10, "data": i+10}, session=session)
                    client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10}, session=session)
                    client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }}, session=session)
                    client["test"]["deletes"].delete_one({"key": i}, session=session)
                session.commit_transaction()
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
>       assert "cloning namespace (--ns-to) is already in use" in result.stderr
E       AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
E        +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr

test_rename_replicaset.py:121: AssertionError

Check failure on line 99 in pbm-functional/pytest/test_rename_replicaset.py


github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_with_txn_PBM_T271[updates]

AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}

2024-11-13T11:00:26Z I [rs1/rs101:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T11:00:26Z I [rs1/rs101:27017] starting PITR routine
2024-11-13T11:00:26Z I [rs1/rs102:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T11:00:26Z I [rs1/rs102:27017] starting PITR routine
2024-11-13T11:00:26Z I [rs1/rs103:27017] pbm-agent:
Version:   2.7.0
Platform:  linux/amd64
GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
GitBranch: dev
BuildTime: 2024-11-13_09:38_UTC
GoVersion: go1.23.3
2024-11-13T11:00:26Z I [rs1/rs103:27017] starting PITR routine
2024-11-13T11:00:26Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:26Z I [rs1/rs102:27017] node: rs1/rs102:27017
2024-11-13T11:00:26Z I [rs1/rs103:27017] node: rs1/rs103:27017
2024-11-13T11:00:26Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:26Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T11:00:26Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:26Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T11:00:26Z I [rs1/rs101:27017] node: rs1/rs101:27017
2024-11-13T11:00:26Z I [rs1/rs103:27017] listening for the commands
2024-11-13T11:00:26Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
2024-11-13T11:00:26Z I [rs1/rs102:27017] listening for the commands
2024-11-13T11:00:26Z I [rs1/rs101:27017] listening for the commands
2024-11-13T11:00:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
2024-11-13T11:00:33Z I [rs1/rs103:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
2024-11-13T11:00:33Z I [rs1/rs101:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
2024-11-13T11:00:33Z I [rs1/rs102:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
2024-11-13T11:00:33Z I [rs1/rs103:27017] got epoch {1731495631 6}
2024-11-13T11:00:33Z I [rs1/rs101:27017] got epoch {1731495631 6}
2024-11-13T11:00:33Z I [rs1/rs102:27017] got epoch {1731495631 6}
2024-11-13T11:00:33Z D [rs1/rs102:27017] [resync] lock not acquired
2024-11-13T11:00:33Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T11:00:33Z I [rs1/rs103:27017] [resync] started
2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] got backups list: 0
2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] got physical restores list: 0
2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] epoch set to {1731495633 14}
2024-11-13T11:00:33Z I [rs1/rs103:27017] [resync] succeed
2024-11-13T11:00:38Z I [rs1/rs103:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
2024-11-13T11:00:38Z I [rs1/rs101:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
2024-11-13T11:00:38Z I [rs1/rs102:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
2024-11-13T11:00:38Z I [rs1/rs102:27017] got epoch {1731495633 14}
2024-11-13T11:00:38Z I [rs1/rs103:27017] got epoch {1731495633 14}
2024-11-13T11:00:38Z I [rs1/rs101:27017] got epoch {1731495633 14}
2024-11-13T11:00:38Z D [rs1/rs103:27017] [resync] lock not acquired
2024-11-13T11:00:38Z I [rs1/rs102:27017] [resync] started
2024-11-13T11:00:38Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
2024-11-13T11:00:38Z D [rs1/rs101:27017] [resync] lock not acquired
2024-11-13T11:00:38Z I [rs1/rs103:27017] got epoch {1731495633 14}
2024-11-13T11:00:38Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] got backups list: 0
2024-11-13T11:00:38Z I [rs1/rs101:27017] got epoch {1731495633 14}
2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] got physical restores list: 0
2024-11-13T11:00:38Z E [rs1/rs101:27017] [backup/2024-11-13T11:00:38Z] unable to proceed with the backup, active lock is present
2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] epoch set to {1731495638 19}
2024-11-13T11:00:38Z I [rs1/rs102:27017] [resync] succeed
2024-11-13T11:00:38Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
2024-11-13T11:00:38Z I [rs1/rs102:27017] got epoch {1731495638 19}
2024-11-13T11:00:53Z D [rs1/rs103:27017] [backup/2024-11-13T11:00:38Z] nomination timeout
2024-11-13T11:00:53Z D [rs1/rs103:27017] [backup/2024-11-13T11:00:38Z] skip after nomination, probably started by another node
2024-11-13T11:00:53Z D [rs1/rs102:27017] [backup/2024-11-13T11:00:38Z] nomination timeout
2024-11-13T11:00:53Z D [rs1/rs102:27017] [backup/2024-11-13T11:00:38Z] skip after nomination, probably started by another node
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'updates'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_with_txn_PBM_T271(start_cluster,cluster,collection):
        cluster.check_pbm_status()
>       cluster.make_backup("logical")

test_rename_replicaset.py:99: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7f6447bba290>, type = 'logical'

    def make_backup(self, type):
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 120
        while True:
            running = self.get_status()['running']
            Cluster.log("Current operation: " + str(running))
            if not running:
                if type:
                    start = n.run(
                        'pbm backup --out=json --type=' + type)
                else:
                    start = n.run('pbm backup --out=json')
                if start.rc == 0:
                    name = json.loads(start.stdout)['name']
                    Cluster.log("Backup started")
                    break
                elif "resync" in start.stdout:
                    Cluster.log("Resync in progress, retrying: " + start.stdout)
                else:
                    logs = n.check_output("pbm logs -sD -t0")
>                   assert False, "Backup failed" + start.stdout + start.stderr + '\n' + logs
E                   AssertionError: Backup failed{"Error":"get backup metadata: get: context deadline exceeded"}
E                   
E                   2024-11-13T11:00:26Z I [rs1/rs101:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T11:00:26Z I [rs1/rs101:27017] starting PITR routine
E                   2024-11-13T11:00:26Z I [rs1/rs102:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T11:00:26Z I [rs1/rs102:27017] starting PITR routine
E                   2024-11-13T11:00:26Z I [rs1/rs103:27017] pbm-agent:
E                   Version:   2.7.0
E                   Platform:  linux/amd64
E                   GitCommit: 79478b12486e64faa442adaa26183969d9b9a16e
E                   GitBranch: dev
E                   BuildTime: 2024-11-13_09:38_UTC
E                   GoVersion: go1.23.3
E                   2024-11-13T11:00:26Z I [rs1/rs103:27017] starting PITR routine
E                   2024-11-13T11:00:26Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:26Z I [rs1/rs102:27017] node: rs1/rs102:27017
E                   2024-11-13T11:00:26Z I [rs1/rs103:27017] node: rs1/rs103:27017
E                   2024-11-13T11:00:26Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:26Z I [rs1/rs103:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T11:00:26Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:26Z I [rs1/rs102:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T11:00:26Z I [rs1/rs101:27017] node: rs1/rs101:27017
E                   2024-11-13T11:00:26Z I [rs1/rs103:27017] listening for the commands
E                   2024-11-13T11:00:26Z I [rs1/rs101:27017] conn level ReadConcern: majority; WriteConcern: majority
E                   2024-11-13T11:00:26Z I [rs1/rs102:27017] listening for the commands
E                   2024-11-13T11:00:26Z I [rs1/rs101:27017] listening for the commands
E                   2024-11-13T11:00:31Z E [rs1/rs101:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:31Z E [rs1/rs103:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:31Z E [rs1/rs102:27017] [agentCheckup] check storage connection: unable to get storage: get config: get: mongo: no documents in result
E                   2024-11-13T11:00:33Z I [rs1/rs103:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
E                   2024-11-13T11:00:33Z I [rs1/rs101:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
E                   2024-11-13T11:00:33Z I [rs1/rs102:27017] got command resync <ts: 1731495632>, opid: 673486d0f67d8456039450d2
E                   2024-11-13T11:00:33Z I [rs1/rs103:27017] got epoch {1731495631 6}
E                   2024-11-13T11:00:33Z I [rs1/rs101:27017] got epoch {1731495631 6}
E                   2024-11-13T11:00:33Z I [rs1/rs102:27017] got epoch {1731495631 6}
E                   2024-11-13T11:00:33Z D [rs1/rs102:27017] [resync] lock not acquired
E                   2024-11-13T11:00:33Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T11:00:33Z I [rs1/rs103:27017] [resync] started
E                   2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] uploading ".pbm.init" [size hint: 5 (5.00B); part size: 10485760 (10.00MB)]
E                   2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] got backups list: 0
E                   2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] got physical restores list: 0
E                   2024-11-13T11:00:33Z D [rs1/rs103:27017] [resync] epoch set to {1731495633 14}
E                   2024-11-13T11:00:33Z I [rs1/rs103:27017] [resync] succeed
E                   2024-11-13T11:00:38Z I [rs1/rs103:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
E                   2024-11-13T11:00:38Z I [rs1/rs101:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] got command resync <ts: 1731495637>, opid: 673486d5f9d04a0b40dff19d
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] got epoch {1731495633 14}
E                   2024-11-13T11:00:38Z I [rs1/rs103:27017] got epoch {1731495633 14}
E                   2024-11-13T11:00:38Z I [rs1/rs101:27017] got epoch {1731495633 14}
E                   2024-11-13T11:00:38Z D [rs1/rs103:27017] [resync] lock not acquired
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] [resync] started
E                   2024-11-13T11:00:38Z I [rs1/rs103:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
E                   2024-11-13T11:00:38Z D [rs1/rs101:27017] [resync] lock not acquired
E                   2024-11-13T11:00:38Z I [rs1/rs103:27017] got epoch {1731495633 14}
E                   2024-11-13T11:00:38Z I [rs1/rs101:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
E                   2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] got backups list: 0
E                   2024-11-13T11:00:38Z I [rs1/rs101:27017] got epoch {1731495633 14}
E                   2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] got physical restores list: 0
E                   2024-11-13T11:00:38Z E [rs1/rs101:27017] [backup/2024-11-13T11:00:38Z] unable to proceed with the backup, active lock is present
E                   2024-11-13T11:00:38Z D [rs1/rs102:27017] [resync] epoch set to {1731495638 19}
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] [resync] succeed
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] got command backup [name: 2024-11-13T11:00:38Z, compression: none (level: default)] <ts: 1731495638>, opid: 673486d6f95638ddb968aa33
E                   2024-11-13T11:00:38Z I [rs1/rs102:27017] got epoch {1731495638 19}
E                   2024-11-13T11:00:53Z D [rs1/rs103:27017] [backup/2024-11-13T11:00:38Z] nomination timeout
E                   2024-11-13T11:00:53Z D [rs1/rs103:27017] [backup/2024-11-13T11:00:38Z] skip after nomination, probably started by another node
E                   2024-11-13T11:00:53Z D [rs1/rs102:27017] [backup/2024-11-13T11:00:38Z] nomination timeout
E                   2024-11-13T11:00:53Z D [rs1/rs102:27017] [backup/2024-11-13T11:00:38Z] skip after nomination, probably started by another node

cluster.py:393: AssertionError
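The backup in this run never gets past nomination: the `pbm backup` command (opid 673486d6f95638ddb968aa33) arrives while the second `resync` (opid 673486d5f9d04a0b40dff19d) still holds the lock, rs101 logs "unable to proceed with the backup, active lock is present", and the CLI surfaces only `{"Error":"get backup metadata: get: context deadline exceeded"}`. The `make_backup` loop shown above retries only when "resync" appears in stdout, so this output falls through to the `assert False`. Below is a minimal sketch of a more tolerant retry that also treats the metadata-deadline error as transient; the helper name `start_backup_with_retry`, the `run_pbm` callable, and the assumption that this error is in fact transient are illustrative and not part of the existing test harness.

    import json
    import time

    def start_backup_with_retry(run_pbm, backup_type="logical", timeout=120):
        # run_pbm is a stand-in for the testinfra host.run() call used above.
        deadline = time.time() + timeout
        while time.time() < deadline:
            cmd = "pbm backup --out=json" + (" --type=" + backup_type if backup_type else "")
            result = run_pbm(cmd)
            if result.rc == 0:
                return json.loads(result.stdout)["name"]
            # Treat both messages as transient: a resync still in flight, or the
            # backup metadata not yet visible to the CLI. Back off and retry.
            if "resync" in result.stdout or "context deadline exceeded" in result.stdout:
                time.sleep(5)
                continue
            raise AssertionError("Backup failed: " + result.stdout + result.stderr)
        raise AssertionError("Backup did not start within %ds" % timeout)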

Check failure on line 121 in pbm-functional/pytest/test_rename_replicaset.py

@github-actions github-actions / JUnit Test Report

test_rename_replicaset.test_logical_pitr_with_txn_PBM_T271[deletes]

AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
 +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f6447bba290>
collection = 'deletes'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes'])
    def test_logical_pitr_with_txn_PBM_T271(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            with client.start_session() as session:
                with session.start_transaction():
                    client["test"]["inserts"].insert_one({"key": i+10, "data": i+10}, session=session)
                    client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10}, session=session)
                    client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }}, session=session)
                    client["test"]["deletes"].delete_one({"key": i}, session=session)
                session.commit_transaction()
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
>       assert "cloning namespace (--ns-to) is already in use" in result.stderr
E       AssertionError: assert 'cloning namespace (--ns-to) is already in use' in 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n'
E        +  where 'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n' = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f644c190b90>, exit_status=1, command=b'pbm ...'', _stderr=b'Error: parse --ns-from and --ns-to options: cloning with restore to the point-in-time is not possible\n').stderr

test_rename_replicaset.py:121: AssertionError
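This failure is a message mismatch rather than a restore succeeding unexpectedly: the restore is still rejected with a non-zero exit code, but PBM 2.7.0 now refuses --ns-from/--ns-to combined with --time up front ("cloning with restore to the point-in-time is not possible"), so the expected substring "cloning namespace (--ns-to) is already in use" never appears. Assuming the intent of the negative test is only to verify that the PITR clone is refused, a minimal sketch of an assertion that accepts either wording could be:

    # Sketch: accept the old and the new rejection message; both appear in the
    # failures above, and the test only needs to confirm the restore is refused.
    assert result.rc != 0
    assert ("cloning namespace (--ns-to) is already in use" in result.stderr
            or "cloning with restore to the point-in-time is not possible" in result.stderr)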