
PBM. Add tests for selective backup/restore and PBM-1391/PBM-1344, fix test names for zephyr #191

GitHub Actions / JUnit Test Report failed Dec 9, 2024 in 0s

45 tests run, 34 passed, 8 skipped, 3 failed.

Annotations

Check failure on line 99 in pbm-functional/pytest/test_rename_replicaset.py


test_rename_replicaset.test_logical_pitr_crud_PBM_T270[indexes]

AssertionError: assert 'new_index' in {'_id_': {'key': [('_id', 1)], 'v': 2}}
 +  where {'_id_': {'key': [('_id', 1)], 'v': 2}} = index_information()
 +    where index_information = Collection(Database(MongoClient(host=['rs101:27017'], document_class=dict, tz_aware=False, connect=True), 'restored'), 'indexes').index_information
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fdbc843f250>
collection = 'indexes'

    @pytest.mark.timeout(300,func_only=True)
    @pytest.mark.parametrize('collection',['inserts','replaces','updates','deletes','indexes'])
    def test_logical_pitr_crud_PBM_T270(start_cluster,cluster,collection):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        for i in range(10):
            client = pymongo.MongoClient(cluster.connection)
            client["test"]["inserts"].insert_one({"key": i+10, "data": i+10})
            client["test"]["replaces"].replace_one({"key": i}, {"key": i+10, "data": i+10})
            client["test"]["updates"].update_one({"key": i}, {"$inc": { "data": 10 }})
            client["test"]["deletes"].delete_one({"key": i})
        client["test"]["indexes"].drop_index("old_index")
        client["test"]["indexes"].create_index("data",name="new_index")
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup_to_fail=pitr + " --ns-from=test." + collection + " --ns-to=test.test"
        result=cluster.exec_pbm_cli("restore" + backup_to_fail + " --wait")
        assert result.rc != 0
        assert "cloning namespace (--ns-to) is already in use" in result.stderr
        backup=pitr + " --ns-from=test." + collection + " --ns-to=restored." + collection
        cluster.make_restore(backup)
        client = pymongo.MongoClient(cluster.connection)
        assert client["test"]["inserts"].count_documents({})==20
        assert client["test"]["replaces"].count_documents({})==10
        assert client["test"]["updates"].count_documents({})==10
        assert client["test"]["deletes"].count_documents({})==0
        if collection=='inserts':
            assert client["restored"]["inserts"].count_documents({})==20
            for i in range(20):
                assert client["restored"]["inserts"].find_one({"key": i, "data": i})
        elif collection=='replaces':
            assert client["restored"]["replaces"].count_documents({})==10
            for i in range(10):
                assert client["restored"]["replaces"].find_one({"key": i+10, "data": i+10})
        elif collection=='updates':
            assert client["restored"]["updates"].count_documents({})==10
            for i in range(10):
                assert client["restored"]["updates"].find_one({"key": i, "data": i+10})
        elif collection=='deletes':
            assert client["restored"]["deletes"].count_documents({})==0
        else:
            assert client["restored"]["indexes"].count_documents({})==10
>           assert 'new_index' in client["restored"]["indexes"].index_information()
E           AssertionError: assert 'new_index' in {'_id_': {'key': [('_id', 1)], 'v': 2}}
E            +  where {'_id_': {'key': [('_id', 1)], 'v': 2}} = index_information()
E            +    where index_information = Collection(Database(MongoClient(host=['rs101:27017'], document_class=dict, tz_aware=False, connect=True), 'restored'), 'indexes').index_information

test_rename_replicaset.py:99: AssertionError
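
The assertion expects the secondary index created on test.indexes during the PITR window ('new_index') to be present on the cloned restored.indexes collection after the --ns-from/--ns-to PITR restore, but only the default _id_ index is there. A minimal standalone sketch of just that check, assuming the replica set member rs101:27017 from the raw output is reachable:

    import pymongo

    # Connect to the replica set member shown in the raw output (assumption).
    client = pymongo.MongoClient("mongodb://rs101:27017")

    # index_information() maps index name -> index details; the test expects the
    # index created during the PITR window to survive the namespace clone.
    indexes = client["restored"]["indexes"].index_information()
    print(sorted(indexes))           # observed in this run: ['_id_'] only
    assert "new_index" in indexes    # fails: only the default _id_ index was restored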

Check failure on line 177 in pbm-functional/pytest/test_rename_replicaset.py


test_rename_replicaset.test_logical_pitr_ddl_PBM_T273

AssertionError: assert 'new_index' in {'_id_': {'key': [('_id', 1)], 'v': 2}}
 +  where {'_id_': {'key': [('_id', 1)], 'v': 2}} = index_information()
 +    where index_information = Collection(Database(MongoClient(host=['rs101:27017'], document_class=dict, tz_aware=False, connect=True), 'restored'), 'indexes').index_information
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fdbc843f250>

    @pytest.mark.timeout(300,func_only=True)
    def test_logical_pitr_ddl_PBM_T273(start_cluster,cluster):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        client = pymongo.MongoClient(cluster.connection)
        client.drop_database('test')
        for i in range(10):
            client["test"]["indexes"].insert_one({"key": i+10, "data": i+10})
        client["test"]["indexes"].create_index("data",name="new_index")
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(10)
        cluster.disable_pitr()
        time.sleep(5)
        backup=pitr + " --ns-from=test.indexes --ns-to=restored.indexes"
        cluster.make_restore(backup)
        client = pymongo.MongoClient(cluster.connection)
        assert client["restored"]["indexes"].count_documents({})==10
        for i in range(10):
            assert client["restored"]["indexes"].find_one({"key": i+10, "data": i+10})
>       assert 'new_index' in client["restored"]["indexes"].index_information()
E       AssertionError: assert 'new_index' in {'_id_': {'key': [('_id', 1)], 'v': 2}}
E        +  where {'_id_': {'key': [('_id', 1)], 'v': 2}} = index_information()
E        +    where index_information = Collection(Database(MongoClient(host=['rs101:27017'], document_class=dict, tz_aware=False, connect=True), 'restored'), 'indexes').index_information

test_rename_replicaset.py:177: AssertionError
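
For reference, the restore exercised here is a PITR restore combined with a namespace clone. A minimal sketch of how the test assembles the argument string (the timestamp value is illustrative; cluster.make_restore is the test helper that presumably wraps `pbm restore ... --wait` as seen in the earlier failure case):

    from datetime import datetime

    # Point-in-time target, formatted the same way as in the test above.
    pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")

    # Clone test.indexes into restored.indexes at the given point in time.
    args = " --time=" + pitr + " --ns-from=test.indexes --ns-to=restored.indexes"
    # The test then calls: cluster.make_restore(args)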

Check failure on line 99 in pbm-functional/pytest/test_sharded.py


test_sharded.test_logical_selective_PBM_T218

AssertionError: assert 0 == 10
 +  where 0 = count_documents({})
 +    where count_documents = Collection(Database(MongoClient(host=['mongos:27017'], document_class=dict, tz_aware=False, connect=True), 'test1'), 'test_coll11').count_documents
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fdbc6dc7d50>

    @pytest.mark.timeout(300,func_only=True)
    def test_logical_selective_PBM_T218(start_cluster,cluster):
        cluster.check_pbm_status()
        client=pymongo.MongoClient(cluster.connection)
        client.admin.command("enableSharding", "test2")
        client.admin.command("shardCollection", "test2.test_coll21", key={"_id": "hashed"})
        for i in range(10):
         client["test1"]["test_coll11"].insert_one({"key": i, "data": i})
         client["test2"]["test_coll21"].insert_one({"key": i, "data": i})
         client["test2"]["test_coll22"].insert_one({"key": i, "data": i})
        client["test1"]["test_coll11"].create_index(["key"],name="test_coll11_index_old")
        client["test2"]["test_coll21"].create_index(["key"],name="test_coll21_index_old")
        backup_full=cluster.make_backup("logical")
        backup_partial=cluster.make_backup("logical --ns=test1.test_coll11,test2.*")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.1")
        time.sleep(5)
        client.drop_database('test1')
        for i in range(10):
            client["test1"]["test_coll11"].insert_one({"key": i+10, "data": i+10})
        client["test1"]["test_coll11"].create_index("data",name="test_coll11_index_new")
        client["test2"]["test_coll22"].create_index("data",name="test_coll22_index_new")
        time.sleep(10)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        pitr=" --time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        cluster.disable_pitr()
        time.sleep(10)
        client.drop_database('test1')
        client.drop_database('test2')
        backup_partial=" --base-snapshot="+ backup_partial + pitr
        backup_full=" --base-snapshot="+ backup_full + pitr + " --ns=test1.test_coll11,test2.*"
        cluster.make_restore(backup_partial,check_pbm_status=True)
>       assert client["test1"]["test_coll11"].count_documents({})==10
E       AssertionError: assert 0 == 10
E        +  where 0 = count_documents({})
E        +    where count_documents = Collection(Database(MongoClient(host=['mongos:27017'], document_class=dict, tz_aware=False, connect=True), 'test1'), 'test_coll11').count_documents

test_sharded.py:99: AssertionError
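
Here the selective restore from the partial backup (--base-snapshot plus --time) is expected to bring back the 10 documents re-inserted into test1.test_coll11 during the PITR window, but the collection comes back empty. A minimal sketch of the post-restore check, assuming the mongos router mongos:27017 from the raw output is reachable:

    import pymongo

    # Connect through the mongos router shown in the raw output (assumption).
    client = pymongo.MongoClient("mongodb://mongos:27017")

    # The test expects the 10 documents written during the PITR window
    # (keys 10..19) to be restored into test1.test_coll11.
    restored = client["test1"]["test_coll11"].count_documents({})
    print(restored)         # observed in this run: 0
    assert restored == 10   # fails: the partial-backup PITR restore returned no documents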