From 0030dd0e129a35731ab2663832734d379fabc697 Mon Sep 17 00:00:00 2001
From: senthil
Date: Thu, 25 Jun 2020 23:02:29 +0530
Subject: [PATCH] import collection configs from snapshot

This PR builds the collection config store for a ledger using the snapshot files.

FAB-18003

Signed-off-by: senthil
---
 core/ledger/confighistory/db_helper.go      |  13 +
 core/ledger/confighistory/db_helper_test.go |  39 +++
 core/ledger/confighistory/mgr.go            |  56 +++-
 core/ledger/confighistory/mgr_test.go       | 303 ++++++++++++++------
 4 files changed, 322 insertions(+), 89 deletions(-)

diff --git a/core/ledger/confighistory/db_helper.go b/core/ledger/confighistory/db_helper.go
index 98a2ae475b4..61240b05bb1 100644
--- a/core/ledger/confighistory/db_helper.go
+++ b/core/ledger/confighistory/db_helper.go
@@ -111,6 +111,19 @@ func (d *db) getNamespaceIterator(ns string) (*leveldbhelper.Iterator, error) {
     return d.GetIterator(nsStartKey, nsEndKey)
 }
 
+func (d *db) isEmpty() (bool, error) {
+    itr, err := d.GetIterator(nil, nil)
+    if err != nil {
+        return false, err
+    }
+    defer itr.Release()
+    entryExist := itr.Next()
+    if err := itr.Error(); err != nil {
+        return false, errors.WithMessagef(err, "internal leveldb error while obtaining next entry from iterator")
+    }
+    return !entryExist, nil
+}
+
 func encodeCompositeKey(ns, key string, blockNum uint64) []byte {
     b := []byte(keyPrefix + ns)
     b = append(b, separatorByte)
diff --git a/core/ledger/confighistory/db_helper_test.go b/core/ledger/confighistory/db_helper_test.go
index 867273d999a..a7e5e6f0152 100644
--- a/core/ledger/confighistory/db_helper_test.go
+++ b/core/ledger/confighistory/db_helper_test.go
@@ -111,7 +111,46 @@ func TestGetNamespaceIterator(t *testing.T) {
         require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
         require.Nil(t, itr)
     })
+}
+
+func TestIsNotEmpty(t *testing.T) {
+    testDBPath := "/tmp/fabric/core/ledger/confighistory"
+    deleteTestPath(t, testDBPath)
+    provider, err := newDBProvider(testDBPath)
+    require.NoError(t, err)
+    defer deleteTestPath(t, testDBPath)
+
+    db := provider.getDB("ledger1")
+
+    t.Run("db is empty", func(t *testing.T) {
+        empty, err := db.isEmpty()
+        require.NoError(t, err)
+        require.True(t, empty)
+    })
+
+    t.Run("db is not empty", func(t *testing.T) {
+        sampleData := []*compositeKV{
+            {
+                &compositeKey{
+                    ns: "ns1",
+                    key: "key1",
+                    blockNum: 40,
+                },
+                []byte("val1_40"),
+            },
+        }
+        populateDBWithSampleData(t, db, sampleData)
+        empty, err := db.isEmpty()
+        require.NoError(t, err)
+        require.False(t, empty)
+    })
 
+    t.Run("iter error", func(t *testing.T) {
+        provider.Close()
+        empty, err := db.isEmpty()
+        require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
+        require.False(t, empty)
+    })
 }
 
 func verifyNsEntries(t *testing.T, nsItr *leveldbhelper.Iterator, expectedEntries []*compositeKV) {
diff --git a/core/ledger/confighistory/mgr.go b/core/ledger/confighistory/mgr.go
index af0253122d2..8c46df118dc 100644
--- a/core/ledger/confighistory/mgr.go
+++ b/core/ledger/confighistory/mgr.go
@@ -16,11 +16,15 @@ import (
     "github.com/hyperledger/fabric-protos-go/peer"
     "github.com/hyperledger/fabric/common/flogging"
     "github.com/hyperledger/fabric/common/ledger/snapshot"
+    "github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
     "github.com/hyperledger/fabric/core/ledger"
     "github.com/pkg/errors"
 )
 
-var logger = flogging.MustGetLogger("confighistory")
+var (
+    logger                 = flogging.MustGetLogger("confighistory")
+    importConfigsBatchSize = 1024 * 1024
+)
 
 const (
     collectionConfigNamespace = "lscc" // lscc namespace was introduced in version 1.2 and we continue to use this in order to be compatible with existing data
@@ -104,6 +108,56 @@ func (m *Mgr) HandleStateUpdates(trigger *ledger.StateUpdateTrigger) error {
     return dbHandle.writeBatch(batch, true)
 }
 
+func (m *Mgr) ImportConfigHistory(ledgerID string, dir string) error {
+    db := m.dbProvider.getDB(ledgerID)
+    empty, err := db.isEmpty()
+    if err != nil {
+        return err
+    }
+    if !empty {
+        return errors.New(fmt.Sprintf(
+            "config history for ledger [%s] exists. Incremental import is not supported. "+
+                "Remove the existing ledger data before retry",
+            ledgerID,
+        ))
+    }
+
+    configMetadata, err := snapshot.OpenFile(filepath.Join(dir, snapshotMetadataFileName), snapshotFileFormat)
+    if err != nil {
+        return err
+    }
+    numCollectionConfigs, err := configMetadata.DecodeUVarInt()
+    if err != nil {
+        return err
+    }
+    collectionConfigData, err := snapshot.OpenFile(filepath.Join(dir, snapshotDataFileName), snapshotFileFormat)
+    if err != nil {
+        return err
+    }
+
+    batch := leveldbhelper.NewUpdateBatch()
+    currentBatchSize := 0
+    for i := uint64(0); i < numCollectionConfigs; i++ {
+        key, err := collectionConfigData.DecodeBytes()
+        if err != nil {
+            return err
+        }
+        val, err := collectionConfigData.DecodeBytes()
+        if err != nil {
+            return err
+        }
+        batch.Put(key, val)
+        currentBatchSize += len(key) + len(val)
+        if currentBatchSize >= importConfigsBatchSize {
+            if err := db.WriteBatch(batch, true); err != nil {
+                return err
+            }
+            batch = leveldbhelper.NewUpdateBatch()
+        }
+    }
+    return db.WriteBatch(batch, true)
+}
+
 // GetRetriever returns an implementation of `ledger.ConfigHistoryRetriever` for the given ledger id.
 func (m *Mgr) GetRetriever(ledgerID string, ledgerInfoRetriever LedgerInfoRetriever) *Retriever {
     return &Retriever{
diff --git a/core/ledger/confighistory/mgr_test.go b/core/ledger/confighistory/mgr_test.go
index 2885fa53740..befc59392cb 100644
--- a/core/ledger/confighistory/mgr_test.go
+++ b/core/ledger/confighistory/mgr_test.go
@@ -13,7 +13,7 @@ import (
     "io/ioutil"
     "math"
     "os"
-    "path"
+    "path/filepath"
     "testing"
 
     "github.com/golang/protobuf/proto"
@@ -268,7 +268,6 @@ func TestWithImplicitColls(t *testing.T) {
 
 type testEnvForSnapshot struct {
     mgr             *Mgr
-    retriever       *Retriever
     testSnapshotDir string
     cleanup         func()
 }
@@ -276,13 +275,11 @@ type testEnvForSnapshot struct {
 func newTestEnvForSnapshot(t *testing.T) *testEnvForSnapshot {
     dbPath, err := ioutil.TempDir("", "confighistory")
     require.NoError(t, err)
-    p, err := newDBProvider(dbPath)
+    mgr, err := NewMgr(dbPath, &mock.DeployedChaincodeInfoProvider{})
     if err != nil {
        os.RemoveAll(dbPath)
-        t.Fatalf("Failed to create new leveldb provider: %s", err)
+        t.Fatalf("Failed to create new config history manager: %s", err)
     }
-    mgr := &Mgr{dbProvider: p}
-    retriever := mgr.GetRetriever("ledger1", nil)
 
     testSnapshotDir, err := ioutil.TempDir("", "confighistorysnapshot")
     if err != nil {
@@ -291,7 +288,6 @@ func newTestEnvForSnapshot(t *testing.T) *testEnvForSnapshot {
     }
     return &testEnvForSnapshot{
         mgr:             mgr,
-        retriever:       retriever,
         testSnapshotDir: testSnapshotDir,
         cleanup: func() {
             os.RemoveAll(dbPath)
@@ -300,86 +296,200 @@ func newTestEnvForSnapshot(t *testing.T) *testEnvForSnapshot {
     }
 }
 
-func TestExportConfigHistory(t *testing.T) {
+func TestExportAndImportConfigHistory(t *testing.T) {
     env := newTestEnvForSnapshot(t)
     defer env.cleanup()
 
-    // config history database is empty
-    fileHashes, err := env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
-    require.NoError(t, err)
-    require.Empty(t, fileHashes)
-    files, err := ioutil.ReadDir(env.testSnapshotDir)
-    require.NoError(t, err)
-    require.Len(t, files, 0)
+    setup := func(ledgerID string) ([]*compositeKV, map[string][]*ledger.CollectionConfigInfo) {
+        cc1CollConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc1-coll-1", "Explicit-cc1-coll-2"})
+        cc2CollConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc2-coll-1", "Explicit-cc2-coll-2"})
+        cc3CollConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc3-coll-1", "Explicit-cc3-coll-2"})
+        cc1CollConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc1-coll-1", "Explicit-cc1-coll-2", "Explicit-cc1-coll-3"})
+        cc2CollConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc2-coll-1", "Explicit-cc2-coll-2", "Explicit-cc2-coll-3"})
+        cc3CollConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc3-coll-1", "Explicit-cc3-coll-2", "Explicit-cc3-coll-3"})
+
+        ccConfigInfo := map[string][]*ledger.CollectionConfigInfo{
+            "chaincode1": {
+                {
+                    CollectionConfig: cc1CollConfigPackage,
+                    CommittingBlockNum: 50,
+                },
+                {
+                    CollectionConfig: cc1CollConfigPackageNew,
+                    CommittingBlockNum: 100,
+                },
+            },
+            "chaincode2": {
+                {
+                    CollectionConfig: cc2CollConfigPackage,
+                    CommittingBlockNum: 50,
+                },
+                {
+                    CollectionConfig: cc2CollConfigPackageNew,
+                    CommittingBlockNum: 100,
+                },
+            },
+            "chaincode3": {
+                {
+                    CollectionConfig: cc3CollConfigPackage,
+                    CommittingBlockNum: 50,
+                },
+                {
+                    CollectionConfig: cc3CollConfigPackageNew,
+                    CommittingBlockNum: 100,
+                },
+            },
+        }
 
-    // config history database has 3 chaincodes each with 1 collection config entry in the
-    // collectionConfigNamespace
-    dbHandle := env.mgr.dbProvider.getDB("ledger1")
-    cc1collConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc1-coll-1", "Explicit-cc1-coll-2"})
-    cc2collConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc2-coll-1", "Explicit-cc2-coll-2"})
-    cc3collConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc3-coll-1", "Explicit-cc3-coll-2"})
-    batch, err := prepareDBBatch(
-        map[string]*peer.CollectionConfigPackage{
-            "chaincode1": cc1collConfigPackage,
-            "chaincode2": cc2collConfigPackage,
-            "chaincode3": cc3collConfigPackage,
-        },
-        50,
-    )
-    require.NoError(t, err)
-    require.NoError(t, dbHandle.writeBatch(batch, true))
+        db := env.mgr.dbProvider.getDB(ledgerID)
+        batch, err := prepareDBBatch(
+            map[string]*peer.CollectionConfigPackage{
+                "chaincode1": cc1CollConfigPackage,
+                "chaincode2": cc2CollConfigPackage,
+                "chaincode3": cc3CollConfigPackage,
+            },
+            50,
+        )
+        require.NoError(t, err)
+        require.NoError(t, db.writeBatch(batch, true))
 
-    fileHashes, err = env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
-    require.NoError(t, err)
-    cc1configBytes, err := proto.Marshal(cc1collConfigPackage)
-    require.NoError(t, err)
-    cc2configBytes, err := proto.Marshal(cc2collConfigPackage)
-    require.NoError(t, err)
-    cc3configBytes, err := proto.Marshal(cc3collConfigPackage)
-    require.NoError(t, err)
-    expectedCollectionConfigs := []*compositeKV{
-        {&compositeKey{ns: "lscc", key: "chaincode1~collection", blockNum: 50}, cc1configBytes},
-        {&compositeKey{ns: "lscc", key: "chaincode2~collection", blockNum: 50}, cc2configBytes},
-        {&compositeKey{ns: "lscc", key: "chaincode3~collection", blockNum: 50}, cc3configBytes},
-    }
-    verifyExportedConfigHistory(t, env.testSnapshotDir, fileHashes, expectedCollectionConfigs)
-    os.Remove(path.Join(env.testSnapshotDir, snapshotDataFileName))
-    os.Remove(path.Join(env.testSnapshotDir, snapshotMetadataFileName))
-
-    // config history database has 3 chaincodes each with 2 collection config entries in the
-    // collectionConfigNamespace
-    cc1collConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc1-coll-1", "Explicit-cc1-coll-2", "Explicit-cc1-coll-3"})
-    cc2collConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc2-coll-1", "Explicit-cc2-coll-2", "Explicit-cc2-coll-3"})
-    cc3collConfigPackageNew := testutilCreateCollConfigPkg([]string{"Explicit-cc3-coll-1", "Explicit-cc3-coll-2", "Explicit-cc3-coll-3"})
-    batch, err = prepareDBBatch(
-        map[string]*peer.CollectionConfigPackage{
-            "chaincode1": cc1collConfigPackageNew,
-            "chaincode2": cc2collConfigPackageNew,
-            "chaincode3": cc3collConfigPackageNew,
-        },
-        100,
-    )
-    require.NoError(t, err)
-    require.NoError(t, dbHandle.writeBatch(batch, true))
+        batch, err = prepareDBBatch(
+            map[string]*peer.CollectionConfigPackage{
+                "chaincode1": cc1CollConfigPackageNew,
+                "chaincode2": cc2CollConfigPackageNew,
+                "chaincode3": cc3CollConfigPackageNew,
+            },
+            100,
+        )
+        require.NoError(t, err)
+        require.NoError(t, db.writeBatch(batch, true))
 
-    fileHashes, err = env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
-    require.NoError(t, err)
+        cc1configBytes, err := proto.Marshal(cc1CollConfigPackage)
+        require.NoError(t, err)
+        cc2configBytes, err := proto.Marshal(cc2CollConfigPackage)
+        require.NoError(t, err)
+        cc3configBytes, err := proto.Marshal(cc3CollConfigPackage)
+        require.NoError(t, err)
+        cc1configBytesNew, err := proto.Marshal(cc1CollConfigPackageNew)
+        require.NoError(t, err)
+        cc2configBytesNew, err := proto.Marshal(cc2CollConfigPackageNew)
+        require.NoError(t, err)
+        cc3configBytesNew, err := proto.Marshal(cc3CollConfigPackageNew)
+        require.NoError(t, err)
 
-    cc1configBytesNew, err := proto.Marshal(cc1collConfigPackageNew)
-    require.NoError(t, err)
-    cc2configBytesNew, err := proto.Marshal(cc2collConfigPackageNew)
-    require.NoError(t, err)
-    cc3configBytesNew, err := proto.Marshal(cc3collConfigPackageNew)
-    require.NoError(t, err)
-    expectedCollectionConfigs = []*compositeKV{
-        {&compositeKey{ns: "lscc", key: "chaincode1~collection", blockNum: 100}, cc1configBytesNew},
-        {&compositeKey{ns: "lscc", key: "chaincode1~collection", blockNum: 50}, cc1configBytes},
-        {&compositeKey{ns: "lscc", key: "chaincode2~collection", blockNum: 100}, cc2configBytesNew},
-        {&compositeKey{ns: "lscc", key: "chaincode2~collection", blockNum: 50}, cc2configBytes},
-        {&compositeKey{ns: "lscc", key: "chaincode3~collection", blockNum: 100}, cc3configBytesNew},
-        {&compositeKey{ns: "lscc", key: "chaincode3~collection", blockNum: 50}, cc3configBytes},
+        storedKVs := []*compositeKV{
+            {&compositeKey{ns: "lscc", key: "chaincode1~collection", blockNum: 100}, cc1configBytesNew},
+            {&compositeKey{ns: "lscc", key: "chaincode1~collection", blockNum: 50}, cc1configBytes},
+            {&compositeKey{ns: "lscc", key: "chaincode2~collection", blockNum: 100}, cc2configBytesNew},
+            {&compositeKey{ns: "lscc", key: "chaincode2~collection", blockNum: 50}, cc2configBytes},
+            {&compositeKey{ns: "lscc", key: "chaincode3~collection", blockNum: 100}, cc3configBytesNew},
+            {&compositeKey{ns: "lscc", key: "chaincode3~collection", blockNum: 50}, cc3configBytes},
+        }
+
+        return storedKVs, ccConfigInfo
     }
-    verifyExportedConfigHistory(t, env.testSnapshotDir, fileHashes, expectedCollectionConfigs)
+
+    cleanup := func() {
+        require.NoError(t, os.RemoveAll(filepath.Join(env.testSnapshotDir, snapshotDataFileName)))
+        require.NoError(t, os.RemoveAll(filepath.Join(env.testSnapshotDir, snapshotMetadataFileName)))
+    }
+
+    t.Run("confighistory is empty", func(t *testing.T) {
+        retriever := env.mgr.GetRetriever("ledger0", nil)
+        fileHashes, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+        require.Empty(t, fileHashes)
+        files, err := ioutil.ReadDir(env.testSnapshotDir)
+        require.NoError(t, err)
+        require.Len(t, files, 0)
+    })
+
+    t.Run("export confighistory", func(t *testing.T) {
+        // setup ledger1 => export ledger1
+        storedKVs, _ := setup("ledger1")
+        defer cleanup()
+        retriever := env.mgr.GetRetriever("ledger1", nil)
+        fileHashes, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+        verifyExportedConfigHistory(t, env.testSnapshotDir, fileHashes, storedKVs)
+    })
+
+    t.Run("import confighistory and verify queries", func(t *testing.T) {
+        // setup ledger2 => export ledger2 => import into ledger3
+        _, ccConfigInfo := setup("ledger2")
+        defer cleanup()
+        retriever := env.mgr.GetRetriever("ledger2", nil)
+        _, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+
+        importConfigsBatchSize = 100
+        require.NoError(t, env.mgr.ImportConfigHistory("ledger3", env.testSnapshotDir))
+        dummyLedgerInfoRetriever := &dummyLedgerInfoRetriever{
+            info: &common.BlockchainInfo{Height: 1000},
+            qe: &mock.QueryExecutor{},
+        }
+
+        retriever = env.mgr.GetRetriever("ledger3", dummyLedgerInfoRetriever)
+        verifyImportedConfigHistory(t, retriever, ccConfigInfo)
+    })
+
+    t.Run("export from an imported confighistory", func(t *testing.T) {
+        // setup ledger4 => export ledger4 => import into ledger5 => export ledger5
+        storedKVs, _ := setup("ledger4")
+        defer cleanup()
+        retriever := env.mgr.GetRetriever("ledger4", nil)
+        _, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+
+        importConfigsBatchSize = 100
+        require.NoError(t, env.mgr.ImportConfigHistory("ledger5", env.testSnapshotDir))
+        cleanup()
+
+        retriever = env.mgr.GetRetriever("ledger5", nil)
+        fileHashes, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+        verifyExportedConfigHistory(t, env.testSnapshotDir, fileHashes, storedKVs)
+    })
+
+    t.Run("import confighistory error cases", func(t *testing.T) {
+        setup("ledger6")
+        defer cleanup()
+        retriever := env.mgr.GetRetriever("ledger6", nil)
+        _, err := retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+        require.NoError(t, err)
+
+        err = env.mgr.ImportConfigHistory("ledger6", env.testSnapshotDir)
+        expectedErrStr := "config history for ledger [ledger6] exists. Incremental import is not supported. Remove the existing ledger data before retry"
+        require.EqualError(t, err, expectedErrStr)
+
+        require.NoError(t, os.RemoveAll(filepath.Join(env.testSnapshotDir, snapshotDataFileName)))
+        err = env.mgr.ImportConfigHistory("ledger8", env.testSnapshotDir)
+        require.Contains(t, err.Error(), "confighistory.data: no such file or directory")
+
+        dataFileWriter, err := snapshot.CreateFile(filepath.Join(env.testSnapshotDir, snapshotDataFileName), snapshotFileFormat, testNewHashFunc)
+        require.NoError(t, err)
+        defer dataFileWriter.Close()
+        require.NoError(t, dataFileWriter.EncodeUVarint(1))
+        err = env.mgr.ImportConfigHistory("ledger8", env.testSnapshotDir)
+        require.Contains(t, err.Error(), "error while reading from the snapshot file")
+        require.Contains(t, err.Error(), "confighistory.data: EOF")
+
+        require.NoError(t, os.RemoveAll(filepath.Join(env.testSnapshotDir, snapshotMetadataFileName)))
+        err = env.mgr.ImportConfigHistory("ledger8", env.testSnapshotDir)
+        require.Contains(t, err.Error(), "confighistory.metadata: no such file or directory")
+
+        dataFileWriter, err = snapshot.CreateFile(filepath.Join(env.testSnapshotDir, snapshotMetadataFileName), snapshotFileFormat, testNewHashFunc)
+        require.NoError(t, err)
+        defer dataFileWriter.Close()
+        require.NoError(t, dataFileWriter.EncodeBytes([]byte("junk")))
+        err = env.mgr.ImportConfigHistory("ledger8", env.testSnapshotDir)
+        require.Contains(t, err.Error(), "error while reading from the snapshot file")
+        require.Contains(t, err.Error(), "confighistory.metadata: EOF")
+
+        env.mgr.dbProvider.Close()
+        err = env.mgr.ImportConfigHistory("ledger8", env.testSnapshotDir)
+        require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
+    })
 }
 
 func verifyExportedConfigHistory(t *testing.T, dir string, fileHashes map[string][]byte, expectedCollectionConfigs []*compositeKV) {
@@ -387,13 +497,13 @@ func verifyExportedConfigHistory(t *testing.T, dir string, fileHashes map[string
     require.Contains(t, fileHashes, snapshotDataFileName)
     require.Contains(t, fileHashes, snapshotMetadataFileName)
 
-    dataFile := path.Join(dir, snapshotDataFileName)
+    dataFile := filepath.Join(dir, snapshotDataFileName)
     dataFileContent, err := ioutil.ReadFile(dataFile)
     require.NoError(t, err)
     dataFileHash := sha256.Sum256(dataFileContent)
     require.Equal(t, dataFileHash[:], fileHashes[snapshotDataFileName])
 
-    metadataFile := path.Join(dir, snapshotMetadataFileName)
+    metadataFile := filepath.Join(dir, snapshotMetadataFileName)
     metadataFileContent, err := ioutil.ReadFile(metadataFile)
     require.NoError(t, err)
     metadataFileHash := sha256.Sum256(metadataFileContent)
     require.Equal(t, metadataFileHash[:], fileHashes[snapshotMetadataFileName])
@@ -423,11 +533,27 @@ func verifyExportedConfigHistory(t *testing.T, dir string, fileHashes map[string
     require.Equal(t, expectedCollectionConfigs, retrievedCollectionConfigs)
 }
 
+func verifyImportedConfigHistory(t *testing.T, retriever *Retriever, expectedCCConfigInfo map[string][]*ledger.CollectionConfigInfo) {
+    for chaincodeName, ccConfigInfos := range expectedCCConfigInfo {
+        for _, expectedCCConfig := range ccConfigInfos {
+            ccConfig, err := retriever.CollectionConfigAt(expectedCCConfig.CommittingBlockNum, chaincodeName)
+            require.NoError(t, err)
+            require.True(t, proto.Equal(expectedCCConfig.CollectionConfig, ccConfig.CollectionConfig))
+            require.Equal(t, expectedCCConfig.CommittingBlockNum, ccConfig.CommittingBlockNum)
+
+            ccConfig, err = retriever.MostRecentCollectionConfigBelow(expectedCCConfig.CommittingBlockNum+1, chaincodeName)
+            require.NoError(t, err)
+            require.True(t, proto.Equal(expectedCCConfig.CollectionConfig, ccConfig.CollectionConfig))
+            require.Equal(t, expectedCCConfig.CommittingBlockNum, ccConfig.CommittingBlockNum)
+        }
+    }
+}
+
 func TestExportConfigHistoryErrorCase(t *testing.T) {
     env := newTestEnvForSnapshot(t)
     defer env.cleanup()
 
-    dbHandle := env.mgr.dbProvider.getDB("ledger1")
+    db := env.mgr.dbProvider.getDB("ledger1")
     cc1collConfigPackage := testutilCreateCollConfigPkg([]string{"Explicit-cc1-coll-1", "Explicit-cc1-coll-2"})
     batch, err := prepareDBBatch(
         map[string]*peer.CollectionConfigPackage{
@@ -436,30 +562,31 @@ func TestExportConfigHistoryErrorCase(t *testing.T) {
         50,
     )
     require.NoError(t, err)
-    require.NoError(t, dbHandle.writeBatch(batch, true))
+    require.NoError(t, db.writeBatch(batch, true))
 
     // error during data file creation
-    dataFilePath := path.Join(env.testSnapshotDir, snapshotDataFileName)
+    dataFilePath := filepath.Join(env.testSnapshotDir, snapshotDataFileName)
     _, err = os.Create(dataFilePath)
     require.NoError(t, err)
-    _, err = env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+    retriever := env.mgr.GetRetriever("ledger1", nil)
+    _, err = retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
     require.Contains(t, err.Error(), "error while creating the snapshot file: "+dataFilePath)
     os.RemoveAll(env.testSnapshotDir)
 
     // error during metadata file creation
     require.NoError(t, os.MkdirAll(env.testSnapshotDir, 0700))
-    metadataFilePath := path.Join(env.testSnapshotDir, snapshotMetadataFileName)
+    metadataFilePath := filepath.Join(env.testSnapshotDir, snapshotMetadataFileName)
    _, err = os.Create(metadataFilePath)
     require.NoError(t, err)
-    _, err = env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+    _, err = retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
     require.Contains(t, err.Error(), "error while creating the snapshot file: "+metadataFilePath)
     os.RemoveAll(env.testSnapshotDir)
 
     // error while reading from leveldb
     require.NoError(t, os.MkdirAll(env.testSnapshotDir, 0700))
     env.mgr.dbProvider.Close()
-    _, err = env.retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
+    _, err = retriever.ExportConfigHistory(env.testSnapshotDir, testNewHashFunc)
     require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
     os.RemoveAll(env.testSnapshotDir)
 }
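Note (illustrative, not part of the patch): the sketch below shows how the pieces touched by this change are expected to fit together end to end — NewMgr opens the config history store, Retriever.ExportConfigHistory writes the confighistory.data/confighistory.metadata files, and the new Mgr.ImportConfigHistory rebuilds the store for another ledger from those files. The paths and ledger IDs are hypothetical, the mock.DeployedChaincodeInfoProvider is borrowed from the test setup purely as a stand-in, and the hash-function signature mirrors testNewHashFunc from the tests; in the real peer the import is presumably driven by the snapshot-based ledger creation path rather than by code like this.

package main

import (
    "crypto/sha256"
    "hash"
    "log"

    "github.com/hyperledger/fabric/core/ledger/confighistory"
    "github.com/hyperledger/fabric/core/ledger/mock"
)

func main() {
    // Hypothetical on-disk locations; substitute the peer's real paths.
    const (
        dbPath      = "/tmp/confighistory-example/db"
        snapshotDir = "/tmp/confighistory-example/snapshot"
    )

    // Open the config history store. The mock provider is a placeholder for a
    // real ledger.DeployedChaincodeInfoProvider.
    mgr, err := confighistory.NewMgr(dbPath, &mock.DeployedChaincodeInfoProvider{})
    if err != nil {
        log.Fatalf("creating config history manager: %s", err)
    }

    // Export the collection-config history of "ledger1" into snapshotDir
    // (produces confighistory.data and confighistory.metadata).
    newHashFunc := func() (hash.Hash, error) { return sha256.New(), nil }
    fileHashes, err := mgr.GetRetriever("ledger1", nil).ExportConfigHistory(snapshotDir, newHashFunc)
    if err != nil {
        log.Fatalf("exporting config history: %s", err)
    }
    log.Printf("snapshot files written: %d", len(fileHashes))

    // Rebuild the store for a different ledger from those files. As the error-case
    // test above asserts, the import is strictly non-incremental: it fails if the
    // destination ledger already has any config history entries.
    if err := mgr.ImportConfigHistory("ledger2", snapshotDir); err != nil {
        log.Fatalf("importing config history: %s", err)
    }
}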