From 4ade907cefcc3a1978508cd634d92c7a0518c33c Mon Sep 17 00:00:00 2001
From: Garand Tyson <garand@stellar.org>
Date: Wed, 27 Nov 2024 10:25:06 -0800
Subject: [PATCH 1/2] Removed SQL from test requiring direct commits via ltx

---
 docs/integration.md                           |   2 +-
 docs/quick-reference.md                       |  40 --
 docs/software/commands.md                     |   6 -
 docs/stellar-core_example.cfg                 |  13 -
 src/bucket/BucketManager.cpp                  |  15 +-
 src/bucket/BucketManager.h                    |   1 -
 src/bucket/test/BucketListTests.cpp           |   2 +-
 src/bucket/test/BucketManagerTests.cpp        |   2 +-
 src/bucket/test/BucketTests.cpp               |   2 +-
 src/catchup/ApplyBucketsWork.cpp              |  14 +-
 src/catchup/ApplyBucketsWork.h                |   2 +
 src/database/test/DatabaseTests.cpp           |   8 +-
 src/herder/test/HerderTests.cpp               |  73 +--
 src/herder/test/UpgradesTests.cpp             |  12 +-
 src/history/test/HistoryTests.cpp             |  14 +-
 src/history/test/HistoryTestsUtils.h          |   2 +-
 .../BucketListIsConsistentWithDatabase.cpp    | 235 ++------
 .../BucketListIsConsistentWithDatabase.h      |   2 +-
 src/invariant/Invariant.h                     |   4 +-
 src/invariant/InvariantManager.h              |   9 +-
 src/invariant/InvariantManagerImpl.cpp        |  14 +-
 src/invariant/InvariantManagerImpl.h          |   3 +-
 .../AccountSubEntriesCountIsValidTests.cpp    |   4 +-
 ...ucketListIsConsistentWithDatabaseTests.cpp | 518 ++++------------
 .../test/ConservationOfLumensTests.cpp        |   6 +-
 src/invariant/test/InvariantTests.cpp         |  16 +-
 .../test/LedgerEntryIsValidTests.cpp          |   4 +-
 .../test/LiabilitiesMatchOffersTests.cpp      |  14 +-
 .../test/OrderBookIsNotCrossedTests.cpp       |   4 +-
 .../test/SponsorshipCountIsValidTests.cpp     |   2 +-
 src/ledger/InMemoryLedgerTxn.cpp              | 116 +++-
 src/ledger/InMemoryLedgerTxn.h                |  26 +-
 src/ledger/LedgerManagerImpl.cpp              |  65 +-
 src/ledger/LedgerStateSnapshot.cpp            |   6 +-
 src/ledger/LedgerTxn.cpp                      |   7 +
 .../test/LedgerCloseMetaStreamTests.cpp       | 237 +-------
 src/ledger/test/LedgerTxnTests.cpp            | 555 +++---------------
 src/main/Application.h                        |   5 -
 src/main/ApplicationImpl.cpp                  | 268 ++++-----
 src/main/ApplicationImpl.h                    |   4 +-
 src/main/ApplicationUtils.cpp                 | 140 +----
 src/main/ApplicationUtils.h                   |   4 +-
 src/main/CommandLine.cpp                      |  87 +--
 src/main/Config.cpp                           |  30 +-
 src/main/Config.h                             |  37 +-
 src/main/test/ApplicationUtilsTests.cpp       | 263 +--------
 src/main/test/ExternalQueueTests.cpp          |  44 --
 src/overlay/test/OverlayTests.cpp             |   4 +-
 src/simulation/Simulation.cpp                 |   5 +-
 src/simulation/Simulation.h                   |   5 +-
 src/test/TestUtils.cpp                        |  21 +-
 src/test/TestUtils.h                          |   3 +-
 src/test/test.cpp                             |  32 +-
 src/transactions/test/AllowTrustTests.cpp     |   4 +-
 src/transactions/test/BumpSequenceTests.cpp   |   2 +-
 src/transactions/test/ChangeTrustTests.cpp    |   4 +-
 .../test/ClaimableBalanceTests.cpp            |   2 +-
 .../test/ClawbackClaimableBalanceTests.cpp    |   2 +-
 src/transactions/test/ClawbackTests.cpp       |   2 +-
 src/transactions/test/CreateAccountTests.cpp  |   2 +-
 .../test/EndSponsoringFutureReservesTests.cpp |   2 +-
 .../test/FeeBumpTransactionTests.cpp          |   2 +-
 src/transactions/test/InflationTests.cpp      |   2 +-
 .../test/InvokeHostFunctionTests.cpp          |   2 +-
 .../test/LiquidityPoolDepositTests.cpp        |   2 +-
 .../test/LiquidityPoolTradeTests.cpp          |   2 +-
 .../test/LiquidityPoolWithdrawTests.cpp       |   2 +-
 src/transactions/test/ManageBuyOfferTests.cpp |  16 +-
 src/transactions/test/ManageDataTests.cpp     |   2 +-
 src/transactions/test/MergeTests.cpp          |   2 +-
 src/transactions/test/OfferTests.cpp          |   2 +-
 .../test/PathPaymentStrictSendTests.cpp       |   4 +-
 src/transactions/test/PathPaymentTests.cpp    |   2 +-
 src/transactions/test/PaymentTests.cpp        |  12 +-
 .../test/RevokeSponsorshipTests.cpp           |   2 +-
 src/transactions/test/SetOptionsTests.cpp     |   2 +-
 .../test/SetTrustLineFlagsTests.cpp           |   4 +-
 src/transactions/test/TxEnvelopeTests.cpp     |   2 +-
 78 files changed, 797 insertions(+), 2292 deletions(-)
 delete mode 100644 src/main/test/ExternalQueueTests.cpp

diff --git a/docs/integration.md b/docs/integration.md
index 8140017fb3..deb6147fc3 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -19,7 +19,7 @@ stellar-core generates several types of data that can be used by applications, d
 
 Full [Ledger](ledger.md) snapshots are available in both:
   * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes)
-  * in the case of captive-core (enabled via the `--in-memory` command line option) the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates.
+  * in the case of captive-core, the ledger is maintained within the stellar-core process and ledger-state needs to be tracked as it changes via "meta" updates.
 
 ## Ledger State transition information (transactions, etc)
 
diff --git a/docs/quick-reference.md b/docs/quick-reference.md
index 24c76a6db5..b52cf8ad4f 100644
--- a/docs/quick-reference.md
+++ b/docs/quick-reference.md
@@ -146,46 +146,6 @@ some time, as the entire sequence of ledger _headers_ in the archive (though non
 transactions or ledger states) must be downloaded and verified sequentially. It may therefore be
 worthwhile to save and reuse such a trusted reference file multiple times before regenerating it.
 
-##### Experimental fast "meta data generation"
-`catchup` has a command line flag `--in-memory` that when combined with the
-`METADATA_OUTPUT_STREAM` allows a stellar-core instance to stream meta data instead
-of using a database as intermediate store.
-
-This has been tested as being orders of magnitude faster for replaying large sections
-of history.
-
-If you don't specify any value for stream the command will just replay transactions
-in memory and throw away all meta. This can be useful for performance testing the transaction processing subsystem.
-
-The `--in-memory` flag is also supported by the `run` command, which can be used to
-run a lightweight, stateless validator or watcher node, and this can be combined with
-`METADATA_OUTPUT_STREAM` to stream network activity to another process.
-
-By default, such a stateless node in `run` mode will catch up to the network starting from the
-network's most recent checkpoint, but this behaviour can be further modified using two flags
-(that must be used together) called `--start-at-ledger <N>` and `--start-at-hash <HEXHASH>`. These
-cause the node to start with a fast in-memory catchup to ledger `N` with hash `HEXHASH`, and then
-replay ledgers forward to the current state of the network.
-
-A stateless and meta-streaming node can additionally be configured with
-`EXPERIMENTAL_PRECAUTION_DELAY_META=true` (if unspecified, the default is
-`false`).  If `EXPERIMENTAL_PRECAUTION_DELAY_META` is `true`, then the node will
-delay emitting meta for a ledger `<N>` until the _next_ ledger, `<N+1>`, closes.
-The idea is that if a node suffers local corruption in a ledger because of a
-software bug or hardware fault, it will be unable to close the _next_ ledger
-because it won't be able to reach consensus with other nodes on the input state
-of the next ledger. Therefore, the meta for the corrupted ledger will never be
-emitted.  With `EXPERIMENTAL_PRECAUTION_DELAY_META` set to `false`, a local
-corruption bug could cause a node to emit meta that is inconsistent with that of
-other nodes on the network. Setting `EXPERIMENTAL_PRECAUTION_DELAY_META` to
-`true` does have a cost, though: clients waiting for the meta to determine the
-result of a transaction will have to wait for an extra ledger close duration.
-
-During catchup from history archives, a stateless node will emit meta for any
-historical ledger without delay, even if `EXPERIMENTAL_PRECAUTION_DELAY_META` is
-`true`, because the ledger's results are already part of the validated consensus
-history.
-
 #### Publish backlog
 There is a command `publish` that allows to flush the publish backlog without starting
 core. This can be useful to run to guarantee that certain tasks are done before moving
diff --git a/docs/software/commands.md b/docs/software/commands.md
index 8da3850c46..db17ba6a83 100644
--- a/docs/software/commands.md
+++ b/docs/software/commands.md
@@ -160,12 +160,6 @@ apply.
 * **run**: Runs stellar-core service.<br>
   Option **--wait-for-consensus** lets validators wait to hear from the network
   before participating in consensus.<br>
-  (deprecated) Option **--in-memory** stores the current ledger in memory rather than a
-  database.<br>
-  (deprecated) Option **--start-at-ledger <N>** starts **--in-memory** mode with a catchup to
-  ledger **N** then replays to the current state of the network.<br>
-  (deprecated) Option **--start-at-hash <HASH>** provides a (mandatory) hash for the ledger
-  **N** specified by the **--start-at-ledger** option.
 * **sec-to-pub**:  Reads a secret key on standard input and outputs the
   corresponding public key.  Both keys are in Stellar's standard
   base-32 ASCII format.
diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg
index 103c115cf4..c8325b7476 100644
--- a/docs/stellar-core_example.cfg
+++ b/docs/stellar-core_example.cfg
@@ -601,19 +601,6 @@ MAX_SLOTS_TO_REMEMBER=12
 # only a passive "watcher" node.
 METADATA_OUTPUT_STREAM=""
 
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a stateless node
-# which is streaming meta to delay streaming the meta for a given ledger until
-# it closes the next ledger. This ensures that if a local bug had corrupted the
-# given ledger, then the meta for the corrupted ledger will never be emitted, as
-# the node will not be able to reach consensus with the network on the next
-# ledger.
-#
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true in combination with a
-# non-empty METADATA_OUTPUT_STREAM (which can be configured on the command line
-# as well as in the config file) requires an in-memory database (specified by
-# using --in-memory on the command line).
-EXPERIMENTAL_PRECAUTION_DELAY_META=false
-
 # Number of ledgers worth of transaction metadata to preserve on disk for
 # debugging purposes. These records are automatically maintained and rotated
 # during processing, and are helpful for recovery in case of a serious error;
diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp
index e3e5f5f524..57ca4cdfcf 100644
--- a/src/bucket/BucketManager.cpp
+++ b/src/bucket/BucketManager.cpp
@@ -164,10 +164,6 @@ BucketManager::BucketManager(Application& app)
           app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"}))
     , mBucketListEvictionCounters(app)
     , mEvictionStatistics(std::make_shared<EvictionStatistics>())
-    // Minimal DB is stored in the buckets dir, so delete it only when
-    // mode does not use minimal DB
-    , mDeleteEntireBucketDirInDtor(
-          app.getConfig().isInMemoryModeWithoutMinimalDB())
     , mConfig(app.getConfig())
 {
     for (uint32_t t =
@@ -259,15 +255,8 @@ BucketManager::getBucketDir() const
 
 BucketManager::~BucketManager()
 {
-    ZoneScoped;
-    if (mDeleteEntireBucketDirInDtor)
-    {
-        deleteEntireBucketDir();
-    }
-    else
-    {
-        deleteTmpDirAndUnlockBucketDir();
-    }
+    ZoneScoped;
+    deleteTmpDirAndUnlockBucketDir();
 }
 
 void
diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h
index 3b5f85a65b..09f4e1818f 100644
--- a/src/bucket/BucketManager.h
+++ b/src/bucket/BucketManager.h
@@ -106,7 +106,6 @@ class BucketManager : NonMovableOrCopyable
 
     std::future<EvictionResult> mEvictionFuture{};
 
-    bool const mDeleteEntireBucketDirInDtor;
     // Copy app's config for thread-safe access
     Config const mConfig;
 
diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp
index 5bb6a71d52..7cc5a6a64b 100644
--- a/src/bucket/test/BucketListTests.cpp
+++ b/src/bucket/test/BucketListTests.cpp
@@ -861,7 +861,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
 TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
 {
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     cfg.USE_CONFIG_FOR_GENESIS = true;
 
     auto app = createTestApplication<BucketTestApplication>(clock, cfg);
diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp
index fc5390653f..62fb33af2a 100644
--- a/src/bucket/test/BucketManagerTests.cpp
+++ b/src/bucket/test/BucketManagerTests.cpp
@@ -501,7 +501,7 @@ TEST_CASE("bucketmanager do not leak empty-merge futures",
     // are thereby not leaking. Disable BucketListDB so that snapshots do not
     // hold persist buckets, complicating bucket counting.
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     cfg.ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING = true;
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
         static_cast<uint32_t>(
diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp
index bedb9adb69..30d82ff71c 100644
--- a/src/bucket/test/BucketTests.cpp
+++ b/src/bucket/test/BucketTests.cpp
@@ -1015,7 +1015,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
 TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]")
 {
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
 
diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp
index e37a82680a..ac564c2b04 100644
--- a/src/catchup/ApplyBucketsWork.cpp
+++ b/src/catchup/ApplyBucketsWork.cpp
@@ -72,6 +72,8 @@ ApplyBucketsWork::ApplyBucketsWork(
     , mLevel(startingLevel())
     , mMaxProtocolVersion(maxProtocolVersion)
     , mCounters(app.getClock().now())
+    , mIsApplyInvariantEnabled(
+          app.getInvariantManager().isBucketApplyInvariantEnabled())
 {
 }
 
@@ -111,6 +113,7 @@ ApplyBucketsWork::doReset()
     mLastPos = 0;
     mBucketToApplyIndex = 0;
     mMinProtocolVersionSeen = UINT32_MAX;
+    mSeenKeysBeforeApply.clear();
     mSeenKeys.clear();
     mBucketsToApply.clear();
     mBucketApplicator.reset();
@@ -201,6 +204,14 @@ ApplyBucketsWork::startBucket()
     auto bucket = mBucketsToApply.at(mBucketToApplyIndex);
     mMinProtocolVersionSeen =
         std::min(mMinProtocolVersionSeen, bucket->getBucketVersion());
+
+    // Take a snapshot of seen keys before applying the bucket, but only when
+    // the bucket-apply invariant is enabled, since copying the key set is expensive.
+    if (mIsApplyInvariantEnabled)
+    {
+        mSeenKeysBeforeApply = mSeenKeys;
+    }
+
     // Create a new applicator for the bucket.
     mBucketApplicator = std::make_unique<BucketApplicator>(
         mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket,
@@ -297,7 +308,8 @@ ApplyBucketsWork::doWork()
             // bucket.
             mApp.getInvariantManager().checkOnBucketApply(
                 mBucketsToApply.at(mBucketToApplyIndex),
-                mApplyState.currentLedger, mLevel, isCurr, mEntryTypeFilter);
+                mApplyState.currentLedger, mLevel, isCurr,
+                mSeenKeysBeforeApply);
             prepareForNextBucket();
         }
         if (!appliedAllBuckets())
diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h
index 77674e816e..fc239d1592 100644
--- a/src/catchup/ApplyBucketsWork.h
+++ b/src/catchup/ApplyBucketsWork.h
@@ -38,12 +38,14 @@ class ApplyBucketsWork : public Work
     uint32_t mLevel{0};
     uint32_t mMaxProtocolVersion{0};
     uint32_t mMinProtocolVersionSeen{UINT32_MAX};
+    std::unordered_set<LedgerKey> mSeenKeysBeforeApply;
     std::unordered_set<LedgerKey> mSeenKeys;
     std::vector<std::shared_ptr<LiveBucket>> mBucketsToApply;
     std::unique_ptr<BucketApplicator> mBucketApplicator;
     bool mDelayChecked{false};
 
     BucketApplicator::Counters mCounters;
+    bool const mIsApplyInvariantEnabled;
 
     void advance(std::string const& name, BucketApplicator& applicator);
     std::shared_ptr<LiveBucket> getBucket(std::string const& bucketHash);
diff --git a/src/database/test/DatabaseTests.cpp b/src/database/test/DatabaseTests.cpp
index 4a17cd565c..c2fc838bd3 100644
--- a/src/database/test/DatabaseTests.cpp
+++ b/src/database/test/DatabaseTests.cpp
@@ -72,7 +72,7 @@ transactionTest(Application::pointer app)
 
 TEST_CASE("database smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -81,7 +81,7 @@ TEST_CASE("database smoketest", "[db]")
 
 TEST_CASE("database on-disk smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -201,7 +201,7 @@ checkMVCCIsolation(Application::pointer app)
 
 TEST_CASE("sqlite MVCC test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
     checkMVCCIsolation(app);
@@ -349,7 +349,7 @@ TEST_CASE("postgres performance", "[db][pgperf][!hide]")
 
 TEST_CASE("schema test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp
index fcc07519f4..af32283d65 100644
--- a/src/herder/test/HerderTests.cpp
+++ b/src/herder/test/HerderTests.cpp
@@ -1135,7 +1135,7 @@ TEST_CASE("surge pricing", "[herder][txset][soroban]")
 {
     SECTION("max 0 ops per ledger")
     {
-        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
         cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 0;
 
         VirtualClock clock;
@@ -2564,11 +2564,6 @@ TEST_CASE("SCP State", "[herder]")
         };
 
     auto doTest = [&](bool forceSCP) {
-        SECTION("sqlite")
-        {
-            configure(Config::TestDbMode::TESTDB_ON_DISK_SQLITE);
-        }
-
         SECTION("bucketlistDB")
         {
             configure(Config::TestDbMode::TESTDB_BUCKET_DB_PERSISTENT);
@@ -3258,7 +3253,7 @@ TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]")
 
     auto simulation =
         Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_IN_MEMORY);
             cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100;
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
                 static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
@@ -3637,42 +3632,6 @@ checkHerder(Application& app, HerderImpl& herder, Herder::State expectedState,
     REQUIRE(herder.trackingConsensusLedgerIndex() == ledger);
 }
 
-// Either setup a v19 -> v20 upgrade, or a fee upgrade in v20
-static void
-setupUpgradeAtNextLedger(Application& app)
-{
-    Upgrades::UpgradeParameters scheduledUpgrades;
-    scheduledUpgrades.mUpgradeTime =
-        VirtualClock::from_time_t(app.getLedgerManager()
-                                      .getLastClosedLedgerHeader()
-                                      .header.scpValue.closeTime +
-                                  5);
-    if (protocolVersionIsBefore(app.getLedgerManager()
-                                    .getLastClosedLedgerHeader()
-                                    .header.ledgerVersion,
-                                SOROBAN_PROTOCOL_VERSION))
-    {
-        scheduledUpgrades.mProtocolVersion =
-            static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION);
-    }
-    else
-    {
-        LedgerTxn ltx(app.getLedgerTxnRoot());
-        ConfigUpgradeSetFrameConstPtr configUpgradeSet;
-        ConfigUpgradeSet configUpgradeSetXdr;
-        auto& configEntry = configUpgradeSetXdr.updatedEntry.emplace_back();
-        configEntry.configSettingID(CONFIG_SETTING_CONTRACT_BANDWIDTH_V0);
-        configEntry.contractBandwidth().ledgerMaxTxsSizeBytes = 1'000'000;
-        configEntry.contractBandwidth().txMaxSizeBytes = 500'000;
-
-        configUpgradeSet = makeConfigUpgradeSet(ltx, configUpgradeSetXdr);
-
-        scheduledUpgrades.mConfigUpgradeSetKey = configUpgradeSet->getKey();
-        ltx.commit();
-    }
-    app.getHerder().setUpgrades(scheduledUpgrades);
-}
-
 // The main purpose of this test is to ensure the externalize path works
 // correctly. This entails properly updating tracking in Herder, forwarding
 // externalize information to LM, and Herder appropriately reacting to ledger
@@ -3687,7 +3646,7 @@ herderExternalizesValuesWithProtocol(uint32_t version)
     auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE);
     auto simulation = std::make_shared<Simulation>(
         Simulation::OVER_LOOPBACK, networkID, [version](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_BUCKET_DB_PERSISTENT);
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = version;
             return cfg;
         });
@@ -3720,14 +3679,6 @@ herderExternalizesValuesWithProtocol(uint32_t version)
             Herder::State::HERDER_BOOTING_STATE);
 
     simulation->startAllNodes();
-    if (protocolVersionStartsFrom(version, SOROBAN_PROTOCOL_VERSION))
-    {
-        upgradeSorobanNetworkConfig(
-            [&](SorobanNetworkConfig& cfg) {
-                cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1;
-            },
-            simulation);
-    }
 
     // After SCP is restored, Herder is tracking
     REQUIRE(getC()->getHerder().getState() ==
@@ -3801,22 +3752,28 @@ herderExternalizesValuesWithProtocol(uint32_t version)
     REQUIRE(currentALedger() >= currentLedger);
     REQUIRE(currentCLedger() == currentLedger);
 
+    // Arm the upgrade, but don't close the upgrade ledger yet
+    // C won't upgrade until it's on the right LCL
+    upgradeSorobanNetworkConfig(
+        [&](SorobanNetworkConfig& cfg) {
+            cfg.mLedgerMaxTransactionsSizeBytes = 1'000'000;
+            cfg.mTxMaxSizeBytes = 500'000;
+            cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1;
+        },
+        simulation, /*applyUpgrade=*/false);
+
     // disconnect C
     simulation->dropConnection(validatorAKey.getPublicKey(),
                                validatorCKey.getPublicKey());
 
+    currentLedger = currentALedger();
+
     // Advance A and B a bit further, and collect externalize messages
     std::map<uint32_t, std::pair<SCPEnvelope, StellarMessage>>
         validatorSCPMessagesA;
     std::map<uint32_t, std::pair<SCPEnvelope, StellarMessage>>
         validatorSCPMessagesB;
 
-    for (auto& node : {A, B, getC()})
-    {
-        // C won't upgrade until it's on the right LCL
-        setupUpgradeAtNextLedger(*node);
-    }
-
     auto destinationLedger = waitForAB(4, true);
     for (auto start = currentLedger + 1; start <= destinationLedger; start++)
     {
diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp
index 9ab0e032ba..6b5678ac64 100644
--- a/src/herder/test/UpgradesTests.cpp
+++ b/src/herder/test/UpgradesTests.cpp
@@ -372,7 +372,7 @@ void
 testValidateUpgrades(VirtualClock::system_time_point preferredUpgradeDatetime,
                      bool canBeValid)
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 10;
     cfg.TESTING_UPGRADE_DESIRED_FEE = 100;
     cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 50;
@@ -630,7 +630,7 @@ TEST_CASE("Ledger Manager applies upgrades properly", "[upgrades]")
 TEST_CASE("config upgrade validation", "[upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto headerTime = VirtualClock::to_time_t(genesis(0, 2));
@@ -826,7 +826,7 @@ TEST_CASE("config upgrade validation", "[upgrades]")
 TEST_CASE("config upgrades applied to ledger", "[soroban][upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
         static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
     cfg.USE_CONFIG_FOR_GENESIS = false;
@@ -2274,7 +2274,7 @@ TEST_CASE_VERSIONS("upgrade base reserve", "[upgrades]")
 {
     VirtualClock clock;
 
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto& lm = app->getLedgerManager();
@@ -2974,7 +2974,7 @@ TEST_CASE("upgrade from cpp14 serialized data", "[upgrades]")
 
 TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
@@ -3058,7 +3058,7 @@ TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 TEST_CASE_VERSIONS("upgrade flags", "[upgrades][liquiditypool]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp
index 1f3bf185d8..b880f8ea69 100644
--- a/src/history/test/HistoryTests.cpp
+++ b/src/history/test/HistoryTests.cpp
@@ -520,7 +520,7 @@ TEST_CASE("History publish with restart", "[history][publish]")
     auto catchupSimulation =
         CatchupSimulation(VirtualClock::VIRTUAL_TIME,
                           std::make_shared<TmpDirHistoryConfigurator>(), true,
-                          Config::TESTDB_ON_DISK_SQLITE);
+                          Config::TESTDB_BUCKET_DB_PERSISTENT);
     auto checkpointLedger = catchupSimulation.getLastCheckpointLedger(2);
 
     // Restart at various points in the checkpoint, core should continue
@@ -570,7 +570,7 @@ TEST_CASE("History publish with restart", "[history][publish]")
             // Now catchup to ensure published checkpoints are valid
             auto app = catchupSimulation.createCatchupApplication(
                 std::numeric_limits<uint32_t>::max(),
-                Config::TESTDB_ON_DISK_SQLITE, "app");
+                Config::TESTDB_BUCKET_DB_PERSISTENT, "app");
             REQUIRE(catchupSimulation.catchupOffline(app, checkpointLedger));
         }
     }
@@ -720,10 +720,8 @@ dbModeName(Config::TestDbMode mode)
 {
     switch (mode)
     {
-    case Config::TESTDB_IN_MEMORY_OFFERS:
-        return "TESTDB_IN_MEMORY_OFFERS";
-    case Config::TESTDB_ON_DISK_SQLITE:
-        return "TESTDB_ON_DISK_SQLITE";
+    case Config::TESTDB_IN_MEMORY:
+        return "TESTDB_IN_MEMORY";
 #ifdef USE_POSTGRES
     case Config::TESTDB_POSTGRESQL:
         return "TESTDB_POSTGRESQL";
@@ -856,7 +854,7 @@ TEST_CASE("History catchup with different modes",
                                     60};
 
     std::vector<Config::TestDbMode> dbModes = {
-        Config::TESTDB_ON_DISK_SQLITE, Config::TESTDB_BUCKET_DB_PERSISTENT};
+        Config::TESTDB_BUCKET_DB_PERSISTENT};
 #ifdef USE_POSTGRES
     if (!force_sqlite)
         dbModes.push_back(Config::TESTDB_POSTGRESQL);
@@ -1726,7 +1724,7 @@ TEST_CASE("Externalize gap while catchup work is running", "[history][catchup]")
 TEST_CASE("CheckpointBuilder", "[history][publish]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    auto cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
     TmpDirHistoryConfigurator().configure(cfg, true);
 
     auto app = createTestApplication(clock, cfg);
diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h
index 88453ef6a6..d3cbc4100b 100644
--- a/src/history/test/HistoryTestsUtils.h
+++ b/src/history/test/HistoryTestsUtils.h
@@ -217,7 +217,7 @@ class CatchupSimulation
         std::shared_ptr<HistoryConfigurator> cg =
             std::make_shared<TmpDirHistoryConfigurator>(),
         bool startApp = true,
-        Config::TestDbMode dbMode = Config::TESTDB_IN_MEMORY_OFFERS);
+        Config::TestDbMode dbMode = Config::TESTDB_IN_MEMORY);
     ~CatchupSimulation();
 
     Application&
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp
index 9f99e44afb..f1f3202e21 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.cpp
+++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp
@@ -16,6 +16,7 @@
 #include "main/Application.h"
 #include "main/PersistentState.h"
 #include "medida/timer.h"
+#include "util/GlobalChecks.h"
 #include "util/XDRCereal.h"
 #include <chrono>
 #include <fmt/chrono.h>
@@ -25,7 +26,9 @@
 namespace stellar
 {
 
-static std::string
+namespace
+{
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
 {
     auto fromDb = ltx.loadWithoutRecord(LedgerEntryKey(entry));
@@ -50,7 +53,7 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
     }
 }
 
-static std::string
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
 {
     auto fromDb = ltx.loadWithoutRecord(key);
@@ -64,6 +67,25 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
     return s;
 }
 
+std::string
+checkDbEntryCounts(Application& app, LedgerRange const& range,
+                   uint64_t expectedOfferCount)
+{
+    std::string msg;
+    auto& ltxRoot = app.getLedgerTxnRoot();
+    uint64_t numInDb = ltxRoot.countObjects(OFFER, range);
+    if (numInDb != expectedOfferCount)
+    {
+        msg = fmt::format(
+            FMT_STRING("Incorrect OFFER count: Bucket = {:d} Database "
+                       "= {:d}"),
+            expectedOfferCount, numInDb);
+    }
+
+    return msg;
+}
+}
+
 std::shared_ptr<Invariant>
 BucketListIsConsistentWithDatabase::registerInvariant(Application& app)
 {
@@ -83,103 +105,6 @@ BucketListIsConsistentWithDatabase::getName() const
     return "BucketListIsConsistentWithDatabase";
 }
 
-struct EntryCounts
-{
-    uint64_t mAccounts{0};
-    uint64_t mTrustLines{0};
-    uint64_t mOffers{0};
-    uint64_t mData{0};
-    uint64_t mClaimableBalance{0};
-    uint64_t mLiquidityPool{0};
-    uint64_t mContractData{0};
-    uint64_t mContractCode{0};
-    uint64_t mConfigSettings{0};
-    uint64_t mTTL{0};
-
-    uint64_t
-    totalEntries() const
-    {
-        return mAccounts + mTrustLines + mOffers + mData + mClaimableBalance +
-               mLiquidityPool + mContractData + mConfigSettings + mTTL;
-    }
-
-    void
-    countLiveEntry(LedgerEntry const& e)
-    {
-        switch (e.data.type())
-        {
-        case ACCOUNT:
-            ++mAccounts;
-            break;
-        case TRUSTLINE:
-            ++mTrustLines;
-            break;
-        case OFFER:
-            ++mOffers;
-            break;
-        case DATA:
-            ++mData;
-            break;
-        case CLAIMABLE_BALANCE:
-            ++mClaimableBalance;
-            break;
-        case LIQUIDITY_POOL:
-            ++mLiquidityPool;
-            break;
-        case CONTRACT_DATA:
-            ++mContractData;
-            break;
-        case CONTRACT_CODE:
-            ++mContractCode;
-            break;
-        case CONFIG_SETTING:
-            ++mConfigSettings;
-            break;
-        case TTL:
-            ++mTTL;
-            break;
-        default:
-            throw std::runtime_error(
-                fmt::format(FMT_STRING("unknown ledger entry type: {:d}"),
-                            static_cast<uint32_t>(e.data.type())));
-        }
-    }
-
-    std::string
-    checkDbEntryCounts(Application& app, LedgerRange const& range,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
-    {
-        std::string msg;
-        auto check = [&](LedgerEntryType let, uint64_t numInBucket) {
-            if (entryTypeFilter(let))
-            {
-                auto& ltxRoot = app.getLedgerTxnRoot();
-                uint64_t numInDb = ltxRoot.countObjects(let, range);
-                if (numInDb != numInBucket)
-                {
-                    msg = fmt::format(
-                        FMT_STRING("Incorrect {} count: Bucket = {:d} Database "
-                                   "= {:d}"),
-                        xdr::xdr_traits<LedgerEntryType>::enum_name(let),
-                        numInBucket, numInDb);
-                    return false;
-                }
-            }
-            return true;
-        };
-
-        // Uses short-circuiting to make this compact
-        check(ACCOUNT, mAccounts) && check(TRUSTLINE, mTrustLines) &&
-            check(OFFER, mOffers) && check(DATA, mData) &&
-            check(CLAIMABLE_BALANCE, mClaimableBalance) &&
-            check(LIQUIDITY_POOL, mLiquidityPool) &&
-            check(CONTRACT_DATA, mContractData) &&
-            check(CONTRACT_CODE, mContractCode) &&
-            check(CONFIG_SETTING, mConfigSettings) && check(TTL, mTTL);
-        return msg;
-    }
-};
-
 void
 BucketListIsConsistentWithDatabase::checkEntireBucketlist()
 {
@@ -188,29 +113,29 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
     std::map<LedgerKey, LedgerEntry> bucketLedgerMap =
         bm.loadCompleteLedgerState(has);
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     medida::Timer timer(std::chrono::microseconds(1));
 
     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
         for (auto const& pair : bucketLedgerMap)
         {
-            // Don't check entry types in BucketListDB when enabled
-            if (mApp.getConfig().isUsingBucketListDB() &&
-                !BucketIndex::typeNotSupported(pair.first.type()))
+            // Don't check entry types supported by BucketListDB, since they
+            // won't exist in SQL
+            if (!BucketIndex::typeNotSupported(pair.first.type()))
             {
                 continue;
             }
 
-            counts.countLiveEntry(pair.second);
+            ++offerCount;
             std::string s;
             timer.Time([&]() { s = checkAgainstDatabase(ltx, pair.second); });
             if (!s.empty())
             {
                 throw std::runtime_error(s);
             }
-            auto i = counts.totalEntries();
-            if ((i & 0x7ffff) == 0)
+
+            if ((offerCount & 0x7ffff) == 0)
             {
                 using namespace std::chrono;
                 nanoseconds ns = timer.duration_unit() *
@@ -219,56 +144,33 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
                 CLOG_INFO(Ledger,
                           "Checked bucket-vs-DB consistency for "
                           "{} entries (mean {}/entry)",
-                          i, us);
+                          offerCount, us);
             }
         }
     }
 
-    // Count functionality does not support in-memory LedgerTxn
-    if (!mApp.getConfig().isInMemoryMode())
-    {
-        auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
-                                            has.currentLedger);
+    auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
+                                        has.currentLedger);
 
-        // If BucketListDB enabled, only types not supported by BucketListDB
-        // should be in SQL DB
-        std::function<bool(LedgerEntryType)> filter;
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            filter = BucketIndex::typeNotSupported;
-        }
-        else
-        {
-            filter = [](LedgerEntryType) { return true; };
-        }
-
-        auto s = counts.checkDbEntryCounts(mApp, range, filter);
-        if (!s.empty())
-        {
-            throw std::runtime_error(s);
-        }
+    auto s = checkDbEntryCounts(mApp, range, offerCount);
+    if (!s.empty())
+    {
+        throw std::runtime_error(s);
     }
 
-    if (mApp.getConfig().isUsingBucketListDB() &&
-        mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
-            BucketIndex::DB_BACKEND_STATE)
+    if (mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
+        BucketIndex::DB_BACKEND_STATE)
     {
-        throw std::runtime_error("BucketListDB enabled but BucketListDB flag "
-                                 "not set in PersistentState.");
+        throw std::runtime_error(
+            "Corrupt DB: BucketListDB flag "
+            "not set in PersistentState. Please run new-db or upgrade-db");
     }
 }
 
 std::string
 BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
 {
-    // If BucketListDB is disabled, we've already enforced the invariant on a
-    // per-Bucket level
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return {};
-    }
-
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     LedgerKeySet seenKeys;
 
     auto perBucketCheck = [&](auto bucket, auto& ltx) {
@@ -289,8 +191,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
                 auto [_, newKey] = seenKeys.emplace(key);
                 if (newKey)
                 {
-                    counts.countLiveEntry(e.liveEntry());
-
+                    ++offerCount;
                     auto s = checkAgainstDatabase(ltx, e.liveEntry());
                     if (!s.empty())
                     {
@@ -343,17 +244,15 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
     auto range =
         LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, newestLedger);
 
-    // SQL only stores offers when BucketListDB is enabled
-    return counts.checkDbEntryCounts(
-        mApp, range, [](LedgerEntryType let) { return let == OFFER; });
+    return checkDbEntryCounts(mApp, range, offerCount);
 }
 
 std::string
 BucketListIsConsistentWithDatabase::checkOnBucketApply(
     std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
-    uint32_t newestLedger, std::function<bool(LedgerEntryType)> entryTypeFilter)
+    uint32_t newestLedger, std::unordered_set<LedgerKey> const& shadowedKeys)
 {
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
 
@@ -394,28 +293,25 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply(
                     return s;
                 }
 
-                if (entryTypeFilter(e.liveEntry().data.type()))
+                // Don't check DB against keys shadowed by earlier Buckets
+                if (BucketIndex::typeNotSupported(e.liveEntry().data.type()) &&
+                    shadowedKeys.find(LedgerEntryKey(e.liveEntry())) ==
+                        shadowedKeys.end())
                 {
-                    counts.countLiveEntry(e.liveEntry());
-
-                    // BucketListDB is not compatible with per-Bucket database
-                    // consistency checks
-                    if (!mApp.getConfig().isUsingBucketListDB())
+                    ++offerCount;
+                    auto s = checkAgainstDatabase(ltx, e.liveEntry());
+                    if (!s.empty())
                     {
-                        auto s = checkAgainstDatabase(ltx, e.liveEntry());
-                        if (!s.empty())
-                        {
-                            return s;
-                        }
+                        return s;
                     }
                 }
             }
-            else if (e.type() == DEADENTRY)
+            else
             {
-                // BucketListDB is not compatible with per-Bucket database
-                // consistency checks
-                if (entryTypeFilter(e.deadEntry().type()) &&
-                    !mApp.getConfig().isUsingBucketListDB())
+                // Only check for OFFER keys that are not shadowed by an earlier
+                // bucket
+                if (BucketIndex::typeNotSupported(e.deadEntry().type()) &&
+                    shadowedKeys.find(e.deadEntry()) == shadowedKeys.end())
                 {
                     auto s = checkAgainstDatabase(ltx, e.deadEntry());
                     if (!s.empty())
@@ -428,13 +324,6 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply(
     }
 
     auto range = LedgerRange::inclusive(oldestLedger, newestLedger);
-
-    // BucketListDB not compatible with per-Bucket database consistency checks
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return counts.checkDbEntryCounts(mApp, range, entryTypeFilter);
-    }
-
-    return std::string{};
+    return checkDbEntryCounts(mApp, range, offerCount);
 }
 }
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.h b/src/invariant/BucketListIsConsistentWithDatabase.h
index 36b5a71559..a9bb3003ac 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.h
+++ b/src/invariant/BucketListIsConsistentWithDatabase.h
@@ -36,7 +36,7 @@ class BucketListIsConsistentWithDatabase : public Invariant
     virtual std::string checkOnBucketApply(
         std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
         uint32_t newestLedger,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;
 
     virtual std::string checkAfterAssumeState(uint32_t newestLedger) override;
 
diff --git a/src/invariant/Invariant.h b/src/invariant/Invariant.h
index 8a2a12ec04..6a90105477 100644
--- a/src/invariant/Invariant.h
+++ b/src/invariant/Invariant.h
@@ -8,6 +8,7 @@
 #include <functional>
 #include <memory>
 #include <string>
+#include <unordered_set>
 
 namespace stellar
 {
@@ -17,6 +18,7 @@ enum LedgerEntryType : std::int32_t;
 struct LedgerTxnDelta;
 struct Operation;
 struct OperationResult;
+struct LedgerKey;
 
 // NOTE: The checkOn* functions should have a default implementation so that
 //       more can be added in the future without requiring changes to all
@@ -45,7 +47,7 @@ class Invariant
     virtual std::string
     checkOnBucketApply(std::shared_ptr<LiveBucket const> bucket,
                        uint32_t oldestLedger, uint32_t newestLedger,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
+                       std::unordered_set<LedgerKey> const& shadowedKeys)
     {
         return std::string{};
     }
diff --git a/src/invariant/InvariantManager.h b/src/invariant/InvariantManager.h
index 220209f1c7..61575fcd49 100644
--- a/src/invariant/InvariantManager.h
+++ b/src/invariant/InvariantManager.h
@@ -35,11 +35,12 @@ class InvariantManager
 
     virtual Json::Value getJsonInfo() = 0;
     virtual std::vector<std::string> getEnabledInvariants() const = 0;
+    virtual bool isBucketApplyInvariantEnabled() const = 0;
 
-    virtual void checkOnBucketApply(
-        std::shared_ptr<LiveBucket const> bucket, uint32_t ledger,
-        uint32_t level, bool isCurr,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) = 0;
+    virtual void
+    checkOnBucketApply(std::shared_ptr<LiveBucket const> bucket,
+                       uint32_t ledger, uint32_t level, bool isCurr,
+                       std::unordered_set<LedgerKey> const& shadowedKeys) = 0;
 
     virtual void checkAfterAssumeState(uint32_t newestLedger) = 0;
 
diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp
index d20177f1f6..1a0a855217 100644
--- a/src/invariant/InvariantManagerImpl.cpp
+++ b/src/invariant/InvariantManagerImpl.cpp
@@ -69,10 +69,18 @@ InvariantManagerImpl::getEnabledInvariants() const
     return res;
 }
 
+bool
+InvariantManagerImpl::isBucketApplyInvariantEnabled() const
+{
+    return std::any_of(mEnabled.begin(), mEnabled.end(), [](auto const& inv) {
+        return inv->getName() == "BucketListIsConsistentWithDatabase";
+    });
+}
+
 void
 InvariantManagerImpl::checkOnBucketApply(
     std::shared_ptr<LiveBucket const> bucket, uint32_t ledger, uint32_t level,
-    bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter)
+    bool isCurr, std::unordered_set<LedgerKey> const& shadowedKeys)
 {
     uint32_t oldestLedger =
         isCurr ? LiveBucketList::oldestLedgerInCurr(ledger, level)
@@ -83,8 +91,8 @@ InvariantManagerImpl::checkOnBucketApply(
                 : LiveBucketList::sizeOfSnap(ledger, level));
     for (auto invariant : mEnabled)
     {
-        auto result = invariant->checkOnBucketApply(
-            bucket, oldestLedger, newestLedger, entryTypeFilter);
+        auto result = invariant->checkOnBucketApply(bucket, oldestLedger,
+                                                    newestLedger, shadowedKeys);
         if (result.empty())
         {
             continue;
diff --git a/src/invariant/InvariantManagerImpl.h b/src/invariant/InvariantManagerImpl.h
index 689ab6b750..fbbb35fee8 100644
--- a/src/invariant/InvariantManagerImpl.h
+++ b/src/invariant/InvariantManagerImpl.h
@@ -36,6 +36,7 @@ class InvariantManagerImpl : public InvariantManager
     virtual Json::Value getJsonInfo() override;
 
     virtual std::vector<std::string> getEnabledInvariants() const override;
+    bool isBucketApplyInvariantEnabled() const override;
 
     virtual void checkOnOperationApply(Operation const& operation,
                                        OperationResult const& opres,
@@ -44,7 +45,7 @@ class InvariantManagerImpl : public InvariantManager
     virtual void checkOnBucketApply(
         std::shared_ptr<LiveBucket const> bucket, uint32_t ledger,
         uint32_t level, bool isCurr,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;
 
     virtual void checkAfterAssumeState(uint32_t newestLedger) override;
 
diff --git a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
index acc308be6b..20892ad29e 100644
--- a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
+++ b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
@@ -292,7 +292,7 @@ deleteRandomSubEntryFromAccount(Application& app, LedgerEntry& le,
 TEST_CASE("Create account with no subentries",
           "[invariant][accountsubentriescount]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
@@ -309,7 +309,7 @@ TEST_CASE("Create account then add signers and subentries",
           "[invariant][accountsubentriescount]")
 {
     stellar::uniform_int_distribution<int32_t> changesDist(-1, 2);
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};
 
     for (uint32_t i = 0; i < 50; ++i)
diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
index 7a2a1f6b62..eabec762fa 100644
--- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
+++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
@@ -20,6 +20,7 @@
 #include "test/test.h"
 #include "transactions/TransactionUtils.h"
 #include "util/Decoder.h"
+#include "util/GlobalChecks.h"
 #include "util/Math.h"
 #include "util/UnorderedSet.h"
 #include "util/XDROperators.h"
@@ -43,44 +44,10 @@ struct BucketListGenerator
   public:
     BucketListGenerator() : mLedgerSeq(1)
     {
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+        auto cfg = getTestConfig();
         cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true;
         cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1;
         mAppGenerate = createTestApplication(mClock, cfg);
-
-        auto skey = SecretKey::fromSeed(mAppGenerate->getNetworkID());
-        LedgerKey key(ACCOUNT);
-        key.account().accountID = skey.getPublicKey();
-        mLiveKeys.insert(key);
-
-        if (appProtocolVersionStartsFrom(*mAppGenerate,
-                                         SOROBAN_PROTOCOL_VERSION))
-        {
-            // All config settings entries will be created automatically during
-            // the protocol upgrade and NOT generated by tests, so they should
-            // be reflected in the live key set. This allows tests to still run
-            // on those entries.
-            for (auto t : xdr::xdr_traits<ConfigSettingID>::enum_values())
-            {
-#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
-                // This setting has been introduced in the vnext xdr, but it's
-                // not used in code yet. This check can be replaced with a
-                // runtime protocol check once we create the setting in the
-                // upgrade path.
-                if (static_cast<ConfigSettingID>(t) ==
-                    ConfigSettingID::
-                        CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0)
-                {
-                    continue;
-                }
-#endif
-                LedgerKey ckey(CONFIG_SETTING);
-                ckey.configSetting().configSettingID =
-                    static_cast<ConfigSettingID>(t);
-                mLiveKeys.insert(ckey);
-            }
-        }
-
         LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
         REQUIRE(mLedgerSeq == ltx.loadHeader().current().ledgerSeq);
     }
@@ -102,8 +69,8 @@ struct BucketListGenerator
     applyBuckets(Args&&... args)
     {
         VirtualClock clock;
-        Application::pointer app = createTestApplication(
-            clock, getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS));
+        Application::pointer app =
+            createTestApplication(clock, getTestConfig(1));
         applyBuckets<T, Args...>(app, std::forward<Args>(args)...);
     }
 
@@ -164,8 +131,8 @@ struct BucketListGenerator
     generateLiveEntries(AbstractLedgerTxn& ltx)
     {
         auto entries =
-            LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
-                {CONFIG_SETTING}, 5);
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes({OFFER},
+                                                                       5);
         for (auto& le : entries)
         {
             le.lastModifiedLedgerSeq = mLedgerSeq;
@@ -176,12 +143,7 @@ struct BucketListGenerator
     virtual std::vector<LedgerKey>
     generateDeadEntries(AbstractLedgerTxn& ltx)
     {
-        UnorderedSet<LedgerKey> liveDeletable(mLiveKeys.size());
-        std::copy_if(
-            mLiveKeys.begin(), mLiveKeys.end(),
-            std::inserter(liveDeletable, liveDeletable.end()),
-            [](LedgerKey const& key) { return key.type() != CONFIG_SETTING; });
-
+        UnorderedSet<LedgerKey> liveDeletable = mLiveKeys;
         std::vector<LedgerKey> dead;
         while (dead.size() < 2 && !liveDeletable.empty())
         {
@@ -226,8 +188,7 @@ struct BucketListGenerator
                     out.put(*in);
                 }
 
-                auto bucket =
-                    out.getBucket(bmApply, /*shouldSynchronouslyIndex=*/false);
+                auto bucket = out.getBucket(bmApply, false);
             };
             writeBucketFile(level.getCurr());
             writeBucketFile(level.getSnap());
@@ -280,11 +241,10 @@ doesBucketListContain(LiveBucketList& bl, const BucketEntry& be)
 struct SelectBucketListGenerator : public BucketListGenerator
 {
     uint32_t const mSelectLedger;
-    LedgerEntryType const mType;
     std::shared_ptr<LedgerEntry> mSelected;
 
-    SelectBucketListGenerator(uint32_t selectLedger, LedgerEntryType type)
-        : mSelectLedger(selectLedger), mType(type)
+    SelectBucketListGenerator(uint32_t selectLedger)
+        : mSelectLedger(selectLedger)
     {
     }
 
@@ -293,24 +253,35 @@ struct SelectBucketListGenerator : public BucketListGenerator
     {
         if (mLedgerSeq == mSelectLedger)
         {
-            UnorderedSet<LedgerKey> filteredKeys(mLiveKeys.size());
-            std::copy_if(
-                mLiveKeys.begin(), mLiveKeys.end(),
-                std::inserter(filteredKeys, filteredKeys.end()),
-                [this](LedgerKey const& key) { return key.type() == mType; });
-
-            if (!filteredKeys.empty())
+            if (!mLiveKeys.empty())
             {
                 stellar::uniform_int_distribution<size_t> dist(
-                    0, filteredKeys.size() - 1);
-                auto iter = filteredKeys.begin();
+                    0, mLiveKeys.size() - 1);
+                auto iter = mLiveKeys.begin();
                 std::advance(iter, dist(gRandomEngine));
 
                 mSelected = std::make_shared<LedgerEntry>(
                     ltx.loadWithoutRecord(*iter).current());
             }
         }
-        return BucketListGenerator::generateLiveEntries(ltx);
+
+        auto live = BucketListGenerator::generateLiveEntries(ltx);
+
+        // Selected entry must not be shadowed
+        if (mSelected)
+        {
+            auto key = LedgerEntryKey(*mSelected);
+            for (size_t i = 0; i < live.size(); ++i)
+            {
+                if (LedgerEntryKey(live.at(i)) == key)
+                {
+                    live.erase(live.begin() + i);
+                    break;
+                }
+            }
+        }
+
+        return live;
     }
 
     virtual std::vector<LedgerKey>
@@ -341,8 +312,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
         Application& app,
         std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-        std::function<bool(LedgerEntryType)> filter, LedgerEntry const& entry)
-        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, filter)
+        LedgerEntry const& entry)
+        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
         , mEntry(entry)
         , mAdded{false}
     {
@@ -358,13 +329,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
             uint32_t maxLedger = std::numeric_limits<int32_t>::max() - 1;
             auto& ltxRoot = mApp.getLedgerTxnRoot();
 
-            size_t count = 0;
-            for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-            {
-                count += ltxRoot.countObjects(
-                    static_cast<LedgerEntryType>(let),
-                    LedgerRange::inclusive(minLedger, maxLedger));
-            }
+            auto count = ltxRoot.countObjects(
+                OFFER, LedgerRange::inclusive(minLedger, maxLedger));
 
             if (count > 0)
             {
@@ -433,26 +399,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
     LedgerEntry const mEntry;
     bool mModified;
 
-    void
-    modifyAccountEntry(LedgerEntry& entry)
-    {
-        AccountEntry const& account = mEntry.data.account();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.account() = LedgerTestUtils::generateValidAccountEntry(5);
-        entry.data.account().accountID = account.accountID;
-    }
-
-    void
-    modifyTrustLineEntry(LedgerEntry& entry)
-    {
-        TrustLineEntry const& trustLine = mEntry.data.trustLine();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.trustLine() =
-            LedgerTestUtils::generateValidTrustLineEntry(5);
-        entry.data.trustLine().accountID = trustLine.accountID;
-        entry.data.trustLine().asset = trustLine.asset;
-    }
-
     void
     modifyOfferEntry(LedgerEntry& entry)
     {
@@ -463,90 +409,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
         entry.data.offer().offerID = offer.offerID;
     }
 
-    void
-    modifyDataEntry(LedgerEntry& entry)
-    {
-        DataEntry const& data = mEntry.data.data();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        do
-        {
-            entry.data.data() = LedgerTestUtils::generateValidDataEntry(5);
-        } while (entry.data.data().dataValue == data.dataValue);
-        entry.data.data().accountID = data.accountID;
-        entry.data.data().dataName = data.dataName;
-    }
-
-    void
-    modifyClaimableBalanceEntry(LedgerEntry& entry)
-    {
-        ClaimableBalanceEntry const& cb = mEntry.data.claimableBalance();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.claimableBalance() =
-            LedgerTestUtils::generateValidClaimableBalanceEntry(5);
-
-        entry.data.claimableBalance().balanceID = cb.balanceID;
-    }
-
-    void
-    modifyLiquidityPoolEntry(LedgerEntry& entry)
-    {
-        LiquidityPoolEntry const& lp = mEntry.data.liquidityPool();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.liquidityPool() =
-            LedgerTestUtils::generateValidLiquidityPoolEntry(5);
-
-        entry.data.liquidityPool().liquidityPoolID = lp.liquidityPoolID;
-    }
-
-    void
-    modifyConfigSettingEntry(LedgerEntry& entry)
-    {
-        ConfigSettingEntry const& cfg = mEntry.data.configSetting();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.configSetting() =
-            LedgerTestUtils::generateValidConfigSettingEntry(5);
-
-        entry.data.configSetting().configSettingID(cfg.configSettingID());
-    }
-
-    void
-    modifyContractDataEntry(LedgerEntry& entry)
-    {
-        ContractDataEntry const& cd = mEntry.data.contractData();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.contractData() =
-            LedgerTestUtils::generateValidContractDataEntry(5);
-
-        entry.data.contractData().contract = cd.contract;
-        entry.data.contractData().key = cd.key;
-    }
-
-    void
-    modifyContractCodeEntry(LedgerEntry& entry)
-    {
-        ContractCodeEntry const& cc = mEntry.data.contractCode();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-
-        while (entry.data.contractCode().code ==
-               mEntry.data.contractCode().code)
-        {
-            entry.data.contractCode() =
-                LedgerTestUtils::generateValidContractCodeEntry(5);
-        }
-
-        entry.data.contractCode().hash = cc.hash;
-    }
-
-    void
-    modifyTTLEntry(LedgerEntry& entry)
-    {
-        TTLEntry const& ee = mEntry.data.ttl();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.ttl() = LedgerTestUtils::generateValidTTLEntry(5);
-
-        entry.data.ttl().keyHash = ee.keyHash;
-    }
-
   public:
     ApplyBucketsWorkModifyEntry(
         Application& app,
@@ -569,41 +431,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
             auto entry = ltx.load(mKey);
             while (entry && entry.current() == mEntry)
             {
-                switch (mEntry.data.type())
-                {
-                case ACCOUNT:
-                    modifyAccountEntry(entry.current());
-                    break;
-                case TRUSTLINE:
-                    modifyTrustLineEntry(entry.current());
-                    break;
-                case OFFER:
-                    modifyOfferEntry(entry.current());
-                    break;
-                case DATA:
-                    modifyDataEntry(entry.current());
-                    break;
-                case CLAIMABLE_BALANCE:
-                    modifyClaimableBalanceEntry(entry.current());
-                    break;
-                case LIQUIDITY_POOL:
-                    modifyLiquidityPoolEntry(entry.current());
-                    break;
-                case CONFIG_SETTING:
-                    modifyConfigSettingEntry(entry.current());
-                    break;
-                case CONTRACT_DATA:
-                    modifyContractDataEntry(entry.current());
-                    break;
-                case CONTRACT_CODE:
-                    modifyContractCodeEntry(entry.current());
-                    break;
-                case TTL:
-                    modifyTTLEntry(entry.current());
-                    break;
-                default:
-                    REQUIRE(false);
-                }
+                releaseAssert(
+                    BucketIndex::typeNotSupported(mEntry.data.type()));
+
+                modifyOfferEntry(entry.current());
                 mModified = true;
             }
 
@@ -655,168 +486,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase empty ledgers",
     REQUIRE_NOTHROW(blg.applyBuckets());
 }
 
-TEST_CASE("BucketListIsConsistentWithDatabase test root account",
-          "[invariant][bucketlistconsistent]")
-{
-    struct TestRootBucketListGenerator : public BucketListGenerator
-    {
-        uint32_t const mTargetLedger;
-        bool mModifiedRoot;
-
-        TestRootBucketListGenerator()
-            : mTargetLedger(stellar::uniform_int_distribution<uint32_t>(2, 100)(
-                  gRandomEngine))
-            , mModifiedRoot(false)
-        {
-        }
-
-        virtual std::vector<LedgerEntry>
-        generateLiveEntries(AbstractLedgerTxn& ltx)
-        {
-            if (mLedgerSeq == mTargetLedger)
-            {
-                mModifiedRoot = true;
-                auto& app = mAppGenerate;
-                auto skey = SecretKey::fromSeed(app->getNetworkID());
-                auto root = skey.getPublicKey();
-                auto le =
-                    stellar::loadAccountWithoutRecord(ltx, root).current();
-                le.lastModifiedLedgerSeq = mLedgerSeq;
-                return {le};
-            }
-            else
-            {
-                return BucketListGenerator::generateLiveEntries(ltx);
-            }
-        }
-
-        virtual std::vector<LedgerKey>
-        generateDeadEntries(AbstractLedgerTxn& ltx)
-        {
-            return {};
-        }
-    };
-
-    for (size_t j = 0; j < 5; ++j)
-    {
-        TestRootBucketListGenerator blg;
-        blg.generateLedgers(100);
-        REQUIRE(blg.mModifiedRoot);
-        REQUIRE_NOTHROW(blg.applyBuckets());
-    }
-}
-
 TEST_CASE("BucketListIsConsistentWithDatabase added entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    auto runTest = [](bool withFilter) {
-        for (size_t nTests = 0; nTests < 40; ++nTests)
-        {
-            BucketListGenerator blg;
-            blg.generateLedgers(100);
-
-            stellar::uniform_int_distribution<uint32_t> addAtLedgerDist(
-                2, blg.mLedgerSeq);
-            auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
-                {CONFIG_SETTING}, 5);
-            le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine);
-
-            if (!withFilter)
-            {
-                auto filter = [](auto) { return true; };
-                if (le.data.type() == CONFIG_SETTING)
-                {
-                    // Config settings would have a duplicate key due to low key
-                    // space.
-                    REQUIRE_THROWS_AS(
-                        blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le),
-                        std::runtime_error);
-                }
-                else
-                {
-                    REQUIRE_THROWS_AS(
-                        blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le),
-                        InvariantDoesNotHold);
-                }
-            }
-            else
-            {
-                auto filter = [&](auto let) { return let != le.data.type(); };
-                REQUIRE_NOTHROW(
-                    blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le));
-            }
-        }
-    };
-
-    runTest(true);
+    for (size_t nTests = 0; nTests < 40; ++nTests)
+    {
+        BucketListGenerator blg;
+        blg.generateLedgers(100);
 
-    // This tests the filtering behavior of BucketListIsConsistentWithDatabase
-    // because the bucket apply will not add anything of the specified
-    // LedgerEntryType, but we will inject an additional LedgerEntry of that
-    // type anyway. But it shouldn't throw because the invariant isn't looking
-    // for those changes.
-    runTest(false);
+        stellar::uniform_int_distribution<uint32_t> addAtLedgerDist(
+            2, blg.mLedgerSeq);
+        auto le =
+            LedgerTestUtils::generateValidLedgerEntryWithTypes({OFFER}, 10);
+        le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine);
+        REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkAddEntry>(le),
+                          InvariantDoesNotHold);
+    }
 }
 
 TEST_CASE("BucketListIsConsistentWithDatabase deleted entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    size_t nTests = 0;
+    while (nTests < 10)
     {
-        size_t nTests = 0;
-        while (nTests < 10)
+        SelectBucketListGenerator blg(100);
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
-            SelectBucketListGenerator blg(100, static_cast<LedgerEntryType>(t));
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-            if (t == CONFIG_SETTING)
-            {
-                // Configuration can not be deleted.
-                REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(
-                                      *blg.mSelected),
-                                  std::runtime_error);
-            }
-            else
-            {
-                REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(
-                                      *blg.mSelected),
-                                  InvariantDoesNotHold);
-            }
-            ++nTests;
+            continue;
         }
+
+        REQUIRE_THROWS_AS(
+            blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(*blg.mSelected),
+            InvariantDoesNotHold);
+        ++nTests;
     }
 }
 
 TEST_CASE("BucketListIsConsistentWithDatabase modified entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    size_t nTests = 0;
+    while (nTests < 10)
     {
-        // Skip CONFIG_SETTING for now because the test modification test does
-        // not work unless blg itself updates the entry.
-        if (t == CONFIG_SETTING)
+        SelectBucketListGenerator blg(100);
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
             continue;
         }
 
-        size_t nTests = 0;
-        while (nTests < 10)
-        {
-            SelectBucketListGenerator blg(100, static_cast<LedgerEntryType>(t));
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-
-            REQUIRE_THROWS_AS(
-                blg.applyBuckets<ApplyBucketsWorkModifyEntry>(*blg.mSelected),
-                InvariantDoesNotHold);
-            ++nTests;
-        }
+        REQUIRE_THROWS_AS(
+            blg.applyBuckets<ApplyBucketsWorkModifyEntry>(*blg.mSelected),
+            InvariantDoesNotHold);
+        ++nTests;
     }
 }
 
@@ -916,8 +640,8 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
     {
         uint32_t const mTargetLedger;
 
-        MergeBucketListGenerator(LedgerEntryType let)
-            : SelectBucketListGenerator(25, let), mTargetLedger(110)
+        MergeBucketListGenerator()
+            : SelectBucketListGenerator(25), mTargetLedger(110)
         {
         }
 
@@ -940,73 +664,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
         return (bool)ltx.load(LedgerEntryKey(le));
     };
 
-    auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(1);
     cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true;
     cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1;
 
     testutil::BucketListDepthModifier<LiveBucket> bldm(3);
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    uint32_t nTests = 0;
+    while (nTests < 5)
     {
-        if (t == CONFIG_SETTING)
+        MergeBucketListGenerator blg;
+        auto& blGenerate =
+            blg.mAppGenerate->getBucketManager().getLiveBucketList();
+
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
-            // Merge logic is not applicable to configuration.
             continue;
         }
 
-        uint32_t nTests = 0;
-        while (nTests < 5)
-        {
-            MergeBucketListGenerator blg(static_cast<LedgerEntryType>(t));
-            auto& blGenerate =
-                blg.mAppGenerate->getBucketManager().getLiveBucketList();
-
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-
-            BucketEntry dead(DEADENTRY);
-            dead.deadEntry() = LedgerEntryKey(*blg.mSelected);
-            BucketEntry live(LIVEENTRY);
-            live.liveEntry() = *blg.mSelected;
-            BucketEntry init(INITENTRY);
-            init.liveEntry() = *blg.mSelected;
-
-            {
-                VirtualClock clock;
-                Application::pointer appApply =
-                    createTestApplication(clock, cfg);
-                REQUIRE_NOTHROW(blg.applyBuckets(appApply));
-                REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected));
-                REQUIRE(exists(*appApply, *blg.mSelected));
-            }
+        BucketEntry dead(DEADENTRY);
+        dead.deadEntry() = LedgerEntryKey(*blg.mSelected);
+        BucketEntry live(LIVEENTRY);
+        live.liveEntry() = *blg.mSelected;
+        BucketEntry init(INITENTRY);
+        init.liveEntry() = *blg.mSelected;
 
-            blg.generateLedgers(10);
-            REQUIRE(doesBucketListContain(blGenerate, dead));
-            REQUIRE((doesBucketListContain(blGenerate, live) ||
-                     doesBucketListContain(blGenerate, init)));
+        {
+            VirtualClock clock;
+            Application::pointer appApply = createTestApplication(clock, cfg);
+            REQUIRE_NOTHROW(blg.applyBuckets(appApply));
+            REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected));
+            REQUIRE(exists(*appApply, *blg.mSelected));
+        }
 
-            blg.generateLedgers(100);
-            REQUIRE(!doesBucketListContain(blGenerate, dead));
-            REQUIRE(!(doesBucketListContain(blGenerate, live) ||
-                      doesBucketListContain(blGenerate, init)));
-            REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected));
+        blg.generateLedgers(10);
+        REQUIRE(doesBucketListContain(blGenerate, dead));
+        REQUIRE((doesBucketListContain(blGenerate, live) ||
+                 doesBucketListContain(blGenerate, init)));
 
-            {
-                VirtualClock clock;
-                Application::pointer appApply =
-                    createTestApplication(clock, cfg);
-                REQUIRE_NOTHROW(blg.applyBuckets(appApply));
-                auto& blApply =
-                    appApply->getBucketManager().getLiveBucketList();
-                REQUIRE(!doesBucketListContain(blApply, dead));
-                REQUIRE(!(doesBucketListContain(blApply, live) ||
-                          doesBucketListContain(blApply, init)));
-                REQUIRE(!exists(*appApply, *blg.mSelected));
-            }
+        blg.generateLedgers(100);
+        REQUIRE(!doesBucketListContain(blGenerate, dead));
+        REQUIRE(!(doesBucketListContain(blGenerate, live) ||
+                  doesBucketListContain(blGenerate, init)));
+        REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected));
 
-            ++nTests;
+        {
+            VirtualClock clock;
+            Application::pointer appApply = createTestApplication(clock, cfg);
+            REQUIRE_NOTHROW(blg.applyBuckets(appApply));
+            auto& blApply = appApply->getBucketManager().getLiveBucketList();
+            REQUIRE(!doesBucketListContain(blApply, dead));
+            REQUIRE(!(doesBucketListContain(blApply, live) ||
+                      doesBucketListContain(blApply, init)));
+            REQUIRE(!exists(*appApply, *blg.mSelected));
         }
+
+        ++nTests;
     }
 }
diff --git a/src/invariant/test/ConservationOfLumensTests.cpp b/src/invariant/test/ConservationOfLumensTests.cpp
index 6b91b127b0..e5686c70ef 100644
--- a/src/invariant/test/ConservationOfLumensTests.cpp
+++ b/src/invariant/test/ConservationOfLumensTests.cpp
@@ -153,7 +153,7 @@ TEST_CASE("Fee pool change without inflation",
 TEST_CASE("Account balances changed without inflation",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
 
     uint32_t const N = 10;
@@ -187,7 +187,7 @@ TEST_CASE("Account balances changed without inflation",
 TEST_CASE("Account balances unchanged without inflation",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
 
     uint32_t const N = 10;
@@ -228,7 +228,7 @@ TEST_CASE("Account balances unchanged without inflation",
 TEST_CASE("Inflation changes are consistent",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
     stellar::uniform_int_distribution<uint32_t> payoutsDist(1, 100);
     stellar::uniform_int_distribution<int64_t> amountDist(1, 100000);
diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp
index 448dedf7ca..ab78a375fb 100644
--- a/src/invariant/test/InvariantTests.cpp
+++ b/src/invariant/test/InvariantTests.cpp
@@ -53,9 +53,10 @@ class TestInvariant : public Invariant
     }
 
     virtual std::string
-    checkOnBucketApply(std::shared_ptr<LiveBucket const> bucket,
-                       uint32_t oldestLedger, uint32_t newestLedger,
-                       std::function<bool(LedgerEntryType)> filter) override
+    checkOnBucketApply(
+        std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
+        uint32_t newestLedger,
+        std::unordered_set<LedgerKey> const& shadowedKeys) override
     {
         return mShouldFail ? "fail" : "";
     }
@@ -167,10 +168,9 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]")
         uint32_t ledger = 1;
         uint32_t level = 0;
         bool isCurr = true;
-        REQUIRE_THROWS_AS(
-            app->getInvariantManager().checkOnBucketApply(
-                bucket, ledger, level, isCurr, [](auto) { return true; }),
-            InvariantDoesNotHold);
+        REQUIRE_THROWS_AS(app->getInvariantManager().checkOnBucketApply(
+                              bucket, ledger, level, isCurr, {}),
+                          InvariantDoesNotHold);
     }
 
     {
@@ -188,7 +188,7 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]")
         uint32_t level = 0;
         bool isCurr = true;
         REQUIRE_NOTHROW(app->getInvariantManager().checkOnBucketApply(
-            bucket, ledger, level, isCurr, [](auto) { return true; }));
+            bucket, ledger, level, isCurr, {}));
     }
 }
 
diff --git a/src/invariant/test/LedgerEntryIsValidTests.cpp b/src/invariant/test/LedgerEntryIsValidTests.cpp
index 4d946183ee..082066e6e7 100644
--- a/src/invariant/test/LedgerEntryIsValidTests.cpp
+++ b/src/invariant/test/LedgerEntryIsValidTests.cpp
@@ -19,7 +19,7 @@ using namespace stellar::InvariantTestUtils;
 TEST_CASE("Trigger validity check for each entry type",
           "[invariant][ledgerentryisvalid]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"};
 
     VirtualClock clock;
@@ -67,7 +67,7 @@ TEST_CASE("Trigger validity check for each entry type",
 TEST_CASE("Modify ClaimableBalanceEntry",
           "[invariant][ledgerentryisvalid][claimablebalance]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"};
 
     VirtualClock clock;
diff --git a/src/invariant/test/LiabilitiesMatchOffersTests.cpp b/src/invariant/test/LiabilitiesMatchOffersTests.cpp
index 1c95224341..c4de34c9c0 100644
--- a/src/invariant/test/LiabilitiesMatchOffersTests.cpp
+++ b/src/invariant/test/LiabilitiesMatchOffersTests.cpp
@@ -58,7 +58,7 @@ updateAccountWithRandomBalance(LedgerEntry le, Application& app,
 TEST_CASE("Create account above minimum balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -75,7 +75,7 @@ TEST_CASE("Create account above minimum balance",
 TEST_CASE("Create account below minimum balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -92,7 +92,7 @@ TEST_CASE("Create account below minimum balance",
 TEST_CASE("Create account then decrease balance below minimum",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -111,7 +111,7 @@ TEST_CASE("Create account then decrease balance below minimum",
 TEST_CASE("Account below minimum balance increases but stays below minimum",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -130,7 +130,7 @@ TEST_CASE("Account below minimum balance increases but stays below minimum",
 TEST_CASE("Account below minimum balance decreases",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -250,7 +250,7 @@ generateBuyingLiabilities(Application& app, LedgerEntry offer, bool excess,
 TEST_CASE("Create account then increase liabilities without changing balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     VirtualClock clock;
@@ -289,7 +289,7 @@ TEST_CASE("Create account then increase liabilities without changing balance",
 
 TEST_CASE("Invariant for liabilities", "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     VirtualClock clock;
diff --git a/src/invariant/test/OrderBookIsNotCrossedTests.cpp b/src/invariant/test/OrderBookIsNotCrossedTests.cpp
index c10a6a5daf..7e3b1ab2c4 100644
--- a/src/invariant/test/OrderBookIsNotCrossedTests.cpp
+++ b/src/invariant/test/OrderBookIsNotCrossedTests.cpp
@@ -109,7 +109,7 @@ TEST_CASE("OrderBookIsNotCrossed in-memory order book is consistent with "
           "[invariant][OrderBookIsNotCrossed]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     // When testing the order book not crossed invariant, enable it and no other
     // invariants (these tests do things which violate other invariants).
     cfg.INVARIANT_CHECKS = {};
@@ -185,7 +185,7 @@ TEST_CASE("OrderBookIsNotCrossed properly throws if order book is crossed",
 {
 
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     // When testing the order book not crossed invariant, enable it and no other
     // invariants (these tests do things which violate other invariants).
     cfg.INVARIANT_CHECKS = {};
diff --git a/src/invariant/test/SponsorshipCountIsValidTests.cpp b/src/invariant/test/SponsorshipCountIsValidTests.cpp
index 9f35cd5292..91d75c805b 100644
--- a/src/invariant/test/SponsorshipCountIsValidTests.cpp
+++ b/src/invariant/test/SponsorshipCountIsValidTests.cpp
@@ -18,7 +18,7 @@ using namespace stellar::InvariantTestUtils;
 TEST_CASE("sponsorship invariant", "[invariant][sponsorshipcountisvalid]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"SponsorshipCountIsValid"};
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/InMemoryLedgerTxn.cpp
index bcdaca07a2..4c7d47ae83 100644
--- a/src/ledger/InMemoryLedgerTxn.cpp
+++ b/src/ledger/InMemoryLedgerTxn.cpp
@@ -3,11 +3,10 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "ledger/InMemoryLedgerTxn.h"
-#include "crypto/SecretKey.h"
+#include "ledger/LedgerTxn.h"
 #include "ledger/LedgerTxnImpl.h"
 #include "transactions/TransactionUtils.h"
 #include "util/GlobalChecks.h"
-#include "util/XDROperators.h"
 
 namespace stellar
 {
@@ -73,8 +72,9 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const
 }
 
 InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent,
-                                     Database& db)
-    : LedgerTxn(parent), mDb(db)
+                                     Database& db,
+                                     AbstractLedgerTxnParent* realRoot)
+    : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot)
 {
 }
 
@@ -141,6 +141,36 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter)
     {
         auto const& genKey = iter.key();
         updateLedgerKeyMap(genKey, iter.entryExists());
+
+        // In addition to maintaining the in-memory map, commit offers to the
+        // "real" ltx root so that SQL-backed offers are still exercised
+        if (mRealRootForOffers &&
+            genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY)
+        {
+            auto const& ledgerKey = genKey.ledgerKey();
+            if (ledgerKey.type() == OFFER)
+            {
+                LedgerTxn ltx(*mRealRootForOffers);
+                if (!iter.entryExists())
+                {
+                    ltx.erase(ledgerKey);
+                }
+                else
+                {
+                    auto ltxe = ltx.load(genKey);
+                    if (!ltxe)
+                    {
+                        ltx.create(iter.entry());
+                    }
+                    else
+                    {
+                        ltxe.current() = iter.entry().ledgerEntry();
+                    }
+                }
+
+                ltx.commit();
+            }
+        }
     }
 }
 
@@ -332,4 +362,82 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset(
     return res;
 }
 
+void
+InMemoryLedgerTxn::dropOffers(bool rebuild)
+{
+    if (mRealRootForOffers)
+    {
+        mRealRootForOffers->dropOffers(rebuild);
+    }
+    else
+    {
+        LedgerTxn::dropOffers(rebuild);
+    }
+}
+
+uint64_t
+InMemoryLedgerTxn::countObjects(LedgerEntryType let) const
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->countObjects(let);
+    }
+
+    return 0;
+}
+
+uint64_t
+InMemoryLedgerTxn::countObjects(LedgerEntryType let,
+                                LedgerRange const& ledgers) const
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->countObjects(let, ledgers);
+    }
+
+    return 0;
+}
+
+void
+InMemoryLedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const
+{
+    if (mRealRootForOffers)
+    {
+        mRealRootForOffers->deleteObjectsModifiedOnOrAfterLedger(ledger);
+    }
+}
+
+UnorderedMap<LedgerKey, LedgerEntry>
+InMemoryLedgerTxn::getAllOffers()
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getAllOffers();
+    }
+
+    return LedgerTxn::getAllOffers();
+}
+
+std::shared_ptr<LedgerEntry const>
+InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling)
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getBestOffer(buying, selling);
+    }
+
+    return LedgerTxn::getBestOffer(buying, selling);
+}
+
+std::shared_ptr<LedgerEntry const>
+InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling,
+                                OfferDescriptor const& worseThan)
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getBestOffer(buying, selling, worseThan);
+    }
+
+    return LedgerTxn::getBestOffer(buying, selling, worseThan);
+}
 }
diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/InMemoryLedgerTxn.h
index 76cf56fcae..38917186cb 100644
--- a/src/ledger/InMemoryLedgerTxn.h
+++ b/src/ledger/InMemoryLedgerTxn.h
@@ -44,6 +44,12 @@ class InMemoryLedgerTxn : public LedgerTxn
     Database& mDb;
     std::unique_ptr<soci::transaction> mTransaction;
 
+    // For some tests, we need to bypass ledger close and commit directly to
+    // the in-memory ltx while still testing SQL-backed offers. The
+    // never-committing root sets this pointer so that offer-related calls are
+    // passed through to the real SQL-backed root.
+    AbstractLedgerTxnParent* const mRealRootForOffers;
+
     UnorderedMap<AccountID, UnorderedSet<InternalLedgerKey>>
         mOffersAndPoolShareTrustlineKeys;
 
@@ -75,7 +81,8 @@ class InMemoryLedgerTxn : public LedgerTxn
     EntryIterator getFilteredEntryIterator(EntryIterator const& iter);
 
   public:
-    InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db);
+    InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db,
+                      AbstractLedgerTxnParent* realRoot = nullptr);
     virtual ~InMemoryLedgerTxn();
 
     void addChild(AbstractLedgerTxn& child, TransactionMode mode) override;
@@ -100,6 +107,23 @@ class InMemoryLedgerTxn : public LedgerTxn
     UnorderedMap<LedgerKey, LedgerEntry>
     getPoolShareTrustLinesByAccountAndAsset(AccountID const& account,
                                             Asset const& asset) override;
+
+    // These functions call into the real LedgerTxn root to test offer-related
+    // SQL functionality.
+    UnorderedMap<LedgerKey, LedgerEntry> getAllOffers() override;
+    std::shared_ptr<LedgerEntry const>
+    getBestOffer(Asset const& buying, Asset const& selling) override;
+    std::shared_ptr<LedgerEntry const>
+    getBestOffer(Asset const& buying, Asset const& selling,
+                 OfferDescriptor const& worseThan) override;
+
+    void dropOffers(bool rebuild) override;
+
+    uint64_t countObjects(LedgerEntryType let) const override;
+    uint64_t countObjects(LedgerEntryType let,
+                          LedgerRange const& ledgers) const override;
+
+    void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override;
 };
 
 }
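(For orientation, a minimal sketch of how a test might wire the never-committing
in-memory root to the real SQL-backed root so that offer operations still hit SQL.
The construction below is an illustrative assumption, not part of this patch; the
surrounding application object and the InMemoryLedgerTxnRoot constructor arguments
are placeholders.)

    #include "ledger/InMemoryLedgerTxn.h"
    #include "ledger/InMemoryLedgerTxnRoot.h"

    // Most entry types are served purely from memory, while offer-related
    // calls (getAllOffers, getBestOffer, countObjects(OFFER, ...)) are
    // forwarded to the SQL-backed root passed as the third argument.
    auto inMemoryRoot = std::make_unique<InMemoryLedgerTxnRoot>(/* ... */);
    AbstractLedgerTxnParent& sqlRoot = app.getLedgerTxnRoot();
    auto neverCommittingLtx = std::make_unique<InMemoryLedgerTxn>(
        *inMemoryRoot, app.getDatabase(), &sqlRoot);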
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp
index b562fe8c6f..6fa5019a0b 100644
--- a/src/ledger/LedgerManagerImpl.cpp
+++ b/src/ledger/LedgerManagerImpl.cpp
@@ -348,41 +348,35 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist,
 
     releaseAssert(latestLedgerHeader.has_value());
 
-    // Step 3. Restore BucketList if we're doing a full core startup
-    // (startServices=true), OR when using BucketListDB
-    if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB())
+    HistoryArchiveState has = getLastClosedLedgerHAS();
+    auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
+    auto pubmissing =
+        mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue();
+    missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
+    if (!missing.empty())
     {
-        HistoryArchiveState has = getLastClosedLedgerHAS();
-        auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
-        auto pubmissing = mApp.getHistoryManager()
-                              .getMissingBucketsReferencedByPublishQueue();
-        missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
-        if (!missing.empty())
+        CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'",
+                   missing.size(), mApp.getBucketManager().getBucketDir());
+        throw std::runtime_error("Bucket directory is corrupt");
+    }
+
+    if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
+    {
+        // Only restart merges in full startup mode. Many modes in core
+        // (standalone offline commands, in-memory setup) do not need to
+        // spin up expensive merge processes.
+        auto assumeStateWork =
+            mApp.getWorkScheduler().executeWork<AssumeStateWork>(
+                has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
+        if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
         {
-            CLOG_ERROR(Ledger,
-                       "{} buckets are missing from bucket directory '{}'",
-                       missing.size(), mApp.getBucketManager().getBucketDir());
-            throw std::runtime_error("Bucket directory is corrupt");
+            CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
+                      ledgerAbbrev(*latestLedgerHeader));
         }
-
-        if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
+        else
         {
-            // Only restart merges in full startup mode. Many modes in core
-            // (standalone offline commands, in-memory setup) do not need to
-            // spin up expensive merge processes.
-            auto assumeStateWork =
-                mApp.getWorkScheduler().executeWork<AssumeStateWork>(
-                    has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
-            if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
-            {
-                CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
-                          ledgerAbbrev(*latestLedgerHeader));
-            }
-            else
-            {
-                // Work should only fail during graceful shutdown
-                releaseAssertOrThrow(mApp.isStopping());
-            }
+            // Work should only fail during graceful shutdown
+            releaseAssertOrThrow(mApp.isStopping());
         }
     }
 
@@ -1003,14 +997,7 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData)
         // member variable: if we throw while committing below, we will at worst
         // emit duplicate meta, when retrying.
         mNextMetaToEmit = std::move(ledgerCloseMeta);
-
-        // If the LedgerCloseData provided an expected hash, then we validated
-        // it above.
-        if (!mApp.getConfig().EXPERIMENTAL_PRECAUTION_DELAY_META ||
-            ledgerData.getExpectedHash())
-        {
-            emitNextMeta();
-        }
+        emitNextMeta();
     }
 
     // The next 5 steps happen in a relatively non-obvious, subtle order.
diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp
index 3455d51131..e04101a0fc 100644
--- a/src/ledger/LedgerStateSnapshot.cpp
+++ b/src/ledger/LedgerStateSnapshot.cpp
@@ -222,7 +222,11 @@ LedgerSnapshot::LedgerSnapshot(AbstractLedgerTxn& ltx)
 
 LedgerSnapshot::LedgerSnapshot(Application& app)
 {
-    if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE)
+    if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE
+#ifdef BUILD_TESTS
+        || app.getConfig().MODE_USES_IN_MEMORY_LEDGER
+#endif
+    )
     {
         // Legacy read-only SQL transaction
         mLegacyLedgerTxn = std::make_unique<LedgerTxn>(
diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp
index 14896d01e0..322c34c10c 100644
--- a/src/ledger/LedgerTxn.cpp
+++ b/src/ledger/LedgerTxn.cpp
@@ -3058,6 +3058,13 @@ uint32_t
 LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet<LedgerKey> const& keys,
                                       LedgerKeyMeter* lkMeter)
 {
+#ifdef BUILD_TESTS
+    if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    {
+        return 0;
+    }
+#endif
+
     ZoneScoped;
     uint32_t total = 0;
 
diff --git a/src/ledger/test/LedgerCloseMetaStreamTests.cpp b/src/ledger/test/LedgerCloseMetaStreamTests.cpp
index 078e7d2d35..9b613555aa 100644
--- a/src/ledger/test/LedgerCloseMetaStreamTests.cpp
+++ b/src/ledger/test/LedgerCloseMetaStreamTests.cpp
@@ -39,11 +39,10 @@ using namespace stellar;
 TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
           "[ledgerclosemetastreamlive]")
 {
-    // Live reqires a multinode simulation, as we're not allowed to run a
+    // Live requires a multinode simulation, as we're not allowed to run a
     // validator and record metadata streams at the same time (to avoid the
     // unbounded-latency stream-write step): N nodes participating in consensus,
-    // and two watching and streaming metadata -- the second one using
-    // EXPERIMENTAL_PRECAUTION_DELAY_META.
+    // and one watching and streaming metadata
 
     Hash expectedLastUnsafeHash, expectedLastSafeHash;
     TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8)));
@@ -71,12 +70,7 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
         SIMULATION_CREATE_NODE(Node1); // Validator
         SIMULATION_CREATE_NODE(Node2); // Validator
         SIMULATION_CREATE_NODE(Node3); // Validator
-
-        // Watcher, !EXPERIMENTAL_PRECAUTION_DELAY_META
-        SIMULATION_CREATE_NODE(Node4);
-
-        // Watcher, EXPERIMENTAL_PRECAUTION_DELAY_META
-        SIMULATION_CREATE_NODE(Node5);
+        SIMULATION_CREATE_NODE(Node4); // Watcher
 
         SCPQuorumSet qSet;
         qSet.threshold = 3;
@@ -88,44 +82,27 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
         Config const& cfg2 = getTestConfig(2);
         Config const& cfg3 = getTestConfig(3);
         Config cfg4 = getTestConfig(4);
-        Config cfg5 = getTestConfig(
-            5,
-            Config::
-                TESTDB_IN_MEMORY_NO_OFFERS); // needed by
-                                             // EXPERIMENTAL_PRECAUTION_DELAY_META
-
-        // Step 2: open writable files and pass them to configs 4 and 5
-        // (watchers).
+
+        // Step 2: open a writable file and pass it to the watcher config.
         cfg4.NODE_IS_VALIDATOR = false;
         cfg4.FORCE_SCP = false;
-        cfg5.NODE_IS_VALIDATOR = false;
-        cfg5.FORCE_SCP = false;
 #ifdef _WIN32
         cfg4.METADATA_OUTPUT_STREAM = metaPath;
-        cfg5.METADATA_OUTPUT_STREAM = metaPathSafe;
 #else
         int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644);
         REQUIRE(fd != -1);
         cfg4.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd);
-        int fdSafe = ::open(metaPathSafe.c_str(), O_CREAT | O_WRONLY, 0644);
-        REQUIRE(fdSafe != -1);
-        cfg5.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fdSafe);
 #endif
 
-        cfg4.EXPERIMENTAL_PRECAUTION_DELAY_META = false;
-        cfg5.EXPERIMENTAL_PRECAUTION_DELAY_META = true;
-
         // Step 3: Run simulation a few steps to stream metadata.
         auto app1 = simulation->addNode(vNode1SecretKey, qSet, &cfg1);
         auto app2 = simulation->addNode(vNode2SecretKey, qSet, &cfg2);
         auto app3 = simulation->addNode(vNode3SecretKey, qSet, &cfg3);
         auto app4 = simulation->addNode(vNode4SecretKey, qSet, &cfg4);
-        auto app5 = simulation->addNode(vNode5SecretKey, qSet, &cfg5);
 
         simulation->addPendingConnection(vNode1NodeID, vNode2NodeID);
         simulation->addPendingConnection(vNode1NodeID, vNode3NodeID);
         simulation->addPendingConnection(vNode1NodeID, vNode4NodeID);
-        simulation->addPendingConnection(vNode1NodeID, vNode5NodeID);
 
         simulation->startAllNodes();
         bool watchersAreCorrupted = false;
@@ -144,26 +121,21 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
 
                 auto const lastClosedLedger =
                     app4->getLedgerManager().getLastClosedLedgerNum();
-                REQUIRE(app5->getLedgerManager().getLastClosedLedgerNum() ==
-                        lastClosedLedger);
 
                 if (lastClosedLedger == expectedLastWatcherLedger - 1)
                 {
-                    expectedLastSafeHash = app5->getLedgerManager()
+                    expectedLastSafeHash = app4->getLedgerManager()
                                                .getLastClosedLedgerHeader()
                                                .hash;
 
                     if (induceOneLedgerFork)
                     {
-                        for (auto& app : {app4, app5})
-                        {
-                            txtest::closeLedgerOn(
-                                *app, ledgerToCorrupt,
-                                app->getLedgerManager()
-                                        .getLastClosedLedgerHeader()
-                                        .header.scpValue.closeTime +
-                                    1);
-                        }
+                        txtest::closeLedgerOn(
+                            *app4, ledgerToCorrupt,
+                            app4->getLedgerManager()
+                                    .getLastClosedLedgerHeader()
+                                    .header.scpValue.closeTime +
+                                1);
 
                         expectedLastUnsafeHash =
                             app4->getLedgerManager()
@@ -181,8 +153,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
 
         REQUIRE(app4->getLedgerManager().getLastClosedLedgerNum() ==
                 expectedLastWatcherLedger);
-        REQUIRE(app5->getLedgerManager().getLastClosedLedgerNum() ==
-                expectedLastWatcherLedger);
 
         if (!induceOneLedgerFork)
         {
@@ -206,7 +176,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
     };
 
     auto lcms = readLcms(metaPath);
-    auto lcmsSafe = readLcms(metaPathSafe);
     // The "- 1" is because we don't stream meta for the genesis ledger.
     REQUIRE(lcms.size() == expectedLastWatcherLedger - 1);
     if (lcms.back().v() == 0)
@@ -221,188 +190,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
     {
         REQUIRE(false);
     }
-
-    // The node with EXPERIMENTAL_PRECAUTION_DELAY_META should not have streamed
-    // the meta for the latest ledger (or the latest ledger before the corrupt
-    // one) yet.
-    REQUIRE(lcmsSafe.size() == lcms.size() - 1);
-
-    if (lcmsSafe.back().v() == 0)
-    {
-        REQUIRE(lcmsSafe.back().v0().ledgerHeader.hash == expectedLastSafeHash);
-    }
-    else if (lcmsSafe.back().v() == 1)
-    {
-        REQUIRE(lcmsSafe.back().v1().ledgerHeader.hash == expectedLastSafeHash);
-    }
-    REQUIRE(lcmsSafe ==
-            std::vector<LedgerCloseMeta>(lcms.begin(), lcms.end() - 1));
-}
-
-TEST_CASE("LedgerCloseMetaStream file descriptor - REPLAY_IN_MEMORY",
-          "[ledgerclosemetastreamreplay]")
-{
-    // Step 1: generate some history for replay.
-    using namespace stellar::historytestutils;
-    TmpDirHistoryConfigurator tCfg;
-    {
-        Config genCfg = getTestConfig(0, Config::TESTDB_DEFAULT);
-        genCfg.MANUAL_CLOSE = false;
-        VirtualClock genClock;
-        genCfg = tCfg.configure(genCfg, true);
-        auto genApp = createTestApplication(genClock, genCfg);
-        auto& genHam = genApp->getHistoryArchiveManager();
-        genHam.initializeHistoryArchive(tCfg.getArchiveDirName());
-        for (size_t i = 0; i < 100; ++i)
-        {
-            genClock.crank(false);
-        }
-        auto& genHm = genApp->getHistoryManager();
-        while (genHm.getPublishSuccessCount() < 5)
-        {
-            genClock.crank(true);
-        }
-        while (genClock.cancelAllEvents() ||
-               genApp->getProcessManager().getNumRunningProcesses() > 0)
-        {
-            genClock.crank(false);
-        }
-    }
-
-    // Step 2: open a writable file descriptor.
-    TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8)));
-    TmpDir td = tdm.tmpDir("streams");
-    std::string metaPath = td.getName() + "/stream.xdr";
-    auto cfg1 = getTestConfig(1);
-#ifdef _WIN32
-    cfg1.METADATA_OUTPUT_STREAM = metaPath;
-#else
-    int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644);
-    REQUIRE(fd != -1);
-    cfg1.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd);
-#endif
-
-    bool const delayMeta = GENERATE(true, false);
-
-    // Step 3: pass it to an application and have it catch up to the generated
-    // history, streaming ledgerCloseMeta to the file descriptor.
-    Hash hash;
-    {
-        auto cfg = tCfg.configure(cfg1, false);
-        cfg.NODE_IS_VALIDATOR = false;
-        cfg.FORCE_SCP = false;
-        cfg.RUN_STANDALONE = true;
-        cfg.setInMemoryMode();
-        cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        SECTION("skip mode")
-        {
-            cfg.MODE_STORES_HISTORY_MISC = true;
-            cfg.CATCHUP_SKIP_KNOWN_RESULTS_FOR_TESTING = true;
-        }
-        VirtualClock clock;
-        auto app = createTestApplication(clock, cfg, /*newdb=*/false);
-
-        CatchupConfiguration cc{CatchupConfiguration::CURRENT,
-                                std::numeric_limits<uint32_t>::max(),
-                                CatchupConfiguration::Mode::OFFLINE_COMPLETE};
-        Json::Value catchupInfo;
-        auto& ham = app->getHistoryArchiveManager();
-        auto& lm = app->getLedgerManager();
-        auto archive = ham.selectRandomReadableHistoryArchive();
-        int res = catchup(app, cc, catchupInfo, archive);
-        REQUIRE(res == 0);
-        hash = lm.getLastClosedLedgerHeader().hash;
-        while (clock.cancelAllEvents() ||
-               app->getProcessManager().getNumRunningProcesses() > 0)
-        {
-            clock.crank(false);
-        }
-    }
-
-    // Step 4: reopen the file as an XDR stream and read back the LCMs
-    // and check they have the expected content.
-    //
-    // The EXPERIMENTAL_PRECAUTION_DELAY_META case should still have streamed
-    // the latest meta, because catchup should have validated that ledger's hash
-    // by validating a chain of hashes back from one obtained from consensus.
-    XDRInputFileStream stream;
-    stream.open(metaPath);
-    LedgerCloseMeta lcm;
-    size_t nLcm = 1;
-    while (stream && stream.readOne(lcm))
-    {
-        ++nLcm;
-    }
-    // 5 checkpoints is ledger 0x13f
-    REQUIRE(nLcm == 0x13f);
-    if (lcm.v() == 0)
-    {
-        REQUIRE(lcm.v0().ledgerHeader.hash == hash);
-    }
-    else if (lcm.v() == 1)
-    {
-        REQUIRE(lcm.v1().ledgerHeader.hash == hash);
-    }
-    else
-    {
-        REQUIRE(false);
-    }
-}
-
-TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration",
-          "[ledgerclosemetastreamlive][ledgerclosemetastreamreplay]")
-{
-    VirtualClock clock;
-    Config cfg = getTestConfig();
-
-    SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META may take either value "
-            "(which is ignored) without METADATA_OUTPUT_STREAM")
-    {
-        cfg.METADATA_OUTPUT_STREAM = "";
-        auto const delayMeta = GENERATE(false, true);
-        auto const inMemory = GENERATE(false, true);
-        cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        if (inMemory)
-        {
-            cfg.setInMemoryMode();
-        }
-        REQUIRE_NOTHROW(createTestApplication(clock, cfg));
-    }
-
-    SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META together with "
-            "METADATA_OUTPUT_STREAM requires --in-memory")
-    {
-        TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8)));
-        TmpDir td = tdm.tmpDir("streams");
-        std::string metaPath = td.getName() + "/stream.xdr";
-        std::string metaStream;
-
-#ifdef _WIN32
-        metaStream = metaPath;
-#else
-        int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644);
-        REQUIRE(fd != -1);
-        metaStream = fmt::format(FMT_STRING("fd:{}"), fd);
-#endif
-
-        cfg.METADATA_OUTPUT_STREAM = metaStream;
-        auto const delayMeta = GENERATE(false, true);
-        auto const inMemory = GENERATE(false, true);
-        cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        if (inMemory)
-        {
-            cfg.setInMemoryMode();
-        }
-        if (delayMeta && !inMemory)
-        {
-            REQUIRE_THROWS_AS(createTestApplication(clock, cfg),
-                              std::invalid_argument);
-        }
-        else
-        {
-            REQUIRE_NOTHROW(createTestApplication(clock, cfg));
-        }
-    }
 }
 
 TEST_CASE("METADATA_DEBUG_LEDGERS works", "[metadebug]")
diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp
index 10200eea2d..11f0a2c9fd 100644
--- a/src/ledger/test/LedgerTxnTests.cpp
+++ b/src/ledger/test/LedgerTxnTests.cpp
@@ -339,13 +339,18 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
     std::bernoulli_distribution shouldCommitDist;
 
     auto generateNew = [](AbstractLedgerTxn& ltx,
-                          UnorderedMap<LedgerKey, LedgerEntry>& entries) {
+                          UnorderedMap<LedgerKey, LedgerEntry>& entries,
+                          bool offerOnly) {
         size_t const NEW_ENTRIES = 100;
         UnorderedMap<LedgerKey, LedgerEntry> newBatch;
         while (newBatch.size() < NEW_ENTRIES)
         {
-            auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
-                {CONFIG_SETTING});
+            auto le =
+                offerOnly
+                    ? LedgerTestUtils::generateValidLedgerEntryOfType(OFFER)
+                    : LedgerTestUtils::generateValidLedgerEntryWithExclusions(
+                          {CONFIG_SETTING});
+
             auto key = LedgerEntryKey(le);
             if (entries.find(LedgerEntryKey(le)) == entries.end())
             {
@@ -428,7 +433,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
         }
     };
 
-    auto runTest = [&](AbstractLedgerTxnParent& ltxParent) {
+    auto runTest = [&](AbstractLedgerTxnParent& ltxParent, bool offerOnly) {
         UnorderedMap<LedgerKey, LedgerEntry> entries;
         UnorderedSet<LedgerKey> dead;
         size_t const NUM_BATCHES = 10;
@@ -439,7 +444,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             UnorderedMap<LedgerKey, LedgerEntry> updatedEntries = entries;
             UnorderedSet<LedgerKey> updatedDead = dead;
             LedgerTxn ltx1(ltxParent);
-            generateNew(ltx1, updatedEntries);
+            generateNew(ltx1, updatedEntries, offerOnly);
             generateModify(ltx1, updatedEntries);
             generateErase(ltx1, updatedEntries, updatedDead);
 
@@ -459,7 +464,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             auto app = createTestApplication(clock, getTestConfig(0, mode));
 
             LedgerTxn ltx1(app->getLedgerTxnRoot());
-            runTest(ltx1);
+            runTest(ltx1, false);
         }
 
         SECTION("round trip to LedgerTxnRoot")
@@ -468,13 +473,9 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             {
                 VirtualClock clock;
                 // BucketListDB incompatible with direct root commits
-                auto app = createTestApplication(
-                    clock,
-                    getTestConfig(0, mode == Config::TESTDB_DEFAULT
-                                         ? Config::TESTDB_IN_MEMORY_NO_OFFERS
-                                         : mode));
+                auto app = createTestApplication(clock, getTestConfig(0, mode));
 
-                runTest(app->getLedgerTxnRoot());
+                runTest(app->getLedgerTxnRoot(), true);
             }
 
             SECTION("with no cache")
@@ -482,31 +483,23 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
                 VirtualClock clock;
 
                 // BucketListDB incompatible with direct root commits
-                auto cfg =
-                    getTestConfig(0, mode == Config::TESTDB_DEFAULT
-                                         ? Config::TESTDB_IN_MEMORY_NO_OFFERS
-                                         : mode);
+                auto cfg = getTestConfig(0, mode);
                 cfg.ENTRY_CACHE_SIZE = 0;
                 auto app = createTestApplication(clock, cfg);
 
-                runTest(app->getLedgerTxnRoot());
+                runTest(app->getLedgerTxnRoot(), true);
             }
         }
     };
 
-    SECTION("default")
-    {
-        runTestWithDbMode(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTestWithDbMode(Config::TESTDB_ON_DISK_SQLITE);
+        runTestWithDbMode(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTestWithDbMode(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTestWithDbMode(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -713,19 +706,14 @@ TEST_CASE("LedgerTxn createWithoutLoading and updateWithoutLoading",
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -813,19 +801,14 @@ TEST_CASE("LedgerTxn erase", "[ledgertxn]")
             validate(ltx3, {});
         }
     };
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -918,19 +901,14 @@ TEST_CASE("LedgerTxn eraseWithoutLoading", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1035,7 +1013,7 @@ testInflationWinners(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         testAtRoot(*app);
     }
@@ -1044,7 +1022,7 @@ testInflationWinners(
     if (updates.size() > 1)
     {
         VirtualClock clock;
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
         cfg.ENTRY_CACHE_SIZE = 0;
         auto app = createTestApplication(clock, cfg);
 
@@ -1055,7 +1033,7 @@ testInflationWinners(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         testInflationWinners(app->getLedgerTxnRoot(), maxWinners, minBalance,
                              expected, updates.cbegin(), updates.cend());
@@ -1384,19 +1362,14 @@ TEST_CASE("LedgerTxn loadHeader", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1494,103 +1467,16 @@ TEST_CASE_VERSIONS("LedgerTxn load", "[ledgertxn]")
                 }
             });
         }
-
-        SECTION("load tests for all versions")
-        {
-            for_all_versions(*app, [&]() {
-                SECTION("invalid keys")
-                {
-                    LedgerTxn ltx1(app->getLedgerTxnRoot());
-
-                    auto acc = txtest::getAccount("acc");
-                    auto acc2 = txtest::getAccount("acc2");
-
-                    {
-                        auto native = txtest::makeNativeAsset();
-                        UNSCOPED_INFO("native asset on trustline key");
-
-                        // Invariant not supported in BucketListDB and in-memory
-                        // mode
-                        if (mode != Config::TESTDB_DEFAULT &&
-                            mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                        {
-                            REQUIRE_THROWS_AS(ltx1.load(trustlineKey(
-                                                  acc.getPublicKey(), native)),
-                                              NonSociRelatedException);
-                        }
-                    }
-
-                    {
-                        auto usd = txtest::makeAsset(acc, "usd");
-                        UNSCOPED_INFO("issuer on trustline key");
-
-                        // Invariant not supported in BucketListDB and in-memory
-                        // mode
-                        if (mode != Config::TESTDB_DEFAULT &&
-                            mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                        {
-                            REQUIRE_THROWS_AS(ltx1.load(trustlineKey(
-                                                  acc.getPublicKey(), usd)),
-                                              NonSociRelatedException);
-                        }
-                    }
-
-                    {
-                        std::string accountIDStr, issuerStr, assetCodeStr;
-                        auto invalidAssets = testutil::getInvalidAssets(acc);
-                        for (auto const& asset : invalidAssets)
-                        {
-                            auto key = trustlineKey(acc2.getPublicKey(), asset);
-
-                            // Invariant not supported in BucketListDB and
-                            // in-memory mode
-                            if (mode != Config::TESTDB_DEFAULT &&
-                                mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                            {
-                                REQUIRE_THROWS_AS(ltx1.load(key),
-                                                  NonSociRelatedException);
-                            }
-                        }
-                    }
-
-                    SECTION("load generated keys")
-                    {
-                        for (int i = 0; i < 1000; ++i)
-                        {
-                            LedgerKey lk = autocheck::generator<LedgerKey>()(5);
-
-                            try
-                            {
-                                ltx1.load(lk);
-                            }
-                            catch (NonSociRelatedException&)
-                            {
-                                // this is fine
-                            }
-                            catch (std::exception&)
-                            {
-                                REQUIRE(false);
-                            }
-                        }
-                    }
-                }
-            });
-        }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1933,19 +1819,14 @@ TEST_CASE("LedgerTxn loadAllOffers", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -2334,14 +2215,19 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]")
                     loadAccount(ltx2, account.accountID);
                 }
 
-                // Note that we can't prefetch for more than 1000 offers
-                double expectedPrefetchHitRate =
-                    std::min(numOffers - offerID,
-                             static_cast<int64_t>(getMaxOffersToCross())) /
-                    static_cast<double>(accounts.size());
-                REQUIRE(fabs(expectedPrefetchHitRate -
-                             ltx2.getPrefetchHitRate()) < .000001);
-                REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate());
+                // Prefetch doesn't work in in-memory mode; this mode is
+                // test-only, so skip the prefetch hit-rate checks here
+                if (mode != Config::TESTDB_IN_MEMORY)
+                {
+                    // Note that we can't prefetch for more than 1000 offers
+                    double expectedPrefetchHitRate =
+                        std::min(numOffers - offerID,
+                                 static_cast<int64_t>(getMaxOffersToCross())) /
+                        static_cast<double>(accounts.size());
+                    REQUIRE(fabs(expectedPrefetchHitRate -
+                                 ltx2.getPrefetchHitRate()) < .000001);
+                    REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate());
+                }
             };
 
             SECTION("prefetch for all worse remaining offers")
@@ -2362,14 +2248,16 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
-    SECTION("sqlite")
+    // This mode is only used in testing, but we should still make sure it works
+    // for other tests that leverage it
+    SECTION("in-memory")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -2738,7 +2626,7 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
             e.lastModifiedLedgerSeq = 1;
             entrySet.emplace(e);
         }
-        if (cfg.isUsingBucketListDB())
+        if (!cfg.MODE_USES_IN_MEMORY_LEDGER)
         {
             std::vector<LedgerEntry> ledgerVect{entrySet.begin(),
                                                 entrySet.end()};
@@ -2789,14 +2677,9 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(getTestConfig());
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE));
+        runTest(getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT));
     }
 
 #ifdef USE_POSTGRES
@@ -2821,7 +2704,9 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
         {
             // First add some bulking entries so we're not using a
             // totally empty database.
-            entries = LedgerTestUtils::generateValidLedgerEntries(n);
+            entries =
+                LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                    {OFFER}, n);
             LedgerTxn ltx(app->getLedgerTxnRoot());
             for (auto e : entries)
             {
@@ -2831,7 +2716,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
         }
 
         // Then do some precise timed creates.
-        entries = LedgerTestUtils::generateValidLedgerEntries(n);
+        entries = LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+            {OFFER}, n);
         auto& m =
             app->getMetrics().NewMeter({"ledger", "create", "commit"}, "entry");
         while (!entries.empty())
@@ -2858,8 +2744,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, true);
-        runTest(Config::TESTDB_ON_DISK_SQLITE, false);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false);
     }
 
 #ifdef USE_POSTGRES
@@ -2885,7 +2771,9 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]")
         {
             // First add some bulking entries so we're not using a
             // totally empty database.
-            entries = LedgerTestUtils::generateValidLedgerEntries(n);
+            entries =
+                LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                    {OFFER}, n);
             LedgerTxn ltx(app->getLedgerTxnRoot());
             for (auto e : entries)
             {
@@ -2921,8 +2809,8 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, true);
-        runTest(Config::TESTDB_ON_DISK_SQLITE, false);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false);
     }
 
 #ifdef USE_POSTGRES
@@ -2941,7 +2829,6 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]")
 
     // Test setup.
     VirtualClock clock;
-    cfg.DEPRECATED_SQL_LEDGER_STATE = false;
     Application::pointer app = createTestApplication(clock, cfg);
     UnorderedSet<LedgerKey> keysToPrefetch;
     auto& root = app->getLedgerTxnRoot();
@@ -3148,219 +3035,6 @@ TEST_CASE("LedgerKeyMeter tests")
     REQUIRE(lkMeter.canLoad(ttlKey, std::numeric_limits<std::uint32_t>::max()));
 }
 
-TEST_CASE("Bulk load batch size benchmark", "[!hide][bulkbatchsizebench]")
-{
-    size_t floor = 1000;
-    size_t ceiling = 20000;
-    size_t bestBatchSize = 0;
-    double bestTime = 0xffffffff;
-
-    auto runTest = [&](Config::TestDbMode mode) {
-        for (; floor <= ceiling; floor += 1000)
-        {
-            UnorderedSet<LedgerKey> keys;
-            VirtualClock clock;
-            Config cfg(getTestConfig(0, mode));
-            cfg.PREFETCH_BATCH_SIZE = floor;
-
-            auto app = createTestApplication(clock, cfg);
-
-            auto& root = app->getLedgerTxnRoot();
-
-            auto entries = LedgerTestUtils::generateValidLedgerEntries(50000);
-            LedgerTxn ltx(root);
-            for (auto e : entries)
-            {
-                ltx.createWithoutLoading(e);
-                keys.insert(LedgerEntryKey(e));
-            }
-            ltx.commit();
-
-            auto& m = app->getMetrics().NewTimer(
-                {"ledger", "bulk-load", std::to_string(floor) + " batch"});
-            LedgerTxn ltx2(root);
-            {
-                m.TimeScope();
-                root.prefetchClassic(keys);
-            }
-            ltx2.commit();
-
-            auto total = m.sum();
-            CLOG_INFO(Ledger, "Bulk Load test batch size: {} took {}", floor,
-                      total);
-
-            if (total < bestTime)
-            {
-                bestBatchSize = floor;
-                bestTime = total;
-            }
-        }
-        CLOG_INFO(Ledger, "Best batch and best time per entry {} : {}",
-                  bestBatchSize, bestTime);
-    };
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
-TEST_CASE("Signers performance benchmark", "[!hide][signersbench]")
-{
-    auto getTimeScope = [](Application& app, uint32_t numSigners,
-                           std::string const& phase) {
-        std::string benchmarkStr = "benchmark-" + std::to_string(numSigners);
-        return app.getMetrics()
-            .NewTimer({"signers", benchmarkStr, phase})
-            .TimeScope();
-    };
-
-    auto getTimeSpent = [](Application& app, uint32_t numSigners,
-                           std::string const& phase) {
-        std::string benchmarkStr = "benchmark-" + std::to_string(numSigners);
-        auto time =
-            app.getMetrics().NewTimer({"signers", benchmarkStr, phase}).sum();
-        return phase + ": " + std::to_string(time) + " ms";
-    };
-
-    auto generateEntries = [](size_t numAccounts, uint32_t numSigners) {
-        std::vector<LedgerEntry> accounts;
-        accounts.reserve(numAccounts);
-        for (size_t i = 0; i < numAccounts; ++i)
-        {
-            LedgerEntry le;
-            le.data.type(ACCOUNT);
-            le.lastModifiedLedgerSeq = 2;
-            le.data.account() = LedgerTestUtils::generateValidAccountEntry();
-
-            auto& signers = le.data.account().signers;
-            if (signers.size() > numSigners)
-            {
-                signers.resize(numSigners);
-            }
-            else if (signers.size() < numSigners)
-            {
-                signers.reserve(numSigners);
-                std::generate_n(std::back_inserter(signers),
-                                numSigners - signers.size(),
-                                std::bind(autocheck::generator<Signer>(), 5));
-                std::sort(signers.begin(), signers.end(),
-                          [](Signer const& lhs, Signer const& rhs) {
-                              return lhs.key < rhs.key;
-                          });
-            }
-
-            accounts.emplace_back(le);
-        }
-        return accounts;
-    };
-
-    auto generateKeys = [](std::vector<LedgerEntry> const& accounts) {
-        std::vector<LedgerKey> keys;
-        keys.reserve(accounts.size());
-        std::transform(
-            accounts.begin(), accounts.end(), std::back_inserter(keys),
-            [](LedgerEntry const& le) { return LedgerEntryKey(le); });
-        return keys;
-    };
-
-    auto writeEntries =
-        [&getTimeScope](Application& app, uint32_t numSigners,
-                        std::vector<LedgerEntry> const& accounts) {
-            CLOG_WARNING(Ledger, "Creating accounts");
-            LedgerTxn ltx(app.getLedgerTxnRoot());
-            {
-                auto timer = getTimeScope(app, numSigners, "create");
-                for (auto const& le : accounts)
-                {
-                    ltx.create(le);
-                }
-            }
-
-            CLOG_WARNING(Ledger, "Writing accounts");
-            {
-                auto timer = getTimeScope(app, numSigners, "write");
-                ltx.commit();
-            }
-        };
-
-    auto readEntriesAndUpdateLastModified =
-        [&getTimeScope](Application& app, uint32_t numSigners,
-                        std::vector<LedgerKey> const& accounts) {
-            CLOG_WARNING(Ledger, "Reading accounts");
-            LedgerTxn ltx(app.getLedgerTxnRoot());
-            {
-                auto timer = getTimeScope(app, numSigners, "read");
-                for (auto const& key : accounts)
-                {
-                    ++ltx.load(key).current().lastModifiedLedgerSeq;
-                }
-            }
-
-            CLOG_WARNING(Ledger, "Writing accounts with unchanged signers");
-            {
-                auto timer = getTimeScope(app, numSigners, "rewrite");
-                ltx.commit();
-            }
-        };
-
-    auto runTest = [&](Config::TestDbMode mode, size_t numAccounts,
-                       uint32_t numSigners) {
-        VirtualClock clock;
-        Config cfg(getTestConfig(0, mode));
-        cfg.ENTRY_CACHE_SIZE = 0;
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        CLOG_WARNING(Ledger, "Generating {} accounts with {} signers each",
-                     numAccounts, numSigners);
-        auto accounts = generateEntries(numAccounts, numSigners);
-        auto keys = generateKeys(accounts);
-
-        writeEntries(*app, numSigners, accounts);
-        readEntriesAndUpdateLastModified(*app, numSigners, keys);
-
-        CLOG_WARNING(Ledger, "Done ({}, {}, {}, {})",
-                     getTimeSpent(*app, numSigners, "create"),
-                     getTimeSpent(*app, numSigners, "write"),
-                     getTimeSpent(*app, numSigners, "read"),
-                     getTimeSpent(*app, numSigners, "rewrite"));
-    };
-
-    auto runTests = [&](Config::TestDbMode mode) {
-        SECTION("0 signers")
-        {
-            runTest(mode, 100000, 0);
-        }
-        SECTION("10 signers")
-        {
-            runTest(mode, 100000, 10);
-        }
-        SECTION("20 signers")
-        {
-            runTest(mode, 100000, 20);
-        }
-    };
-
-    SECTION("sqlite")
-    {
-        runTests(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTests(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
 TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]")
 {
     auto getTimeScope = [](Application& app, std::string const& phase) {
@@ -3530,7 +3204,7 @@ TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, 10, 5, 25000);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, 10, 5, 25000);
     }
 }
 
@@ -3936,14 +3610,16 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
-    SECTION("sqlite")
+    // This mode is just used for testing, but we should still make sure it
+    // works
+    SECTION("in-memory")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -3954,7 +3630,7 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]")
 #endif
 }
 
-TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]")
+TEST_CASE("Access deactivated entry", "[ledgertxn]")
 {
     auto runTest = [&](Config::TestDbMode mode) {
         VirtualClock clock;
@@ -3964,47 +3640,6 @@ TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]")
         le1.data.type(OFFER);
         le1.data.offer() = LedgerTestUtils::generateValidOfferEntry();
 
-        LedgerKey lk1 = LedgerEntryKey(le1);
-        auto lk2 = lk1;
-        lk2.offer().sellerID =
-            LedgerTestUtils::generateValidOfferEntry().sellerID;
-
-        {
-            LedgerTxn ltx(app->getLedgerTxnRoot());
-            ltx.create(le1);
-            ltx.commit();
-        }
-
-        for_all_versions(*app, [&]() {
-            app->getLedgerTxnRoot().prefetchClassic({lk1, lk2});
-            LedgerTxn ltx(app->getLedgerTxnRoot());
-            REQUIRE(ltx.load(lk1));
-        });
-    };
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
-TEST_CASE("Access deactivated entry", "[ledgertxn]")
-{
-    auto runTest = [&](Config::TestDbMode mode) {
-        VirtualClock clock;
-        auto app = createTestApplication(clock, getTestConfig(0, mode));
-
-        LedgerEntry le1;
-        le1.data.type(DATA);
-        le1.data.data() = LedgerTestUtils::generateValidDataEntry();
-
         LedgerKey lk1 = LedgerEntryKey(le1);
 
         {
@@ -4120,14 +3755,14 @@ TEST_CASE("Access deactivated entry", "[ledgertxn]")
         }
     };
 
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -4183,7 +3818,7 @@ TEST_CASE("LedgerTxn generalized ledger entries", "[ledgertxn]")
 TEST_CASE("LedgerTxn best offers cache eviction", "[ledgertxn]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto buying = autocheck::generator<Asset>()(UINT32_MAX);
@@ -4400,7 +4035,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         for_versions_from(18, *app, [&] { testAtRoot(*app); });
     }
@@ -4409,7 +4044,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     if (updates.size() > 1)
     {
         VirtualClock clock;
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
         cfg.ENTRY_CACHE_SIZE = 0;
         auto app = createTestApplication(clock, cfg);
 
@@ -4420,7 +4055,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         for_versions_from(18, *app, [&] {
             testPoolShareTrustLinesByAccountAndAsset(
@@ -4448,7 +4083,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         LedgerTxn ltx1(app->getLedgerTxnRoot());
         LedgerTxn ltx2(ltx1);
@@ -4461,7 +4096,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         LedgerTxn ltx1(app->getLedgerTxnRoot());
         ltx1.getDelta();
@@ -4532,7 +4167,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
 TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4574,7 +4209,7 @@ TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]")
 TEST_CASE("InMemoryLedgerTxn getOffersByAccountAndAsset", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4618,7 +4253,7 @@ TEST_CASE("InMemoryLedgerTxn getPoolShareTrustLinesByAccountAndAsset",
           "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4667,7 +4302,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges",
                    "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4691,7 +4326,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges",
 TEST_CASE("InMemoryLedgerTxn filtering", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
     auto root = TestAccount::createRoot(*app);
diff --git a/src/main/Application.h b/src/main/Application.h
index 0e5bac078f..ae23517a57 100644
--- a/src/main/Application.h
+++ b/src/main/Application.h
@@ -327,11 +327,6 @@ class Application
         return ret;
     }
 
-    // This method is used in in-memory mode: when rebuilding state from buckets
-    // is not possible, this method resets the database state back to genesis
-    // (while preserving the overlay data).
-    virtual void resetDBForInMemoryMode() = 0;
-
     virtual AppConnector& getAppConnector() = 0;
 
   protected:
diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp
index 6d9c32f613..d5f8f7208b 100644
--- a/src/main/ApplicationImpl.cpp
+++ b/src/main/ApplicationImpl.cpp
@@ -190,105 +190,112 @@ maybeRebuildLedger(Application& app, bool applyBuckets)
     std::set<LedgerEntryType> toRebuild;
     auto& ps = app.getPersistentState();
     auto bucketListDBEnabled = app.getConfig().isUsingBucketListDB();
-    for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
+
+#ifdef BUILD_TESTS
+    if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
-        // If BucketListDB is enabled, drop all tables except for offers
-        LedgerEntryType t = static_cast<LedgerEntryType>(let);
-        if (let != OFFER && bucketListDBEnabled)
+        // in-memory mode must always rebuild the offers SQL table
+        toRebuild.emplace(OFFER);
+    }
+    else
+#endif
+    {
+        for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
         {
-            toDrop.emplace(t);
-            continue;
-        }
+            // If BucketListDB is enabled, drop all tables except for offers
+            LedgerEntryType t = static_cast<LedgerEntryType>(let);
+            if (let != OFFER && bucketListDBEnabled)
+            {
+                toDrop.emplace(t);
+                continue;
+            }
 
-        if (ps.shouldRebuildForType(t))
-        {
-            toRebuild.emplace(t);
+            if (ps.shouldRebuildForType(t))
+            {
+                toRebuild.emplace(t);
+            }
         }
     }
 
-    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
-    {
-        app.getDatabase().clearPreparedStatementCache();
-        soci::transaction tx(app.getDatabase().getSession());
+    app.getDatabase().clearPreparedStatementCache();
+    soci::transaction tx(app.getDatabase().getSession());
 
-        auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) {
-            for (auto let : entryTypeSet)
+    auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) {
+        for (auto let : entryTypeSet)
+        {
+            switch (let)
             {
-                switch (let)
-                {
-                case ACCOUNT:
-                    LOG_INFO(DEFAULT_LOG, "Dropping accounts");
-                    app.getLedgerTxnRoot().dropAccounts(shouldRebuild);
-                    break;
-                case TRUSTLINE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping trustlines");
-                    app.getLedgerTxnRoot().dropTrustLines(shouldRebuild);
-                    break;
-                case OFFER:
-                    LOG_INFO(DEFAULT_LOG, "Dropping offers");
-                    app.getLedgerTxnRoot().dropOffers(shouldRebuild);
-                    break;
-                case DATA:
-                    LOG_INFO(DEFAULT_LOG, "Dropping accountdata");
-                    app.getLedgerTxnRoot().dropData(shouldRebuild);
-                    break;
-                case CLAIMABLE_BALANCE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances");
-                    app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild);
-                    break;
-                case LIQUIDITY_POOL:
-                    LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools");
-                    app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild);
-                    break;
-                case CONTRACT_DATA:
-                    LOG_INFO(DEFAULT_LOG, "Dropping contractdata");
-                    app.getLedgerTxnRoot().dropContractData(shouldRebuild);
-                    break;
-                case CONTRACT_CODE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping contractcode");
-                    app.getLedgerTxnRoot().dropContractCode(shouldRebuild);
-                    break;
-                case CONFIG_SETTING:
-                    LOG_INFO(DEFAULT_LOG, "Dropping configsettings");
-                    app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild);
-                    break;
-                case TTL:
-                    LOG_INFO(DEFAULT_LOG, "Dropping ttl");
-                    app.getLedgerTxnRoot().dropTTL(shouldRebuild);
-                    break;
-                default:
-                    abort();
-                }
+            case ACCOUNT:
+                LOG_INFO(DEFAULT_LOG, "Dropping accounts");
+                app.getLedgerTxnRoot().dropAccounts(shouldRebuild);
+                break;
+            case TRUSTLINE:
+                LOG_INFO(DEFAULT_LOG, "Dropping trustlines");
+                app.getLedgerTxnRoot().dropTrustLines(shouldRebuild);
+                break;
+            case OFFER:
+                LOG_INFO(DEFAULT_LOG, "Dropping offers");
+                app.getLedgerTxnRoot().dropOffers(shouldRebuild);
+                break;
+            case DATA:
+                LOG_INFO(DEFAULT_LOG, "Dropping accountdata");
+                app.getLedgerTxnRoot().dropData(shouldRebuild);
+                break;
+            case CLAIMABLE_BALANCE:
+                LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances");
+                app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild);
+                break;
+            case LIQUIDITY_POOL:
+                LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools");
+                app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild);
+                break;
+            case CONTRACT_DATA:
+                LOG_INFO(DEFAULT_LOG, "Dropping contractdata");
+                app.getLedgerTxnRoot().dropContractData(shouldRebuild);
+                break;
+            case CONTRACT_CODE:
+                LOG_INFO(DEFAULT_LOG, "Dropping contractcode");
+                app.getLedgerTxnRoot().dropContractCode(shouldRebuild);
+                break;
+            case CONFIG_SETTING:
+                LOG_INFO(DEFAULT_LOG, "Dropping configsettings");
+                app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild);
+                break;
+            case TTL:
+                LOG_INFO(DEFAULT_LOG, "Dropping ttl");
+                app.getLedgerTxnRoot().dropTTL(shouldRebuild);
+                break;
+            default:
+                abort();
             }
-        };
+        }
+    };
 
-        loopEntries(toRebuild, true);
-        loopEntries(toDrop, false);
-        tx.commit();
+    loopEntries(toRebuild, true);
+    loopEntries(toDrop, false);
+    tx.commit();
 
-        // Nothing to apply, exit early
-        if (toRebuild.empty())
-        {
-            return;
-        }
+    // Nothing to apply, exit early
+    if (toRebuild.empty())
+    {
+        return;
+    }
 
-        // No transaction is needed. ApplyBucketsWork breaks the apply into many
-        // small chunks, each of which has its own transaction. If it fails at
-        // some point in the middle, then rebuildledger will not be cleared so
-        // this will run again on next start up.
-        if (applyBuckets)
+    // No transaction is needed. ApplyBucketsWork breaks the apply into many
+    // small chunks, each of which has its own transaction. If it fails at
+    // some point in the middle, then rebuildledger will not be cleared so
+    // this will run again on next start up.
+    if (applyBuckets)
+    {
+        LOG_INFO(DEFAULT_LOG, "Rebuilding ledger tables by applying buckets");
+        auto filter = [&toRebuild](LedgerEntryType t) {
+            return toRebuild.find(t) != toRebuild.end();
+        };
+        if (!applyBucketsForLCL(app, filter))
         {
-            LOG_INFO(DEFAULT_LOG,
-                     "Rebuilding ledger tables by applying buckets");
-            auto filter = [&toRebuild](LedgerEntryType t) {
-                return toRebuild.find(t) != toRebuild.end();
-            };
-            if (!applyBucketsForLCL(app, filter))
-            {
-                throw std::runtime_error("Could not rebuild ledger tables");
-            }
-            LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables");
+            throw std::runtime_error("Could not rebuild ledger tables");
         }
+        LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables");
     }
 
     for (auto let : toRebuild)
@@ -328,29 +335,29 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild)
     mStatusManager = std::make_unique<StatusManager>();
     mAppConnector = std::make_unique<AppConnector>(*this);
 
-    if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    if (mConfig.ENTRY_CACHE_SIZE < 20000)
     {
-        resetLedgerState();
+        LOG_WARNING(DEFAULT_LOG,
+                    "ENTRY_CACHE_SIZE({}) is below the recommended minimum "
+                    "of 20000",
+                    mConfig.ENTRY_CACHE_SIZE);
     }
-    else
-    {
-        if (mConfig.ENTRY_CACHE_SIZE < 20000)
-        {
-            LOG_WARNING(DEFAULT_LOG,
-                        "ENTRY_CACHE_SIZE({}) is below the recommended minimum "
-                        "of 20000",
-                        mConfig.ENTRY_CACHE_SIZE);
-        }
-        mLedgerTxnRoot = std::make_unique<LedgerTxnRoot>(
-            *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE
+    mLedgerTxnRoot = std::make_unique<LedgerTxnRoot>(
+        *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE
 #ifdef BEST_OFFER_DEBUGGING
-            ,
-            mConfig.BEST_OFFER_DEBUGGING_ENABLED
+        ,
+        mConfig.BEST_OFFER_DEBUGGING_ENABLED
 #endif
-        );
+    );
 
-        BucketListIsConsistentWithDatabase::registerInvariant(*this);
+#ifdef BUILD_TESTS
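+    // Test-only in-memory mode holds ledger state in the never-committing
+    // LedgerTxn (see getLedgerTxnRoot and resetLedgerState below).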
+    if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    {
+        resetLedgerState();
     }
+#endif
+
+    BucketListIsConsistentWithDatabase::registerInvariant(*this);
 
     AccountSubEntriesCountIsValid::registerInvariant(*this);
     ConservationOfLumens::registerInvariant(*this);
@@ -383,6 +390,7 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild)
 void
 ApplicationImpl::resetLedgerState()
 {
+#ifdef BUILD_TESTS
     if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         mNeverCommittingLedgerTxn.reset();
@@ -392,9 +400,10 @@ ApplicationImpl::resetLedgerState()
 #endif
         );
         mNeverCommittingLedgerTxn = std::make_unique<InMemoryLedgerTxn>(
-            *mInMemoryLedgerTxnRoot, getDatabase());
+            *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get());
     }
     else
+#endif
     {
         auto& lsRoot = getLedgerTxnRoot();
         lsRoot.deleteObjectsModifiedOnOrAfterLedger(0);
@@ -693,18 +702,6 @@ ApplicationImpl::~ApplicationImpl()
     LOG_INFO(DEFAULT_LOG, "Application destroyed");
 }
 
-void
-ApplicationImpl::resetDBForInMemoryMode()
-{
-    // Load the peer information and reinitialize the DB
-    auto& pm = getOverlayManager().getPeerManager();
-    auto peerData = pm.loadAllPeers();
-    newDB();
-    pm.storePeers(peerData);
-
-    LOG_INFO(DEFAULT_LOG, "In-memory state is reset back to genesis");
-}
-
 uint64_t
 ApplicationImpl::timeNow()
 {
@@ -737,26 +734,6 @@ ApplicationImpl::validateAndLogConfig()
             "RUN_STANDALONE is not set");
     }
 
-    // EXPERIMENTAL_PRECAUTION_DELAY_META is only meaningful when there's a
-    // METADATA_OUTPUT_STREAM.  We only allow EXPERIMENTAL_PRECAUTION_DELAY_META
-    // on a captive core, without a persistent database; old-style ingestion
-    // which reads from the core database could do the delaying itself.
-    if (mConfig.METADATA_OUTPUT_STREAM != "" &&
-        mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META && !mConfig.isInMemoryMode())
-    {
-        throw std::invalid_argument(
-            "Using a METADATA_OUTPUT_STREAM with "
-            "EXPERIMENTAL_PRECAUTION_DELAY_META set to true "
-            "requires --in-memory");
-    }
-
-    if (mConfig.isInMemoryMode())
-    {
-        CLOG_WARNING(
-            Bucket,
-            "in-memory mode is enabled. This feature is deprecated! Node "
-            "may see performance degredation and lose sync with the network.");
-    }
     if (!mDatabase->isSqlite())
     {
         CLOG_WARNING(Database,
@@ -822,8 +799,7 @@ ApplicationImpl::validateAndLogConfig()
                 "DEPRECATED_SQL_LEDGER_STATE set to false but "
                 "deprecated SQL ledger state is active. To disable deprecated "
                 "SQL ledger state, "
-                "MODE_ENABLES_BUCKETLIST must be set and --in-memory flag "
-                "must not be used.");
+                "MODE_ENABLES_BUCKETLIST must be set.");
         }
     }
 
@@ -874,13 +850,6 @@ ApplicationImpl::validateAndLogConfig()
         }
     }
 
-    if (isNetworkedValidator && mConfig.isInMemoryMode())
-    {
-        throw std::invalid_argument(
-            "In-memory mode is set, NODE_IS_VALIDATOR is set, "
-            "and RUN_STANDALONE is not set");
-    }
-
     if (getHistoryArchiveManager().publishEnabled())
     {
         if (!mConfig.modeStoresAllHistory())
@@ -1634,8 +1603,15 @@ AbstractLedgerTxnParent&
 ApplicationImpl::getLedgerTxnRoot()
 {
     releaseAssert(threadIsMain());
-    return mConfig.MODE_USES_IN_MEMORY_LEDGER ? *mNeverCommittingLedgerTxn
-                                              : *mLedgerTxnRoot;
+
+#ifdef BUILD_TESTS
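+    // In-memory test mode routes all ledger-state access through the
+    // never-committing LedgerTxn instead of mLedgerTxnRoot.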
+    if (mConfig.MODE_USES_IN_MEMORY_LEDGER)
+    {
+        return *mNeverCommittingLedgerTxn;
+    }
+#endif
+
+    return *mLedgerTxnRoot;
 }
 
 AppConnector&
diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h
index a7553214f9..e1c0afd73d 100644
--- a/src/main/ApplicationImpl.h
+++ b/src/main/ApplicationImpl.h
@@ -135,8 +135,6 @@ class ApplicationImpl : public Application
 
     virtual AbstractLedgerTxnParent& getLedgerTxnRoot() override;
 
-    virtual void resetDBForInMemoryMode() override;
-
   private:
     VirtualClock& mVirtualClock;
     Config mConfig;
@@ -194,8 +192,10 @@ class ApplicationImpl : public Application
     // is held in the never-committing LedgerTxn in its entirety -- so if it
     // ever grows beyond RAM-size you need to use a mode with some sort of
     // database on secondary storage.
+#ifdef BUILD_TESTS
     std::unique_ptr<InMemoryLedgerTxnRoot> mInMemoryLedgerTxnRoot;
     std::unique_ptr<InMemoryLedgerTxn> mNeverCommittingLedgerTxn;
+#endif
 
     std::unique_ptr<CommandHandler> mCommandHandler;
 
diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp
index 26e5343ef6..bbc14086a6 100644
--- a/src/main/ApplicationUtils.cpp
+++ b/src/main/ApplicationUtils.cpp
@@ -114,79 +114,9 @@ minimalDbPath(Config const& cfg)
     return dpath;
 }
 
-void
-setupMinimalDBForInMemoryMode(Config const& cfg, uint32_t startAtLedger)
-{
-    releaseAssertOrThrow(cfg.isInMemoryMode());
-
-    VirtualClock clock;
-    Application::pointer app;
-
-    // Look for an existing minimal database, and see if it's possible to
-    // restore ledger state from buckets. If it is not possible, reset the
-    // existing database back to genesis. If the minimal database does not
-    // exist, create a new one.
-    bool found = false;
-
-    auto cfgToCheckDB = cfg;
-    cfgToCheckDB.METADATA_OUTPUT_STREAM = "";
-
-    if (std::filesystem::exists(minimalDbPath(cfg)))
-    {
-        app = Application::create(clock, cfgToCheckDB, /* newDB */ false);
-        found = true;
-    }
-    else
-    {
-        LOG_INFO(DEFAULT_LOG, "Minimal database not found, creating one...");
-        app = Application::create(clock, cfgToCheckDB, /* newDB */ true);
-    }
-
-    // Rebuild the state from scratch if:
-    //  - --start-at-ledger was not provided
-    //  - target catchup ledger is before LCL
-    //  - target catchup ledger is too far ahead of LCL
-    // In all other cases, attempt restoring the ledger states via
-    // local bucket application
-    if (found)
-    {
-        LOG_INFO(DEFAULT_LOG, "Found the existing minimal database");
-
-        // DB state might be set to 0 if core previously exited while rebuilding
-        // state. In this case, we want to rebuild the DB from scratch
-        bool rebuildDB =
-            app->getLedgerManager().getLastClosedLedgerHAS().currentLedger <
-            LedgerManager::GENESIS_LEDGER_SEQ;
-
-        if (!rebuildDB)
-        {
-            // Ledger state is not yet ready during this setup step
-            app->getLedgerManager().loadLastKnownLedger(
-                /* restoreBucketlist */ false, /* isLedgerStateReady */ false);
-            auto lcl = app->getLedgerManager().getLastClosedLedgerNum();
-            LOG_INFO(DEFAULT_LOG, "Current in-memory state, got LCL: {}", lcl);
-            rebuildDB =
-                !canRebuildInMemoryLedgerFromBuckets(startAtLedger, lcl);
-        }
-
-        if (rebuildDB)
-        {
-            LOG_INFO(DEFAULT_LOG, "Cannot restore the in-memory state, "
-                                  "rebuilding the state from scratch");
-            app->resetDBForInMemoryMode();
-        }
-    }
-}
-
 Application::pointer
-setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
-         std::string const& startAtHash)
+setupApp(Config& cfg, VirtualClock& clock)
 {
-    if (cfg.isInMemoryMode())
-    {
-        setupMinimalDBForInMemoryMode(cfg, startAtLedger);
-    }
-
     LOG_INFO(DEFAULT_LOG, "Starting stellar-core {}", STELLAR_CORE_VERSION);
     Application::pointer app;
     app = Application::create(clock, cfg, false);
@@ -195,13 +125,14 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
         return nullptr;
     }
 
-    // With in-memory mode, ledger state is not yet ready during this setup step
+    // With in-memory testing mode, ledger state is not yet ready during this
+    // setup step
     app->getLedgerManager().loadLastKnownLedger(
         /* restoreBucketlist */ false,
-        /* isLedgerStateReady */ !cfg.isInMemoryMode());
+        /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER);
     auto lcl = app->getLedgerManager().getLastClosedLedgerHeader();
 
-    if (cfg.isInMemoryMode() &&
+    if (cfg.MODE_USES_IN_MEMORY_LEDGER &&
         lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ)
     {
         // If ledger is genesis, rebuild genesis state from buckets
@@ -211,67 +142,6 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
         }
     }
 
-    bool doCatchupForInMemoryMode =
-        cfg.isInMemoryMode() && startAtLedger != 0 && !startAtHash.empty();
-    if (doCatchupForInMemoryMode)
-    {
-        // At this point, setupApp has either confirmed that we can rebuild from
-        // the existing buckets, or reset the DB to genesis
-        if (lcl.header.ledgerSeq != LedgerManager::GENESIS_LEDGER_SEQ)
-        {
-            auto lclHashStr = binToHex(lcl.hash);
-            if (lcl.header.ledgerSeq == startAtLedger &&
-                lclHashStr != startAtHash)
-            {
-                LOG_ERROR(DEFAULT_LOG,
-                          "Provided hash {} does not agree with stored hash {}",
-                          startAtHash, lclHashStr);
-                return nullptr;
-            }
-
-            auto has = app->getLedgerManager().getLastClosedLedgerHAS();
-
-            // Collect bucket references to pass to catchup _before_ starting
-            // the app, which may trigger garbage collection
-            std::set<std::shared_ptr<LiveBucket>> retained;
-            for (auto const& b : has.allBuckets())
-            {
-                auto bPtr = app->getBucketManager().getBucketByHash<LiveBucket>(
-                    hexToBin256(b));
-                releaseAssert(bPtr);
-                retained.insert(bPtr);
-            }
-
-            // Start the app with LCL set to 0
-            app->getLedgerManager().setupInMemoryStateRebuild();
-            app->start();
-
-            // Set Herder to track the actual LCL
-            app->getHerder().setTrackingSCPState(lcl.header.ledgerSeq,
-                                                 lcl.header.scpValue, true);
-
-            // Schedule the catchup work that will rebuild state
-            auto cc = CatchupConfiguration(has, lcl);
-            app->getLedgerManager().startCatchup(cc, /* archive */ nullptr,
-                                                 retained);
-        }
-        else
-        {
-            LedgerNumHashPair pair;
-            pair.first = startAtLedger;
-            pair.second = std::optional<Hash>(hexToBin256(startAtHash));
-            auto mode = CatchupConfiguration::Mode::OFFLINE_BASIC;
-            Json::Value catchupInfo;
-            int res =
-                catchup(app, CatchupConfiguration{pair, 0, mode}, catchupInfo,
-                        /* archive */ nullptr);
-            if (res != 0)
-            {
-                return nullptr;
-            }
-        }
-    }
-
     return app;
 }
 
diff --git a/src/main/ApplicationUtils.h b/src/main/ApplicationUtils.h
index 30d2cb0fed..140626bc7f 100644
--- a/src/main/ApplicationUtils.h
+++ b/src/main/ApplicationUtils.h
@@ -15,9 +15,7 @@ namespace stellar
 class CatchupConfiguration;
 
 // Create application and validate its configuration
-Application::pointer setupApp(Config& cfg, VirtualClock& clock,
-                              uint32_t startAtLedger,
-                              std::string const& startAtHash);
+Application::pointer setupApp(Config& cfg, VirtualClock& clock);
 int runApp(Application::pointer app);
 void setForceSCPFlag();
 void initializeDatabase(Config cfg);
diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp
index 0fa277417c..5622fdac23 100644
--- a/src/main/CommandLine.cpp
+++ b/src/main/CommandLine.cpp
@@ -353,54 +353,6 @@ maybeSetMetadataOutputStream(Config& cfg, std::string const& stream)
     }
 }
 
-void
-maybeEnableInMemoryMode(Config& config, bool inMemory, uint32_t startAtLedger,
-                        std::string const& startAtHash, bool persistMinimalData)
-{
-    // First, ensure user parameters are valid
-    if (!inMemory)
-    {
-        if (startAtLedger != 0)
-        {
-            throw std::runtime_error("--start-at-ledger requires --in-memory");
-        }
-        if (!startAtHash.empty())
-        {
-            throw std::runtime_error("--start-at-hash requires --in-memory");
-        }
-        return;
-    }
-    if (startAtLedger != 0 && startAtHash.empty())
-    {
-        throw std::runtime_error("--start-at-ledger requires --start-at-hash");
-    }
-    else if (startAtLedger == 0 && !startAtHash.empty())
-    {
-        throw std::runtime_error("--start-at-hash requires --start-at-ledger");
-    }
-
-    // Adjust configs for live in-memory-replay mode
-    config.setInMemoryMode();
-
-    if (startAtLedger != 0 && !startAtHash.empty())
-    {
-        config.MODE_AUTO_STARTS_OVERLAY = false;
-    }
-
-    // Set database to a small sqlite database used to store minimal data needed
-    // to restore the ledger state
-    if (persistMinimalData)
-    {
-        config.DATABASE = SecretValue{minimalDBForInMemoryMode(config)};
-        config.MODE_STORES_HISTORY_LEDGERHEADERS = true;
-        // Since this mode stores historical data (needed to restore
-        // ledger state in certain scenarios), set maintenance to run
-        // aggressively so that we only store a few ledgers worth of data
-        config.AUTOMATIC_MAINTENANCE_PERIOD = std::chrono::seconds(30);
-        config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT;
-    }
-}
-
 clara::Opt
 ledgerHashParser(std::string& ledgerHash)
 {
@@ -419,23 +371,21 @@ clara::Opt
 inMemoryParser(bool& inMemory)
 {
     return clara::Opt{inMemory}["--in-memory"](
-        "(DEPRECATED) store working ledger in memory rather than database");
+        "(DEPRECATED) flag is ignored and will be removed soon.");
 }
 
 clara::Opt
 startAtLedgerParser(uint32_t& startAtLedger)
 {
     return clara::Opt{startAtLedger, "LEDGER"}["--start-at-ledger"](
-        "(DEPRECATED) start in-memory run with replay from historical ledger "
-        "number");
+        "(DEPRECATED) flag is ignored and will be removed soon.");
 }
 
 clara::Opt
 startAtHashParser(std::string& startAtHash)
 {
     return clara::Opt{startAtHash, "HASH"}["--start-at-hash"](
-        "(DEPRECATED) start in-memory run with replay from historical ledger "
-        "hash");
+        "(DEPRECATED) flag is ignored and will be removed soon.");
 }
 
 clara::Opt
@@ -870,7 +820,8 @@ runCatchup(CommandLineArgs const& args)
          trustedCheckpointHashesParser(trustedCheckpointHashesFile),
          outputFileParser(outputFile), disableBucketGCParser(disableBucketGC),
          validationParser(completeValidation), inMemoryParser(inMemory),
-         ledgerHashParser(hash), forceUntrustedCatchup(forceUntrusted),
+         ledgerHashParser(hash),
+         forceUntrustedCatchup(forceUntrusted),
          metadataOutputStreamParser(stream)},
         [&] {
             auto config = configOption.getConfig();
@@ -891,10 +842,6 @@ runCatchup(CommandLineArgs const& args)
                 config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT;
             }
 
-            // --start-at-ledger and --start-at-hash aren't allowed in catchup,
-            // so pass defaults values
-            maybeEnableInMemoryMode(config, inMemory, 0, "",
-                                    /* persistMinimalData */ false);
             maybeSetMetadataOutputStream(config, stream);
 
             VirtualClock clock(VirtualClock::REAL_TIME);
@@ -1221,13 +1168,12 @@ int
 runNewDB(CommandLineArgs const& args)
 {
     CommandLine::ConfigOption configOption;
-    bool minimalForInMemoryMode = false;
+    [[maybe_unused]] bool minimalForInMemoryMode = false;
 
     auto minimalDBParser = [](bool& minimalForInMemoryMode) {
         return clara::Opt{
             minimalForInMemoryMode}["--minimal-for-in-memory-mode"](
-            "Reset the special database used only for in-memory mode (see "
-            "--in-memory flag");
+            "(DEPRECATED) flag is ignored and will be removed soon.");
     };
 
     return runWithHelp(args,
@@ -1235,11 +1181,6 @@ runNewDB(CommandLineArgs const& args)
                         minimalDBParser(minimalForInMemoryMode)},
                        [&] {
                            auto cfg = configOption.getConfig();
-                           if (minimalForInMemoryMode)
-                           {
-                               cfg.DATABASE =
-                                   SecretValue{minimalDBForInMemoryMode(cfg)};
-                           }
                            initializeDatabase(cfg);
                            return 0;
                        });
@@ -1535,10 +1476,10 @@ run(CommandLineArgs const& args)
     CommandLine::ConfigOption configOption;
     auto disableBucketGC = false;
     std::string stream;
-    bool inMemory = false;
     bool waitForConsensus = false;
-    uint32_t startAtLedger = 0;
-    std::string startAtHash;
+    [[maybe_unused]] bool inMemory = false;
+    [[maybe_unused]] uint32_t startAtLedger = 0;
+    [[maybe_unused]] std::string startAtHash;
 
     return runWithHelp(
         args,
@@ -1564,14 +1505,10 @@ run(CommandLineArgs const& args)
                 {
                     cfg.DATABASE = SecretValue{"sqlite3://:memory:"};
                     cfg.MODE_STORES_HISTORY_MISC = false;
-                    cfg.MODE_USES_IN_MEMORY_LEDGER = false;
                     cfg.MODE_ENABLES_BUCKETLIST = false;
                     cfg.PREFETCH_BATCH_SIZE = 0;
                 }
 
-                maybeEnableInMemoryMode(cfg, inMemory, startAtLedger,
-                                        startAtHash,
-                                        /* persistMinimalData */ true);
                 maybeSetMetadataOutputStream(cfg, stream);
                 cfg.FORCE_SCP =
                     cfg.NODE_IS_VALIDATOR ? !waitForConsensus : false;
@@ -1612,10 +1549,8 @@ run(CommandLineArgs const& args)
                 }
 
                 // Second, setup the app with the final configuration.
-                // Note that when in in-memory mode, additional setup may be
-                // required (such as database reset, catchup, etc)
                 clock = std::make_shared<VirtualClock>(clockMode);
-                app = setupApp(cfg, *clock, startAtLedger, startAtHash);
+                app = setupApp(cfg, *clock);
                 if (!app)
                 {
                     LOG_ERROR(DEFAULT_LOG,
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index a932d9217a..c7e50a13a8 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -117,7 +117,6 @@ Config::Config() : NODE_SEED(SecretKey::random())
 
     // non configurable
     MODE_ENABLES_BUCKETLIST = true;
-    MODE_USES_IN_MEMORY_LEDGER = false;
     MODE_STORES_HISTORY_MISC = true;
     MODE_STORES_HISTORY_LEDGERHEADERS = true;
     MODE_DOES_CATCHUP = true;
@@ -157,7 +156,6 @@ Config::Config() : NODE_SEED(SecretKey::random())
     MANUAL_CLOSE = false;
     CATCHUP_COMPLETE = false;
     CATCHUP_RECENT = 0;
-    EXPERIMENTAL_PRECAUTION_DELAY_META = false;
     BACKGROUND_OVERLAY_PROCESSING = true;
     DEPRECATED_SQL_LEDGER_STATE = false;
     BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb
@@ -307,6 +305,7 @@ Config::Config() : NODE_SEED(SecretKey::random())
 #ifdef BUILD_TESTS
     TEST_CASES_ENABLED = false;
     CATCHUP_SKIP_KNOWN_RESULTS_FOR_TESTING = false;
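+    // Test-only: when set, the ledger is held in a never-committing in-memory
+    // LedgerTxn (see ApplicationImpl)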
+    MODE_USES_IN_MEMORY_LEDGER = false;
 #endif
 
 #ifdef BEST_OFFER_DEBUGGING
@@ -1057,10 +1056,6 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
                  [&]() { DISABLE_XDR_FSYNC = readBool(item); }},
                 {"METADATA_OUTPUT_STREAM",
                  [&]() { METADATA_OUTPUT_STREAM = readString(item); }},
-                {"EXPERIMENTAL_PRECAUTION_DELAY_META",
-                 [&]() {
-                     EXPERIMENTAL_PRECAUTION_DELAY_META = readBool(item);
-                 }},
                 {"EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING",
                  [&]() {
                      CLOG_WARNING(Overlay,
@@ -2271,29 +2266,12 @@ Config::getExpectedLedgerCloseTime() const
     return Herder::EXP_LEDGER_TIMESPAN_SECONDS;
 }
 
-void
-Config::setInMemoryMode()
-{
-    MODE_USES_IN_MEMORY_LEDGER = true;
-    DATABASE = SecretValue{"sqlite3://:memory:"};
-    MODE_STORES_HISTORY_MISC = false;
-    MODE_STORES_HISTORY_LEDGERHEADERS = false;
-    MODE_ENABLES_BUCKETLIST = true;
-    BACKGROUND_EVICTION_SCAN = false;
-}
-
 bool
 Config::modeDoesCatchupWithBucketList() const
 {
     return MODE_DOES_CATCHUP && MODE_ENABLES_BUCKETLIST;
 }
 
-bool
-Config::isInMemoryMode() const
-{
-    return MODE_USES_IN_MEMORY_LEDGER;
-}
-
 bool
 Config::isUsingBucketListDB() const
 {
@@ -2313,12 +2291,6 @@ Config::isPersistingBucketListDBIndexes() const
     return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX;
 }
 
-bool
-Config::isInMemoryModeWithoutMinimalDB() const
-{
-    return MODE_USES_IN_MEMORY_LEDGER && !MODE_STORES_HISTORY_LEDGERHEADERS;
-}
-
 bool
 Config::modeStoresAllHistory() const
 {
diff --git a/src/main/Config.h b/src/main/Config.h
index df88ed4184..e8ab848765 100644
--- a/src/main/Config.h
+++ b/src/main/Config.h
@@ -143,31 +143,24 @@ class Config : public std::enable_shared_from_this<Config>
     //    via applying valid TXs or manually adding entries to the BucketList.
     //    BucketList state is not preserved over restarts. If this mode can be
     //    used, it should be.
-    // 2. TESTDB_IN_MEMORY_NO_OFFERS: allows arbitrary ledger state writes via
-    //    ltx root commits, but does not test the offers table. Suitable for
+    // 2. TESTDB_IN_MEMORY: allows arbitrary ledger state writes via
+    //    ltx root commits. Suitable for
     //    tests that require writes to the ledger state that cannot be achieved
     //    via valid TX application, such as testing invalid TX error codes or
     //    low level op testing.
-    // 3. TESTDB_IN_MEMORY_OFFERS: The same as TESTDB_IN_MEMORY_NO_OFFERS, but
-    //    tests the offers table. Suitable for testing ops that interact with
-    //    offers.
-    // 4. TESTDB_ON_DISK_SQLITE: Should only be used to test SQLITE specific
+    // 3. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific
     //    database operations.
-    // 5. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific
-    //    database operations.
-    // 6. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but
-    //    persists the BucketList over restart. This mode is very slow and
-    //    should only be used for testing restart behavior or some low level
-    //    BucketList features.
+    // 4. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but
+    //    persists the BucketList and SQL DB over restart. This mode is very
+    //    slow and should only be used for testing restart behavior, low-level
+    //    BucketList features, or SQLite-specific DB behavior.
     enum TestDbMode
     {
         TESTDB_DEFAULT,
-        TESTDB_IN_MEMORY_OFFERS,
-        TESTDB_ON_DISK_SQLITE,
+        TESTDB_IN_MEMORY,
 #ifdef USE_POSTGRES
         TESTDB_POSTGRESQL,
 #endif
-        TESTDB_IN_MEMORY_NO_OFFERS,
         TESTDB_BUCKET_DB_VOLATILE,
         TESTDB_BUCKET_DB_PERSISTENT,
         TESTDB_MODES
@@ -384,12 +377,6 @@ class Config : public std::enable_shared_from_this<Config>
     // be set to `false` only for testing purposes.
     bool MODE_ENABLES_BUCKETLIST;
 
-    // A config parameter that uses a never-committing ledger. This means that
-    // all ledger entries will be kept in memory, and not persisted to DB
-    // (relevant tables won't even be created). This should not be set for
-    // production validators.
-    bool MODE_USES_IN_MEMORY_LEDGER;
-
     // A config parameter that can be set to true (in a captive-core
     // configuration) to delay emitting metadata by one ledger.
     bool EXPERIMENTAL_PRECAUTION_DELAY_META;
@@ -705,6 +692,11 @@ class Config : public std::enable_shared_from_this<Config>
     // doing a graceful shutdown
     bool TEST_CASES_ENABLED;
 
+    // A config parameter that enables a never-committing ledger: all ledger
+    // entries are kept in memory and never persisted to the DB. Should only
+    // be used for testing.
+    bool MODE_USES_IN_MEMORY_LEDGER;
+
     // Set QUORUM_SET using automatic quorum set configuration based on
     // `validators`.
     void
@@ -737,10 +729,7 @@ class Config : public std::enable_shared_from_this<Config>
 
     std::chrono::seconds getExpectedLedgerCloseTime() const;
 
-    void setInMemoryMode();
     bool modeDoesCatchupWithBucketList() const;
-    bool isInMemoryMode() const;
-    bool isInMemoryModeWithoutMinimalDB() const;
     bool isUsingBucketListDB() const;
     bool isUsingBackgroundEviction() const;
     bool isPersistingBucketListDBIndexes() const;
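
A minimal sketch of the write pattern the new TESTDB_IN_MEMORY mode documents
above (direct ledger-state writes committed through the ltx root), using only
helpers that already appear in this patch (getTestConfig, createTestApplication,
LedgerTestUtils::generateValidAccountEntry); it is illustrative rather than part
of the change:

    VirtualClock clock;
    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
    auto app = createTestApplication(clock, cfg);

    // Write an entry directly, bypassing transaction application.
    LedgerTxn ltx(app->getLedgerTxnRoot(),
                  /*shouldUpdateLastModified=*/false);
    LedgerEntry e;
    e.data.type(ACCOUNT);
    e.data.account() = LedgerTestUtils::generateValidAccountEntry(5);
    ltx.create(e);
    // The commit lands in the in-memory root; nothing is persisted to SQL.
    ltx.commit();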
diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp
index b36f9bf732..a7128d4d87 100644
--- a/src/main/test/ApplicationUtilsTests.cpp
+++ b/src/main/test/ApplicationUtilsTests.cpp
@@ -2,12 +2,14 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/test/BucketTestUtils.h"
 #include "crypto/Random.h"
 #include "history/HistoryArchiveManager.h"
 #include "history/HistoryManagerImpl.h"
 #include "history/test/HistoryTestsUtils.h"
 #include "invariant/BucketListIsConsistentWithDatabase.h"
 #include "ledger/LedgerTxn.h"
+#include "ledger/test/LedgerTestUtils.h"
 #include "lib/catch.hpp"
 #include "main/Application.h"
 #include "main/ApplicationUtils.h"
@@ -52,45 +54,6 @@ class TemporaryFileDamager
     }
 };
 
-class TemporarySQLiteDBDamager : public TemporaryFileDamager
-{
-    Config mConfig;
-    static std::filesystem::path
-    getSQLiteDBPath(Config const& cfg)
-    {
-        auto str = cfg.DATABASE.value;
-        std::string prefix = "sqlite3://";
-        REQUIRE(str.find(prefix) == 0);
-        str = str.substr(prefix.size());
-        REQUIRE(!str.empty());
-        std::filesystem::path path(str);
-        REQUIRE(std::filesystem::exists(path));
-        return path;
-    }
-
-  public:
-    TemporarySQLiteDBDamager(Config const& cfg)
-        : TemporaryFileDamager(getSQLiteDBPath(cfg)), mConfig(cfg)
-    {
-    }
-    void
-    damageVictim() override
-    {
-        // Damage a database by bumping the root account's last-modified.
-        VirtualClock clock;
-        auto app = createTestApplication(clock, mConfig, /*newDB=*/false);
-        LedgerTxn ltx(app->getLedgerTxnRoot(),
-                      /*shouldUpdateLastModified=*/false);
-        {
-            auto rootKey = accountKey(
-                stellar::txtest::getRoot(app->getNetworkID()).getPublicKey());
-            auto rootLe = ltx.load(rootKey);
-            rootLe.current().lastModifiedLedgerSeq += 1;
-        }
-        ltx.commit();
-    }
-};
-
 // Logic to check the state of the bucket list with the state of the DB
 static bool
 checkState(Application& app)
@@ -108,7 +71,7 @@ checkState(Application& app)
         blcOk = false;
     }
 
-    if (app.getConfig().isUsingBucketListDB())
+    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         auto checkBucket = [&blcOk](auto b) {
             if (!b->isEmpty() && !b->isIndexed())
@@ -307,82 +270,6 @@ class SimulationHelper
     {
         mSimulation->removeNode(mTestNodeID);
     }
-
-    void
-    runStartupTest(bool triggerCatchup, uint32_t startFromLedger,
-                   std::string startFromHash, uint32_t lclLedgerSeq)
-    {
-        bool isInMemoryMode = startFromLedger != 0 && !startFromHash.empty();
-        if (isInMemoryMode)
-        {
-            REQUIRE(canRebuildInMemoryLedgerFromBuckets(startFromLedger,
-                                                        lclLedgerSeq));
-        }
-
-        uint32_t checkpointFrequency = 8;
-
-        // Depending on how many ledgers we buffer during bucket
-        // apply, core might trim some and only keep checkpoint
-        // ledgers. In this case, after bucket application, normal
-        // catchup will be triggered.
-        uint32_t delayBuckets = triggerCatchup ? (2 * checkpointFrequency)
-                                               : (checkpointFrequency / 2);
-        mTestCfg.ARTIFICIALLY_DELAY_BUCKET_APPLICATION_FOR_TESTING =
-            std::chrono::seconds(delayBuckets);
-
-        // Start test app
-        auto app = mSimulation->addNode(mTestCfg.NODE_SEED, mQuorum, &mTestCfg,
-                                        false, startFromLedger, startFromHash);
-        mSimulation->addPendingConnection(mMainNodeID, mTestNodeID);
-        REQUIRE(app);
-        mSimulation->startAllNodes();
-
-        // Ensure nodes are connected
-        if (!app->getConfig().MODE_AUTO_STARTS_OVERLAY)
-        {
-            app->getOverlayManager().start();
-        }
-
-        if (isInMemoryMode)
-        {
-            REQUIRE(app->getLedgerManager().getState() ==
-                    LedgerManager::LM_CATCHING_UP_STATE);
-        }
-
-        auto downloaded =
-            app->getCatchupManager().getCatchupMetrics().mCheckpointsDownloaded;
-
-        Upgrades::UpgradeParameters scheduledUpgrades;
-        scheduledUpgrades.mUpgradeTime =
-            VirtualClock::from_time_t(mMainNode->getLedgerManager()
-                                          .getLastClosedLedgerHeader()
-                                          .header.scpValue.closeTime);
-        scheduledUpgrades.mProtocolVersion =
-            static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION);
-        mMainNode->getHerder().setUpgrades(scheduledUpgrades);
-
-        generateLoad(false);
-        generateLoad(true);
-
-        // State has been rebuilt and node is properly in sync
-        REQUIRE(checkState(*app));
-        REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() + 1 >=
-                getMainNodeLCL().header.ledgerSeq);
-        REQUIRE(app->getLedgerManager().isSynced());
-
-        if (triggerCatchup)
-        {
-            REQUIRE(downloaded < app->getCatchupManager()
-                                     .getCatchupMetrics()
-                                     .mCheckpointsDownloaded);
-        }
-        else
-        {
-            REQUIRE(downloaded == app->getCatchupManager()
-                                      .getCatchupMetrics()
-                                      .mCheckpointsDownloaded);
-        }
-    }
 };
 
 TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]")
@@ -399,7 +286,7 @@ TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]")
     qSet.validators.push_back(vNode1NodeID);
 
     Config cfg1 = getTestConfig(1);
-    Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY);
     cfg2.FORCE_SCP = false;
     cfg2.NODE_IS_VALIDATOR = false;
     cfg2.MODE_DOES_CATCHUP = false;
@@ -446,8 +333,8 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]")
         // Step 2: make a new application and catch it up part-way to the
         // archives (but behind).
         auto app = catchupSimulation.createCatchupApplication(
-            std::numeric_limits<uint32_t>::max(), Config::TESTDB_ON_DISK_SQLITE,
-            "client");
+            std::numeric_limits<uint32_t>::max(),
+            Config::TESTDB_BUCKET_DB_PERSISTENT, "client");
         catchupSimulation.catchupOffline(app, l1);
         chkConfig = app->getConfig();
         victimBucketPath = app->getBucketManager()
@@ -488,146 +375,14 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]")
         damage.damageVictim();
         REQUIRE(selfCheck(chkConfig) == 1);
     }
-    {
-        // Damage the SQL ledger.
-        TemporarySQLiteDBDamager damage(chkConfig);
-        damage.damageVictim();
-        REQUIRE(selfCheck(chkConfig) == 1);
-    }
 }
 
 TEST_CASE("application setup", "[applicationutils]")
 {
     VirtualClock clock;
-
-    SECTION("SQL DB mode")
-    {
-        auto cfg = getTestConfig();
-        auto app = setupApp(cfg, clock, 0, "");
-        REQUIRE(checkState(*app));
-    }
-
-    auto testInMemoryMode = [&](Config& cfg1, Config& cfg2) {
-        // Publish a few checkpoints then shut down test node
-        auto simulation = SimulationHelper(cfg1, cfg2);
-        auto [startFromLedger, startFromHash] =
-            simulation.publishCheckpoints(2);
-        auto lcl = simulation.getTestNodeLCL();
-        simulation.shutdownTestNode();
-
-        SECTION("minimal DB setup")
-        {
-            SECTION("not found")
-            {
-                // Remove `buckets` dir completely
-                fs::deltree(cfg2.BUCKET_DIR_PATH);
-
-                // Initialize new minimal DB from scratch
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-            SECTION("found")
-            {
-                // Found existing minimal DB, reset to genesis
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-        }
-        SECTION("rebuild state")
-        {
-            SECTION("from buckets")
-            {
-                auto selectedLedger = lcl.header.ledgerSeq;
-                auto selectedHash = binToHex(lcl.hash);
-
-                SECTION("replay buffered ledgers")
-                {
-                    simulation.runStartupTest(false, selectedLedger,
-                                              selectedHash,
-                                              lcl.header.ledgerSeq);
-                }
-                SECTION("trigger catchup")
-                {
-                    simulation.runStartupTest(true, selectedLedger,
-                                              selectedHash,
-                                              lcl.header.ledgerSeq);
-                }
-                SECTION("start from future ledger")
-                {
-                    // Validator publishes more checkpoints while the
-                    // captive-core instance is shutdown
-                    auto [selectedLedger2, selectedHash2] =
-                        simulation.publishCheckpoints(4);
-                    simulation.runStartupTest(true, selectedLedger2,
-                                              selectedHash2,
-                                              lcl.header.ledgerSeq);
-                }
-            }
-            SECTION("via catchup")
-            {
-                // startAtLedger is behind LCL, reset to genesis and catchup
-                REQUIRE(!canRebuildInMemoryLedgerFromBuckets(
-                    startFromLedger, lcl.header.ledgerSeq));
-                auto app =
-                    setupApp(cfg2, clock, startFromLedger, startFromHash);
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-                REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() ==
-                        startFromLedger);
-                REQUIRE(app->getLedgerManager().getState() ==
-                        LedgerManager::LM_CATCHING_UP_STATE);
-            }
-
-            SECTION("bad hash")
-            {
-                // Create mismatch between start-from ledger and hash
-                auto app =
-                    setupApp(cfg2, clock, startFromLedger + 1, startFromHash);
-                REQUIRE(!app);
-            }
-        }
-        SECTION("set meta stream")
-        {
-            TmpDirManager tdm(std::string("streamtmp-") +
-                              binToHex(randomBytes(8)));
-            TmpDir td = tdm.tmpDir("streams");
-            std::string path = td.getName() + "/stream.xdr";
-
-            // Remove `buckets` dir completely to ensure multiple apps are
-            // initialized during setup
-            fs::deltree(cfg2.BUCKET_DIR_PATH);
-            SECTION("file path")
-            {
-                cfg2.METADATA_OUTPUT_STREAM = path;
-
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-#ifdef _WIN32
-#else
-            SECTION("fd")
-            {
-                int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644);
-                REQUIRE(fd != -1);
-                cfg2.METADATA_OUTPUT_STREAM = fmt::format("fd:{}", fd);
-
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-#endif
-        }
-    };
-    SECTION("in memory mode")
-    {
-        Config cfg1 = getTestConfig(1);
-        Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS);
-        cfg2.DATABASE = SecretValue{minimalDBForInMemoryMode(cfg2)};
-        testInMemoryMode(cfg1, cfg2);
-    }
+    auto cfg = getTestConfig();
+    auto app = setupApp(cfg, clock);
+    REQUIRE(checkState(*app));
 }
 
 TEST_CASE("application major version numbers", "[applicationutils]")
diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp
deleted file mode 100644
index c44713ea7f..0000000000
--- a/src/main/test/ExternalQueueTests.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "lib/catch.hpp"
-#include "main/Application.h"
-#include "main/CommandHandler.h"
-#include "main/Config.h"
-#include "main/ExternalQueue.h"
-#include "simulation/Simulation.h"
-#include "test/TestUtils.h"
-#include "test/test.h"
-
-using namespace stellar;
-
-TEST_CASE("cursors", "[externalqueue]")
-{
-    VirtualClock clock;
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
-    Application::pointer app = createTestApplication(clock, cfg);
-
-    ExternalQueue ps(*app);
-    std::map<std::string, uint32> curMap;
-    app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123");
-    app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456");
-
-    SECTION("get non-existent cursor")
-    {
-        ps.getCursorForResource("NONEXISTENT", curMap);
-        REQUIRE(curMap.size() == 0);
-    }
-
-    SECTION("get single cursor")
-    {
-        ps.getCursorForResource("FOO", curMap);
-        REQUIRE(curMap.size() == 1);
-    }
-
-    SECTION("get all cursors")
-    {
-        ps.getCursorForResource("", curMap);
-        REQUIRE(curMap.size() == 2);
-    }
-}
diff --git a/src/overlay/test/OverlayTests.cpp b/src/overlay/test/OverlayTests.cpp
index 9c04a429a1..942f0a64a9 100644
--- a/src/overlay/test/OverlayTests.cpp
+++ b/src/overlay/test/OverlayTests.cpp
@@ -140,8 +140,8 @@ TEST_CASE("flow control byte capacity", "[overlay][flowcontrol]")
 {
     VirtualClock clock;
 
-    auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
-    auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY);
+    auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY);
     REQUIRE(cfg1.PEER_FLOOD_READING_CAPACITY !=
             cfg1.PEER_FLOOD_READING_CAPACITY_BYTES);
 
diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp
index f6b91cbf2e..356e1dc4c1 100644
--- a/src/simulation/Simulation.cpp
+++ b/src/simulation/Simulation.cpp
@@ -91,8 +91,7 @@ Simulation::setCurrentVirtualTime(VirtualClock::system_time_point t)
 
 Application::pointer
 Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2,
-                    bool newDB, uint32_t startAtLedger,
-                    std::string const& startAtHash)
+                    bool newDB)
 {
     auto cfg = cfg2 ? std::make_shared<Config>(*cfg2)
                     : std::make_shared<Config>(newConfig());
@@ -140,7 +139,7 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2,
     }
     else
     {
-        app = setupApp(*cfg, *clock, startAtLedger, startAtHash);
+        app = setupApp(*cfg, *clock);
     }
     mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app});
 
diff --git a/src/simulation/Simulation.h b/src/simulation/Simulation.h
index 8743af37f2..e1385f374d 100644
--- a/src/simulation/Simulation.h
+++ b/src/simulation/Simulation.h
@@ -50,9 +50,8 @@ class Simulation
     // Add new node to the simulation. This function does not start the node.
     // Callers are expected to call `start` or `startAllNodes` manually.
     Application::pointer addNode(SecretKey nodeKey, SCPQuorumSet qSet,
-                                 Config const* cfg = nullptr, bool newDB = true,
-                                 uint32_t startAtLedger = 0,
-                                 std::string const& startAtHash = "");
+                                 Config const* cfg = nullptr,
+                                 bool newDB = true);
     Application::pointer getNode(NodeID nodeID);
     std::vector<Application::pointer> getNodes();
     std::vector<NodeID> getNodeIDs();
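
Illustrative call under the trimmed addNode signature (cfg and qSet stand for
whatever the calling test already built; the startAtLedger/startAtHash
arguments are gone, so nodes go through the plain setupApp(cfg, clock) path
shown earlier in this patch):

    auto app = simulation->addNode(cfg.NODE_SEED, qSet, &cfg, /*newDB=*/true);
    simulation->startAllNodes();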
diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp
index a4305b03ab..672c8acf22 100644
--- a/src/test/TestUtils.cpp
+++ b/src/test/TestUtils.cpp
@@ -185,7 +185,8 @@ genesis(int minute, int second)
 
 void
 upgradeSorobanNetworkConfig(std::function<void(SorobanNetworkConfig&)> modifyFn,
-                            std::shared_ptr<Simulation> simulation)
+                            std::shared_ptr<Simulation> simulation,
+                            bool applyUpgrade)
 {
     auto nodes = simulation->getNodes();
     auto& lg = nodes[0]->getLoadGenerator();
@@ -247,13 +248,17 @@ upgradeSorobanNetworkConfig(std::function<void(SorobanNetworkConfig&)> modifyFn,
         scheduledUpgrades.mConfigUpgradeSetKey = upgradeSetKey;
         app->getHerder().setUpgrades(scheduledUpgrades);
     }
-    // Wait for upgrade to be applied
-    simulation->crankUntil(
-        [&]() {
-            auto netCfg = app.getLedgerManager().getSorobanNetworkConfig();
-            return netCfg == cfg;
-        },
-        2 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
+
+    if (applyUpgrade)
+    {
+        // Wait for upgrade to be applied
+        simulation->crankUntil(
+            [&]() {
+                auto netCfg = app.getLedgerManager().getSorobanNetworkConfig();
+                return netCfg == cfg;
+            },
+            2 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
+    }
 }
 
 void
diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h
index 5bf421efbd..96a7703604 100644
--- a/src/test/TestUtils.h
+++ b/src/test/TestUtils.h
@@ -113,7 +113,8 @@ void setSorobanNetworkConfigForTest(SorobanNetworkConfig& cfg);
 void overrideSorobanNetworkConfigForTest(Application& app);
 void
 upgradeSorobanNetworkConfig(std::function<void(SorobanNetworkConfig&)> modifyFn,
-                            std::shared_ptr<Simulation> simulation);
+                            std::shared_ptr<Simulation> simulation,
+                            bool applyUpgrade = true);
 void
 modifySorobanNetworkConfig(Application& app,
                            std::function<void(SorobanNetworkConfig&)> modifyFn);
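
A sketch of the new applyUpgrade=false path, mirroring the HerderTests call
later in this patch: the helper still arms the upgrade, but returns without
cranking until the network config changes, leaving that to the caller:

    upgradeSorobanNetworkConfig(
        [](SorobanNetworkConfig& cfg) {
            // value borrowed from the HerderTests usage below
            cfg.mTxMaxSizeBytes = 500'000;
        },
        simulation, /*applyUpgrade=*/false);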
diff --git a/src/test/test.cpp b/src/test/test.cpp
index 9fbb2e7ae4..4499c26a7e 100644
--- a/src/test/test.cpp
+++ b/src/test/test.cpp
@@ -194,10 +194,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
     instanceNumber += gBaseInstance;
     if (mode == Config::TESTDB_DEFAULT)
     {
-        // by default, tests should be run with in memory SQLITE as it's faster
-        // you can change this by enabling the appropriate line below
-        // mode = Config::TESTDB_IN_MEMORY_OFFERS;
-        // mode = Config::TESTDB_ON_DISK_SQLITE;
+        // by default, tests should be run with a volatile BucketList as it's
+        // faster. You can change this by enabling the appropriate line below.
+        // mode = Config::TESTDB_IN_MEMORY;
+        // mode = Config::TESTDB_BUCKET_DB_PERSISTENT;
         // mode = Config::TESTDB_POSTGRESQL;
         mode = Config::TESTDB_BUCKET_DB_VOLATILE;
     }
@@ -283,11 +283,11 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
         switch (mode)
         {
         case Config::TESTDB_BUCKET_DB_VOLATILE:
-        case Config::TESTDB_IN_MEMORY_OFFERS:
+        case Config::TESTDB_IN_MEMORY:
             dbname << "sqlite3://:memory:";
+            thisConfig.BACKGROUND_EVICTION_SCAN = false;
             break;
         case Config::TESTDB_BUCKET_DB_PERSISTENT:
-        case Config::TESTDB_ON_DISK_SQLITE:
             dbname << "sqlite3://" << rootDir << "test.db";
             thisConfig.DISABLE_XDR_FSYNC = false;
             break;
@@ -295,31 +295,19 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
         case Config::TESTDB_POSTGRESQL:
             dbname << "postgresql://dbname=test" << instanceNumber;
             thisConfig.DISABLE_XDR_FSYNC = false;
-            break;
-        case Config::TESTDB_IN_MEMORY_NO_OFFERS:
-            thisConfig.MODE_USES_IN_MEMORY_LEDGER = true;
+            thisConfig.BACKGROUND_EVICTION_SCAN = false;
             break;
 #endif
         default:
             abort();
         }
 
-        if (mode == Config::TESTDB_BUCKET_DB_VOLATILE ||
-            mode == Config::TESTDB_BUCKET_DB_PERSISTENT)
+        if (mode == Config::TESTDB_IN_MEMORY)
         {
-            thisConfig.DEPRECATED_SQL_LEDGER_STATE = false;
-            thisConfig.BACKGROUND_EVICTION_SCAN = true;
-        }
-        else
-        {
-            thisConfig.DEPRECATED_SQL_LEDGER_STATE = true;
-            thisConfig.BACKGROUND_EVICTION_SCAN = false;
+            thisConfig.MODE_USES_IN_MEMORY_LEDGER = true;
         }
 
-        if (mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-        {
-            thisConfig.DATABASE = SecretValue{dbname.str()};
-        }
+        thisConfig.DATABASE = SecretValue{dbname.str()};
 
         thisConfig.REPORT_METRICS = gTestMetrics;
         // disable maintenance
diff --git a/src/transactions/test/AllowTrustTests.cpp b/src/transactions/test/AllowTrustTests.cpp
index 398bee5e28..43c25f3824 100644
--- a/src/transactions/test/AllowTrustTests.cpp
+++ b/src/transactions/test/AllowTrustTests.cpp
@@ -82,7 +82,7 @@ template <int V> struct TestStub
         TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST
                                     : TrustFlagOp::SET_TRUST_LINE_FLAGS;
 
-        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
         VirtualClock clock;
         auto app = createTestApplication(clock, cfg);
@@ -377,7 +377,7 @@ template <int V> struct TestStub
         TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST
                                     : TrustFlagOp::SET_TRUST_LINE_FLAGS;
 
-        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
         VirtualClock clock;
         auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/BumpSequenceTests.cpp b/src/transactions/test/BumpSequenceTests.cpp
index f8a43d42ca..9a09b171f2 100644
--- a/src/transactions/test/BumpSequenceTests.cpp
+++ b/src/transactions/test/BumpSequenceTests.cpp
@@ -25,7 +25,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("bump sequence", "[tx][bumpsequence]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ChangeTrustTests.cpp b/src/transactions/test/ChangeTrustTests.cpp
index 72edb46160..3c62142f2e 100644
--- a/src/transactions/test/ChangeTrustTests.cpp
+++ b/src/transactions/test/ChangeTrustTests.cpp
@@ -23,7 +23,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("change trust", "[tx][changetrust]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -304,7 +304,7 @@ TEST_CASE_VERSIONS("change trust", "[tx][changetrust]")
 TEST_CASE_VERSIONS("change trust pool share trustline",
                    "[tx][changetrust][liquiditypool]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClaimableBalanceTests.cpp b/src/transactions/test/ClaimableBalanceTests.cpp
index b3d8980bb2..546eee4392 100644
--- a/src/transactions/test/ClaimableBalanceTests.cpp
+++ b/src/transactions/test/ClaimableBalanceTests.cpp
@@ -298,7 +298,7 @@ validateBalancesOnCreateAndClaim(TestAccount& createAcc, TestAccount& claimAcc,
 
 TEST_CASE_VERSIONS("claimableBalance", "[tx][claimablebalance]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClawbackClaimableBalanceTests.cpp b/src/transactions/test/ClawbackClaimableBalanceTests.cpp
index c9f9e1dcc8..b11051995a 100644
--- a/src/transactions/test/ClawbackClaimableBalanceTests.cpp
+++ b/src/transactions/test/ClawbackClaimableBalanceTests.cpp
@@ -19,7 +19,7 @@ using namespace stellar::txtest;
 TEST_CASE_VERSIONS("clawbackClaimableBalance",
                    "[tx][clawback][claimablebalance]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClawbackTests.cpp b/src/transactions/test/ClawbackTests.cpp
index eee797d441..f0238c35f1 100644
--- a/src/transactions/test/ClawbackTests.cpp
+++ b/src/transactions/test/ClawbackTests.cpp
@@ -17,7 +17,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("clawback", "[tx][clawback]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/CreateAccountTests.cpp b/src/transactions/test/CreateAccountTests.cpp
index ece78844dd..66cda7978e 100644
--- a/src/transactions/test/CreateAccountTests.cpp
+++ b/src/transactions/test/CreateAccountTests.cpp
@@ -31,7 +31,7 @@ TEST_CASE_VERSIONS("create account", "[tx][createaccount]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/EndSponsoringFutureReservesTests.cpp b/src/transactions/test/EndSponsoringFutureReservesTests.cpp
index f321b37b93..b163d3bfd8 100644
--- a/src/transactions/test/EndSponsoringFutureReservesTests.cpp
+++ b/src/transactions/test/EndSponsoringFutureReservesTests.cpp
@@ -34,7 +34,7 @@ TEST_CASE_VERSIONS("confirm and clear sponsor", "[tx][sponsorship]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto root = TestAccount::createRoot(*app);
     int64_t minBalance = app->getLedgerManager().getLastMinBalance(0);
diff --git a/src/transactions/test/FeeBumpTransactionTests.cpp b/src/transactions/test/FeeBumpTransactionTests.cpp
index 573666e942..a020e6c058 100644
--- a/src/transactions/test/FeeBumpTransactionTests.cpp
+++ b/src/transactions/test/FeeBumpTransactionTests.cpp
@@ -66,7 +66,7 @@ TEST_CASE_VERSIONS("fee bump transactions", "[tx][feebump]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto& lm = app->getLedgerManager();
     auto fee = lm.getLastClosedLedgerHeader().header.baseFee;
diff --git a/src/transactions/test/InflationTests.cpp b/src/transactions/test/InflationTests.cpp
index dbf2d8feef..f5cc0697f0 100644
--- a/src/transactions/test/InflationTests.cpp
+++ b/src/transactions/test/InflationTests.cpp
@@ -432,7 +432,7 @@ TEST_CASE_VERSIONS("inflation total coins", "[tx][inflation]")
 
 TEST_CASE_VERSIONS("inflation", "[tx][inflation]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock::system_time_point inflationStart;
     // inflation starts on 1-jul-2014
diff --git a/src/transactions/test/InvokeHostFunctionTests.cpp b/src/transactions/test/InvokeHostFunctionTests.cpp
index 441c22fba1..c0c980819f 100644
--- a/src/transactions/test/InvokeHostFunctionTests.cpp
+++ b/src/transactions/test/InvokeHostFunctionTests.cpp
@@ -2845,7 +2845,7 @@ TEST_CASE("state archival operation errors", "[tx][soroban]")
 TEST_CASE("settings upgrade command line utils", "[tx][soroban][upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = true;
     auto app = createTestApplication(clock, cfg);
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/LiquidityPoolDepositTests.cpp b/src/transactions/test/LiquidityPoolDepositTests.cpp
index 2bf6cd413a..1b8b899eaf 100644
--- a/src/transactions/test/LiquidityPoolDepositTests.cpp
+++ b/src/transactions/test/LiquidityPoolDepositTests.cpp
@@ -18,7 +18,7 @@ TEST_CASE_VERSIONS("liquidity pool deposit", "[tx][liquiditypool]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto const& lm = app->getLedgerManager();
diff --git a/src/transactions/test/LiquidityPoolTradeTests.cpp b/src/transactions/test/LiquidityPoolTradeTests.cpp
index 9cddacf59b..12b0ab3779 100644
--- a/src/transactions/test/LiquidityPoolTradeTests.cpp
+++ b/src/transactions/test/LiquidityPoolTradeTests.cpp
@@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("liquidity pool trade", "[tx][liquiditypool]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto minBal = [&](int32_t n) {
diff --git a/src/transactions/test/LiquidityPoolWithdrawTests.cpp b/src/transactions/test/LiquidityPoolWithdrawTests.cpp
index a6cb9b6c77..df3acf8b3d 100644
--- a/src/transactions/test/LiquidityPoolWithdrawTests.cpp
+++ b/src/transactions/test/LiquidityPoolWithdrawTests.cpp
@@ -17,7 +17,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("liquidity pool withdraw", "[tx][liquiditypool]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ManageBuyOfferTests.cpp b/src/transactions/test/ManageBuyOfferTests.cpp
index 49e836fc8a..e280a13b62 100644
--- a/src/transactions/test/ManageBuyOfferTests.cpp
+++ b/src/transactions/test/ManageBuyOfferTests.cpp
@@ -47,7 +47,7 @@ TEST_CASE_VERSIONS("manage buy offer failure modes", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -354,7 +354,7 @@ TEST_CASE_VERSIONS("manage buy offer liabilities", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto checkLiabilities = [&](std::string const& section, int64_t buyAmount,
                                 Price const& price, int64_t expectedBuying,
@@ -438,7 +438,7 @@ TEST_CASE_VERSIONS("manage buy offer exactly crosses existing offers",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -491,7 +491,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -619,7 +619,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -774,7 +774,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -927,7 +927,7 @@ TEST_CASE_VERSIONS("manage buy offer with zero liabilities", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("manage buy offer releases liabilities before modify",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
diff --git a/src/transactions/test/ManageDataTests.cpp b/src/transactions/test/ManageDataTests.cpp
index d1b5dbcfe4..770ba6f2e5 100644
--- a/src/transactions/test/ManageDataTests.cpp
+++ b/src/transactions/test/ManageDataTests.cpp
@@ -26,7 +26,7 @@ using namespace stellar::txtest;
 // add too much data
 TEST_CASE_VERSIONS("manage data", "[tx][managedata]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/MergeTests.cpp b/src/transactions/test/MergeTests.cpp
index c183024d59..ecbd55dd0b 100644
--- a/src/transactions/test/MergeTests.cpp
+++ b/src/transactions/test/MergeTests.cpp
@@ -34,7 +34,7 @@ using namespace stellar::txtest;
 // Merge when you have outstanding data entries
 TEST_CASE_VERSIONS("merge", "[tx][merge]")
 {
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/OfferTests.cpp b/src/transactions/test/OfferTests.cpp
index 65b1257716..b63e939c25 100644
--- a/src/transactions/test/OfferTests.cpp
+++ b/src/transactions/test/OfferTests.cpp
@@ -36,7 +36,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("create offer", "[tx][offers]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/PathPaymentStrictSendTests.cpp b/src/transactions/test/PathPaymentStrictSendTests.cpp
index 6eb6a153a5..21fb6c48f1 100644
--- a/src/transactions/test/PathPaymentStrictSendTests.cpp
+++ b/src/transactions/test/PathPaymentStrictSendTests.cpp
@@ -178,7 +178,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto exchanged = [&](TestMarketOffer const& o, int64_t sold,
                          int64_t bought) {
@@ -2406,7 +2406,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]")
 TEST_CASE_VERSIONS("pathpayment strict send uses all offers in a loop",
                    "[tx][pathpayment]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/transactions/test/PathPaymentTests.cpp b/src/transactions/test/PathPaymentTests.cpp
index a36a8b460e..71f4fb5ccf 100644
--- a/src/transactions/test/PathPaymentTests.cpp
+++ b/src/transactions/test/PathPaymentTests.cpp
@@ -70,7 +70,7 @@ assetPathToString(const std::deque<Asset>& assets)
 
 TEST_CASE_VERSIONS("pathpayment", "[tx][pathpayment]")
 {
-    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/PaymentTests.cpp b/src/transactions/test/PaymentTests.cpp
index e53faded26..d7bbf0807b 100644
--- a/src/transactions/test/PaymentTests.cpp
+++ b/src/transactions/test/PaymentTests.cpp
@@ -38,7 +38,7 @@ using namespace stellar::txtest;
 // path payment with a transfer rate
 TEST_CASE_VERSIONS("payment", "[tx][payment]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
@@ -1510,7 +1510,11 @@ TEST_CASE_VERSIONS("payment", "[tx][payment]")
 
             // Since a1 has a trustline, and there is only 1 trustline, we know
             // that gateway has no trustlines.
-            REQUIRE(app->getLedgerTxnRoot().countObjects(TRUSTLINE) == 1);
+            LedgerSnapshot lsg(*app);
+            LedgerKey trustKey(TRUSTLINE);
+            trustKey.trustLine().accountID = gateway.getPublicKey();
+            trustKey.trustLine().asset = assetToTrustLineAsset(idr);
+            REQUIRE(!lsg.load(trustKey));
         });
     }
     SECTION("authorize flag")
@@ -1930,7 +1934,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
 
     SECTION("fee equal to base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 100000000;
 
         VirtualClock clock;
@@ -2040,7 +2044,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
 
     SECTION("fee bigger than base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 200000000;
 
         VirtualClock clock;
diff --git a/src/transactions/test/RevokeSponsorshipTests.cpp b/src/transactions/test/RevokeSponsorshipTests.cpp
index e88154e458..01fa18cb31 100644
--- a/src/transactions/test/RevokeSponsorshipTests.cpp
+++ b/src/transactions/test/RevokeSponsorshipTests.cpp
@@ -40,7 +40,7 @@ TEST_CASE_VERSIONS("update sponsorship", "[tx][sponsorship]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto minBal = [&](uint32_t n) {
         return app->getLedgerManager().getLastMinBalance(n);
diff --git a/src/transactions/test/SetOptionsTests.cpp b/src/transactions/test/SetOptionsTests.cpp
index d5a5d522ec..fedb026763 100644
--- a/src/transactions/test/SetOptionsTests.cpp
+++ b/src/transactions/test/SetOptionsTests.cpp
@@ -36,7 +36,7 @@ using namespace stellar::txtest;
 // minbalance
 TEST_CASE_VERSIONS("set options", "[tx][setoptions]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/SetTrustLineFlagsTests.cpp b/src/transactions/test/SetTrustLineFlagsTests.cpp
index d030c62f7e..05b13ac7d4 100644
--- a/src/transactions/test/SetTrustLineFlagsTests.cpp
+++ b/src/transactions/test/SetTrustLineFlagsTests.cpp
@@ -105,7 +105,7 @@ getNumOffers(Application& app, TestAccount const& account, Asset const& asset)
 
 TEST_CASE_VERSIONS("set trustline flags", "[tx][settrustlineflags]")
 {
-    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -380,7 +380,7 @@ TEST_CASE_VERSIONS("revoke from pool",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/TxEnvelopeTests.cpp b/src/transactions/test/TxEnvelopeTests.cpp
index b60f3038ea..8f06cfc64c 100644
--- a/src/transactions/test/TxEnvelopeTests.cpp
+++ b/src/transactions/test/TxEnvelopeTests.cpp
@@ -86,7 +86,7 @@ TEST_CASE("txset - correct apply order", "[tx][envelope]")
 
 TEST_CASE_VERSIONS("txenvelope", "[tx][envelope]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);

From 20f5feaede1228b0fdb47df02513a380b4d5344c Mon Sep 17 00:00:00 2001
From: Garand Tyson <garand@stellar.org>
Date: Sun, 1 Dec 2024 11:13:04 -0800
Subject: [PATCH 2/2] In-memory mode rewritten for testing only

---
 docs/integration.md                           |   4 +-
 src/bucket/BucketApplicator.cpp               |   2 +
 src/bucket/test/BucketTests.cpp               |  76 -------------
 src/herder/test/HerderTests.cpp               | 106 +-----------------
 src/ledger/LedgerManager.h                    |   7 +-
 src/ledger/LedgerManagerImpl.cpp              |  80 ++++++-------
 src/ledger/LedgerManagerImpl.h                |   3 +-
 src/ledger/{ => test}/InMemoryLedgerTxn.cpp   |  75 +++++--------
 src/ledger/{ => test}/InMemoryLedgerTxn.h     |  45 ++++++--
 .../{ => test}/InMemoryLedgerTxnRoot.cpp      |   2 +-
 src/ledger/{ => test}/InMemoryLedgerTxnRoot.h |   0
 src/main/ApplicationImpl.cpp                  |  12 +-
 src/main/ApplicationImpl.h                    |   9 +-
 src/main/ApplicationUtils.cpp                 |  29 +----
 src/main/Config.cpp                           |   7 +-
 src/main/Config.h                             |   4 +-
 src/main/test/ApplicationUtilsTests.cpp       |  62 ----------
 src/main/test/ExternalQueueTests.cpp          |  46 ++++++++
 src/simulation/Simulation.cpp                 |  17 +--
 src/simulation/Simulation.h                   |  14 +++
 src/test/TestUtils.cpp                        |  34 +++---
 src/test/TestUtils.h                          |   5 +
 22 files changed, 222 insertions(+), 417 deletions(-)
 rename src/ledger/{ => test}/InMemoryLedgerTxn.cpp (88%)
 rename src/ledger/{ => test}/InMemoryLedgerTxn.h (69%)
 rename src/ledger/{ => test}/InMemoryLedgerTxnRoot.cpp (98%)
 rename src/ledger/{ => test}/InMemoryLedgerTxnRoot.h (100%)
 create mode 100644 src/main/test/ExternalQueueTests.cpp

diff --git a/docs/integration.md b/docs/integration.md
index deb6147fc3..656479f4b3 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -17,9 +17,9 @@ stellar-core generates several types of data that can be used by applications, d
 
 ## Ledger State
 
-Full [Ledger](ledger.md) snapshots are available in both:
+Full [Ledger](ledger.md) snapshots are available via both:
   * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes)
-* in the case of captive-core, the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates.
+* a stellar-core instance, where the ledger is maintained within the stellar-core process and ledger state needs to be tracked as it changes via "meta" updates.
 
 ## Ledger State transition information (transactions, etc)
 
diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp
index f9001d3113..4bcbf213ea 100644
--- a/src/bucket/BucketApplicator.cpp
+++ b/src/bucket/BucketApplicator.cpp
@@ -110,11 +110,13 @@ BucketApplicator::advance(BucketApplicator::Counters& counters)
     // directly instead of creating a temporary inner LedgerTxn
     // as "advance" commits changes during each step this does not introduce any
     // new failure mode
+#ifdef BUILD_TESTS
     if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         ltx = static_cast<AbstractLedgerTxn*>(&root);
     }
     else
+#endif
     {
         innerLtx = std::make_unique<LedgerTxn>(root, false);
         ltx = innerLtx.get();
diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp
index 30d82ff71c..6277f96f83 100644
--- a/src/bucket/test/BucketTests.cpp
+++ b/src/bucket/test/BucketTests.cpp
@@ -1011,79 +1011,3 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
         }
     });
 }
-
-TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]")
-{
-    VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
-    for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        std::vector<LedgerEntry> live(10), noLive;
-        std::vector<LedgerKey> dead, noDead;
-
-        for (auto& e : live)
-        {
-            e.data.type(ACCOUNT);
-            auto& a = e.data.account();
-            a = LedgerTestUtils::generateValidAccountEntry(5);
-            a.balance = 1000000000;
-            dead.emplace_back(LedgerEntryKey(e));
-        }
-
-        std::shared_ptr<LiveBucket> birth = LiveBucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        std::shared_ptr<LiveBucket> death = LiveBucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, noLive, dead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size());
-        birth->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == live.size() + 1 /* root account */);
-        }
-
-        CLOG_INFO(Bucket, "Applying bucket with {} dead entries", dead.size());
-        death->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == 1 /* root account */);
-        }
-    });
-}
-
-TEST_CASE("bucket apply bench", "[bucketbench][!hide]")
-{
-    auto runtest = [](Config::TestDbMode mode) {
-        VirtualClock clock;
-        Config cfg(getTestConfig(0, mode));
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        std::vector<LedgerEntry> live(100000);
-        std::vector<LedgerKey> noDead;
-
-        for (auto& l : live)
-        {
-            l.data.type(ACCOUNT);
-            auto& a = l.data.account();
-            a = LedgerTestUtils::generateValidAccountEntry(5);
-        }
-
-        std::shared_ptr<LiveBucket> birth = LiveBucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size());
-        // note: we do not wrap the `apply` call inside a transaction
-        // as bucket applicator commits to the database incrementally
-        birth->apply(*app);
-    };
-
-    runtest(Config::TESTDB_BUCKET_DB_PERSISTENT);
-}
diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp
index af32283d65..fc145aa906 100644
--- a/src/herder/test/HerderTests.cpp
+++ b/src/herder/test/HerderTests.cpp
@@ -3247,106 +3247,6 @@ TEST_CASE("soroban txs each parameter surge priced", "[soroban][herder]")
     }
 }
 
-TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]")
-{
-    auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE);
-
-    auto simulation =
-        Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_IN_MEMORY);
-            cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100;
-            cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
-                static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
-            return cfg;
-        });
-
-    simulation->startAllNodes();
-    auto nodes = simulation->getNodes();
-    uint32_t numAccounts = 100;
-    auto& loadGen = nodes[0]->getLoadGenerator();
-
-    // Generate some accounts
-    auto& loadGenDone =
-        nodes[0]->getMetrics().NewMeter({"loadgen", "run", "complete"}, "run");
-    auto currLoadGenCount = loadGenDone.count();
-    loadGen.generateLoad(
-        GeneratedLoadConfig::createAccountsLoad(numAccounts, 1));
-    simulation->crankUntil(
-        [&]() { return loadGenDone.count() > currLoadGenCount; },
-        10 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
-
-    // Ensure more transactions get in the ledger post upgrade
-    ConfigUpgradeSetFrameConstPtr res;
-    Upgrades::UpgradeParameters scheduledUpgrades;
-    scheduledUpgrades.mUpgradeTime =
-        VirtualClock::from_time_t(nodes[0]
-                                      ->getLedgerManager()
-                                      .getLastClosedLedgerHeader()
-                                      .header.scpValue.closeTime +
-                                  15);
-    scheduledUpgrades.mProtocolVersion =
-        static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION);
-    for (auto const& app : nodes)
-    {
-        app->getHerder().setUpgrades(scheduledUpgrades);
-    }
-
-    auto& secondLoadGen = nodes[1]->getLoadGenerator();
-    auto& secondLoadGenDone =
-        nodes[1]->getMetrics().NewMeter({"loadgen", "run", "complete"}, "run");
-    currLoadGenCount = loadGenDone.count();
-    auto secondLoadGenCount = secondLoadGenDone.count();
-
-    // Generate classic txs from another node (with offset to prevent
-    // overlapping accounts)
-    secondLoadGen.generateLoad(GeneratedLoadConfig::txLoad(LoadGenMode::PAY, 50,
-                                                           /* nTxs */ 100, 2,
-                                                           /* offset */ 50));
-
-    // Crank a bit and verify that upgrade went through
-    simulation->crankForAtLeast(4 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
-    REQUIRE(nodes[0]
-                ->getLedgerManager()
-                .getLastClosedLedgerHeader()
-                .header.ledgerVersion ==
-            static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION));
-    for (auto node : nodes)
-    {
-        overrideSorobanNetworkConfigForTest(*node);
-    }
-    // Now generate Soroban txs
-    auto sorobanConfig =
-        GeneratedLoadConfig::txLoad(LoadGenMode::SOROBAN_UPLOAD, 50,
-                                    /* nTxs */ 15, 1, /* offset */ 0);
-    sorobanConfig.skipLowFeeTxs = true;
-    loadGen.generateLoad(sorobanConfig);
-    auto& loadGenFailed =
-        nodes[0]->getMetrics().NewMeter({"loadgen", "run", "failed"}, "run");
-    auto& secondLoadGenFailed =
-        nodes[1]->getMetrics().NewMeter({"loadgen", "run", "failed"}, "run");
-
-    simulation->crankUntil(
-        [&]() {
-            return loadGenDone.count() > currLoadGenCount &&
-                   secondLoadGenDone.count() > secondLoadGenCount;
-        },
-        200 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
-    REQUIRE(loadGenFailed.count() == 0);
-    REQUIRE(secondLoadGenFailed.count() == 0);
-
-    //  Ensure some Soroban txs got into the ledger
-    auto totalSoroban =
-        nodes[0]
-            ->getMetrics()
-            .NewMeter({"soroban", "host-fn-op", "success"}, "call")
-            .count() +
-        nodes[0]
-            ->getMetrics()
-            .NewMeter({"soroban", "host-fn-op", "failure"}, "call")
-            .count();
-    REQUIRE(totalSoroban > 0);
-}
-
 TEST_CASE("overlay parallel processing")
 {
     auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE);
@@ -3679,6 +3579,11 @@ herderExternalizesValuesWithProtocol(uint32_t version)
             Herder::State::HERDER_BOOTING_STATE);
 
     simulation->startAllNodes();
+    upgradeSorobanNetworkConfig(
+        [&](SorobanNetworkConfig& cfg) {
+            cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1;
+        },
+        simulation);
 
     // After SCP is restored, Herder is tracking
     REQUIRE(getC()->getHerder().getState() ==
@@ -3758,7 +3663,6 @@ herderExternalizesValuesWithProtocol(uint32_t version)
         [&](SorobanNetworkConfig& cfg) {
             cfg.mLedgerMaxTransactionsSizeBytes = 1'000'000;
             cfg.mTxMaxSizeBytes = 500'000;
-            cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1;
         },
         simulation, /*applyUpgrade=*/false);
 
diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h
index acf6e1ee62..e5d160a7d2 100644
--- a/src/ledger/LedgerManager.h
+++ b/src/ledger/LedgerManager.h
@@ -150,13 +150,10 @@ class LedgerManager
     virtual void startNewLedger() = 0;
 
     // loads the last ledger information from the database with the following
-    // parameters:
+    // parameter:
     //  * restoreBucketlist indicates whether to restore the bucket list fully,
     //  and restart merges
-    //  * isLedgerStateReady indicates whether the ledger state is ready or is
-    //  still being rebuilt (in which case we can't yet load ledger entries)
-    virtual void loadLastKnownLedger(bool restoreBucketlist,
-                                     bool isLedgerStateReady) = 0;
+    virtual void loadLastKnownLedger(bool restoreBucketlist) = 0;
 
     // Return true if core is currently rebuilding in-memory state via local
     // catchup
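
For reference, a minimal sketch (not taken from the patch itself) of the two ways the simplified signature is called after this change, mirroring the ApplicationImpl.cpp and ApplicationUtils.cpp hunks below; the wrapper function names are illustrative only.

    #include "ledger/LedgerManager.h"
    #include "main/Application.h"

    // Hypothetical wrappers; the real call sites are ApplicationImpl::start()
    // and the offline utilities (self-check, dump-ledger, ...) further down.
    void startFullNode(stellar::Application& app)
    {
        // Full startup: load the LCL, restore the bucket list, restart merges.
        app.getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ true);
    }

    void loadForOfflineCommand(stellar::Application& app)
    {
        // Offline commands only need the LCL loaded; skip restarting merges.
        app.getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false);
    }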
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp
index 6fa5019a0b..c5c9ac3cbb 100644
--- a/src/ledger/LedgerManagerImpl.cpp
+++ b/src/ledger/LedgerManagerImpl.cpp
@@ -281,8 +281,7 @@ setLedgerTxnHeader(LedgerHeader const& lh, Application& app)
 }
 
 void
-LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist,
-                                       bool isLedgerStateReady)
+LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist)
 {
     ZoneScoped;
 
@@ -348,35 +347,41 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist,
 
     releaseAssert(latestLedgerHeader.has_value());
 
-    HistoryArchiveState has = getLastClosedLedgerHAS();
-    auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
-    auto pubmissing =
-        mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue();
-    missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
-    if (!missing.empty())
+    // Step 3. Restore the BucketList if we're doing a full core startup
+    // (restoreBucketlist=true), OR when using BucketListDB
+    if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB())
     {
-        CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'",
-                   missing.size(), mApp.getBucketManager().getBucketDir());
-        throw std::runtime_error("Bucket directory is corrupt");
-    }
-
-    if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
-    {
-        // Only restart merges in full startup mode. Many modes in core
-        // (standalone offline commands, in-memory setup) do not need to
-        // spin up expensive merge processes.
-        auto assumeStateWork =
-            mApp.getWorkScheduler().executeWork<AssumeStateWork>(
-                has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
-        if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
+        HistoryArchiveState has = getLastClosedLedgerHAS();
+        auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
+        auto pubmissing = mApp.getHistoryManager()
+                              .getMissingBucketsReferencedByPublishQueue();
+        missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
+        if (!missing.empty())
         {
-            CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
-                      ledgerAbbrev(*latestLedgerHeader));
+            CLOG_ERROR(Ledger,
+                       "{} buckets are missing from bucket directory '{}'",
+                       missing.size(), mApp.getBucketManager().getBucketDir());
+            throw std::runtime_error("Bucket directory is corrupt");
         }
-        else
+
+        if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
         {
-            // Work should only fail during graceful shutdown
-            releaseAssertOrThrow(mApp.isStopping());
+            // Only restart merges in full startup mode. Many modes in core
+            // (standalone offline commands, in-memory setup) do not need to
+            // spin up expensive merge processes.
+            auto assumeStateWork =
+                mApp.getWorkScheduler().executeWork<AssumeStateWork>(
+                    has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
+            if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
+            {
+                CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
+                          ledgerAbbrev(*latestLedgerHeader));
+            }
+            else
+            {
+                // Work should only fail during graceful shutdown
+                releaseAssertOrThrow(mApp.isStopping());
+            }
         }
     }
 
@@ -391,23 +396,10 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist,
     if (protocolVersionStartsFrom(latestLedgerHeader->ledgerVersion,
                                   SOROBAN_PROTOCOL_VERSION))
     {
-        if (isLedgerStateReady)
-        {
-            // Step 5. If ledger state is ready and core is in v20, load network
-            // configs right away
-            LedgerTxn ltx(mApp.getLedgerTxnRoot());
-            updateNetworkConfig(ltx);
-        }
-        else
-        {
-            // In some modes, e.g. in-memory, core's state is rebuilt
-            // asynchronously via catchup. In this case, we're not able to load
-            // the network config at this time, and instead must let catchup do
-            // it when ready.
-            CLOG_INFO(Ledger,
-                      "Ledger state is being rebuilt, network config will "
-                      "be loaded once the rebuild is done");
-        }
+        // Step 5. If core is on protocol v20 or later, load the network
+        // configs right away
+        LedgerTxn ltx(mApp.getLedgerTxnRoot());
+        updateNetworkConfig(ltx);
     }
 }
 
diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h
index 61caaf5490..f44dbeb3e8 100644
--- a/src/ledger/LedgerManagerImpl.h
+++ b/src/ledger/LedgerManagerImpl.h
@@ -173,8 +173,7 @@ class LedgerManagerImpl : public LedgerManager
 
     void startNewLedger(LedgerHeader const& genesisLedger);
     void startNewLedger() override;
-    void loadLastKnownLedger(bool restoreBucketlist,
-                             bool isLedgerStateReady) override;
+    void loadLastKnownLedger(bool restoreBucketlist) override;
     virtual bool rebuildingInMemoryState() override;
     virtual void setupInMemoryStateRebuild() override;
 
diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/test/InMemoryLedgerTxn.cpp
similarity index 88%
rename from src/ledger/InMemoryLedgerTxn.cpp
rename to src/ledger/test/InMemoryLedgerTxn.cpp
index 4c7d47ae83..93a18733d7 100644
--- a/src/ledger/InMemoryLedgerTxn.cpp
+++ b/src/ledger/test/InMemoryLedgerTxn.cpp
@@ -2,11 +2,9 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
-#include "ledger/InMemoryLedgerTxn.h"
+#include "ledger/test/InMemoryLedgerTxn.h"
 #include "ledger/LedgerTxn.h"
-#include "ledger/LedgerTxnImpl.h"
 #include "transactions/TransactionUtils.h"
-#include "util/GlobalChecks.h"
 
 namespace stellar
 {
@@ -73,7 +71,7 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const
 
 InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent,
                                      Database& db,
-                                     AbstractLedgerTxnParent* realRoot)
+                                     AbstractLedgerTxnParent& realRoot)
     : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot)
 {
 }
@@ -144,13 +142,12 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter)
 
         // In addition to maintaining in-memory map, commit offers to "real" ltx
         // root to test SQL backed offers
-        if (mRealRootForOffers &&
-            genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY)
+        if (genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY)
         {
             auto const& ledgerKey = genKey.ledgerKey();
             if (ledgerKey.type() == OFFER)
             {
-                LedgerTxn ltx(*mRealRootForOffers);
+                LedgerTxn ltx(mRealRootForOffers);
                 if (!iter.entryExists())
                 {
                     ltx.erase(ledgerKey);
@@ -365,79 +362,61 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset(
 void
 InMemoryLedgerTxn::dropOffers(bool rebuild)
 {
-    if (mRealRootForOffers)
-    {
-        mRealRootForOffers->dropOffers(rebuild);
-    }
-    else
-    {
-        LedgerTxn::dropOffers(rebuild);
-    }
+    mRealRootForOffers.dropOffers(rebuild);
 }
 
 uint64_t
 InMemoryLedgerTxn::countObjects(LedgerEntryType let) const
 {
-    if (mRealRootForOffers)
-    {
-        return mRealRootForOffers->countObjects(let);
-    }
-
-    return 0;
+    return mRealRootForOffers.countObjects(let);
 }
 
 uint64_t
 InMemoryLedgerTxn::countObjects(LedgerEntryType let,
                                 LedgerRange const& ledgers) const
 {
-    if (mRealRootForOffers)
-    {
-        return mRealRootForOffers->countObjects(let, ledgers);
-    }
-
-    return 0;
+    return mRealRootForOffers.countObjects(let, ledgers);
 }
 
 void
 InMemoryLedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const
 {
-    if (mRealRootForOffers)
-    {
-        mRealRootForOffers->deleteObjectsModifiedOnOrAfterLedger(ledger);
-    }
+    mRealRootForOffers.deleteObjectsModifiedOnOrAfterLedger(ledger);
 }
 
 UnorderedMap<LedgerKey, LedgerEntry>
 InMemoryLedgerTxn::getAllOffers()
 {
-    if (mRealRootForOffers)
-    {
-        return mRealRootForOffers->getAllOffers();
-    }
-
-    return LedgerTxn::getAllOffers();
+    return mRealRootForOffers.getAllOffers();
 }
 
 std::shared_ptr<LedgerEntry const>
 InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling)
 {
-    if (mRealRootForOffers)
-    {
-        return mRealRootForOffers->getBestOffer(buying, selling);
-    }
-
-    return LedgerTxn::getBestOffer(buying, selling);
+    return mRealRootForOffers.getBestOffer(buying, selling);
 }
 
 std::shared_ptr<LedgerEntry const>
 InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling,
                                 OfferDescriptor const& worseThan)
 {
-    if (mRealRootForOffers)
-    {
-        return mRealRootForOffers->getBestOffer(buying, selling, worseThan);
-    }
+    return mRealRootForOffers.getBestOffer(buying, selling, worseThan);
+}
 
-    return LedgerTxn::getBestOffer(buying, selling, worseThan);
+#ifdef BEST_OFFER_DEBUGGING
+bool
+InMemoryLedgerTxn::bestOfferDebuggingEnabled() const
+{
+    return mRealRootForOffers.bestOfferDebuggingEnabled();
+}
+
+std::shared_ptr<LedgerEntry const>
+InMemoryLedgerTxn::getBestOfferSlow(Asset const& buying, Asset const& selling,
+                                    OfferDescriptor const* worseThan,
+                                    std::unordered_set<int64_t>& exclude)
+{
+    return mRealRootForOffers.getBestOfferSlow(buying, selling, worseThan,
+                                               exclude);
 }
+#endif
 }
diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/test/InMemoryLedgerTxn.h
similarity index 69%
rename from src/ledger/InMemoryLedgerTxn.h
rename to src/ledger/test/InMemoryLedgerTxn.h
index 38917186cb..f7c754284f 100644
--- a/src/ledger/InMemoryLedgerTxn.h
+++ b/src/ledger/test/InMemoryLedgerTxn.h
@@ -5,14 +5,22 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "database/Database.h"
-#include "ledger/InMemoryLedgerTxnRoot.h"
 #include "ledger/LedgerTxn.h"
+#include "ledger/test/InMemoryLedgerTxnRoot.h"
 
 // This is a (very small) extension of LedgerTxn to help implement in-memory
-// mode. In-memory mode only holds the _ledger_ contents in memory; it still has
-// a "small" SQL database storing some additional tables, and we still want to
-// have transactional atomicity on those tables in regions of code we have a
-// LedgerTxn open. So that's the _purpose_.
+// mode. Originally this was intended for production use, but it is now
+// deprecated and only used for a few tests.
+//
+// In-memory mode holds the _ledger_ contents in memory, allowing tests to
+// directly change ledger state without actually committing a ledger. These
+// direct changes are incompatible with BucketListDB, as the data structure is
+// baked into consensus and arbitrary changes without closing ledgers make the
+// state machine _very_ unhappy. While we're slowly transitioning to tests that
+// don't directly commit changes and bypass ledger close, we still have a number
+// of older tests that have this assumption baked in. While it would be nice to
+// deprecate this mode entirely, it's a significant undertaking:
+// https://github.com/stellar/stellar-core/issues/4570.
 //
 // On to messy implementation details: in-memory mode is implemented by
 // replacing the normal LedgerTxnRoot with a stub class InMemoryLedgerTxnRoot
@@ -32,9 +40,16 @@
 //        has no soci::transaction      |      has soci::transaction
 //
 //
-// In other words, in-memory mode _moves_ the soci::transaction from the root
+// In other words, in-memory mode _copies_ the soci::transaction from the root
 // to its first (never-closing) child, and commits to the DB when children
 // of that first never-closing child commit to it.
+//
+// Additionally, InMemoryLedgerTxn (not InMemoryLedgerTxnRoot) maintains a
+// reference to the "real" LedgerTxnRoot that has a soci::transaction. Any
+// offer related queries and writes are ignored by InMemoryLedgerTxn and passed
+// through to this real, SQL backed root in order to test offer SQL queries.
+// Unlike all other ledger entry types, offers are stored in SQL, which has no
+// problem with arbitrary writes (unlike the BucketList).
 
 namespace stellar
 {
@@ -46,9 +61,10 @@ class InMemoryLedgerTxn : public LedgerTxn
 
     // For some tests, we need to bypass ledger close and commit directly to the
     // in-memory ltx. However, we still want to test SQL backed offers. The
-    // "never" committing root sets this flag to true such that offer-related
-    // calls get based to the real SQL backed root
-    AbstractLedgerTxnParent* const mRealRootForOffers;
+    // "never" committing in-memory root maintains a reference to the real, SQL
+    // backed LedgerTxnRoot. All offer related queries and writes are forwarded
+    // to the real root in order to test offer SQL queries.
+    AbstractLedgerTxnParent& mRealRootForOffers;
 
     UnorderedMap<AccountID, UnorderedSet<InternalLedgerKey>>
         mOffersAndPoolShareTrustlineKeys;
@@ -82,7 +98,7 @@ class InMemoryLedgerTxn : public LedgerTxn
 
   public:
     InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db,
-                      AbstractLedgerTxnParent* realRoot = nullptr);
+                      AbstractLedgerTxnParent& realRoot);
     virtual ~InMemoryLedgerTxn();
 
     void addChild(AbstractLedgerTxn& child, TransactionMode mode) override;
@@ -124,6 +140,15 @@ class InMemoryLedgerTxn : public LedgerTxn
                           LedgerRange const& ledgers) const override;
 
     void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override;
+
+#ifdef BEST_OFFER_DEBUGGING
+    virtual bool bestOfferDebuggingEnabled() const override;
+
+    virtual std::shared_ptr<LedgerEntry const>
+    getBestOfferSlow(Asset const& buying, Asset const& selling,
+                     OfferDescriptor const* worseThan,
+                     std::unordered_set<int64_t>& exclude) override;
+#endif
 };
 
 }
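
To make the forwarding behaviour described above concrete, here is a rough test-style sketch, assuming the helpers already used elsewhere in this patch (getTestConfig with TESTDB_IN_MEMORY, createTestApplication); the two ledger entries are hypothetical placeholders, not real test data.

    #include "ledger/LedgerTxn.h"
    #include "test/TestUtils.h"
    #include "test/test.h"

    // With MODE_USES_IN_MEMORY_LEDGER set, the root handed out by the
    // Application is the never-committing InMemoryLedgerTxn, so a test can
    // mutate ledger state without closing a ledger.
    VirtualClock clock;
    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
    auto app = createTestApplication(clock, cfg);

    LedgerEntry acc, offer;
    acc.data.type(ACCOUNT); // placeholder account entry, fields elided
    offer.data.type(OFFER); // placeholder offer entry, fields elided

    LedgerTxn ltx(app->getLedgerTxnRoot()); // child of the in-memory root
    ltx.create(acc);   // account: kept in memory only
    ltx.create(offer); // offer: on commit, also mirrored to the SQL-backed root
    ltx.commit();      // commits into the never-committing parent; offers are
                       // forwarded to the real root so SQL paths stay tested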
diff --git a/src/ledger/InMemoryLedgerTxnRoot.cpp b/src/ledger/test/InMemoryLedgerTxnRoot.cpp
similarity index 98%
rename from src/ledger/InMemoryLedgerTxnRoot.cpp
rename to src/ledger/test/InMemoryLedgerTxnRoot.cpp
index 386ceb2e93..7da4f37e1c 100644
--- a/src/ledger/InMemoryLedgerTxnRoot.cpp
+++ b/src/ledger/test/InMemoryLedgerTxnRoot.cpp
@@ -2,7 +2,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
-#include "ledger/InMemoryLedgerTxnRoot.h"
+#include "ledger/test/InMemoryLedgerTxnRoot.h"
 #include "ledger/LedgerRange.h"
 #include "ledger/LedgerTxn.h"
 #include "util/XDROperators.h"
diff --git a/src/ledger/InMemoryLedgerTxnRoot.h b/src/ledger/test/InMemoryLedgerTxnRoot.h
similarity index 100%
rename from src/ledger/InMemoryLedgerTxnRoot.h
rename to src/ledger/test/InMemoryLedgerTxnRoot.h
diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp
index d5f8f7208b..5be20c7342 100644
--- a/src/main/ApplicationImpl.cpp
+++ b/src/main/ApplicationImpl.cpp
@@ -32,8 +32,6 @@
 #include "invariant/LedgerEntryIsValid.h"
 #include "invariant/LiabilitiesMatchOffers.h"
 #include "invariant/SponsorshipCountIsValid.h"
-#include "ledger/InMemoryLedgerTxn.h"
-#include "ledger/InMemoryLedgerTxnRoot.h"
 #include "ledger/LedgerHeaderUtils.h"
 #include "ledger/LedgerManager.h"
 #include "ledger/LedgerTxn.h"
@@ -62,6 +60,8 @@
 #include "work/WorkScheduler.h"
 
 #ifdef BUILD_TESTS
+#include "ledger/test/InMemoryLedgerTxn.h"
+#include "ledger/test/InMemoryLedgerTxnRoot.h"
 #include "simulation/LoadGenerator.h"
 #endif
 
@@ -400,7 +400,7 @@ ApplicationImpl::resetLedgerState()
 #endif
         );
         mNeverCommittingLedgerTxn = std::make_unique<InMemoryLedgerTxn>(
-            *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get());
+            *mInMemoryLedgerTxnRoot, getDatabase(), *mLedgerTxnRoot);
     }
     else
 #endif
@@ -598,8 +598,7 @@ ApplicationImpl::getJsonInfo(bool verbose)
 void
 ApplicationImpl::reportInfo(bool verbose)
 {
-    mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ false,
-                                        /* isLedgerStateReady */ true);
+    mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ false);
     LOG_INFO(DEFAULT_LOG, "Reporting application info");
     std::cout << getJsonInfo(verbose).toStyledString() << std::endl;
 }
@@ -918,8 +917,7 @@ ApplicationImpl::start()
     CLOG_INFO(Ledger, "Starting up application");
     mStarted = true;
 
-    mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ true,
-                                        /* isLedgerStateReady */ true);
+    mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ true);
     startServices();
 }
 
diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h
index e1c0afd73d..1fc7ea989c 100644
--- a/src/main/ApplicationImpl.h
+++ b/src/main/ApplicationImpl.h
@@ -188,10 +188,11 @@ class ApplicationImpl : public Application
     // the "effective" in-memory root transaction, is returned when a client
     // requests the root.
     //
-    // Note that using this only works when the ledger can fit in RAM -- as it
-    // is held in the never-committing LedgerTxn in its entirety -- so if it
-    // ever grows beyond RAM-size you need to use a mode with some sort of
-    // database on secondary storage.
+    // This is only used in testing scenarios where we need to commit directly
+    // to the LedgerTxn root and bypass the normal ledger close process (since
+    // BucketListDB requires a full ledger close to update DB state). In the
+    // future, this should be removed in favor of tests that are all compatible
+    // with BucketListDB: https://github.com/stellar/stellar-core/issues/4570.
 #ifdef BUILD_TESTS
     std::unique_ptr<InMemoryLedgerTxnRoot> mInMemoryLedgerTxnRoot;
     std::unique_ptr<InMemoryLedgerTxn> mNeverCommittingLedgerTxn;
diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp
index bbc14086a6..2d9d811b53 100644
--- a/src/main/ApplicationUtils.cpp
+++ b/src/main/ApplicationUtils.cpp
@@ -125,23 +125,6 @@ setupApp(Config& cfg, VirtualClock& clock)
         return nullptr;
     }
 
-    // With in-memory testing mode, ledger state is not yet ready during this
-    // setup step
-    app->getLedgerManager().loadLastKnownLedger(
-        /* restoreBucketlist */ false,
-        /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER);
-    auto lcl = app->getLedgerManager().getLastClosedLedgerHeader();
-
-    if (cfg.MODE_USES_IN_MEMORY_LEDGER &&
-        lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ)
-    {
-        // If ledger is genesis, rebuild genesis state from buckets
-        if (!applyBucketsForLCL(*app))
-        {
-            return nullptr;
-        }
-    }
-
     return app;
 }
 
@@ -320,8 +303,7 @@ selfCheck(Config cfg)
 
     // We run self-checks from a "loaded but dormant" state where the
     // application is not started, but the LM has loaded the LCL.
-    app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false,
-                                                /* isLedgerStateReady */ true);
+    app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false);
 
     // First we schedule the cheap, asynchronous "online" checks that get run by
     // the HTTP "self-check" endpoint, and crank until they're done.
@@ -402,8 +384,7 @@ mergeBucketList(Config cfg, std::string const& outputDir)
     auto& lm = app->getLedgerManager();
     auto& bm = app->getBucketManager();
 
-    lm.loadLastKnownLedger(/* restoreBucketlist */ false,
-                           /* isLedgerStateReady */ true);
+    lm.loadLastKnownLedger(/* restoreBucketlist */ false);
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
     auto bucket = bm.mergeBuckets(has);
 
@@ -506,8 +487,7 @@ dumpStateArchivalStatistics(Config cfg)
     VirtualClock clock;
     cfg.setNoListen();
     Application::pointer app = Application::create(clock, cfg, false);
-    app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false,
-                                                /* isLedgerStateReady */ true);
+    app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false);
     auto& lm = app->getLedgerManager();
     auto& bm = app->getBucketManager();
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
@@ -620,8 +600,7 @@ dumpLedger(Config cfg, std::string const& outputFile,
     Application::pointer app = Application::create(clock, cfg, false);
     auto& lm = app->getLedgerManager();
 
-    lm.loadLastKnownLedger(/* restoreBucketlist */ false,
-                           /* isLedgerStateReady */ true);
+    lm.loadLastKnownLedger(/* restoreBucketlist */ false);
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
     std::optional<uint32_t> minLedger;
     if (lastModifiedLedgerCount)
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index c7e50a13a8..25ba211fb1 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -2275,8 +2275,11 @@ Config::modeDoesCatchupWithBucketList() const
 bool
 Config::isUsingBucketListDB() const
 {
-    return !DEPRECATED_SQL_LEDGER_STATE && !MODE_USES_IN_MEMORY_LEDGER &&
-           MODE_ENABLES_BUCKETLIST;
+    return !DEPRECATED_SQL_LEDGER_STATE
+#ifdef BUILD_TESTS
+           && !MODE_USES_IN_MEMORY_LEDGER
+#endif
+           && MODE_ENABLES_BUCKETLIST;
 }
 
 bool
diff --git a/src/main/Config.h b/src/main/Config.h
index e8ab848765..3b79fdeccc 100644
--- a/src/main/Config.h
+++ b/src/main/Config.h
@@ -693,7 +693,9 @@ class Config : public std::enable_shared_from_this<Config>
     bool TEST_CASES_ENABLED;
 
     // A config parameter that uses a never-committing ledger. This means that
-    // all ledger entries will be kept in memory, and not persisted to DB.
+    // all ledger entries, except for offers, will be kept in memory, and not
+    // persisted to DB. Since offers are backed by SQL and not BucketListDB,
+    // offers are still committed to the SQL DB even when this mode is enabled.
     // Should only be used for testing.
     bool MODE_USES_IN_MEMORY_LEDGER;
 
diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp
index a7128d4d87..3e56609e37 100644
--- a/src/main/test/ApplicationUtilsTests.cpp
+++ b/src/main/test/ApplicationUtilsTests.cpp
@@ -2,26 +2,20 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
-#include "bucket/test/BucketTestUtils.h"
-#include "crypto/Random.h"
 #include "history/HistoryArchiveManager.h"
 #include "history/HistoryManagerImpl.h"
 #include "history/test/HistoryTestsUtils.h"
 #include "invariant/BucketListIsConsistentWithDatabase.h"
 #include "ledger/LedgerTxn.h"
-#include "ledger/test/LedgerTestUtils.h"
 #include "lib/catch.hpp"
 #include "main/Application.h"
 #include "main/ApplicationUtils.h"
 #include "main/CommandHandler.h"
 #include "main/Config.h"
-#include "overlay/OverlayManager.h"
 #include "simulation/Simulation.h"
 #include "test/TestUtils.h"
-#include "test/TxTests.h"
 #include "test/test.h"
 #include "transactions/TransactionUtils.h"
-#include "util/Logging.h"
 #include <filesystem>
 #include <fstream>
 
@@ -54,54 +48,6 @@ class TemporaryFileDamager
     }
 };
 
-// Logic to check the state of the bucket list with the state of the DB
-static bool
-checkState(Application& app)
-{
-    BucketListIsConsistentWithDatabase blc(app);
-    bool blcOk = true;
-    try
-    {
-        blc.checkEntireBucketlist();
-    }
-    catch (std::runtime_error& e)
-    {
-        LOG_ERROR(DEFAULT_LOG, "Error during bucket-list consistency check: {}",
-                  e.what());
-        blcOk = false;
-    }
-
-    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
-    {
-        auto checkBucket = [&blcOk](auto b) {
-            if (!b->isEmpty() && !b->isIndexed())
-            {
-                LOG_ERROR(DEFAULT_LOG,
-                          "Error during bucket-list consistency check: "
-                          "unindexed bucket in BucketList");
-                blcOk = false;
-            }
-        };
-
-        auto& bm = app.getBucketManager();
-        for (uint32_t i = 0; i < bm.getLiveBucketList().kNumLevels && blcOk;
-             ++i)
-        {
-            auto& level = bm.getLiveBucketList().getLevel(i);
-            checkBucket(level.getCurr());
-            checkBucket(level.getSnap());
-            auto& nextFuture = level.getNext();
-            if (nextFuture.hasOutputHash())
-            {
-                auto hash = hexToBin256(nextFuture.getOutputHash());
-                checkBucket(bm.getBucketByHash<LiveBucket>(hash));
-            }
-        }
-    }
-
-    return blcOk;
-}
-
 // Sets up a network with a main validator node that publishes checkpoints to
 // a test node. Tests startup behavior of the test node when up to date with
 // validator and out of sync.
@@ -377,14 +323,6 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]")
     }
 }
 
-TEST_CASE("application setup", "[applicationutils]")
-{
-    VirtualClock clock;
-    auto cfg = getTestConfig();
-    auto app = setupApp(cfg, clock);
-    REQUIRE(checkState(*app));
-}
-
 TEST_CASE("application major version numbers", "[applicationutils]")
 {
     CHECK(getStellarCoreMajorReleaseVersion("v19.0.0") ==
diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp
new file mode 100644
index 0000000000..e5af50427f
--- /dev/null
+++ b/src/main/test/ExternalQueueTests.cpp
@@ -0,0 +1,46 @@
+#ifdef USE_POSTGRES
+// Copyright 2014 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "lib/catch.hpp"
+#include "main/Application.h"
+#include "main/CommandHandler.h"
+#include "main/Config.h"
+#include "main/ExternalQueue.h"
+#include "simulation/Simulation.h"
+#include "test/TestUtils.h"
+#include "test/test.h"
+
+using namespace stellar;
+
+TEST_CASE("cursors", "[externalqueue]")
+{
+    VirtualClock clock;
+    Config const& cfg = getTestConfig(0, Config::TESTDB_POSTGRESQL);
+    Application::pointer app = createTestApplication(clock, cfg);
+
+    ExternalQueue ps(*app);
+    std::map<std::string, uint32> curMap;
+    app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123");
+    app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456");
+
+    SECTION("get non-existent cursor")
+    {
+        ps.getCursorForResource("NONEXISTENT", curMap);
+        REQUIRE(curMap.size() == 0);
+    }
+
+    SECTION("get single cursor")
+    {
+        ps.getCursorForResource("FOO", curMap);
+        REQUIRE(curMap.size() == 1);
+    }
+
+    SECTION("get all cursors")
+    {
+        ps.getCursorForResource("", curMap);
+        REQUIRE(curMap.size() == 2);
+    }
+}
+#endif
diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp
index 356e1dc4c1..e4a86a5308 100644
--- a/src/simulation/Simulation.cpp
+++ b/src/simulation/Simulation.cpp
@@ -124,23 +124,16 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2,
     }
 
     Application::pointer app;
-    if (newDB)
+    if (mMode == OVER_LOOPBACK)
     {
-        if (mMode == OVER_LOOPBACK)
-        {
-            app =
-                createTestApplication<ApplicationLoopbackOverlay, Simulation&>(
-                    *clock, *cfg, *this, newDB, false);
-        }
-        else
-        {
-            app = createTestApplication(*clock, *cfg, newDB, false);
-        }
+        app = createTestApplication<ApplicationLoopbackOverlay, Simulation&>(
+            *clock, *cfg, *this, newDB, false);
     }
     else
     {
-        app = setupApp(*cfg, *clock);
+        app = createTestApplication(*clock, *cfg, newDB, false);
     }
+
     mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app});
 
     mPeerMap.emplace(app->getConfig().PEER_PORT,
diff --git a/src/simulation/Simulation.h b/src/simulation/Simulation.h
index e1385f374d..139c67449a 100644
--- a/src/simulation/Simulation.h
+++ b/src/simulation/Simulation.h
@@ -92,6 +92,18 @@ class Simulation
     // prevent overlay from automatically re-connecting to peers
     void stopOverlayTick();
 
+    bool
+    isSetUpForSorobanUpgrade() const
+    {
+        return mSetupForSorobanUpgrade;
+    }
+
+    void
+    markReadyForSorobanUpgrade()
+    {
+        mSetupForSorobanUpgrade = true;
+    }
+
   private:
     void addLoopbackConnection(NodeID initiator, NodeID acceptor);
     void dropLoopbackConnection(NodeID initiator, NodeID acceptor);
@@ -127,6 +139,8 @@ class Simulation
 
     // Map PEER_PORT to Application
     std::unordered_map<unsigned short, std::weak_ptr<Application>> mPeerMap;
+
+    bool mSetupForSorobanUpgrade{false};
 };
 
 class LoopbackOverlayManager : public OverlayManagerImpl
diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp
index 672c8acf22..e752ad6a5b 100644
--- a/src/test/TestUtils.cpp
+++ b/src/test/TestUtils.cpp
@@ -9,6 +9,7 @@
 #include "test/TxTests.h"
 #include "test/test.h"
 #include "work/WorkScheduler.h"
+#include <limits>
 
 namespace stellar
 {
@@ -195,31 +196,34 @@ upgradeSorobanNetworkConfig(std::function<void(SorobanNetworkConfig&)> modifyFn,
     auto& complete =
         app.getMetrics().NewMeter({"loadgen", "run", "complete"}, "run");
     auto completeCount = complete.count();
-    // Only create an account if there are none aleady created.
-    uint32_t offset = 0;
-    if (app.getMetrics()
-            .NewMeter({"loadgen", "account", "created"}, "account")
-            .count() == 0)
+
+    // Use large offset to avoid conflicts with tests using loadgen.
+    auto const offset = std::numeric_limits<uint32_t>::max() - 1;
+
+    // Only create an account if the upgrade has not run before.
+    if (!simulation->isSetUpForSorobanUpgrade())
     {
         auto createAccountsLoadConfig =
             GeneratedLoadConfig::createAccountsLoad(1, 1);
-        offset = std::numeric_limits<uint32_t>::max() - 1;
         createAccountsLoadConfig.offset = offset;
 
         lg.generateLoad(createAccountsLoadConfig);
         simulation->crankUntil(
             [&]() { return complete.count() == completeCount + 1; },
             300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
-    }
 
-    // Create upload wasm transaction.
-    auto createUploadCfg = GeneratedLoadConfig::createSorobanUpgradeSetupLoad();
-    createUploadCfg.offset = offset;
-    lg.generateLoad(createUploadCfg);
-    completeCount = complete.count();
-    simulation->crankUntil(
-        [&]() { return complete.count() == completeCount + 1; },
-        300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
+        // Create upload wasm transaction.
+        auto createUploadCfg =
+            GeneratedLoadConfig::createSorobanUpgradeSetupLoad();
+        createUploadCfg.offset = offset;
+        lg.generateLoad(createUploadCfg);
+        completeCount = complete.count();
+        simulation->crankUntil(
+            [&]() { return complete.count() == completeCount + 1; },
+            300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false);
+
+        simulation->markReadyForSorobanUpgrade();
+    }
 
     // Create upgrade transaction.
     auto createUpgradeLoadGenConfig = GeneratedLoadConfig::txLoad(
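
A short usage sketch (mirroring the herderExternalizesValuesWithProtocol hunk earlier in this patch) of what the new guard buys: repeated upgrade calls against the same Simulation pay the account-creation and wasm-upload setup cost only once.

    // First call: runs the one-time account + wasm-upload setup, arms and
    // applies the upgrade, and marks the simulation as set up.
    upgradeSorobanNetworkConfig(
        [](SorobanNetworkConfig& cfg) {
            cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1;
        },
        simulation);

    // Later call on the same simulation: setup is skipped, only the upgrade
    // transaction is generated and armed (and, here, not applied).
    upgradeSorobanNetworkConfig(
        [](SorobanNetworkConfig& cfg) { cfg.mTxMaxSizeBytes = 500'000; },
        simulation, /*applyUpgrade=*/false);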
diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h
index 96a7703604..d0175a4e0e 100644
--- a/src/test/TestUtils.h
+++ b/src/test/TestUtils.h
@@ -111,6 +111,11 @@ void setSorobanNetworkConfigForTest(SorobanNetworkConfig& cfg);
 // for most of the unit tests (unless the test is meant to exercise the
 // configuration limits).
 void overrideSorobanNetworkConfigForTest(Application& app);
+
+// Runs loadgen to arm all nodes in the simulation for the given upgrade. If
+// applyUpgrade == true, closes ledgers until the upgrade has been applied;
+// otherwise, just arms the nodes without closing the ledger containing the
+// upgrade.
 void
 upgradeSorobanNetworkConfig(std::function<void(SorobanNetworkConfig&)> modifyFn,
                             std::shared_ptr<Simulation> simulation,