migration fixes #4135

Open · wants to merge 8 commits into base: main
Changes from 7 commits
3 changes: 3 additions & 0 deletions .changelog/unreleased/bug-fixes/4135-migration-fixes.md
@@ -0,0 +1,3 @@
- Strengthen migration checks and ensure the merkle tree is updated during
migrations. The redundant `namadan update-db` command has been removed in the
process. ([\#4135](https://github.com/anoma/namada/pull/4135))
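
As a reading aid for the shell changes further down in this diff, here is a minimal, self-contained sketch of the commit-time flow the entry above describes: a migration scheduled for a given height is validated before the block is committed, applied right after, and the merkle tree is then refreshed so the next app hash covers the migrated keys. Every name below (`ScheduledMigration`, `State`, the free `commit` function) is an illustrative stand-in, not the real Namada item of the same name.

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};

/// Illustrative stand-in for a migration scheduled at a block height.
struct ScheduledMigration {
    height: u64,
    /// In the real node this is a migration file plus its expected hash.
    changes: Vec<(String, Vec<u8>)>,
}

impl ScheduledMigration {
    /// Re-check the migration payload; the node aborts if it is invalid.
    fn load_and_validate(self) -> Result<Vec<(String, Vec<u8>)>, String> {
        Ok(self.changes)
    }
}

/// Toy state: a key-value map plus the last committed merkle root.
#[derive(Default)]
struct State {
    kv: BTreeMap<String, Vec<u8>>,
    merkle_root: u64,
}

impl State {
    fn commit_block(&mut self) {
        // Persist the block as usual; omitted in this sketch.
    }

    fn apply_migration(&mut self, changes: Vec<(String, Vec<u8>)>) {
        for (key, value) in changes {
            self.kv.insert(key, value);
        }
    }

    /// Recompute the root over the (possibly migrated) state so the next
    /// app hash reflects the new keys.
    fn update_last_block_merkle_tree(&mut self) {
        let mut hasher = DefaultHasher::new();
        self.kv.hash(&mut hasher);
        self.merkle_root = hasher.finish();
    }
}

fn commit(
    state: &mut State,
    height_to_commit: u64,
    scheduled: &mut Option<ScheduledMigration>,
) {
    // Take and validate the migration only if it targets this exact height.
    let migration = match scheduled.as_ref() {
        Some(m) if m.height == height_to_commit => Some(
            scheduled
                .take()
                .unwrap()
                .load_and_validate()
                .expect("the scheduled migration is not valid"),
        ),
        _ => None,
    };

    state.commit_block();

    if let Some(changes) = migration {
        state.apply_migration(changes);
        // Without this step the migrated keys would not be covered by the
        // app hash of the next block.
        state.update_last_block_merkle_tree();
    }
}

fn main() {
    let mut state = State::default();
    let mut scheduled = Some(ScheduledMigration {
        height: 10,
        changes: vec![("counter".to_string(), vec![1, 2, 3])],
    });
    commit(&mut state, 10, &mut scheduled);
    println!("root after migration: {:#x}", state.merkle_root);
    assert!(scheduled.is_none());
}
```

Run as a plain binary, it prints the recomputed root and shows that the scheduled migration is consumed once its target height is committed.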
4 changes: 2 additions & 2 deletions .github/workflows/scripts/e2e.json
@@ -17,7 +17,7 @@
"e2e::ledger_tests::run_ledger": 5,
"e2e::ledger_tests::run_ledger_load_state_and_reset": 23,
"e2e::ledger_tests::test_namada_shuts_down_if_tendermint_dies": 2,
"e2e::ledger_tests::test_namada_db_migration": 30,
"e2e::ledger_tests::test_namada_db_migration": 80,
"e2e::ledger_tests::test_genesis_validators": 14,
"e2e::ledger_tests::test_node_connectivity_and_consensus": 28,
"e2e::ledger_tests::test_epoch_sleep": 12,
@@ -34,4 +34,4 @@
"e2e::ledger_tests::masp_txs_and_queries": 82,
"e2e::ledger_tests::test_genesis_chain_id_change": 35,
"e2e::ledger_tests::test_genesis_manipulation": 103
}
}
38 changes: 1 addition & 37 deletions crates/apps/src/bin/namada-node/cli.rs
@@ -3,9 +3,7 @@
use eyre::{Context, Result};
use namada_apps_lib::cli::cmds::TestGenesis;
use namada_apps_lib::cli::{self, cmds};
use namada_apps_lib::config::{
Action, ActionAtHeight, NodeLocalConfig, ValidatorLocalConfig,
};
use namada_apps_lib::config::{NodeLocalConfig, ValidatorLocalConfig};
#[cfg(not(feature = "migrations"))]
use namada_apps_lib::display_line;
use namada_apps_lib::migrations::ScheduledMigration;
@@ -58,40 +56,6 @@ pub fn main() -> Result<()> {
node::rollback(chain_ctx.config.ledger)
.wrap_err("Failed to rollback the Namada node")?;
}
cmds::Ledger::UpdateDB(cmds::LedgerUpdateDB(args)) => {
#[cfg(not(feature = "migrations"))]
{
panic!(
"This command is only available if built with the \
\"migrations\" feature."
)
}
let mut chain_ctx = ctx.take_chain_or_exit();
#[cfg(feature = "migrations")]
node::update_db_keys(
chain_ctx.config.ledger.clone(),
args.updates,
args.dry_run,
);
if !args.dry_run {
let wasm_dir = chain_ctx.wasm_dir();
chain_ctx.config.ledger.shell.action_at_height =
Some(ActionAtHeight {
height: args.last_height.checked_add(2).unwrap(),
action: Action::Halt,
});
std::env::set_var(
"NAMADA_INITIAL_HEIGHT",
args.last_height.to_string(),
);
// don't stop on panics
let handle = std::thread::spawn(|| {
node::run(chain_ctx.config.ledger, wasm_dir, None)
});
_ = handle.join();
std::env::remove_var("NAMADA_INITIAL_HEIGHT");
}
}
cmds::Ledger::QueryDB(cmds::LedgerQueryDB(args)) => {
#[cfg(not(feature = "migrations"))]
{
62 changes: 0 additions & 62 deletions crates/apps_lib/src/cli.rs
@@ -893,7 +893,6 @@ pub mod cmds {
RunUntil(LedgerRunUntil),
Reset(LedgerReset),
DumpDb(LedgerDumpDb),
UpdateDB(LedgerUpdateDB),
QueryDB(LedgerQueryDB),
RollBack(LedgerRollBack),
}
@@ -906,13 +905,11 @@
let run = SubCmd::parse(matches).map(Self::Run);
let reset = SubCmd::parse(matches).map(Self::Reset);
let dump_db = SubCmd::parse(matches).map(Self::DumpDb);
let update_db = SubCmd::parse(matches).map(Self::UpdateDB);
let query_db = SubCmd::parse(matches).map(Self::QueryDB);
let rollback = SubCmd::parse(matches).map(Self::RollBack);
let run_until = SubCmd::parse(matches).map(Self::RunUntil);
run.or(reset)
.or(dump_db)
.or(update_db)
.or(query_db)
.or(rollback)
.or(run_until)
@@ -936,7 +933,6 @@
.subcommand(LedgerRunUntil::def())
.subcommand(LedgerReset::def())
.subcommand(LedgerDumpDb::def())
.subcommand(LedgerUpdateDB::def())
.subcommand(LedgerQueryDB::def())
.subcommand(LedgerRollBack::def())
}
@@ -1022,29 +1018,6 @@
}
}

#[derive(Clone, Debug)]
pub struct LedgerUpdateDB(pub args::LedgerUpdateDb);

impl SubCmd for LedgerUpdateDB {
const CMD: &'static str = "update-db";

fn parse(matches: &ArgMatches) -> Option<Self> {
matches
.subcommand_matches(Self::CMD)
.map(|matches| Self(args::LedgerUpdateDb::parse(matches)))
}

fn def() -> App {
App::new(Self::CMD)
.about(wrap!(
"Applies a set of updates to the DB for hard forking. The \
input should be a path to a file dictating a set of keys \
and their new values. Can be dry-run for testing."
))
.add_args::<args::LedgerUpdateDb>()
}
}

#[derive(Clone, Debug)]
pub struct LedgerQueryDB(pub args::LedgerQueryDb);

@@ -3901,41 +3874,6 @@ pub mod args {
}
}

#[derive(Clone, Debug)]
pub struct LedgerUpdateDb {
pub updates: PathBuf,
pub dry_run: bool,
pub last_height: BlockHeight,
}

impl Args for LedgerUpdateDb {
fn parse(matches: &ArgMatches) -> Self {
let updates = PATH.parse(matches);
let dry_run = DRY_RUN_TX.parse(matches);
let last_height = BLOCK_HEIGHT.parse(matches);
Self {
updates,
dry_run,
last_height,
}
}

fn def(app: App) -> App {
app.arg(PATH.def().help(wrap!(
"The path to a json of key-value pairs to update the DB with."
)))
.arg(DRY_RUN_TX.def().help(wrap!(
"If set, applies the updates but does not persist them. Using \
for testing and debugging."
)))
.arg(
BLOCK_HEIGHT.def().help(wrap!(
"The height at which the hard fork is happening."
)),
)
}
}

#[derive(Clone, Debug)]
pub struct LedgerQueryDb {
pub key: storage::Key,
40 changes: 4 additions & 36 deletions crates/node/src/lib.rs
@@ -354,13 +354,14 @@ pub fn query_db(
type_hash: &[u8; 32],
cf: &DbColFam,
) {
use namada_sdk::migrations::DBUpdateVisitor;
use namada_apps_lib::storage::DBUpdateVisitor;

let chain_id = config.chain_id;
let db_path = config.shell.db_dir(&chain_id);

let db = storage::PersistentDB::open(db_path, None);
let db_visitor = storage::RocksDBUpdateVisitor::new(&db);
let bytes = db_visitor.read(key, cf).unwrap();
let db_visitor = storage::RocksDBUpdateVisitor::default();
let bytes = db_visitor.read(&db, key, cf).unwrap();

let deserializer = namada_migrations::get_deserializer(type_hash)
.unwrap_or_else(|| {
@@ -382,39 +383,6 @@
);
}

/// Change the funds of an account in-place. Use with
/// caution, as this modifies state in storage without
/// going through the consensus protocol.
#[cfg(feature = "migrations")]
pub fn update_db_keys(config: config::Ledger, updates: PathBuf, dry_run: bool) {
use std::io::Read;

let mut update_json = String::new();
let mut file = std::fs::File::open(updates)
.expect("Could not fine updates file at the specified path.");
file.read_to_string(&mut update_json)
.expect("Unable to read the updates json file");
let updates: namada_sdk::migrations::DbChanges =
serde_json::from_str(&update_json)
.expect("Could not parse the updates file as json");
let cometbft_path = config.cometbft_dir();
let chain_id = config.chain_id;
let db_path = config.shell.db_dir(&chain_id);

let db = storage::PersistentDB::open(db_path, None);
let batch = db.apply_migration_to_batch(updates.changes).unwrap();
if !dry_run {
tracing::info!("Persisting DB changes...");
db.exec_batch(batch).expect("Failed to execute write batch");
db.flush(true).expect("Failed to flush data to disk");

// reset CometBFT's state, such that we can resume with a different app
// hash
tendermint_node::reset_state(cometbft_path)
.expect("Failed to reset CometBFT state");
}
}

/// Roll Namada state back to the previous height
pub fn rollback(config: config::Ledger) -> Result<(), shell::Error> {
shell::rollback(config)
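
A side note on the `query_db` hunk above: the visitor is no longer constructed around a borrowed DB handle (`RocksDBUpdateVisitor::new(&db)`) but built with `default()` and handed the DB on each `read` call. The sketch below shows that stateless-visitor shape with hypothetical types; it is not the real `namada_apps_lib::storage` API, whose exact signatures are only partially visible in this diff.

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for a persistent DB handle, keyed by
/// (column family, storage key).
struct Db {
    cols: HashMap<(String, String), Vec<u8>>,
}

/// The visitor holds no reference to the DB; callers pass it per operation,
/// mirroring the `RocksDBUpdateVisitor::default()` + `read(&db, key, cf)`
/// shape in the diff (names here are illustrative only).
#[derive(Default)]
struct UpdateVisitor;

impl UpdateVisitor {
    fn read(&self, db: &Db, key: &str, cf: &str) -> Option<Vec<u8>> {
        db.cols.get(&(cf.to_string(), key.to_string())).cloned()
    }
}

fn main() {
    let mut cols = HashMap::new();
    cols.insert(("subspace".to_string(), "balance".to_string()), vec![42]);
    let db = Db { cols };

    let visitor = UpdateVisitor::default();
    let bytes = visitor.read(&db, "balance", "subspace");
    println!("{bytes:?}");
}
```

Keeping the visitor free of a stored reference avoids tying its lifetime to an open handle and lets one visitor be reused across databases; the PR itself does not state the motivation, so treat this as one plausible reading.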
32 changes: 23 additions & 9 deletions crates/node/src/shell/mod.rs
@@ -378,7 +378,7 @@
/// Log of events emitted by `FinalizeBlock` ABCI calls.
event_log: EventLog,
/// A migration that can be scheduled at a given block height
pub scheduled_migration: Option<ScheduledMigration<D::Migrator>>,
pub scheduled_migration: Option<ScheduledMigration>,
/// When set, indicates after how many blocks a new snapshot
/// will be taken (counting from the first block)
pub blocks_between_snapshots: Option<NonZeroU64>,
@@ -477,7 +477,7 @@
broadcast_sender: UnboundedSender<Vec<u8>>,
eth_oracle: Option<EthereumOracleChannels>,
db_cache: Option<&D::Cache>,
scheduled_migration: Option<ScheduledMigration<D::Migrator>>,
scheduled_migration: Option<ScheduledMigration>,
vp_wasm_compilation_cache: u64,
tx_wasm_compilation_cache: u64,
) -> Self {
@@ -781,20 +781,34 @@
/// hash.
pub fn commit(&mut self) -> shim::Response {
self.bump_last_processed_eth_block();
let height_to_commit = self.state.in_mem().block.height;

let migration = match self.scheduled_migration.as_ref() {
Some(migration) if height_to_commit == migration.height => Some(
self.scheduled_migration
.take()
.unwrap()
.load_and_validate()
.expect("The scheduled migration is not valid."),
),
_ => None,
};

self.state
.commit_block()
.expect("Encountered a storage error while committing a block");
let committed_height = self.state.in_mem().get_last_block_height();
migrations::commit(
self.state.db(),
committed_height,
&mut self.scheduled_migration,
);

if let Some(migration) = migration {
migrations::commit(&mut self.state, migration);
self.state
.update_last_block_merkle_tree()
.expect("Must update merkle tree after migration");
}

let merkle_root = self.state.in_mem().merkle_root();

tracing::info!(
"Committed block hash: {merkle_root}, height: {committed_height}",
"Committed block hash: {merkle_root}, height: {height_to_commit}",
);

self.broadcast_queued_txs();