Merge branch 'main' into database-metadata-refactor
# Conflicts:
#	CHANGELOG.md
fab-10 committed Feb 15, 2024
2 parents 6996199 + 8be243f commit 17fb5b2
Showing 28 changed files with 571 additions and 83 deletions.
19 changes: 17 additions & 2 deletions CHANGELOG.md
@@ -1,6 +1,6 @@
# Changelog

## 24.1.3-SNAPSHOT
## 24.2.1-SNAPSHOT

### Breaking Changes
- The RocksDB database metadata format has changed to be more expressive; migration of an existing metadata file to the new format happens automatically at startup. Before downgrading to a previous version, you must revert to the original format using the subcommand `besu --data-path=/path/to/besu/datadir storage revert-metadata v2-to-v1`.
@@ -12,7 +12,7 @@

### Bug fixes

## 24.1.2-SNAPSHOT
## 24.2.0-SNAPSHOT

### Breaking Changes
- Following the OpenMetrics convention, the updated Prometheus client adds the `_total` suffix to every metric of type counter, so some existing metrics have been renamed to carry this suffix. If you are using the official Besu Grafana dashboard [(available here)](https://grafana.com/grafana/dashboards/16455-besu-full/), just update it to the latest revision, which accepts both the old and the new names of the affected metrics. If you have a custom dashboard or use the metrics in other ways, you need to update it manually to support the new naming.
@@ -42,13 +42,28 @@
- Support for `shanghaiTime` fork and Shanghai EVM smart contracts in QBFT/IBFT chains [#6353](https://github.com/hyperledger/besu/pull/6353)
- Change ExecutionHaltReason for contract creation collision case to return ILLEGAL_STATE_CHANGE [#6518](https://github.com/hyperledger/besu/pull/6518)
- Experimental feature `--Xbonsai-code-using-code-hash-enabled` for storing Bonsai code storage by code hash [#6505](https://github.com/hyperledger/besu/pull/6505)
- More accurate column size in the `storage rocksdb usage` subcommand [#6540](https://github.com/hyperledger/besu/pull/6540)
- Adds `storage rocksdb x-stats` subcommand [#6540](https://github.com/hyperledger/besu/pull/6540)

### Bug fixes
- Fix the way an advertised host configured with `--p2p-host` is treated when communicating with the originator of a PING packet [#6225](https://github.com/hyperledger/besu/pull/6225)
- Fix `poa-block-txs-selection-max-time` option that was inadvertently reset to its default after being configured [#6444](https://github.com/hyperledger/besu/pull/6444)

### Download Links

## 24.1.2

### Bug fixes
- Fix ETC Spiral upgrade breach of consensus [#6524](https://github.com/hyperledger/besu/pull/6524)

### Additions and Improvements
- Adds timestamp to enable Cancun upgrade on mainnet [#6545](https://github.com/hyperledger/besu/pull/6545)
- GitHub Actions based build [#6427](https://github.com/hyperledger/besu/pull/6427)

### Download Links
https://hyperledger.jfrog.io/artifactory/besu-binaries/besu/24.1.2/besu-24.1.2.zip / sha256 9033f300edd81c770d3aff27a29f59dd4b6142a113936886a8f170718e412971
https://hyperledger.jfrog.io/artifactory/besu-binaries/besu/24.1.2/besu-24.1.2.tar.gz / sha256 082db8cf4fb67527aa0dd757e5d254b3b497f5027c23287f9c0a74a6a743bf08

## 24.1.1

### Breaking Changes
8 changes: 6 additions & 2 deletions besu/src/main/java/org/hyperledger/besu/cli/BesuCommand.java
@@ -255,8 +255,12 @@
synopsisHeading = "%n",
descriptionHeading = "%n@|bold,fg(cyan) Description:|@%n%n",
optionListHeading = "%n@|bold,fg(cyan) Options:|@%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
footerHeading = "%nBesu is licensed under the Apache License 2.0%n",
footer = {
"%n%n@|fg(cyan) To get started quickly, just choose a network to sync and a profile to run with suggested defaults:|@",
"%n@|fg(cyan) for Mainnet|@ --network=mainnet --profile=[minimalist_staker|staker]",
"%nMore info and other profiles at https://besu.hyperledger.org%n"
})
public class BesuCommand implements DefaultCommandValues, Runnable {

@SuppressWarnings("PrivateStaticFinalLoggers")
@@ -21,6 +21,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.stream.Stream;

import org.bouncycastle.util.Arrays;
import org.rocksdb.ColumnFamilyDescriptor;
@@ -66,36 +67,210 @@ static void forEachColumnFamily(
}
}

static void printUsageForColumnFamily(
static void printStatsForColumnFamily(
final RocksDB rocksdb, final ColumnFamilyHandle cfHandle, final PrintWriter out)
throws RocksDBException, NumberFormatException {
throws RocksDBException {
final String size = rocksdb.getProperty(cfHandle, "rocksdb.estimate-live-data-size");
final String numberOfKeys = rocksdb.getProperty(cfHandle, "rocksdb.estimate-num-keys");
boolean emptyColumnFamily = false;
if (!size.isBlank() && !numberOfKeys.isBlank()) {
final long sizeLong = Long.parseLong(size);
final long numberOfKeysLong = Long.parseLong(numberOfKeys);
if (!size.isBlank()
&& !numberOfKeys.isBlank()
&& isPopulatedColumnFamily(sizeLong, numberOfKeysLong)) {
out.println("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=");
out.println("Column Family: " + getNameById(cfHandle.getName()));

final String prefix = "rocksdb.";
final String cfstats = "cfstats";
final String cfstats_no_file_histogram = "cfstats-no-file-histogram";
final String cf_file_histogram = "cf-file-histogram";
final String cf_write_stall_stats = "cf-write-stall-stats";
final String dbstats = "dbstats";
final String db_write_stall_stats = "db-write-stall-stats";
final String levelstats = "levelstats";
final String block_cache_entry_stats = "block-cache-entry-stats";
final String fast_block_cache_entry_stats = "fast-block-cache-entry-stats";
final String num_immutable_mem_table = "num-immutable-mem-table";
final String num_immutable_mem_table_flushed = "num-immutable-mem-table-flushed";
final String mem_table_flush_pending = "mem-table-flush-pending";
final String compaction_pending = "compaction-pending";
final String background_errors = "background-errors";
final String cur_size_active_mem_table = "cur-size-active-mem-table";
final String cur_size_all_mem_tables = "cur-size-all-mem-tables";
final String size_all_mem_tables = "size-all-mem-tables";
final String num_entries_active_mem_table = "num-entries-active-mem-table";
final String num_entries_imm_mem_tables = "num-entries-imm-mem-tables";
final String num_deletes_active_mem_table = "num-deletes-active-mem-table";
final String num_deletes_imm_mem_tables = "num-deletes-imm-mem-tables";
final String estimate_num_keys = "estimate-num-keys";
final String estimate_table_readers_mem = "estimate-table-readers-mem";
final String is_file_deletions_enabled = "is-file-deletions-enabled";
final String num_snapshots = "num-snapshots";
final String oldest_snapshot_time = "oldest-snapshot-time";
final String oldest_snapshot_sequence = "oldest-snapshot-sequence";
final String num_live_versions = "num-live-versions";
final String current_version_number = "current-super-version-number";
final String estimate_live_data_size = "estimate-live-data-size";
final String min_log_number_to_keep_str = "min-log-number-to-keep";
final String min_obsolete_sst_number_to_keep_str = "min-obsolete-sst-number-to-keep";
final String base_level_str = "base-level";
final String total_sst_files_size = "total-sst-files-size";
final String live_sst_files_size = "live-sst-files-size";
final String obsolete_sst_files_size = "obsolete-sst-files-size";
final String live_sst_files_size_at_temperature = "live-sst-files-size-at-temperature";
final String estimate_pending_comp_bytes = "estimate-pending-compaction-bytes";
final String aggregated_table_properties = "aggregated-table-properties";
final String num_running_compactions = "num-running-compactions";
final String num_running_flushes = "num-running-flushes";
final String actual_delayed_write_rate = "actual-delayed-write-rate";
final String is_write_stopped = "is-write-stopped";
final String estimate_oldest_key_time = "estimate-oldest-key-time";
final String block_cache_capacity = "block-cache-capacity";
final String block_cache_usage = "block-cache-usage";
final String block_cache_pinned_usage = "block-cache-pinned-usage";
final String options_statistics = "options-statistics";
final String num_blob_files = "num-blob-files";
final String blob_stats = "blob-stats";
final String total_blob_file_size = "total-blob-file-size";
final String live_blob_file_size = "live-blob-file-size";
final String live_blob_file_garbage_size = "live-blob-file-garbage-size";
final String blob_cache_capacity = "blob-cache-capacity";
final String blob_cache_usage = "blob-cache-usage";
final String blob_cache_pinned_usage = "blob-cache-pinned-usage";
Stream.of(
cfstats,
cfstats_no_file_histogram,
cf_file_histogram,
cf_write_stall_stats,
dbstats,
db_write_stall_stats,
levelstats,
block_cache_entry_stats,
fast_block_cache_entry_stats,
num_immutable_mem_table,
num_immutable_mem_table_flushed,
mem_table_flush_pending,
compaction_pending,
background_errors,
cur_size_active_mem_table,
cur_size_all_mem_tables,
size_all_mem_tables,
num_entries_active_mem_table,
num_entries_imm_mem_tables,
num_deletes_active_mem_table,
num_deletes_imm_mem_tables,
estimate_num_keys,
estimate_table_readers_mem,
is_file_deletions_enabled,
num_snapshots,
oldest_snapshot_time,
oldest_snapshot_sequence,
num_live_versions,
current_version_number,
estimate_live_data_size,
min_log_number_to_keep_str,
min_obsolete_sst_number_to_keep_str,
base_level_str,
total_sst_files_size,
live_sst_files_size,
obsolete_sst_files_size,
live_sst_files_size_at_temperature,
estimate_pending_comp_bytes,
aggregated_table_properties,
num_running_compactions,
num_running_flushes,
actual_delayed_write_rate,
is_write_stopped,
estimate_oldest_key_time,
block_cache_capacity,
block_cache_usage,
block_cache_pinned_usage,
options_statistics,
num_blob_files,
blob_stats,
total_blob_file_size,
live_blob_file_size,
live_blob_file_garbage_size,
blob_cache_capacity,
blob_cache_usage,
blob_cache_pinned_usage)
.forEach(
prop -> {
try {
final String value = rocksdb.getProperty(cfHandle, prefix + prop);
if (!value.isBlank()) {
out.println(prop + ": " + value);
}
} catch (RocksDBException e) {
LOG.debug("couldn't get property {}", prop);
}
});
out.println("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=");
}
}

static ColumnFamilyUsage getAndPrintUsageForColumnFamily(
final RocksDB rocksdb, final ColumnFamilyHandle cfHandle, final PrintWriter out)
throws RocksDBException, NumberFormatException {
final String numberOfKeys = rocksdb.getProperty(cfHandle, "rocksdb.estimate-num-keys");
if (!numberOfKeys.isBlank()) {
try {
final long sizeLong = Long.parseLong(size);
final long numberOfKeysLong = Long.parseLong(numberOfKeys);
final String totalSstFilesSize =
rocksdb.getProperty(cfHandle, "rocksdb.total-sst-files-size");
final long totalSstFilesSizeLong =
!totalSstFilesSize.isBlank() ? Long.parseLong(totalSstFilesSize) : 0;
if (sizeLong == 0 && numberOfKeysLong == 0) {
emptyColumnFamily = true;
}

if (!emptyColumnFamily) {
final String totalBlobFilesSize =
rocksdb.getProperty(cfHandle, "rocksdb.total-blob-file-size");
final long totalBlobFilesSizeLong =
!totalBlobFilesSize.isBlank() ? Long.parseLong(totalBlobFilesSize) : 0;

final long totalFilesSize = totalSstFilesSizeLong + totalBlobFilesSizeLong;
if (isPopulatedColumnFamily(0, numberOfKeysLong)) {
printLine(
out,
getNameById(cfHandle.getName()),
rocksdb.getProperty(cfHandle, "rocksdb.estimate-num-keys"),
formatOutputSize(sizeLong),
formatOutputSize(totalSstFilesSizeLong));
formatOutputSize(totalFilesSize),
formatOutputSize(totalSstFilesSizeLong),
formatOutputSize(totalBlobFilesSizeLong));
}
return new ColumnFamilyUsage(
getNameById(cfHandle.getName()),
numberOfKeysLong,
totalFilesSize,
totalSstFilesSizeLong,
totalBlobFilesSizeLong);
} catch (NumberFormatException e) {
LOG.error("Failed to parse string into long: " + e.getMessage());
}
}
// return empty usage on error
return new ColumnFamilyUsage(getNameById(cfHandle.getName()), 0, 0, 0, 0);
}

static void printTotals(final PrintWriter out, final List<ColumnFamilyUsage> columnFamilyUsages) {
final long totalKeys = columnFamilyUsages.stream().mapToLong(ColumnFamilyUsage::keys).sum();
final long totalSize =
columnFamilyUsages.stream().mapToLong(ColumnFamilyUsage::totalSize).sum();
final long totalSsts =
columnFamilyUsages.stream().mapToLong(ColumnFamilyUsage::sstFilesSize).sum();
final long totalBlobs =
columnFamilyUsages.stream().mapToLong(ColumnFamilyUsage::blobFilesSize).sum();
printSeparator(out);
printLine(
out,
"ESTIMATED TOTAL",
String.valueOf(totalKeys),
formatOutputSize(totalSize),
formatOutputSize(totalSsts),
formatOutputSize(totalBlobs));
printSeparator(out);
}

private static boolean isPopulatedColumnFamily(final long size, final long numberOfKeys) {
return size != 0 || numberOfKeys != 0;
}

static String formatOutputSize(final long size) {
@@ -123,19 +298,28 @@ private static String getNameById(final byte[] id) {
}

static void printTableHeader(final PrintWriter out) {
printSeparator(out);
out.format(
"| Column Family | Keys | Column Size | SST Files Size |\n");
"| Column Family | Keys | Total Size | SST Files Size | Blob Files Size | \n");
printSeparator(out);
}

private static void printSeparator(final PrintWriter out) {
out.format(
"|--------------------------------|-----------------|--------------|-----------------|\n");
"|--------------------------------|-----------------|-------------|-----------------|------------------|\n");
}

static void printLine(
final PrintWriter out,
final String cfName,
final String keys,
final String columnSize,
final String sstFilesSize) {
final String format = "| %-30s | %-15s | %-12s | %-15s |\n";
out.format(format, cfName, keys, columnSize, sstFilesSize);
final String totalFilesSize,
final String sstFilesSize,
final String blobFilesSize) {
final String format = "| %-30s | %-15s | %-11s | %-15s | %-16s |\n";
out.format(format, cfName, keys, totalFilesSize, sstFilesSize, blobFilesSize);
}

record ColumnFamilyUsage(
String name, long keys, long totalSize, long sstFilesSize, long blobFilesSize) {}
}
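
For orientation, here is a minimal usage sketch, not part of this commit, showing how the new helpers could be combined to reproduce the usage table with totals. It assumes the methods keep the signatures shown in the diff above, that they live in a class named RocksDbHelper (a hypothetical name; the file name is not visible in this excerpt), and that the sketch is compiled in the same package, since the helpers and the ColumnFamilyUsage record are package-private.

// Illustrative sketch only — not part of this commit.
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class RocksDbUsageSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    final String dbPath = args[0]; // e.g. the database directory under the Besu data path

    // Discover every column family so the database can be opened read-only.
    final List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
    try (final Options options = new Options()) {
      for (final byte[] name : RocksDB.listColumnFamilies(options, dbPath)) {
        descriptors.add(new ColumnFamilyDescriptor(name));
      }
    }

    final List<ColumnFamilyHandle> handles = new ArrayList<>();
    final PrintWriter out = new PrintWriter(System.out, true);
    try (final DBOptions dbOptions = new DBOptions();
        final RocksDB db = RocksDB.openReadOnly(dbOptions, dbPath, descriptors, handles)) {
      // Print one row per populated column family, collecting usage for the totals row.
      RocksDbHelper.printTableHeader(out);
      final List<RocksDbHelper.ColumnFamilyUsage> usages = new ArrayList<>();
      for (final ColumnFamilyHandle handle : handles) {
        usages.add(RocksDbHelper.getAndPrintUsageForColumnFamily(db, handle, out));
      }
      RocksDbHelper.printTotals(out, usages);
    }
  }
}

Run against a Besu data directory, this should print one row per populated column family followed by the ESTIMATED TOTAL row produced by printTotals.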