Feature/repacking 2.9 #684

Merged · 10 commits · Jan 8, 2025
459 changes: 242 additions & 217 deletions apps/arweave/e2e/ar_e2e.erl

Large diffs are not rendered by default.

50 changes: 49 additions & 1 deletion apps/arweave/e2e/ar_repack_mine_tests.erl
@@ -9,17 +9,27 @@
%% --------------------------------------------------------------------------------------------
repack_mine_test_() ->
[
{timeout, 300, {with, {replica_2_9, replica_2_9}, [fun test_repacking_blocked/1]}},
{timeout, 300, {with, {replica_2_9, spora_2_6}, [fun test_repacking_blocked/1]}},
{timeout, 300, {with, {replica_2_9, composite_1}, [fun test_repacking_blocked/1]}},
{timeout, 300, {with, {replica_2_9, composite_2}, [fun test_repacking_blocked/1]}},
{timeout, 300, {with, {replica_2_9, unpacked}, [fun test_repacking_blocked/1]}},
{timeout, 300, {with, {unpacked, replica_2_9}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {unpacked, spora_2_6}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {unpacked, composite_1}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {unpacked, composite_2}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {unpacked, unpacked}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {spora_2_6, replica_2_9}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {spora_2_6, spora_2_6}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {spora_2_6, composite_1}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {spora_2_6, composite_2}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {spora_2_6, unpacked}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_1, replica_2_9}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_1, spora_2_6}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_1, composite_1}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_1, composite_2}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_1, unpacked}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_2, replica_2_9}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_2, spora_2_6}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_2, composite_1}, [fun test_repack_mine/1]}},
{timeout, 300, {with, {composite_2, composite_2}, [fun test_repack_mine/1]}},
@@ -60,6 +70,8 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
mining_addr = AddrB
}),
ar_test_node:restart(RepackerNode),
ar_e2e:assert_syncs_range(RepackerNode, ?PARTITION_SIZE, 2*?PARTITION_SIZE),

ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks),

case ToPackingType of
@@ -78,14 +90,50 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
?assertEqual(RepackerBlock, ValidatorBlock)
end.

test_repacking_blocked({FromPackingType, ToPackingType}) ->
ar_e2e:delayed_print(<<" ~p -> ~p ">>, [FromPackingType, ToPackingType]),
ValidatorNode = peer1,
RepackerNode = peer2,
{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
RepackerNode, FromPackingType, wallet_a),

[B0 | _] = Blocks,
start_validator_node(ValidatorNode, RepackerNode, B0),

{WalletB, StorageModules} = ar_e2e:source_node_storage_modules(
RepackerNode, ToPackingType, wallet_b),
AddrB = case WalletB of
undefined -> undefined;
_ -> ar_wallet:to_address(WalletB)
end,
ToPacking = ar_e2e:packing_type_to_packing(ToPackingType, AddrB),
{ok, Config} = ar_test_node:get_config(RepackerNode),
ar_test_node:update_config(RepackerNode, Config#config{
storage_modules = Config#config.storage_modules ++ StorageModules,
mining_addr = AddrB
}),
ar_test_node:restart(RepackerNode),

ar_e2e:assert_empty_partition(RepackerNode, 1, ToPacking),
ar_e2e:assert_no_chunks(RepackerNode, Chunks),

ar_test_node:update_config(RepackerNode, Config#config{
storage_modules = StorageModules,
mining_addr = AddrB
}),
ar_test_node:restart(RepackerNode),

ar_e2e:assert_empty_partition(RepackerNode, 1, ToPacking),
ar_e2e:assert_no_chunks(RepackerNode, Chunks).

start_validator_node(ValidatorNode, RepackerNode, B0) ->
{ok, Config} = ar_test_node:get_config(ValidatorNode),
?assertEqual(ar_test_node:peer_name(ValidatorNode),
ar_test_node:start_other_node(ValidatorNode, B0, Config#config{
peers = [ar_test_node:peer_ip(RepackerNode)],
start_from_latest_state = true,
auto_join = true
auto_join = true,
storage_modules = []
}, true)
),
ok.
26 changes: 13 additions & 13 deletions apps/arweave/include/ar.hrl
@@ -9,7 +9,7 @@

%% The mainnet name. Does not change at the hard forks.
-ifndef(NETWORK_NAME).
-ifdef(TEST).
-ifdef(AR_TEST).
-define(NETWORK_NAME, "arweave.localtest").
-else.
-define(NETWORK_NAME, "arweave.N.1").
@@ -101,29 +101,29 @@

%% How far into the past or future the block can be in order to be accepted for
%% processing.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(STORE_BLOCKS_BEHIND_CURRENT, 10).
-else.
-define(STORE_BLOCKS_BEHIND_CURRENT, 50).
-endif.

%% The maximum lag when fork recovery (chain reorganisation) is performed.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(CHECKPOINT_DEPTH, 4).
-else.
-define(CHECKPOINT_DEPTH, 18).
-endif.

%% The recommended depth of the block to use as an anchor for transactions.
%% The corresponding block hash is returned by the GET /tx_anchor endpoint.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(SUGGESTED_TX_ANCHOR_DEPTH, 5).
-else.
-define(SUGGESTED_TX_ANCHOR_DEPTH, 6).
-endif.

%% The number of blocks returned in the /info 'recent' field
-ifdef(TEST).
-ifdef(AR_TEST).
-define(RECENT_BLOCKS_WITHOUT_TIMESTAMP, 2).
-else.
-define(RECENT_BLOCKS_WITHOUT_TIMESTAMP, 5).
@@ -145,7 +145,7 @@
-define(BLOCK_TX_DATA_SIZE_LIMIT, ?TX_DATA_SIZE_LIMIT).

%% The maximum number of transactions (both format=1 and format=2) in a block.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(BLOCK_TX_COUNT_LIMIT, 10).
-else.
-define(BLOCK_TX_COUNT_LIMIT, 1000).
@@ -169,7 +169,7 @@
%% The maximum allowed size of transaction headers stored in mempool.
%% The data field of a format=1 transaction is considered to belong to
%% its headers.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(MEMPOOL_HEADER_SIZE_LIMIT, 50 * 1024 * 1024).
-else.
-define(MEMPOOL_HEADER_SIZE_LIMIT, 250 * 1024 * 1024).
@@ -178,7 +178,7 @@
%% The maximum allowed size of transaction data stored in mempool.
%% The format=1 transactions are not counted as their data is considered
%% to be part of the header.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(MEMPOOL_DATA_SIZE_LIMIT, 50 * 1024 * 1024).
-else.
-define(MEMPOOL_DATA_SIZE_LIMIT, 500 * 1024 * 1024).
@@ -209,7 +209,7 @@
-define(BAD_BLOCK_BAN_TIME, 24 * 60 * 60).

%% A part of transaction propagation delay independent from the size, in seconds.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(BASE_TX_PROPAGATION_DELAY, 0).
-else.
-ifndef(BASE_TX_PROPAGATION_DELAY).
@@ -221,7 +221,7 @@
%% estimate the transaction propagation delay. It does not include
%% the base delay, the time the transaction spends in the priority
%% queue, or the time it takes to propagate the transaction to peers.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(TX_PROPAGATION_BITS_PER_SECOND, 1000000000).
-else.
-define(TX_PROPAGATION_BITS_PER_SECOND, 3000000). % 3 mbps
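
As a worked example of how these two constants might combine into a delay estimate (the formula below is inferred from the comments, not code from this PR): with the mainnet values, a 1 MB transaction contributes 1_048_576 * 8 / 3_000_000 ≈ 2.8 seconds on top of the base delay.

%% Sketch, assuming delay = base + bits/rate and that ar.hrl is included.
estimated_tx_delay_s(SizeBytes) ->
    ?BASE_TX_PROPAGATION_DELAY + (SizeBytes * 8) / ?TX_PROPAGATION_BITS_PER_SECOND.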
@@ -291,7 +291,7 @@

%% The adjustment of difficulty going from SHA-384 to RandomX.
-define(RANDOMX_DIFF_ADJUSTMENT, (-14)).
-ifdef(TEST).
-ifdef(AR_TEST).
-define(RANDOMX_KEY_SWAP_FREQ, (?STORE_BLOCKS_BEHIND_CURRENT + 1)).
-define(RANDOMX_MIN_KEY_GEN_AHEAD, 1).
-define(RANDOMX_MAX_KEY_GEN_AHEAD, 4).
@@ -335,7 +335,7 @@
-define(NOTE_SIZE, 32).

%% Disk cache size in MB
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DISK_CACHE_SIZE, 1).
-define(DISK_CACHE_CLEAN_PERCENT_MAX, 20).
-else.
@@ -344,7 +344,7 @@
-endif.

%% The speed in chunks/s of moving the fork 2.5 packing threshold.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND, 1).
-else.
-define(PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND, 10).
2 changes: 1 addition & 1 deletion apps/arweave/include/ar_blacklist_middleware.hrl
@@ -11,7 +11,7 @@ end).
?RPM_BY_PATH(Path, LimitByIP, Config#config.requests_per_minute_limit)()
end).

-ifdef(TEST).
-ifdef(AR_TEST).
-define(RPM_BY_PATH(Path, LimitByIP, DefaultPathLimit), fun() ->
case Path of
[<<"chunk">> | _] ->
12 changes: 6 additions & 6 deletions apps/arweave/include/ar_config.hrl
@@ -21,7 +21,7 @@

%% The number of data sync jobs to run. Each job periodically picks a range
%% and downloads it from peers.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_SYNC_JOBS, 10).
-else.
-define(DEFAULT_SYNC_JOBS, 100).
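
A minimal sketch of the loop each sync job runs, per the comment above. pick_unsynced_range/0, pick_peers/1 and fetch_range/2 are hypothetical helpers named only for illustration; the actual implementation is more involved.

%% Sketch of one sync job: periodically pick an unsynced range and
%% download it from peers; idle briefly when nothing is left to sync.
sync_job_loop() ->
    case pick_unsynced_range() of
        none ->
            timer:sleep(1000);
        {Start, End} ->
            Peers = pick_peers(Start),
            fetch_range({Start, End}, Peers)
    end,
    sync_job_loop().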
@@ -40,14 +40,14 @@
-define(DEFAULT_DISK_POOL_DATA_ROOT_EXPIRATION_TIME_S, 30 * 60).

%% The default size limit for unconfirmed and seeded chunks, per data root.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB, 50).
-else.
-define(DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB, 10000).
-endif.

%% The default total size limit for unconfirmed and seeded chunks.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_MAX_DISK_POOL_BUFFER_MB, 100).
-else.
-define(DEFAULT_MAX_DISK_POOL_BUFFER_MB, 100000).
@@ -86,14 +86,14 @@
max(1, (erlang:system_info(schedulers_online) - 1))).

%% Accept a block from the given IP only once in so many milliseconds.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS, 10).
-else.
-define(DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS, 1000).
-endif.

%% Accept a block with the given solution hash only once in so many milliseconds.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS, 10).
-else.
-define(DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS, 2000).
@@ -120,7 +120,7 @@
-define(DEFAULT_MAX_REPLICA_2_9_ENTROPY_CACHE_SIZE, 33_554_432). % 8_388_608 * 4.

%% The number of 2.9 storage modules allowed to prepare the storage at a time.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(DEFAULT_REPLICA_2_9_WORKERS, 2).
-else.
-define(DEFAULT_REPLICA_2_9_WORKERS, 8).
16 changes: 8 additions & 8 deletions apps/arweave/include/ar_consensus.hrl
@@ -38,7 +38,7 @@
-define(RANDOMX_SCRATCHPAD_SIZE, 2097152).

%% The size in bytes of the total RX2 entropy (# of lanes * scratchpad size).
-ifdef(TEST).
-ifdef(AR_TEST).
%% 24_576 bytes worth of entropy.
-define(REPLICA_2_9_ENTROPY_SIZE, (3 * ?COMPOSITE_PACKING_SUB_CHUNK_SIZE)).
-else.
@@ -72,7 +72,7 @@
%% map to chunks that are as far as possible from each other within a partition. With
%% an entropy size of 8_388_608 bytes and a slice size of 8192 bytes, there are 1024 slices per
%% entropy, which yields 1024 sectors per partition.
-ifdef(TEST).
-ifdef(AR_TEST).
%% 2_097_152 / 24_576 = 85.33333333333333
%% (85 + 11) = 96, the nearest multiple of 32
-define(REPLICA_2_9_ENTROPY_COUNT, 96).
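
The comment's arithmetic can be reproduced directly. The helper below is one plausible reading of the rounding rule (take the ceiling, then round up to a multiple of 32); it is illustrative, not code from this PR:

%% Sketch: 2_097_152 div 24_576 = 85 with a remainder, so 86 entropies
%% are needed; rounding 86 up to the nearest multiple of 32 gives 96,
%% matching the test-mode define above.
entropy_count(PartitionSize, EntropySize) ->
    Needed = (PartitionSize + EntropySize - 1) div EntropySize,
    ((Needed + 31) div 32) * 32.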
@@ -89,29 +89,29 @@
%% The size of the mining partition. The weave is broken down into partitions
%% of equal size. A miner can search for a solution in each of the partitions
%% in parallel, per mining address.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(PARTITION_SIZE, 2_097_152). % 8 * 256 * 1024
-else.
-define(PARTITION_SIZE, 3_600_000_000_000). % 90% of 4 TB.
-endif.
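
Because the weave is split into fixed-size partitions, mapping an absolute weave offset to its partition is a single integer division. The helpers below are illustrative, not part of this PR:

%% Sketch: locate the mining partition for an absolute weave byte offset.
partition_index(Offset) ->
    Offset div ?PARTITION_SIZE.

%% Sketch: the byte range covered by a given partition index.
partition_bounds(Index) ->
    {Index * ?PARTITION_SIZE, (Index + 1) * ?PARTITION_SIZE}.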

%% The size of a recall range. The first range is randomly chosen from the given
%% mining partition. The second range is chosen from the entire weave.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(RECALL_RANGE_SIZE, (128 * 1024)).
-else.
-define(RECALL_RANGE_SIZE, 26_214_400). % == 25 * 1024 * 1024
-endif.

%% The size of a recall range before the fork 2.8.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(LEGACY_RECALL_RANGE_SIZE, (512 * 1024)).
-else.
-define(LEGACY_RECALL_RANGE_SIZE, 104_857_600). % == 100 * 1024 * 1024
-endif.

-ifdef(FORKS_RESET).
-ifdef(TEST).
-ifdef(AR_TEST).
-define(STRICT_DATA_SPLIT_THRESHOLD, (262144 * 3)).
-else.
-define(STRICT_DATA_SPLIT_THRESHOLD, 0).
@@ -123,7 +123,7 @@
-endif.

-ifdef(FORKS_RESET).
-ifdef(TEST).
-ifdef(AR_TEST).
-define(MERKLE_REBASE_SUPPORT_THRESHOLD, (?STRICT_DATA_SPLIT_THRESHOLD * 2)).
-else.
-define(MERKLE_REBASE_SUPPORT_THRESHOLD, 0).
@@ -136,7 +136,7 @@

%% Recall bytes are only picked from the subspace up to the size
%% of the weave at the block of the depth defined by this constant.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(SEARCH_SPACE_UPPER_BOUND_DEPTH, 3).
-else.
-define(SEARCH_SPACE_UPPER_BOUND_DEPTH, 50).
4 changes: 2 additions & 2 deletions apps/arweave/include/ar_data_discovery.hrl
@@ -2,14 +2,14 @@
%% an interval, we process it bucket by bucket: for every bucket, a few peers known to
%% have some data there are asked for the intervals they hold, and we check which of
%% them cross the desired interval.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(NETWORK_DATA_BUCKET_SIZE, 10_000_000). % 10 MB
-else.
-define(NETWORK_DATA_BUCKET_SIZE, 10_000_000_000). % 10 GB
-endif.
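
A sketch of the bucket-by-bucket walk the comment describes; peers_with_data/1, peer_intervals/2 and intersect/2 are hypothetical helpers, and the real logic lives elsewhere in the codebase:

%% Sketch: sync an interval bucket by bucket. For each bucket, ask a few
%% peers known to hold data there for their synced intervals, keeping
%% only the parts that overlap the desired interval.
sync_interval(Start, End) ->
    FirstBucket = Start div ?NETWORK_DATA_BUCKET_SIZE,
    LastBucket = (End - 1) div ?NETWORK_DATA_BUCKET_SIZE,
    lists:foreach(
        fun(Bucket) ->
            Peers = peers_with_data(Bucket),
            [intersect(peer_intervals(Peer, Bucket), {Start, End})
                || Peer <- Peers]
        end,
        lists:seq(FirstBucket, LastBucket)
    ).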

%% The maximum number of synced intervals shared with peers.
-ifdef(TEST).
-ifdef(AR_TEST).
-define(MAX_SHARED_SYNCED_INTERVALS_COUNT, 20).
-else.
-define(MAX_SHARED_SYNCED_INTERVALS_COUNT, 10_000).