Skip to content
Permalink

Comparing changes

This is a direct comparison between two commits made in this repository or its related repositories. View the default comparison for this range or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: ArweaveTeam/arweave
Failed to load repositories. Confirm that the selected base ref is valid, then try again.
Loading
base: f5b43581c8f86186ec99c01d5b5285cdd4e69869
Choose a base ref
..
head repository: ArweaveTeam/arweave
Failed to load repositories. Confirm that the selected head ref is valid, then try again.
Loading
compare: 1a8a2ce25b993da9e0cec6282ed00abbf28da073
Choose a head ref
Showing with 51 additions and 50 deletions.
  1. +51 −50 apps/arweave/src/ar_mining_server.erl
101 changes: 51 additions & 50 deletions apps/arweave/src/ar_mining_server.erl
Original file line number Diff line number Diff line change
@@ -409,56 +409,8 @@ update_cache_limits(State) ->
%% Recompute the mining chunk-cache limits for the current number of active
%% partitions and apply them to the workers and server state.
%% With zero active partitions there is nothing to cache, so the state is
%% returned unchanged.
update_cache_limits(0, State) ->
State;
update_cache_limits(NumActivePartitions, State) ->
%% Derive all limits from the partition count and packing difficulty.
{MinimumCacheLimitMiB, OverallCacheLimitMiB, PartitionCacheLimit, VDFQueueLimit,
GarbageCollectionFrequency} =
calculate_cache_limits(NumActivePartitions, State#state.packing_difficulty),
%% Only propagate limits when the per-partition limit actually changed;
%% otherwise skip the worker casts and logging entirely.
case PartitionCacheLimit == State#state.chunk_cache_limit of
true ->
State;
false ->
%% Push the new per-partition and VDF queue limits to every worker.
maps:foreach(
fun(_Partition, Worker) ->
ar_mining_worker:set_cache_limits(
Worker, PartitionCacheLimit, VDFQueueLimit)
end,
State#state.workers
),

ar:console(
"~nSetting the mining chunk cache size limit to ~B MiB "
"(~B sub-chunks per partition).~n",
[OverallCacheLimitMiB, PartitionCacheLimit]),
?LOG_INFO([{event, update_mining_cache_limits},
{overall_limit_mb, OverallCacheLimitMiB},
{per_partition_sub_chunks, PartitionCacheLimit},
{vdf_queue_limit_steps, VDFQueueLimit}]),
%% Warn the operator when the configured cache is below the computed
%% minimum; mining still proceeds, just possibly slower.
case OverallCacheLimitMiB < MinimumCacheLimitMiB of
true ->
ar:console("~nChunk cache size limit (~p MiB) is below minimum limit of "
"~p MiB. Mining performance may be impacted.~n"
"Consider changing the 'mining_cache_size_mb' option.",
[OverallCacheLimitMiB, MinimumCacheLimitMiB]);
false -> ok
end,

%% Schedule the periodic manual GC loop the first time a frequency is
%% set; on later updates keep the existing reference so the already
%% running loop is not duplicated.
GCRef =
case State#state.gc_frequency_ms == undefined of
true ->
%% This is the first time setting the garbage collection frequency,
%% so kick off the periodic call.
Ref = make_ref(),
ar_util:cast_after(GarbageCollectionFrequency, ?MODULE,
{manual_garbage_collect, Ref}),
Ref;
false ->
State#state.gc_process_ref
end,
State#state{
chunk_cache_limit = PartitionCacheLimit,
gc_frequency_ms = GarbageCollectionFrequency,
gc_process_ref = GCRef
}
end.
Limits = calculate_cache_limits(NumActivePartitions, State#state.packing_difficulty),
maybe_update_cache_limits(Limits, State).

calculate_cache_limits(NumActivePartitions, PackingDifficulty) ->
%% This allows the cache to store enough chunks for 4 concurrent VDF steps per partition.
@@ -501,6 +453,55 @@ calculate_cache_limits(NumActivePartitions, PackingDifficulty) ->
{MinimumCacheLimitMiB, OverallCacheLimitMiB, PartitionCacheLimit, VDFQueueLimit,
GarbageCollectionFrequency}.

%% Apply freshly calculated cache limits to the workers and the server state.
%% When the per-partition limit is unchanged from the current state, this is
%% a no-op and the state is returned as-is.
maybe_update_cache_limits({_, _, PartitionCacheLimit, _, _},
		#state{chunk_cache_limit = PartitionCacheLimit} = State) ->
	State;
maybe_update_cache_limits(Limits, State) ->
	{MinMiB, OverallMiB, PerPartitionLimit, VDFLimit, GCFrequency} = Limits,
	%% Push the new per-partition and VDF queue limits to every worker.
	maps:foreach(
		fun(_Partition, Worker) ->
			ar_mining_worker:set_cache_limits(Worker, PerPartitionLimit, VDFLimit)
		end,
		State#state.workers),

	ar:console(
		"~nSetting the mining chunk cache size limit to ~B MiB "
		"(~B sub-chunks per partition).~n",
		[OverallMiB, PerPartitionLimit]),
	?LOG_INFO([{event, update_mining_cache_limits},
			{overall_limit_mb, OverallMiB},
			{per_partition_sub_chunks, PerPartitionLimit},
			{vdf_queue_limit_steps, VDFLimit}]),
	%% Warn the operator when the configured cache is smaller than the
	%% computed minimum; mining still proceeds, just possibly slower.
	case OverallMiB >= MinMiB of
		true ->
			ok;
		false ->
			ar:console("~nChunk cache size limit (~p MiB) is below minimum limit of "
					"~p MiB. Mining performance may be impacted.~n"
					"Consider changing the 'mining_cache_size_mb' option.",
					[OverallMiB, MinMiB])
	end,

	%% Kick off the periodic manual GC loop the first time a frequency is set;
	%% on later updates keep the existing reference so the running loop is not
	%% duplicated.
	GCRef =
		case State#state.gc_frequency_ms of
			undefined ->
				NewRef = make_ref(),
				ar_util:cast_after(GCFrequency, ?MODULE,
						{manual_garbage_collect, NewRef}),
				NewRef;
			_ ->
				State#state.gc_process_ref
		end,
	State#state{
		chunk_cache_limit = PerPartitionLimit,
		gc_frequency_ms = GCFrequency,
		gc_process_ref = GCRef
	}.

%% Distribute a mining candidate across all partitions served by the I/O
%% module, delegating to distribute_output/3.
distribute_output(Candidate, State) ->
	Partitions = ar_mining_io:get_partitions(),
	distribute_output(Partitions, Candidate, State).