Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Improve performance test #540

Merged
merged 9 commits into from
Mar 27, 2023
Merged
8 changes: 4 additions & 4 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -155,11 +155,11 @@ jobs:
at: .
- run: cp -r benchmark/ ~/benchmark_backup/
- run: cp mix.exs ~/benchmark_backup/
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/run.exs /root/results/results.res
- run: git checkout master
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/run.exs /root/results/feature_branch_results
- run: git checkout -f master
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why?

Copy link
Contributor Author

@varsill varsill Mar 23, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because on this feature branch I needed to delete a file that still exists on master (benchmark/metrics/final_memory.ex) - https://app.circleci.com/pipelines/github/membraneframework/membrane_core/2833/workflows/270f902c-ef60-4113-80cd-de23b8fc5c5a/jobs/7641

- run: cp ~/benchmark_backup/mix.exs ~/app
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -v ~/benchmark_backup/benchmark:/root/app/benchmark -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/run.exs /root/results/results_ref.res
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -v ~/benchmark_backup/benchmark:/root/app/benchmark -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/compare.exs /root/results/results.res /root/results/results_ref.res
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -v ~/benchmark_backup/benchmark:/root/app/benchmark -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/run.exs /root/results/master_results
- run: docker run -e MIX_ENV=benchmark -v ./:/root/app -v ~/results:/root/results -v ~/benchmark_backup/benchmark:/root/app/benchmark -w /root/app membraneframeworklabs/docker_membrane mix run benchmark/compare.exs /root/results/feature_branch_results /root/results/master_results
- run:
command: rm ~/results/*
when: always
Expand Down
16 changes: 9 additions & 7 deletions benchmark/compare.exs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
defmodule Benchmark.Compare do
require Logger

def run(results, ref_results) do
def run(results, ref_results, results_name, ref_results_name) do
if Map.keys(results) != Map.keys(ref_results),
do: raise("Incompatible performance test result files!")

Expand All @@ -20,17 +20,17 @@ defmodule Benchmark.Compare do
Enum.map(Map.keys(test_case_results), fn metric_module ->
"""
METRIC: #{metric_module}
#{inspect(Map.get(test_case_results, metric_module), pretty: true)}
vs
#{inspect(Map.get(test_case_results_ref, metric_module), pretty: true)}

1. In #{results_name}:
#{inspect(Map.get(test_case_results_ref, metric_module), pretty: true, limit: :infinity)}
2. In #{ref_results_name}:
#{inspect(Map.get(test_case_results, metric_module), pretty: true, limit: :infinity)}
"""
end)
|> Enum.join()

Logger.debug("""
TEST CASE:
#{inspect(test_case, pretty: true)}
#{inspect(test_case, pretty: true, limit: :infinity)}

#{results_str}

Expand All @@ -50,4 +50,6 @@ end
[results_filename, ref_results_filename] = System.argv() |> Enum.take(2)
results = File.read!(results_filename) |> :erlang.binary_to_term()
ref_results = File.read!(ref_results_filename) |> :erlang.binary_to_term()
Benchmark.Compare.run(results, ref_results)
results_name = String.split(results_filename, "/") |> Enum.at(-1)
ref_results_name = String.split(ref_results_filename, "/") |> Enum.at(-1)
Benchmark.Compare.run(results, ref_results, results_name, ref_results_name)
32 changes: 0 additions & 32 deletions benchmark/metric/final_memory.ex

This file was deleted.

4 changes: 2 additions & 2 deletions benchmark/metric/in_progress_memory.ex
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
defmodule Benchmark.Metric.InProgressMemory do
@behaviour Benchmark.Metric

@allowed_worsening_factor 0.5
@tolerance_factor 0.5
@sampling_period 100

@impl true
def assert(memory_samples, memory_samples_ref, test_case) do
cumulative_memory = integrate(memory_samples)
cumulative_memory_ref = integrate(memory_samples_ref)

if cumulative_memory > cumulative_memory_ref * (1 + @allowed_worsening_factor),
if cumulative_memory > cumulative_memory_ref * (1 + @tolerance_factor),
do:
raise(
"The memory performance has got worse! For test case: #{inspect(test_case, pretty: true)}
Expand Down
4 changes: 2 additions & 2 deletions benchmark/metric/message_queues_length.ex
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
defmodule Benchmark.Metric.MessageQueuesLength do
@behaviour Benchmark.Metric

@allowed_worsening_factor 0.5
@tolerance_factor 0.5
@sampling_period 100

@impl true
Expand All @@ -10,7 +10,7 @@ defmodule Benchmark.Metric.MessageQueuesLength do
cumulative_queues_length_ref = integrate(queues_lengths_ref)

if cumulative_queues_length >
cumulative_queues_length_ref * (1 + @allowed_worsening_factor),
cumulative_queues_length_ref * (1 + @tolerance_factor),
do:
raise(
"The cumulative queues length has got worse! For test case: #{inspect(test_case, pretty: true)}
Expand Down
4 changes: 2 additions & 2 deletions benchmark/metric/time.ex
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
defmodule Benchmark.Metric.Time do
@behaviour Benchmark.Metric

@allowed_worsening_factor 0.1
@tolerance_factor 0.15

@impl true
def assert(time, time_ref, test_case) do
if time > time_ref * (1 + @allowed_worsening_factor),
if time > time_ref * (1 + @tolerance_factor),
do:
raise(
"The time performance has got worse! For test case: #{inspect(test_case, pretty: true)} the test
Expand Down
9 changes: 3 additions & 6 deletions benchmark/run.exs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ defmodule Benchmark.Run do
alias Membrane.Pad
alias Benchmark.Run.BranchedFilter
alias Benchmark.Run.LinearFilter
alias Benchmark.Metric.{FinalMemory, InProgressMemory, MessageQueuesLength, Time}
alias Benchmark.Metric.{InProgressMemory, MessageQueuesLength, Time}

require Logger
require Membrane.RCPipeline
Expand All @@ -72,7 +72,7 @@ defmodule Benchmark.Run do
reductions: 1_000,
max_random: 1,
number_of_filters: 100,
number_of_buffers: 50000,
number_of_buffers: 50_000,
buffer_size: 1
],
linear: [
Expand All @@ -92,7 +92,7 @@ defmodule Benchmark.Run do
with_branches: [
struct: [{1, 2}, {1, 2}, {2, 1}, {2, 1}],
reductions: 100,
number_of_buffers: 50_000,
number_of_buffers: 500_000,
buffer_size: 1,
max_random: 10
]
Expand Down Expand Up @@ -245,7 +245,6 @@ defmodule Benchmark.Run do
spec = prepare_pipeline(test_type, params)

time_meassurement = Time.start_meassurement()
final_memory_meassurement = FinalMemory.start_meassurement()

{:ok, _supervisor_pid, pipeline_pid} =
Benchmark.Run.Pipeline.start(
Expand All @@ -267,7 +266,6 @@ defmodule Benchmark.Run do
end

time = Time.stop_meassurement(time_meassurement)
final_memory = FinalMemory.stop_meassurement(final_memory_meassurement)
in_progress_memory = InProgressMemory.stop_meassurement(in_progress_memory_meassurment)

message_queues_length =
Expand All @@ -277,7 +275,6 @@ defmodule Benchmark.Run do

%{
Time => time,
FinalMemory => final_memory,
InProgressMemory => in_progress_memory,
MessageQueuesLength => message_queues_length
}
Expand Down