Freeverb merge dry/wet into mix parameter #45

Merged
merged 3 commits on Feb 14, 2024
67 changes: 60 additions & 7 deletions devices/freeverb/freeverb.cc
@@ -1,6 +1,7 @@
// This Source Code Form is licensed MPL-2.0: http://mozilla.org/MPL/2.0
#include "ase/processor.hh"
#include "ase/internal.hh"
#include "devices/blepsynth/linearsmooth.hh"

namespace {

@@ -15,6 +16,8 @@ class Freeverb : public AudioProcessor {
IBusId stereoin;
OBusId stereout;
revmodel model;
LinearSmooth mix_smooth_;
bool mix_smooth_reset_ = false;
public:
Freeverb (const ProcessorSetup &psetup) :
AudioProcessor (psetup)
@@ -28,7 +31,7 @@ class Freeverb : public AudioProcessor {
info.website_url = "https://beast.testbit.eu";
info.creator_name = "Jezar at Dreampoint";
}
enum Params { MODE = 1, DRY, WET, ROOMSIZE, DAMPING, WIDTH };
enum Params { MODE = 1, MIX, ROOMSIZE, DAMPING, WIDTH };
void
initialize (SpeakerArrangement busses) override
{
@@ -40,8 +43,7 @@ class Freeverb : public AudioProcessor {
ParameterMap pmap;

pmap.group = _("Reverb Settings");
pmap[DRY] = Param ("drylevel", _("Dry level"), _("Dry"), scaledry * initialdry, "dB", { 0, scaledry });
pmap[WET] = Param ("wetlevel", _("Wet level"), _("Wet"), scalewet * initialwet, "dB", { 0, scalewet });
pmap[MIX] = Param { "mix", "Mix dry/wet", "Mix", 30, "%", { 0, 100 } };

ChoiceS centries;
centries += { "Signflip 2000", _("Preserve May 2000 Freeverb damping sign flip") };
@@ -62,8 +64,9 @@ class Freeverb : public AudioProcessor {
{
switch (Params (paramid))
{
case WET: return model.setwet (get_param (paramid) / scalewet);
case DRY: return model.setdry (get_param (paramid) / scaledry);
case MIX: mix_smooth_.set (get_param (paramid) * 0.01, mix_smooth_reset_);
mix_smooth_reset_ = false;
break;
case ROOMSIZE: return model.setroomsize ((get_param (paramid) - offsetroom) / scaleroom);
case WIDTH: return model.setwidth (0.01 * get_param (paramid));
case MODE:
@@ -75,18 +78,68 @@ class Freeverb : public AudioProcessor {
reset (uint64 target_stamp) override
{
model.setmode (0); // no-freeze, allow mute
model.setdry (0); // no dry, we mix the output signal during render
model.setwet (1); // only need reverb wet
model.mute(); // silence internal buffers
mix_smooth_.reset (sample_rate(), 0.020);
mix_smooth_reset_ = true;
adjust_all_params();
}
void
render_audio (float *input0, float *input1, float *output0, float *output1, uint n_frames)
{
if (!n_frames)
return;

// this only generates the wet signal
model.processreplace (input0, input1, output0, output1, n_frames, 1);

if (mix_smooth_.is_constant()) // faster version: mix parameter is constant during the block
{
float mix = mix_smooth_.get_next();
for (uint i = 0; i < n_frames; i++)
{
output0[i] = input0[i] * (1 - mix) + output0[i] * mix;
output1[i] = input1[i] * (1 - mix) + output1[i] * mix;
}
}
else
{
for (uint i = 0; i < n_frames; i++)
{
float mix = mix_smooth_.get_next();
output0[i] = input0[i] * (1 - mix) + output0[i] * mix;
output1[i] = input1[i] * (1 - mix) + output1[i] * mix;
}
}
}
void
render (uint n_frames) override
{
apply_input_events();
float *input0 = const_cast<float*> (ifloats (stereoin, 0));
float *input1 = const_cast<float*> (ifloats (stereoin, 1));
float *output0 = oblock (stereout, 0);
float *output1 = oblock (stereout, 1);
model.processreplace (input0, input1, output0, output1, n_frames, 1);

uint offset = 0;
MidiEventInput evinput = midi_event_input();
for (const auto &ev : evinput)
{
// process any audio that is before the event
render_audio (input0 + offset, input1 + offset, output0 + offset, output1 + offset, ev.frame - offset);
offset = ev.frame;

switch (ev.message())
{
case MidiMessage::PARAM_VALUE:
apply_event (ev);
adjust_param (ev.param);
break;
default: ;
}
}
// process frames after last event
render_audio (input0 + offset, input1 + offset, output0 + offset, output1 + offset, n_frames - offset);
}
};
static auto freeverb = register_audio_processor<Freeverb> ("Ase::Devices::Freeverb");
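
The mix smoothing relies on LinearSmooth from devices/blepsynth/linearsmooth.hh, which is not part of this diff. Below is a minimal sketch of the interface the Freeverb changes assume (reset (rate, seconds), set (target, now), is_constant(), get_next()); the real header may differ in details.

// Minimal sketch of the LinearSmooth interface used above; the actual
// devices/blepsynth/linearsmooth.hh implementation may differ.
#include <algorithm>

class LinearSmoothSketch {
  float    value_ = 0, target_ = 0, step_ = 0;
  unsigned total_steps_ = 1, steps_left_ = 0;
public:
  void
  reset (unsigned sample_rate, float seconds)   // e.g. reset (sample_rate(), 0.020)
  {
    total_steps_ = std::max<unsigned> (1, unsigned (sample_rate * seconds));
    steps_left_ = 0;
  }
  void
  set (float target, bool now = false)          // now=true jumps without ramping
  {
    target_ = target;
    if (now)
      {
        value_ = target_;
        steps_left_ = 0;
      }
    else
      {
        step_ = (target_ - value_) / total_steps_;
        steps_left_ = total_steps_;
      }
  }
  bool  is_constant () const { return steps_left_ == 0; }  // true once the ramp is done
  float
  get_next ()                                   // advance by one sample frame
  {
    if (steps_left_)
      {
        value_ += step_;
        if (!--steps_left_)
          value_ = target_;     // land exactly on the target
      }
    return value_;
  }
};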
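
For reference, the single mix control corresponds to a linear crossfade between the unprocessed input and the pure reverb signal produced by the model; a small illustration of the resulting gains follows (the helper name is illustrative, not part of the patch).

// Illustrative only: gains produced by the linear crossfade in render_audio(),
// with mix given in percent as exposed by the "mix" parameter.
static inline void
mix_to_gains (float mix_percent, float &dry_gain, float &wet_gain)
{
  const float mix = mix_percent * 0.01f;  // 0…100 %  ->  0…1
  dry_gain = 1 - mix;                     // default 30 %: dry gain 0.7
  wet_gain = mix;                         //               wet gain 0.3
}
// mix_to_gains (0, d, w)   -> d=1, w=0: output is the unprocessed input
// mix_to_gains (100, d, w) -> d=0, w=1: output is the pure reverb signal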