
AVAudioEngine version AudioDeviceModule #158

Draft
wants to merge 11 commits into base: m125_release
5 changes: 0 additions & 5 deletions audio/audio_send_stream.cc
@@ -415,11 +415,6 @@ void AudioSendStream::SetMuted(bool muted) {
channel_send_->SetInputMute(muted);
}

bool AudioSendStream::GetMuted() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
return channel_send_->InputMute();
}

webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
return GetStats(true);
}
1 change: 0 additions & 1 deletion audio/audio_send_stream.h
@@ -94,7 +94,6 @@ class AudioSendStream final : public webrtc::AudioSendStream,
int payload_frequency,
int event,
int duration_ms) override;
bool GetMuted() override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
72 changes: 14 additions & 58 deletions audio/audio_state.cc
@@ -98,26 +98,22 @@ void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
UpdateAudioTransportWithSendingStreams();

// Make sure recording is initialized; start recording if enabled.
if (ShouldRecord()) {
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {

// TODO: Verify if the following windows only logic is still required.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
#if defined(WEBRTC_WIN)
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
adm->StartPlayout();
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
#endif
adm->StartRecording();
adm->StartPlayout();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
#endif
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
@@ -127,10 +123,7 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
auto count = sending_streams_.erase(stream);
RTC_DCHECK_EQ(1, count);
UpdateAudioTransportWithSendingStreams();

bool should_record = ShouldRecord();
RTC_LOG(LS_INFO) << "RemoveSendingStream: should_record = " << should_record;
if (!should_record) {
if (sending_streams_.empty()) {
config_.audio_device_module->StopRecording();
}
}
@@ -158,7 +151,7 @@ void AudioState::SetRecording(bool enabled) {
if (recording_enabled_ != enabled) {
recording_enabled_ = enabled;
if (enabled) {
if (ShouldRecord()) {
if (!sending_streams_.empty()) {
config_.audio_device_module->StartRecording();
}
} else {
@@ -218,43 +211,6 @@ void AudioState::UpdateNullAudioPollerState() {
null_audio_poller_.Stop();
}
}

void AudioState::OnMuteStreamChanged() {

auto* adm = config_.audio_device_module.get();
bool should_record = ShouldRecord();

RTC_LOG(LS_INFO) << "OnMuteStreamChanged: should_record = " << should_record;
if (should_record && !adm->Recording()) {
if (adm->InitRecording() == 0) {
adm->StartRecording();
}
} else if (!should_record && adm->Recording()) {
adm->StopRecording();
}
}

bool AudioState::ShouldRecord() {
RTC_LOG(LS_INFO) << "ShouldRecord";
// no streams to send
if (sending_streams_.empty()) {
RTC_LOG(LS_INFO) << "ShouldRecord: send stream = empty";
return false;
}

int stream_count = sending_streams_.size();

int muted_count = 0;
for (const auto& kv : sending_streams_) {
if (kv.first->GetMuted()) {
muted_count++;
}
}

RTC_LOG(LS_INFO) << "ShouldRecord: " << muted_count << " muted, " << stream_count << " sending";
return muted_count != stream_count;
}

} // namespace internal

rtc::scoped_refptr<AudioState> AudioState::Create(
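Note on the audio_state.cc change above: recording control reverts to the upstream rule, i.e. the capture device should be running exactly when recording is enabled and at least one send stream exists; per-stream mute no longer factors in (that is now handled at the device level, see webrtc_voice_engine.cc below). A minimal standalone sketch of that rule, using a hypothetical helper name (UpdateRecordingState) and only existing AudioDeviceModule methods:

#include "modules/audio_device/include/audio_device.h"

void UpdateRecordingState(webrtc::AudioDeviceModule* adm,
                          bool recording_enabled,
                          size_t sending_stream_count) {
  const bool want_recording = recording_enabled && sending_stream_count > 0;
  if (want_recording && !adm->Recording()) {
    // Mirrors AddSendingStream()/SetRecording(true) above.
    if (adm->InitRecording() == 0) {
      adm->StartRecording();
    }
  } else if (!want_recording && adm->Recording()) {
    // Mirrors RemoveSendingStream()/SetRecording(false) above.
    adm->StopRecording();
  }
}
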
5 changes: 0 additions & 5 deletions audio/audio_state.h
@@ -47,8 +47,6 @@ class AudioState : public webrtc::AudioState {

void SetStereoChannelSwapping(bool enable) override;

void OnMuteStreamChanged() override;

AudioDeviceModule* audio_device_module() {
RTC_DCHECK(config_.audio_device_module);
return config_.audio_device_module.get();
@@ -66,9 +64,6 @@
void UpdateAudioTransportWithSendingStreams();
void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_);

// Returns true when at least 1 stream exists and all streams are not muted.
bool ShouldRecord();

SequenceChecker thread_checker_;
SequenceChecker process_thread_checker_{SequenceChecker::kDetached};
const webrtc::AudioState::Config config_;
4 changes: 2 additions & 2 deletions audio/channel_send.cc
@@ -100,8 +100,6 @@ class ChannelSend : public ChannelSendInterface,
// Muting, Volume and Level.
void SetInputMute(bool enable) override;

bool InputMute() const override;

// Stats.
ANAStats GetANAStatistics() const override;

@@ -165,6 +163,8 @@ class ChannelSend : public ChannelSendInterface,
size_t payloadSize,
int64_t absolute_capture_timestamp_ms) override;

bool InputMute() const;

int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp_without_offset,
2 changes: 0 additions & 2 deletions audio/channel_send.h
@@ -83,8 +83,6 @@ class ChannelSendInterface {
virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;

virtual bool InputMute() const = 0;
virtual void SetInputMute(bool muted) = 0;

virtual void ProcessAndEncodeAudio(
1 change: 0 additions & 1 deletion call/audio_send_stream.h
@@ -190,7 +190,6 @@ class AudioSendStream : public AudioSender {
int event,
int duration_ms) = 0;

virtual bool GetMuted() = 0;
virtual void SetMuted(bool muted) = 0;

virtual Stats GetStats() const = 0;
3 changes: 0 additions & 3 deletions call/audio_state.h
@@ -59,9 +59,6 @@ class AudioState : public rtc::RefCountInterface {

virtual void SetStereoChannelSwapping(bool enable) = 0;

// Notify the AudioState that a stream updated it's mute state.
virtual void OnMuteStreamChanged() = 0;

static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);

6 changes: 4 additions & 2 deletions media/engine/webrtc_voice_engine.cc
@@ -1698,8 +1698,10 @@ bool WebRtcVoiceSendChannel::MuteStream(uint32_t ssrc, bool muted) {
ap->set_output_will_be_muted(all_muted);
}

// Notfy the AudioState that the mute state has updated.
engine_->audio_state()->OnMuteStreamChanged();
webrtc::AudioDeviceModule* adm = engine()->adm();
if (adm) {
adm->SetMicrophoneMute(all_muted);
}

return true;
}
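Note on the MuteStream() change above: instead of notifying AudioState (the removed OnMuteStreamChanged()/ShouldRecord() path), the aggregate mute state is now pushed straight into the AudioDeviceModule via SetMicrophoneMute(), so the AVAudioEngine-based device can mute the input path while capture keeps running. A hedged sketch of that contract, assuming the surrounding channel/ADM setup; only existing AudioDeviceModule methods are used:

#include "modules/audio_device/include/audio_device.h"
#include "rtc_base/checks.h"

// Expected state once every send stream on the channel has been muted.
void CheckDeviceLevelMute(webrtc::AudioDeviceModule* adm) {
  bool mic_muted = false;
  adm->MicrophoneMute(&mic_muted);
  RTC_DCHECK(mic_muted);         // MuteStream() forwarded the aggregate mute.
  RTC_DCHECK(adm->Recording());  // Capture is not torn down, unlike the old
                                 // ShouldRecord()-based behavior.
}
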
3 changes: 1 addition & 2 deletions media/engine/webrtc_voice_engine.h
@@ -132,8 +132,6 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
// Moved to public so WebRtcVoiceMediaChannel can access it.
webrtc::AudioState* audio_state();

private:
// Every option that is "set" will be applied. Every option not "set" will be
@@ -147,6 +145,7 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();

std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
4 changes: 2 additions & 2 deletions modules/audio_device/audio_device_data_observer.cc
@@ -307,8 +307,8 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport {
}
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override {
return impl_->SetAudioDeviceSink(sink);
int32_t SetObserver(AudioDeviceObserver* observer) const override {
return impl_->SetObserver(observer);
}

protected:
2 changes: 1 addition & 1 deletion modules/audio_device/audio_device_generic.h
@@ -135,7 +135,7 @@ class AudioDeviceGeneric {
virtual int GetRecordAudioParameters(AudioParameters* params) const;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) { return -1; }
virtual int32_t SetObserver(AudioDeviceObserver* observer) { return -1; }
virtual int32_t GetPlayoutDevice() const { return -1; }
virtual int32_t GetRecordingDevice() const { return -1; }

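Note on the SetAudioDeviceSink -> SetObserver rename above: AudioDeviceGeneric keeps a non-pure default that returns -1 ("not supported"), so only devices that actually emit device events need to override it. The AudioDeviceObserver type itself is declared elsewhere in this PR and is not shown in this diff; the sketch below only illustrates the opt-in pattern, with a hypothetical device class and the remaining pure-virtual overrides elided:

class ExampleAudioDevice : public AudioDeviceGeneric {
 public:
  int32_t SetObserver(AudioDeviceObserver* observer) override {
    observer_ = observer;  // Store it; notify on route/device changes later.
    return 0;              // 0 = supported, in contrast to the -1 default above.
  }
  // ... the remaining AudioDeviceGeneric pure-virtual methods are omitted ...
 private:
  AudioDeviceObserver* observer_ = nullptr;
};
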
20 changes: 10 additions & 10 deletions modules/audio_device/audio_device_impl.cc
@@ -34,11 +34,12 @@
#if defined(WEBRTC_ENABLE_LINUX_PULSE)
#include "modules/audio_device/linux/audio_device_pulse_linux.h"
#endif
#elif defined(WEBRTC_IOS)
#include "sdk/objc/native/src/audio/audio_device_ios.h"
#elif defined(WEBRTC_MAC)
#include "modules/audio_device/mac/audio_device_mac.h"
#endif

#if defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
#include "modules/audio_device/audio_engine_device.h"
#endif

#if defined(WEBRTC_DUMMY_FILE_DEVICES)
#include "modules/audio_device/dummy/file_audio_device.h"
#include "modules/audio_device/dummy/file_audio_device_factory.h"
@@ -246,16 +247,15 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
// iOS ADM implementation.
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/bypass_voice_processing_));
audio_device_.reset(new AudioEngineDevice(/*bypass_voice_processing=*/bypass_voice_processing_));
RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)

// Mac OS X ADM implementation.
#elif defined(WEBRTC_MAC)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(new AudioDeviceMac());
audio_device_.reset(new AudioEngineDevice(/*bypass_voice_processing=*/false));
RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized.";
}
#endif // WEBRTC_MAC
@@ -902,9 +902,9 @@ int AudioDeviceModuleImpl::GetRecordAudioParameters(
}
#endif // WEBRTC_IOS

int32_t AudioDeviceModuleImpl::SetAudioDeviceSink(AudioDeviceSink* sink) const {
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << sink << ")";
int32_t ok = audio_device_->SetAudioDeviceSink(sink);
int32_t AudioDeviceModuleImpl::SetObserver(AudioDeviceObserver* observer) const {
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << observer << ")";
int32_t ok = audio_device_->SetObserver(observer);
RTC_LOG(LS_INFO) << "output: " << ok;
return ok;
}
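Note on the audio_device_impl.cc change above: on both Apple platforms the platform-default audio layer now constructs the shared AudioEngineDevice (with the voice-processing bypass flag only plumbed through on iOS), replacing ios_adm::AudioDeviceIOS and AudioDeviceMac. Embedders keep using the standard factory; a minimal usage sketch, assuming the stock upstream factory functions:

#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"

rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateDefaultAdm() {
  // Kept static so the task queue factory outlives the returned ADM.
  static auto task_queue_factory = webrtc::CreateDefaultTaskQueueFactory();
  // On iOS/macOS the platform-default layer now resolves to AudioEngineDevice.
  return webrtc::AudioDeviceModule::Create(
      webrtc::AudioDeviceModule::kPlatformDefaultAudio,
      task_queue_factory.get());
}
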
2 changes: 1 addition & 1 deletion modules/audio_device/audio_device_impl.h
@@ -156,7 +156,7 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int GetRecordAudioParameters(AudioParameters* params) const override;
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override;
int32_t SetObserver(AudioDeviceObserver* observer) const override;
int32_t GetPlayoutDevice() const override;
int32_t GetRecordingDevice() const override;
