Diffstat (limited to 'talk/media/webrtc/webrtcvoiceengine.cc')
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine.cc  | 1823
1 file changed, 715 insertions(+), 1108 deletions(-)
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index 27ca1deb2d..9192b72539 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -42,7 +42,10 @@
#include "talk/media/base/audiorenderer.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/streamparams.h"
+#include "talk/media/webrtc/webrtcmediaengine.h"
#include "talk/media/webrtc/webrtcvoe.h"
+#include "webrtc/audio/audio_sink.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/base64.h"
#include "webrtc/base/byteorder.h"
#include "webrtc/base/common.h"
@@ -52,53 +55,26 @@
#include "webrtc/base/stringutils.h"
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/common.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/field_trial.h"
+#include "webrtc/system_wrappers/include/trace.h"
namespace cricket {
namespace {
-const int kMaxNumPacketSize = 6;
-struct CodecPref {
- const char* name;
- int clockrate;
- int channels;
- int payload_type;
- bool is_multi_rate;
- int packet_sizes_ms[kMaxNumPacketSize];
-};
-// Note: keep the supported packet sizes in ascending order.
-const CodecPref kCodecPrefs[] = {
- { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } },
- { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } },
- { kIsacCodecName, 32000, 1, 104, true, { 30 } },
- // G722 should be advertised as 8000 Hz because of the RFC "bug".
- { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } },
- { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } },
- { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } },
- { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } },
- { kCnCodecName, 32000, 1, 106, false, { } },
- { kCnCodecName, 16000, 1, 105, false, { } },
- { kCnCodecName, 8000, 1, 13, false, { } },
- { kRedCodecName, 8000, 1, 127, false, { } },
- { kDtmfCodecName, 8000, 1, 126, false, { } },
-};
+const int kDefaultTraceFilter = webrtc::kTraceNone | webrtc::kTraceTerseInfo |
+ webrtc::kTraceWarning | webrtc::kTraceError |
+ webrtc::kTraceCritical;
+const int kElevatedTraceFilter = kDefaultTraceFilter | webrtc::kTraceStateInfo |
+ webrtc::kTraceInfo;
-// For Linux/Mac, using the default device is done by specifying index 0 for
-// VoE 4.0 and not -1 (which was the case for VoE 3.5).
-//
// On Windows Vista and newer, Microsoft introduced the concept of "Default
// Communications Device". This means that there are two types of default
// devices (old Wave Audio style default and Default Communications Device).
//
// On Windows systems which only support Wave Audio style default, uses either
// -1 or 0 to select the default device.
-//
-// On Windows systems which support both "Default Communication Device" and
-// old Wave Audio style default, use -1 for Default Communications Device and
-// -2 for Wave Audio style default, which is what we want to use for clips.
-// It's not clear yet whether the -2 index is handled properly on other OSes.
-
#ifdef WIN32
const int kDefaultAudioDeviceId = -1;
#else
@@ -150,6 +126,12 @@ const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump";
const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
#endif
+// Constants from voice_engine_defines.h.
+const int kMinTelephoneEventCode = 0; // RFC4733 (Section 2.3.1)
+const int kMaxTelephoneEventCode = 255;
+const int kMinTelephoneEventDuration = 100;
+const int kMaxTelephoneEventDuration = 60000; // Actual limit is 2^16
+
bool ValidateStreamParams(const StreamParams& sp) {
if (sp.ssrcs.empty()) {
LOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString();
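
The telephone-event constants added above bound the DTMF codes and durations allowed by RFC 4733 before events are handed to the send stream. A minimal illustration of the range check they make possible (IsValidTelephoneEvent is a hypothetical helper, not part of this patch; the real checks sit in the channel's DTMF path further down the file, outside this excerpt):

    // Hypothetical helper: reject out-of-range DTMF events before
    // calling SendTelephoneEvent().
    bool IsValidTelephoneEvent(int event, int duration_ms) {
      return event >= kMinTelephoneEventCode &&
             event <= kMaxTelephoneEventCode &&
             duration_ms >= kMinTelephoneEventDuration &&
             duration_ms <= kMaxTelephoneEventDuration;
    }
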
@@ -177,32 +159,6 @@ std::string ToString(const webrtc::CodecInst& codec) {
return ss.str();
}
-void LogMultiline(rtc::LoggingSeverity sev, char* text) {
- const char* delim = "\r\n";
- for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
- LOG_V(sev) << tok;
- }
-}
-
-// Severity is an integer because it comes is assumed to be from command line.
-int SeverityToFilter(int severity) {
- int filter = webrtc::kTraceNone;
- switch (severity) {
- case rtc::LS_VERBOSE:
- filter |= webrtc::kTraceAll;
- FALLTHROUGH();
- case rtc::LS_INFO:
- filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
- FALLTHROUGH();
- case rtc::LS_WARNING:
- filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
- FALLTHROUGH();
- case rtc::LS_ERROR:
- filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
- }
- return filter;
-}
-
bool IsCodec(const AudioCodec& codec, const char* ref_name) {
return (_stricmp(codec.name.c_str(), ref_name) == 0);
}
@@ -211,19 +167,9 @@ bool IsCodec(const webrtc::CodecInst& codec, const char* ref_name) {
return (_stricmp(codec.plname, ref_name) == 0);
}
-bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
- for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
- if (IsCodec(codec, kCodecPrefs[i].name) &&
- kCodecPrefs[i].clockrate == codec.plfreq) {
- return kCodecPrefs[i].is_multi_rate;
- }
- }
- return false;
-}
-
bool FindCodec(const std::vector<AudioCodec>& codecs,
- const AudioCodec& codec,
- AudioCodec* found_codec) {
+ const AudioCodec& codec,
+ AudioCodec* found_codec) {
for (const AudioCodec& c : codecs) {
if (c.Matches(codec)) {
if (found_codec != NULL) {
@@ -253,38 +199,8 @@ bool IsNackEnabled(const AudioCodec& codec) {
kParamValueEmpty));
}
-int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) {
- int selected_packet_size_ms = codec_pref.packet_sizes_ms[0];
- for (int packet_size_ms : codec_pref.packet_sizes_ms) {
- if (packet_size_ms && packet_size_ms <= ptime_ms) {
- selected_packet_size_ms = packet_size_ms;
- }
- }
- return selected_packet_size_ms;
-}
-
-// If the AudioCodec param kCodecParamPTime is set, then we will set it to codec
-// pacsize if it's valid, or we will pick the next smallest value we support.
-// TODO(Brave): Query supported packet sizes from ACM when the API is ready.
-bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) {
- for (const CodecPref& codec_pref : kCodecPrefs) {
- if ((IsCodec(*codec, codec_pref.name) &&
- codec_pref.clockrate == codec->plfreq) ||
- IsCodec(*codec, kG722CodecName)) {
- int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms);
- if (packet_size_ms) {
- // Convert unit from milli-seconds to samples.
- codec->pacsize = (codec->plfreq / 1000) * packet_size_ms;
- return true;
- }
- }
- }
- return false;
-}
-
// Return true if codec.params[feature] == "1", false otherwise.
-bool IsCodecFeatureEnabled(const AudioCodec& codec,
- const char* feature) {
+bool IsCodecFeatureEnabled(const AudioCodec& codec, const char* feature) {
int value;
return codec.GetParam(feature, &value) && value == 1;
}
@@ -351,109 +267,29 @@ void GetOpusConfig(const AudioCodec& codec, webrtc::CodecInst* voe_codec,
voe_codec->rate = GetOpusBitrate(codec, *max_playback_rate);
}
-// Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC
-// which says that G722 should be advertised as 8 kHz although it is a 16 kHz
-// codec.
-void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) {
- if (IsCodec(*voe_codec, kG722CodecName)) {
- // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine
- // has changed, and this special case is no longer needed.
- RTC_DCHECK(voe_codec->plfreq != new_plfreq);
- voe_codec->plfreq = new_plfreq;
- }
-}
-
-// Gets the default set of options applied to the engine. Historically, these
-// were supplied as a combination of flags from the channel manager (ec, agc,
-// ns, and highpass) and the rest hardcoded in InitInternal.
-AudioOptions GetDefaultEngineOptions() {
- AudioOptions options;
- options.echo_cancellation.Set(true);
- options.auto_gain_control.Set(true);
- options.noise_suppression.Set(true);
- options.highpass_filter.Set(true);
- options.stereo_swapping.Set(false);
- options.audio_jitter_buffer_max_packets.Set(50);
- options.audio_jitter_buffer_fast_accelerate.Set(false);
- options.typing_detection.Set(true);
- options.adjust_agc_delta.Set(0);
- options.experimental_agc.Set(false);
- options.extended_filter_aec.Set(false);
- options.delay_agnostic_aec.Set(false);
- options.experimental_ns.Set(false);
- options.aec_dump.Set(false);
- return options;
-}
-
-std::string GetEnableString(bool enable) {
- return enable ? "enable" : "disable";
-}
-} // namespace {
-
-WebRtcVoiceEngine::WebRtcVoiceEngine()
- : voe_wrapper_(new VoEWrapper()),
- tracing_(new VoETraceWrapper()),
- adm_(NULL),
- log_filter_(SeverityToFilter(kDefaultLogSeverity)),
- is_dumping_aec_(false) {
- Construct();
-}
-
-WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
- VoETraceWrapper* tracing)
- : voe_wrapper_(voe_wrapper),
- tracing_(tracing),
- adm_(NULL),
- log_filter_(SeverityToFilter(kDefaultLogSeverity)),
- is_dumping_aec_(false) {
- Construct();
-}
-
-void WebRtcVoiceEngine::Construct() {
- SetTraceFilter(log_filter_);
- initialized_ = false;
- LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
- SetTraceOptions("");
- if (tracing_->SetTraceCallback(this) == -1) {
- LOG_RTCERR0(SetTraceCallback);
- }
- if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
- LOG_RTCERR0(RegisterVoiceEngineObserver);
- }
- // Clear the default agc state.
- memset(&default_agc_config_, 0, sizeof(default_agc_config_));
-
- // Load our audio codec list.
- ConstructCodecs();
-
- // Load our RTP Header extensions.
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
- kRtpAudioLevelHeaderExtensionDefaultId));
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
- kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
- if (webrtc::field_trial::FindFullName("WebRTC-SendSideBwe") == "Enabled") {
- rtp_header_extensions_.push_back(RtpHeaderExtension(
- kRtpTransportSequenceNumberHeaderExtension,
- kRtpTransportSequenceNumberHeaderExtensionDefaultId));
- }
- options_ = GetDefaultEngineOptions();
+webrtc::AudioState::Config MakeAudioStateConfig(VoEWrapper* voe_wrapper) {
+ webrtc::AudioState::Config config;
+ config.voice_engine = voe_wrapper->engine();
+ return config;
}
-void WebRtcVoiceEngine::ConstructCodecs() {
- LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
- int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (GetVoeCodec(i, &voe_codec)) {
+class WebRtcVoiceCodecs final {
+ public:
+ // TODO(solenberg): Do this filtering once off-line, add a simple AudioCodec
+ // list and add a test which verifies VoE supports the listed codecs.
+ static std::vector<AudioCodec> SupportedCodecs() {
+ LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+ std::vector<AudioCodec> result;
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ // Change the sample rate of G722 to 8000 to match SDP.
+ MaybeFixupG722(&voe_codec, 8000);
// Skip uncompressed formats.
if (IsCodec(voe_codec, kL16CodecName)) {
continue;
}
const CodecPref* pref = NULL;
- for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
+ for (size_t j = 0; j < arraysize(kCodecPrefs); ++j) {
if (IsCodec(voe_codec, kCodecPrefs[j].name) &&
kCodecPrefs[j].clockrate == voe_codec.plfreq &&
kCodecPrefs[j].channels == voe_codec.channels) {
@@ -465,9 +301,10 @@ void WebRtcVoiceEngine::ConstructCodecs() {
if (pref) {
// Use the payload type that we've configured in our pref table;
// use the offset in our pref table to determine the sort order.
- AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
- voe_codec.rate, voe_codec.channels,
- ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
+ AudioCodec codec(
+ pref->payload_type, voe_codec.plname, voe_codec.plfreq,
+ voe_codec.rate, voe_codec.channels,
+ static_cast<int>(arraysize(kCodecPrefs)) - (pref - kCodecPrefs));
LOG(LS_INFO) << ToString(codec);
if (IsCodec(codec, kIsacCodecName)) {
// Indicate auto-bitrate in signaling.
@@ -488,40 +325,183 @@ void WebRtcVoiceEngine::ConstructCodecs() {
// TODO(hellner): Add ptime, sprop-stereo, and stereo
// when they can be set to values other than the default.
}
- codecs_.push_back(codec);
+ result.push_back(codec);
} else {
LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
}
}
+ // Make sure they are in local preference order.
+ std::sort(result.begin(), result.end(), &AudioCodec::Preferable);
+ return result;
+ }
+
+ static bool ToCodecInst(const AudioCodec& in,
+ webrtc::CodecInst* out) {
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ // Change the sample rate of G722 to 8000 to match SDP.
+ MaybeFixupG722(&voe_codec, 8000);
+ AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
+ voe_codec.rate, voe_codec.channels, 0);
+ bool multi_rate = IsCodecMultiRate(voe_codec);
+ // Allow arbitrary rates for ISAC to be specified.
+ if (multi_rate) {
+ // Set codec.bitrate to 0 so the check for codec.Matches() passes.
+ codec.bitrate = 0;
+ }
+ if (codec.Matches(in)) {
+ if (out) {
+ // Fixup the payload type.
+ voe_codec.pltype = in.id;
+
+ // Set bitrate if specified.
+ if (multi_rate && in.bitrate != 0) {
+ voe_codec.rate = in.bitrate;
+ }
+
+ // Reset G722 sample rate to 16000 to match WebRTC.
+ MaybeFixupG722(&voe_codec, 16000);
+
+ // Apply codec-specific settings.
+ if (IsCodec(codec, kIsacCodecName)) {
+ // If ISAC and an explicit bitrate is not specified,
+ // enable auto bitrate adjustment.
+ voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
+ }
+ *out = voe_codec;
+ }
+ return true;
+ }
+ }
+ return false;
}
- // Make sure they are in local preference order.
- std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
-}
-bool WebRtcVoiceEngine::GetVoeCodec(int index, webrtc::CodecInst* codec) {
- if (voe_wrapper_->codec()->GetCodec(index, *codec) == -1) {
+ static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
+ for (size_t i = 0; i < arraysize(kCodecPrefs); ++i) {
+ if (IsCodec(codec, kCodecPrefs[i].name) &&
+ kCodecPrefs[i].clockrate == codec.plfreq) {
+ return kCodecPrefs[i].is_multi_rate;
+ }
+ }
return false;
}
- // Change the sample rate of G722 to 8000 to match SDP.
- MaybeFixupG722(codec, 8000);
- return true;
+
+ // If the AudioCodec param kCodecParamPTime is set, then we will set it to
+ // codec pacsize if it's valid, or we will pick the next smallest value we
+ // support.
+ // TODO(Brave): Query supported packet sizes from ACM when the API is ready.
+ static bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) {
+ for (const CodecPref& codec_pref : kCodecPrefs) {
+ if ((IsCodec(*codec, codec_pref.name) &&
+ codec_pref.clockrate == codec->plfreq) ||
+ IsCodec(*codec, kG722CodecName)) {
+ int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms);
+ if (packet_size_ms) {
+ // Convert unit from milli-seconds to samples.
+ codec->pacsize = (codec->plfreq / 1000) * packet_size_ms;
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private:
+ static const int kMaxNumPacketSize = 6;
+ struct CodecPref {
+ const char* name;
+ int clockrate;
+ size_t channels;
+ int payload_type;
+ bool is_multi_rate;
+ int packet_sizes_ms[kMaxNumPacketSize];
+ };
+ // Note: keep the supported packet sizes in ascending order.
+ static const CodecPref kCodecPrefs[12];
+
+ static int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) {
+ int selected_packet_size_ms = codec_pref.packet_sizes_ms[0];
+ for (int packet_size_ms : codec_pref.packet_sizes_ms) {
+ if (packet_size_ms && packet_size_ms <= ptime_ms) {
+ selected_packet_size_ms = packet_size_ms;
+ }
+ }
+ return selected_packet_size_ms;
+ }
+
+ // Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC
+ // which says that G722 should be advertised as 8 kHz although it is a 16 kHz
+ // codec.
+ static void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) {
+ if (IsCodec(*voe_codec, kG722CodecName)) {
+ // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine
+ // has changed, and this special case is no longer needed.
+ RTC_DCHECK(voe_codec->plfreq != new_plfreq);
+ voe_codec->plfreq = new_plfreq;
+ }
+ }
+};
+
+const WebRtcVoiceCodecs::CodecPref WebRtcVoiceCodecs::kCodecPrefs[12] = {
+ { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } },
+ { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } },
+ { kIsacCodecName, 32000, 1, 104, true, { 30 } },
+ // G722 should be advertised as 8000 Hz because of the RFC "bug".
+ { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } },
+ { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } },
+ { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } },
+ { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } },
+ { kCnCodecName, 32000, 1, 106, false, { } },
+ { kCnCodecName, 16000, 1, 105, false, { } },
+ { kCnCodecName, 8000, 1, 13, false, { } },
+ { kRedCodecName, 8000, 1, 127, false, { } },
+ { kDtmfCodecName, 8000, 1, 126, false, { } },
+};
+} // namespace {
+
+bool WebRtcVoiceEngine::ToCodecInst(const AudioCodec& in,
+ webrtc::CodecInst* out) {
+ return WebRtcVoiceCodecs::ToCodecInst(in, out);
+}
+
+WebRtcVoiceEngine::WebRtcVoiceEngine()
+ : voe_wrapper_(new VoEWrapper()),
+ audio_state_(webrtc::AudioState::Create(MakeAudioStateConfig(voe()))) {
+ Construct();
+}
+
+WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper)
+ : voe_wrapper_(voe_wrapper) {
+ Construct();
+}
+
+void WebRtcVoiceEngine::Construct() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
+
+ signal_thread_checker_.DetachFromThread();
+ std::memset(&default_agc_config_, 0, sizeof(default_agc_config_));
+ voe_config_.Set<webrtc::VoicePacing>(new webrtc::VoicePacing(true));
+
+ webrtc::Trace::set_level_filter(kDefaultTraceFilter);
+ webrtc::Trace::SetTraceCallback(this);
+
+ // Load our audio codec list.
+ codecs_ = WebRtcVoiceCodecs::SupportedCodecs();
}
WebRtcVoiceEngine::~WebRtcVoiceEngine() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
- if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
- LOG_RTCERR0(DeRegisterVoiceEngineObserver);
- }
if (adm_) {
voe_wrapper_.reset();
adm_->Release();
adm_ = NULL;
}
-
- tracing_->SetTraceCallback(NULL);
+ webrtc::Trace::SetTraceCallback(nullptr);
}
bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(worker_thread == rtc::Thread::Current());
LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
bool res = InitInternal();
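
The file-local WebRtcVoiceCodecs helper introduced above replaces the old per-engine ConstructCodecs()/FindWebRtcCodec() pair with static functions backed by RentACodec::Database(); the engine exposes it through codecs() and ToCodecInst(). A rough usage sketch, where engine is assumed to be a WebRtcVoiceEngine* (illustrative only):

    // The static codec list is built once in Construct(); ToCodecInst()
    // translates a negotiated AudioCodec into the CodecInst VoE expects.
    const std::vector<cricket::AudioCodec>& codecs = engine->codecs();
    webrtc::CodecInst voe_codec;
    if (engine->ToCodecInst(codecs[0], &voe_codec)) {
      // voe_codec now carries the codec's payload type, clock rate and bitrate.
    }
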
@@ -535,59 +515,37 @@ bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
}
bool WebRtcVoiceEngine::InitInternal() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Temporarily turn logging level up for the Init call
- int old_filter = log_filter_;
- int extended_filter = log_filter_ | SeverityToFilter(rtc::LS_INFO);
- SetTraceFilter(extended_filter);
- SetTraceOptions("");
-
- // Init WebRtc VoiceEngine.
+ webrtc::Trace::set_level_filter(kElevatedTraceFilter);
+ LOG(LS_INFO) << webrtc::VoiceEngine::GetVersionString();
if (voe_wrapper_->base()->Init(adm_) == -1) {
LOG_RTCERR0_EX(Init, voe_wrapper_->error());
- SetTraceFilter(old_filter);
return false;
}
-
- SetTraceFilter(old_filter);
- SetTraceOptions(log_options_);
-
- // Log the VoiceEngine version info
- char buffer[1024] = "";
- voe_wrapper_->base()->GetVersion(buffer);
- LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
- LogMultiline(rtc::LS_INFO, buffer);
+ webrtc::Trace::set_level_filter(kDefaultTraceFilter);
// Save the default AGC configuration settings. This must happen before
- // calling SetOptions or the default will be overwritten.
+ // calling ApplyOptions or the default will be overwritten.
if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
LOG_RTCERR0(GetAgcConfig);
return false;
}
- // Set defaults for options, so that ApplyOptions applies them explicitly
- // when we clear option (channel) overrides. External clients can still
- // modify the defaults via SetOptions (on the media engine).
- if (!SetOptions(GetDefaultEngineOptions())) {
- return false;
- }
-
// Print our codec list again for the call diagnostic log
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
for (const AudioCodec& codec : codecs_) {
LOG(LS_INFO) << ToString(codec);
}
- // Disable the DTMF playout when a tone is sent.
- // PlayDtmfTone will be used if local playout is needed.
- if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
- LOG_RTCERR1(SetDtmfFeedbackStatus, false);
- }
+ SetDefaultDevices();
initialized_ = true;
return true;
}
void WebRtcVoiceEngine::Terminate() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
initialized_ = false;
@@ -596,62 +554,81 @@ void WebRtcVoiceEngine::Terminate() {
voe_wrapper_->base()->Terminate();
}
+rtc::scoped_refptr<webrtc::AudioState>
+ WebRtcVoiceEngine::GetAudioState() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return audio_state_;
+}
+
VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call,
const AudioOptions& options) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return new WebRtcVoiceMediaChannel(this, options, call);
}
-bool WebRtcVoiceEngine::SetOptions(const AudioOptions& options) {
- if (!ApplyOptions(options)) {
- return false;
- }
- options_ = options;
- return true;
-}
-
-// AudioOptions defaults are set in InitInternal (for options with corresponding
-// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "ApplyOptions: " << options_in.ToString();
- AudioOptions options = options_in; // The options are modified below.
+
+ // Default engine options.
+ AudioOptions options;
+ options.echo_cancellation = rtc::Optional<bool>(true);
+ options.auto_gain_control = rtc::Optional<bool>(true);
+ options.noise_suppression = rtc::Optional<bool>(true);
+ options.highpass_filter = rtc::Optional<bool>(true);
+ options.stereo_swapping = rtc::Optional<bool>(false);
+ options.audio_jitter_buffer_max_packets = rtc::Optional<int>(50);
+ options.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(false);
+ options.typing_detection = rtc::Optional<bool>(true);
+ options.adjust_agc_delta = rtc::Optional<int>(0);
+ options.experimental_agc = rtc::Optional<bool>(false);
+ options.extended_filter_aec = rtc::Optional<bool>(false);
+ options.delay_agnostic_aec = rtc::Optional<bool>(false);
+ options.experimental_ns = rtc::Optional<bool>(false);
+ options.aec_dump = rtc::Optional<bool>(false);
+
+ // Apply any given options on top.
+ options.SetAll(options_in);
+
// kEcConference is AEC with high suppression.
webrtc::EcModes ec_mode = webrtc::kEcConference;
webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
- bool aecm_comfort_noise = false;
- if (options.aecm_generate_comfort_noise.Get(&aecm_comfort_noise)) {
+ if (options.aecm_generate_comfort_noise) {
LOG(LS_VERBOSE) << "Comfort noise explicitly set to "
- << aecm_comfort_noise << " (default is false).";
+ << *options.aecm_generate_comfort_noise
+ << " (default is false).";
}
-#if defined(IOS)
+#if defined(WEBRTC_IOS)
// On iOS, VPIO provides built-in EC and AGC.
- options.echo_cancellation.Set(false);
- options.auto_gain_control.Set(false);
+ options.echo_cancellation = rtc::Optional<bool>(false);
+ options.auto_gain_control = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Always disable AEC and AGC on iOS. Use built-in instead.";
#elif defined(ANDROID)
ec_mode = webrtc::kEcAecm;
#endif
-#if defined(IOS) || defined(ANDROID)
+#if defined(WEBRTC_IOS) || defined(ANDROID)
// Set the AGC mode for iOS as well despite disabling it above, to avoid
// unsupported configuration errors from webrtc.
agc_mode = webrtc::kAgcFixedDigital;
- options.typing_detection.Set(false);
- options.experimental_agc.Set(false);
- options.extended_filter_aec.Set(false);
- options.experimental_ns.Set(false);
+ options.typing_detection = rtc::Optional<bool>(false);
+ options.experimental_agc = rtc::Optional<bool>(false);
+ options.extended_filter_aec = rtc::Optional<bool>(false);
+ options.experimental_ns = rtc::Optional<bool>(false);
#endif
// Delay Agnostic AEC automatically turns on EC if not set except on iOS
// where the feature is not supported.
bool use_delay_agnostic_aec = false;
-#if !defined(IOS)
- if (options.delay_agnostic_aec.Get(&use_delay_agnostic_aec)) {
+#if !defined(WEBRTC_IOS)
+ if (options.delay_agnostic_aec) {
+ use_delay_agnostic_aec = *options.delay_agnostic_aec;
if (use_delay_agnostic_aec) {
- options.echo_cancellation.Set(true);
- options.extended_filter_aec.Set(true);
+ options.echo_cancellation = rtc::Optional<bool>(true);
+ options.extended_filter_aec = rtc::Optional<bool>(true);
ec_mode = webrtc::kEcConference;
}
}
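
ApplyOptions now starts from a complete set of engine defaults expressed as rtc::Optional values and overlays the caller's options with SetAll(), so every option is either explicitly requested or falls back to the default. A minimal sketch of that overlay pattern (values chosen only for illustration):

    cricket::AudioOptions defaults;
    defaults.echo_cancellation = rtc::Optional<bool>(true);   // engine default
    defaults.typing_detection = rtc::Optional<bool>(true);

    cricket::AudioOptions requested;
    requested.typing_detection = rtc::Optional<bool>(false);  // caller override

    // SetAll() copies only the options that are set in |requested|.
    defaults.SetAll(requested);
    // Result: echo_cancellation stays true, typing_detection becomes false.
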
@@ -659,8 +636,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
- bool echo_cancellation = false;
- if (options.echo_cancellation.Get(&echo_cancellation)) {
+ if (options.echo_cancellation) {
// Check if platform supports built-in EC. Currently only supported on
// Android and in combination with Java based audio layer.
// TODO(henrika): investigate possibility to support built-in EC also
@@ -671,63 +647,61 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
// overriding it. Enable/Disable it according to the echo_cancellation
// audio option.
const bool enable_built_in_aec =
- echo_cancellation && !use_delay_agnostic_aec;
+ *options.echo_cancellation && !use_delay_agnostic_aec;
if (voe_wrapper_->hw()->EnableBuiltInAEC(enable_built_in_aec) == 0 &&
enable_built_in_aec) {
// Disable internal software EC if built-in EC is enabled,
// i.e., replace the software EC with the built-in EC.
- options.echo_cancellation.Set(false);
- echo_cancellation = false;
+ options.echo_cancellation = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling EC since built-in EC will be used instead";
}
}
- if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
- LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
+ if (voep->SetEcStatus(*options.echo_cancellation, ec_mode) == -1) {
+ LOG_RTCERR2(SetEcStatus, *options.echo_cancellation, ec_mode);
return false;
} else {
- LOG(LS_INFO) << "Echo control set to " << echo_cancellation
+ LOG(LS_INFO) << "Echo control set to " << *options.echo_cancellation
<< " with mode " << ec_mode;
}
#if !defined(ANDROID)
// TODO(ajm): Remove the error return on Android from webrtc.
- if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
- LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
+ if (voep->SetEcMetricsStatus(*options.echo_cancellation) == -1) {
+ LOG_RTCERR1(SetEcMetricsStatus, *options.echo_cancellation);
return false;
}
#endif
if (ec_mode == webrtc::kEcAecm) {
- if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
- LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
+ bool cn = options.aecm_generate_comfort_noise.value_or(false);
+ if (voep->SetAecmMode(aecm_mode, cn) != 0) {
+ LOG_RTCERR2(SetAecmMode, aecm_mode, cn);
return false;
}
}
}
- bool auto_gain_control = false;
- if (options.auto_gain_control.Get(&auto_gain_control)) {
+ if (options.auto_gain_control) {
const bool built_in_agc = voe_wrapper_->hw()->BuiltInAGCIsAvailable();
if (built_in_agc) {
- if (voe_wrapper_->hw()->EnableBuiltInAGC(auto_gain_control) == 0 &&
- auto_gain_control) {
+ if (voe_wrapper_->hw()->EnableBuiltInAGC(*options.auto_gain_control) ==
+ 0 &&
+ *options.auto_gain_control) {
// Disable internal software AGC if built-in AGC is enabled,
// i.e., replace the software AGC with the built-in AGC.
- options.auto_gain_control.Set(false);
- auto_gain_control = false;
+ options.auto_gain_control = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling AGC since built-in AGC will be used instead";
}
}
- if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
- LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
+ if (voep->SetAgcStatus(*options.auto_gain_control, agc_mode) == -1) {
+ LOG_RTCERR2(SetAgcStatus, *options.auto_gain_control, agc_mode);
return false;
} else {
- LOG(LS_INFO) << "Auto gain set to " << auto_gain_control << " with mode "
- << agc_mode;
+ LOG(LS_INFO) << "Auto gain set to " << *options.auto_gain_control
+ << " with mode " << agc_mode;
}
}
- if (options.tx_agc_target_dbov.IsSet() ||
- options.tx_agc_digital_compression_gain.IsSet() ||
- options.tx_agc_limiter.IsSet()) {
+ if (options.tx_agc_target_dbov || options.tx_agc_digital_compression_gain ||
+ options.tx_agc_limiter) {
// Override default_agc_config_. Generally, an unset option means "leave
// the VoE bits alone" in this function, so we want whatever is set to be
// stored as the new "default". If we didn't, then setting e.g.
@@ -736,15 +710,13 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
// Also, if we don't update default_agc_config_, then adjust_agc_delta
// would be an offset from the original values, and not whatever was set
// explicitly.
- default_agc_config_.targetLeveldBOv =
- options.tx_agc_target_dbov.GetWithDefaultIfUnset(
- default_agc_config_.targetLeveldBOv);
+ default_agc_config_.targetLeveldBOv = options.tx_agc_target_dbov.value_or(
+ default_agc_config_.targetLeveldBOv);
default_agc_config_.digitalCompressionGaindB =
- options.tx_agc_digital_compression_gain.GetWithDefaultIfUnset(
+ options.tx_agc_digital_compression_gain.value_or(
default_agc_config_.digitalCompressionGaindB);
default_agc_config_.limiterEnable =
- options.tx_agc_limiter.GetWithDefaultIfUnset(
- default_agc_config_.limiterEnable);
+ options.tx_agc_limiter.value_or(default_agc_config_.limiterEnable);
if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) {
LOG_RTCERR3(SetAgcConfig,
default_agc_config_.targetLeveldBOv,
@@ -754,84 +726,79 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
}
}
- bool noise_suppression = false;
- if (options.noise_suppression.Get(&noise_suppression)) {
+ if (options.noise_suppression) {
const bool built_in_ns = voe_wrapper_->hw()->BuiltInNSIsAvailable();
if (built_in_ns) {
- if (voe_wrapper_->hw()->EnableBuiltInNS(noise_suppression) == 0 &&
- noise_suppression) {
+ if (voe_wrapper_->hw()->EnableBuiltInNS(*options.noise_suppression) ==
+ 0 &&
+ *options.noise_suppression) {
// Disable internal software NS if built-in NS is enabled,
// i.e., replace the software NS with the built-in NS.
- options.noise_suppression.Set(false);
- noise_suppression = false;
+ options.noise_suppression = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling NS since built-in NS will be used instead";
}
}
- if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
- LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
+ if (voep->SetNsStatus(*options.noise_suppression, ns_mode) == -1) {
+ LOG_RTCERR2(SetNsStatus, *options.noise_suppression, ns_mode);
return false;
} else {
- LOG(LS_INFO) << "Noise suppression set to " << noise_suppression
+ LOG(LS_INFO) << "Noise suppression set to " << *options.noise_suppression
<< " with mode " << ns_mode;
}
}
- bool highpass_filter;
- if (options.highpass_filter.Get(&highpass_filter)) {
- LOG(LS_INFO) << "High pass filter enabled? " << highpass_filter;
- if (voep->EnableHighPassFilter(highpass_filter) == -1) {
- LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
+ if (options.highpass_filter) {
+ LOG(LS_INFO) << "High pass filter enabled? " << *options.highpass_filter;
+ if (voep->EnableHighPassFilter(*options.highpass_filter) == -1) {
+ LOG_RTCERR1(SetHighpassFilterStatus, *options.highpass_filter);
return false;
}
}
- bool stereo_swapping;
- if (options.stereo_swapping.Get(&stereo_swapping)) {
- LOG(LS_INFO) << "Stereo swapping enabled? " << stereo_swapping;
- voep->EnableStereoChannelSwapping(stereo_swapping);
- if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
- LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
+ if (options.stereo_swapping) {
+ LOG(LS_INFO) << "Stereo swapping enabled? " << *options.stereo_swapping;
+ voep->EnableStereoChannelSwapping(*options.stereo_swapping);
+ if (voep->IsStereoChannelSwappingEnabled() != *options.stereo_swapping) {
+ LOG_RTCERR1(EnableStereoChannelSwapping, *options.stereo_swapping);
return false;
}
}
- int audio_jitter_buffer_max_packets;
- if (options.audio_jitter_buffer_max_packets.Get(
- &audio_jitter_buffer_max_packets)) {
- LOG(LS_INFO) << "NetEq capacity is " << audio_jitter_buffer_max_packets;
+ if (options.audio_jitter_buffer_max_packets) {
+ LOG(LS_INFO) << "NetEq capacity is "
+ << *options.audio_jitter_buffer_max_packets;
voe_config_.Set<webrtc::NetEqCapacityConfig>(
- new webrtc::NetEqCapacityConfig(audio_jitter_buffer_max_packets));
+ new webrtc::NetEqCapacityConfig(
+ *options.audio_jitter_buffer_max_packets));
}
- bool audio_jitter_buffer_fast_accelerate;
- if (options.audio_jitter_buffer_fast_accelerate.Get(
- &audio_jitter_buffer_fast_accelerate)) {
- LOG(LS_INFO) << "NetEq fast mode? " << audio_jitter_buffer_fast_accelerate;
+ if (options.audio_jitter_buffer_fast_accelerate) {
+ LOG(LS_INFO) << "NetEq fast mode? "
+ << *options.audio_jitter_buffer_fast_accelerate;
voe_config_.Set<webrtc::NetEqFastAccelerate>(
- new webrtc::NetEqFastAccelerate(audio_jitter_buffer_fast_accelerate));
+ new webrtc::NetEqFastAccelerate(
+ *options.audio_jitter_buffer_fast_accelerate));
}
- bool typing_detection;
- if (options.typing_detection.Get(&typing_detection)) {
- LOG(LS_INFO) << "Typing detection is enabled? " << typing_detection;
- if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
+ if (options.typing_detection) {
+ LOG(LS_INFO) << "Typing detection is enabled? "
+ << *options.typing_detection;
+ if (voep->SetTypingDetectionStatus(*options.typing_detection) == -1) {
// In case of error, log the info and continue
- LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
+ LOG_RTCERR1(SetTypingDetectionStatus, *options.typing_detection);
}
}
- int adjust_agc_delta;
- if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
- LOG(LS_INFO) << "Adjust agc delta is " << adjust_agc_delta;
- if (!AdjustAgcLevel(adjust_agc_delta)) {
+ if (options.adjust_agc_delta) {
+ LOG(LS_INFO) << "Adjust agc delta is " << *options.adjust_agc_delta;
+ if (!AdjustAgcLevel(*options.adjust_agc_delta)) {
return false;
}
}
- bool aec_dump;
- if (options.aec_dump.Get(&aec_dump)) {
- LOG(LS_INFO) << "Aec dump is enabled? " << aec_dump;
- if (aec_dump)
+ if (options.aec_dump) {
+ LOG(LS_INFO) << "Aec dump is enabled? " << *options.aec_dump;
+ if (*options.aec_dump)
StartAecDump(kAecDumpByAudioOptionFilename);
else
StopAecDump();
@@ -839,28 +806,30 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
webrtc::Config config;
- delay_agnostic_aec_.SetFrom(options.delay_agnostic_aec);
- bool delay_agnostic_aec;
- if (delay_agnostic_aec_.Get(&delay_agnostic_aec)) {
- LOG(LS_INFO) << "Delay agnostic aec is enabled? " << delay_agnostic_aec;
+ if (options.delay_agnostic_aec)
+ delay_agnostic_aec_ = options.delay_agnostic_aec;
+ if (delay_agnostic_aec_) {
+ LOG(LS_INFO) << "Delay agnostic aec is enabled? " << *delay_agnostic_aec_;
config.Set<webrtc::DelayAgnostic>(
- new webrtc::DelayAgnostic(delay_agnostic_aec));
+ new webrtc::DelayAgnostic(*delay_agnostic_aec_));
}
- extended_filter_aec_.SetFrom(options.extended_filter_aec);
- bool extended_filter;
- if (extended_filter_aec_.Get(&extended_filter)) {
- LOG(LS_INFO) << "Extended filter aec is enabled? " << extended_filter;
+ if (options.extended_filter_aec) {
+ extended_filter_aec_ = options.extended_filter_aec;
+ }
+ if (extended_filter_aec_) {
+ LOG(LS_INFO) << "Extended filter aec is enabled? " << *extended_filter_aec_;
config.Set<webrtc::ExtendedFilter>(
- new webrtc::ExtendedFilter(extended_filter));
+ new webrtc::ExtendedFilter(*extended_filter_aec_));
}
- experimental_ns_.SetFrom(options.experimental_ns);
- bool experimental_ns;
- if (experimental_ns_.Get(&experimental_ns)) {
- LOG(LS_INFO) << "Experimental ns is enabled? " << experimental_ns;
+ if (options.experimental_ns) {
+ experimental_ns_ = options.experimental_ns;
+ }
+ if (experimental_ns_) {
+ LOG(LS_INFO) << "Experimental ns is enabled? " << *experimental_ns_;
config.Set<webrtc::ExperimentalNs>(
- new webrtc::ExperimentalNs(experimental_ns));
+ new webrtc::ExperimentalNs(*experimental_ns_));
}
// We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
@@ -870,167 +839,58 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
audioproc->SetExtraOptions(config);
}
- uint32_t recording_sample_rate;
- if (options.recording_sample_rate.Get(&recording_sample_rate)) {
- LOG(LS_INFO) << "Recording sample rate is " << recording_sample_rate;
- if (voe_wrapper_->hw()->SetRecordingSampleRate(recording_sample_rate)) {
- LOG_RTCERR1(SetRecordingSampleRate, recording_sample_rate);
+ if (options.recording_sample_rate) {
+ LOG(LS_INFO) << "Recording sample rate is "
+ << *options.recording_sample_rate;
+ if (voe_wrapper_->hw()->SetRecordingSampleRate(
+ *options.recording_sample_rate)) {
+ LOG_RTCERR1(SetRecordingSampleRate, *options.recording_sample_rate);
}
}
- uint32_t playout_sample_rate;
- if (options.playout_sample_rate.Get(&playout_sample_rate)) {
- LOG(LS_INFO) << "Playout sample rate is " << playout_sample_rate;
- if (voe_wrapper_->hw()->SetPlayoutSampleRate(playout_sample_rate)) {
- LOG_RTCERR1(SetPlayoutSampleRate, playout_sample_rate);
+ if (options.playout_sample_rate) {
+ LOG(LS_INFO) << "Playout sample rate is " << *options.playout_sample_rate;
+ if (voe_wrapper_->hw()->SetPlayoutSampleRate(
+ *options.playout_sample_rate)) {
+ LOG_RTCERR1(SetPlayoutSampleRate, *options.playout_sample_rate);
}
}
return true;
}
-// TODO(juberti): Refactor this so that the core logic can be used to set the
-// soundclip device. At that time, reinstate the soundclip pause/resume code.
-bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
- const Device* out_device) {
-#if !defined(IOS)
- int in_id = in_device ? rtc::FromString<int>(in_device->id) :
- kDefaultAudioDeviceId;
- int out_id = out_device ? rtc::FromString<int>(out_device->id) :
- kDefaultAudioDeviceId;
- // The device manager uses -1 as the default device, which was the case for
- // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
-#ifndef WIN32
- if (-1 == in_id) {
- in_id = kDefaultAudioDeviceId;
- }
- if (-1 == out_id) {
- out_id = kDefaultAudioDeviceId;
- }
-#endif
-
- std::string in_name = (in_id != kDefaultAudioDeviceId) ?
- in_device->name : "Default device";
- std::string out_name = (out_id != kDefaultAudioDeviceId) ?
- out_device->name : "Default device";
- LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
- << ") and speaker to (id=" << out_id << ", name=" << out_name
- << ")";
+void WebRtcVoiceEngine::SetDefaultDevices() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+#if !defined(WEBRTC_IOS)
+ int in_id = kDefaultAudioDeviceId;
+ int out_id = kDefaultAudioDeviceId;
+ LOG(LS_INFO) << "Setting microphone to (id=" << in_id
+ << ") and speaker to (id=" << out_id << ")";
- // Must also pause all audio playback and capture.
bool ret = true;
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- if (!channel->PausePlayout()) {
- LOG(LS_WARNING) << "Failed to pause playout";
- ret = false;
- }
- if (!channel->PauseSend()) {
- LOG(LS_WARNING) << "Failed to pause send";
- ret = false;
- }
- }
-
- // Find the recording device id in VoiceEngine and set recording device.
- if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
+ if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
+ LOG_RTCERR1(SetRecordingDevice, in_id);
ret = false;
}
- if (ret) {
- if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
- LOG_RTCERR2(SetRecordingDevice, in_name, in_id);
- ret = false;
- }
- webrtc::AudioProcessing* ap = voe()->base()->audio_processing();
- if (ap)
- ap->Initialize();
+ webrtc::AudioProcessing* ap = voe()->base()->audio_processing();
+ if (ap) {
+ ap->Initialize();
}
- // Find the playout device id in VoiceEngine and set playout device.
- if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
- LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
+ if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
+ LOG_RTCERR1(SetPlayoutDevice, out_id);
ret = false;
}
- if (ret) {
- if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
- LOG_RTCERR2(SetPlayoutDevice, out_name, out_id);
- ret = false;
- }
- }
-
- // Resume all audio playback and capture.
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- if (!channel->ResumePlayout()) {
- LOG(LS_WARNING) << "Failed to resume playout";
- ret = false;
- }
- if (!channel->ResumeSend()) {
- LOG(LS_WARNING) << "Failed to resume send";
- ret = false;
- }
- }
if (ret) {
- LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
- << ") and speaker to (id="<< out_id << " name=" << out_name
- << ")";
+ LOG(LS_INFO) << "Set microphone to (id=" << in_id
+ << ") and speaker to (id=" << out_id << ")";
}
-
- return ret;
-#else
- return true;
-#endif // !IOS
-}
-
-bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
- bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
- // In Linux, VoiceEngine uses the same device dev_id as the device manager.
-#if defined(LINUX) || defined(ANDROID)
- *rtc_id = dev_id;
- return true;
-#else
- // In Windows and Mac, we need to find the VoiceEngine device id by name
- // unless the input dev_id is the default device id.
- if (kDefaultAudioDeviceId == dev_id) {
- *rtc_id = dev_id;
- return true;
- }
-
- // Get the number of VoiceEngine audio devices.
- int count = 0;
- if (is_input) {
- if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
- LOG_RTCERR0(GetNumOfRecordingDevices);
- return false;
- }
- } else {
- if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
- LOG_RTCERR0(GetNumOfPlayoutDevices);
- return false;
- }
- }
-
- for (int i = 0; i < count; ++i) {
- char name[128];
- char guid[128];
- if (is_input) {
- voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
- LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
- } else {
- voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
- LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
- }
-
- std::string webrtc_name(name);
- if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
- *rtc_id = i;
- return true;
- }
- }
- LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
- return false;
-#endif
+#endif // !WEBRTC_IOS
}
bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
unsigned int ulevel;
if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
LOG_RTCERR1(GetSpeakerVolume, level);
@@ -1041,6 +901,7 @@ bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
}
bool WebRtcVoiceEngine::SetOutputVolume(int level) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(level >= 0 && level <= 255);
if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
LOG_RTCERR1(SetSpeakerVolume, level);
@@ -1050,136 +911,36 @@ bool WebRtcVoiceEngine::SetOutputVolume(int level) {
}
int WebRtcVoiceEngine::GetInputLevel() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
unsigned int ulevel;
return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
static_cast<int>(ulevel) : -1;
}
const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
return codecs_;
}
-bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
- return FindWebRtcCodec(in, NULL);
-}
-
-// Get the VoiceEngine codec that matches |in|, with the supplied settings.
-bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
- webrtc::CodecInst* out) {
- int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (GetVoeCodec(i, &voe_codec)) {
- AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
- voe_codec.rate, voe_codec.channels, 0);
- bool multi_rate = IsCodecMultiRate(voe_codec);
- // Allow arbitrary rates for ISAC to be specified.
- if (multi_rate) {
- // Set codec.bitrate to 0 so the check for codec.Matches() passes.
- codec.bitrate = 0;
- }
- if (codec.Matches(in)) {
- if (out) {
- // Fixup the payload type.
- voe_codec.pltype = in.id;
-
- // Set bitrate if specified.
- if (multi_rate && in.bitrate != 0) {
- voe_codec.rate = in.bitrate;
- }
-
- // Reset G722 sample rate to 16000 to match WebRTC.
- MaybeFixupG722(&voe_codec, 16000);
-
- // Apply codec-specific settings.
- if (IsCodec(codec, kIsacCodecName)) {
- // If ISAC and an explicit bitrate is not specified,
- // enable auto bitrate adjustment.
- voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
- }
- *out = voe_codec;
- }
- return true;
- }
- }
- }
- return false;
-}
-const std::vector<RtpHeaderExtension>&
-WebRtcVoiceEngine::rtp_header_extensions() const {
- return rtp_header_extensions_;
-}
-
-void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
- // if min_sev == -1, we keep the current log level.
- if (min_sev >= 0) {
- SetTraceFilter(SeverityToFilter(min_sev));
- }
- log_options_ = filter;
- SetTraceOptions(initialized_ ? log_options_ : "");
+RtpCapabilities WebRtcVoiceEngine::GetCapabilities() const {
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RtpCapabilities capabilities;
+ capabilities.header_extensions.push_back(RtpHeaderExtension(
+ kRtpAudioLevelHeaderExtension, kRtpAudioLevelHeaderExtensionDefaultId));
+ capabilities.header_extensions.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
+ kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
+ return capabilities;
}
int WebRtcVoiceEngine::GetLastEngineError() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->error();
}
-void WebRtcVoiceEngine::SetTraceFilter(int filter) {
- log_filter_ = filter;
- tracing_->SetTraceFilter(filter);
-}
-
-// We suppport three different logging settings for VoiceEngine:
-// 1. Observer callback that goes into talk diagnostic logfile.
-// Use --logfile and --loglevel
-//
-// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
-// Use --voice_loglevel --voice_logfilter "tracefile file_name"
-//
-// 3. EC log and dump for debugging QualityEngine.
-// Use --voice_loglevel --voice_logfilter "recordEC file_name"
-//
-// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
-// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
-void WebRtcVoiceEngine::SetTraceOptions(const std::string& options) {
- // Set encrypted trace file.
- std::vector<std::string> opts;
- rtc::tokenize(options, ' ', '"', '"', &opts);
- std::vector<std::string>::iterator tracefile =
- std::find(opts.begin(), opts.end(), "tracefile");
- if (tracefile != opts.end() && ++tracefile != opts.end()) {
- // Write encrypted debug output (at same loglevel) to file
- // EncryptedTraceFile no longer supported.
- if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
- LOG_RTCERR1(SetTraceFile, *tracefile);
- }
- }
-
- // Allow trace options to override the trace filter. We default
- // it to log_filter_ (as a translation of libjingle log levels)
- // elsewhere, but this allows clients to explicitly set webrtc
- // log levels.
- std::vector<std::string>::iterator tracefilter =
- std::find(opts.begin(), opts.end(), "tracefilter");
- if (tracefilter != opts.end() && ++tracefilter != opts.end()) {
- if (!tracing_->SetTraceFilter(rtc::FromString<int>(*tracefilter))) {
- LOG_RTCERR1(SetTraceFilter, *tracefilter);
- }
- }
-
- // Set AEC dump file
- std::vector<std::string>::iterator recordEC =
- std::find(opts.begin(), opts.end(), "recordEC");
- if (recordEC != opts.end()) {
- ++recordEC;
- if (recordEC != opts.end())
- StartAecDump(recordEC->c_str());
- else
- StopAecDump();
- }
-}
-
void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
int length) {
+ // Note: This callback can happen on any thread!
rtc::LoggingSeverity sev = rtc::LS_VERBOSE;
if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
sev = rtc::LS_ERROR;
@@ -1201,34 +962,24 @@ void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
}
}
-void WebRtcVoiceEngine::CallbackOnError(int channel_id, int err_code) {
- RTC_DCHECK(channel_id == -1);
- LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
- << channel_id << ".";
- rtc::CritScope lock(&channels_cs_);
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- channel->OnError(err_code);
- }
-}
-
void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel* channel) {
- RTC_DCHECK(channel != NULL);
- rtc::CritScope lock(&channels_cs_);
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(channel);
channels_.push_back(channel);
}
void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) {
- rtc::CritScope lock(&channels_cs_);
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
auto it = std::find(channels_.begin(), channels_.end(), channel);
- if (it != channels_.end()) {
- channels_.erase(it);
- }
+ RTC_DCHECK(it != channels_.end());
+ channels_.erase(it);
}
// Adjusts the default AGC target level by the specified delta.
// NB: If we start messing with other config fields, we'll want
// to save the current webrtc::AgcConfig as well.
bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
webrtc::AgcConfig config = default_agc_config_;
config.targetLeveldBOv -= delta;
@@ -1244,6 +995,7 @@ bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
}
bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (initialized_) {
LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
return false;
@@ -1260,6 +1012,7 @@ bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) {
}
bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
FILE* aec_dump_file_stream = rtc::FdopenPlatformFileForWriting(file);
if (!aec_dump_file_stream) {
LOG(LS_ERROR) << "Could not open AEC dump file stream.";
@@ -1279,6 +1032,7 @@ bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file) {
}
void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (!is_dumping_aec_) {
// Start dumping AEC when we are not dumping.
if (voe_wrapper_->processing()->StartDebugRecording(
@@ -1291,6 +1045,7 @@ void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
}
void WebRtcVoiceEngine::StopAecDump() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (is_dumping_aec_) {
// Stop dumping AEC when we are dumping.
if (voe_wrapper_->processing()->StopDebugRecording() !=
@@ -1302,14 +1057,17 @@ void WebRtcVoiceEngine::StopAecDump() {
}
bool WebRtcVoiceEngine::StartRtcEventLog(rtc::PlatformFile file) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->codec()->GetEventLog()->StartLogging(file);
}
void WebRtcVoiceEngine::StopRtcEventLog() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
voe_wrapper_->codec()->GetEventLog()->StopLogging();
}
int WebRtcVoiceEngine::CreateVoEChannel() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->base()->CreateChannel(voe_config_);
}
@@ -1317,33 +1075,61 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
: public AudioRenderer::Sink {
public:
WebRtcAudioSendStream(int ch, webrtc::AudioTransport* voe_audio_transport,
- uint32_t ssrc, webrtc::Call* call)
- : channel_(ch),
- voe_audio_transport_(voe_audio_transport),
- call_(call) {
+ uint32_t ssrc, const std::string& c_name,
+ const std::vector<webrtc::RtpExtension>& extensions,
+ webrtc::Call* call)
+ : voe_audio_transport_(voe_audio_transport),
+ call_(call),
+ config_(nullptr) {
RTC_DCHECK_GE(ch, 0);
// TODO(solenberg): Once we're not using FakeWebRtcVoiceEngine anymore:
// RTC_DCHECK(voe_audio_transport);
RTC_DCHECK(call);
audio_capture_thread_checker_.DetachFromThread();
- webrtc::AudioSendStream::Config config(nullptr);
- config.voe_channel_id = channel_;
- config.rtp.ssrc = ssrc;
- stream_ = call_->CreateAudioSendStream(config);
- RTC_DCHECK(stream_);
+ config_.rtp.ssrc = ssrc;
+ config_.rtp.c_name = c_name;
+ config_.voe_channel_id = ch;
+ RecreateAudioSendStream(extensions);
}
+
~WebRtcAudioSendStream() override {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
Stop();
call_->DestroyAudioSendStream(stream_);
}
+ void RecreateAudioSendStream(
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ if (stream_) {
+ call_->DestroyAudioSendStream(stream_);
+ stream_ = nullptr;
+ }
+ config_.rtp.extensions = extensions;
+ RTC_DCHECK(!stream_);
+ stream_ = call_->CreateAudioSendStream(config_);
+ RTC_CHECK(stream_);
+ }
+
+ bool SendTelephoneEvent(int payload_type, uint8_t event,
+ uint32_t duration_ms) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->SendTelephoneEvent(payload_type, event, duration_ms);
+ }
+
+ webrtc::AudioSendStream::Stats GetStats() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->GetStats();
+ }
+
// Starts the rendering by setting a sink to the renderer to get data
// callback.
// This method is called on the libjingle worker thread.
// TODO(xians): Make sure Start() is called only once.
void Start(AudioRenderer* renderer) {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(renderer);
if (renderer_) {
RTC_DCHECK(renderer_ == renderer);
@@ -1353,16 +1139,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
renderer_ = renderer;
}
- webrtc::AudioSendStream::Stats GetStats() const {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
- return stream_->GetStats();
- }
-
// Stops rendering by setting the sink of the renderer to nullptr. No data
// callback will be received after this method.
// This method is called on the libjingle worker thread.
void Stop() {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (renderer_) {
renderer_->SetSink(nullptr);
renderer_ = nullptr;
@@ -1374,11 +1155,12 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) override {
+ RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread());
RTC_DCHECK(voe_audio_transport_);
- voe_audio_transport_->OnData(channel_,
+ voe_audio_transport_->OnData(config_.voe_channel_id,
audio_data,
bits_per_sample,
sample_rate,
@@ -1389,7 +1171,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
// Callback from the |renderer_| when it is going away. In case Start() has
// never been called, this callback won't be triggered.
void OnClose() override {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Set |renderer_| to nullptr to make sure no more callback will get into
// the renderer.
renderer_ = nullptr;
@@ -1397,16 +1179,18 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
// Accessor to the VoE channel ID.
int channel() const {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
- return channel_;
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return config_.voe_channel_id;
}
private:
- rtc::ThreadChecker signal_thread_checker_;
+ rtc::ThreadChecker worker_thread_checker_;
rtc::ThreadChecker audio_capture_thread_checker_;
- const int channel_ = -1;
webrtc::AudioTransport* const voe_audio_transport_ = nullptr;
webrtc::Call* call_ = nullptr;
+ webrtc::AudioSendStream::Config config_;
+ // The stream is owned by WebRtcAudioSendStream and may be reallocated if
+ // configuration changes.
webrtc::AudioSendStream* stream_ = nullptr;
// Raw pointer to AudioRenderer owned by LocalAudioTrackHandler.
@@ -1419,80 +1203,163 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
public:
- explicit WebRtcAudioReceiveStream(int voe_channel_id)
- : channel_(voe_channel_id) {}
+ WebRtcAudioReceiveStream(int ch, uint32_t remote_ssrc, uint32_t local_ssrc,
+ bool use_combined_bwe, const std::string& sync_group,
+ const std::vector<webrtc::RtpExtension>& extensions,
+ webrtc::Call* call)
+ : call_(call),
+ config_() {
+ RTC_DCHECK_GE(ch, 0);
+ RTC_DCHECK(call);
+ config_.rtp.remote_ssrc = remote_ssrc;
+ config_.rtp.local_ssrc = local_ssrc;
+ config_.voe_channel_id = ch;
+ config_.sync_group = sync_group;
+ RecreateAudioReceiveStream(use_combined_bwe, extensions);
+ }
- int channel() { return channel_; }
+ ~WebRtcAudioReceiveStream() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ call_->DestroyAudioReceiveStream(stream_);
+ }
+
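+ // The two overloads below rebuild the underlying stream when either the RTP
+ // header extensions or the combined-BWE setting changes; the rest of the
+ // config is reused as-is.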
+ void RecreateAudioReceiveStream(
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RecreateAudioReceiveStream(config_.combined_audio_video_bwe, extensions);
+ }
+ void RecreateAudioReceiveStream(bool use_combined_bwe) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RecreateAudioReceiveStream(use_combined_bwe, config_.rtp.extensions);
+ }
+
+ webrtc::AudioReceiveStream::Stats GetStats() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->GetStats();
+ }
+
+ int channel() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return config_.voe_channel_id;
+ }
+
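+ // Passes ownership of an audio sink to the receive stream; a null sink
+ // clears the currently installed one.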
+ void SetRawAudioSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ stream_->SetSink(std::move(sink));
+ }
private:
- int channel_;
+ void RecreateAudioReceiveStream(bool use_combined_bwe,
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ if (stream_) {
+ call_->DestroyAudioReceiveStream(stream_);
+ stream_ = nullptr;
+ }
+ config_.rtp.extensions = extensions;
+ config_.combined_audio_video_bwe = use_combined_bwe;
+ RTC_DCHECK(!stream_);
+ stream_ = call_->CreateAudioReceiveStream(config_);
+ RTC_CHECK(stream_);
+ }
+
+ rtc::ThreadChecker worker_thread_checker_;
+ webrtc::Call* call_ = nullptr;
+ webrtc::AudioReceiveStream::Config config_;
+ // The stream is owned by WebRtcAudioReceiveStream and may be reallocated if
+ // configuration changes.
+ webrtc::AudioReceiveStream* stream_ = nullptr;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioReceiveStream);
};
-// WebRtcVoiceMediaChannel
WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
const AudioOptions& options,
webrtc::Call* call)
- : engine_(engine),
- send_bitrate_setting_(false),
- send_bitrate_bps_(0),
- options_(),
- dtmf_allowed_(false),
- desired_playout_(false),
- nack_enabled_(false),
- playout_(false),
- typing_noise_detected_(false),
- desired_send_(SEND_NOTHING),
- send_(SEND_NOTHING),
- call_(call) {
+ : engine_(engine), call_(call) {
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel";
- RTC_DCHECK(nullptr != call);
+ RTC_DCHECK(call);
engine->RegisterChannel(this);
SetOptions(options);
}
WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel";
-
- // Remove any remaining send streams.
+ // TODO(solenberg): Should be able to delete the streams directly, without
+ // going through RemoveNnStream(), once stream objects handle
+ // all (de)configuration.
while (!send_streams_.empty()) {
RemoveSendStream(send_streams_.begin()->first);
}
-
- // Remove any remaining receive streams.
- while (!receive_channels_.empty()) {
- RemoveRecvStream(receive_channels_.begin()->first);
+ while (!recv_streams_.empty()) {
+ RemoveRecvStream(recv_streams_.begin()->first);
}
- RTC_DCHECK(receive_streams_.empty());
-
- // Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
}
bool WebRtcVoiceMediaChannel::SetSendParameters(
const AudioSendParameters& params) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendParameters: "
+ << params.ToString();
// TODO(pthatcher): Refactor this to be cleaner now that we have all the
// information at once.
- return (SetSendCodecs(params.codecs) &&
- SetSendRtpHeaderExtensions(params.extensions) &&
- SetMaxSendBandwidth(params.max_bandwidth_bps) &&
- SetOptions(params.options));
+
+ if (!SetSendCodecs(params.codecs)) {
+ return false;
+ }
+
+ if (!ValidateRtpExtensions(params.extensions)) {
+ return false;
+ }
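+ // Keep only the header extensions supported for audio and recreate the
+ // send streams only if the filtered set actually changed.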
+ std::vector<webrtc::RtpExtension> filtered_extensions =
+ FilterRtpExtensions(params.extensions,
+ webrtc::RtpExtension::IsSupportedForAudio, true);
+ if (send_rtp_extensions_ != filtered_extensions) {
+ send_rtp_extensions_.swap(filtered_extensions);
+ for (auto& it : send_streams_) {
+ it.second->RecreateAudioSendStream(send_rtp_extensions_);
+ }
+ }
+
+ if (!SetMaxSendBandwidth(params.max_bandwidth_bps)) {
+ return false;
+ }
+ return SetOptions(params.options);
}
bool WebRtcVoiceMediaChannel::SetRecvParameters(
const AudioRecvParameters& params) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetRecvParameters: "
+ << params.ToString();
// TODO(pthatcher): Refactor this to be cleaner now that we have all the
// information at once.
- return (SetRecvCodecs(params.codecs) &&
- SetRecvRtpHeaderExtensions(params.extensions));
+
+ if (!SetRecvCodecs(params.codecs)) {
+ return false;
+ }
+
+ if (!ValidateRtpExtensions(params.extensions)) {
+ return false;
+ }
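+ // Apply the same audio-extension filtering on the receive side and recreate
+ // the receive streams only if the resulting set changed.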
+ std::vector<webrtc::RtpExtension> filtered_extensions =
+ FilterRtpExtensions(params.extensions,
+ webrtc::RtpExtension::IsSupportedForAudio, false);
+ if (recv_rtp_extensions_ != filtered_extensions) {
+ recv_rtp_extensions_.swap(filtered_extensions);
+ for (auto& it : recv_streams_) {
+ it.second->RecreateAudioReceiveStream(recv_rtp_extensions_);
+ }
+ }
+
+ return true;
}
bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "Setting voice channel options: "
<< options.ToString();
@@ -1503,26 +1370,27 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
// on top. This means there is no way to "clear" options such that
// they go back to the engine default.
options_.SetAll(options);
-
- if (send_ != SEND_NOTHING) {
- if (!engine()->ApplyOptions(options_)) {
- LOG(LS_WARNING) <<
- "Failed to apply engine options during channel SetOptions.";
- return false;
- }
+ if (!engine()->ApplyOptions(options_)) {
+ LOG(LS_WARNING) <<
+ "Failed to apply engine options during channel SetOptions.";
+ return false;
}
if (dscp_option_changed) {
rtc::DiffServCodePoint dscp = rtc::DSCP_DEFAULT;
- if (options_.dscp.GetWithDefaultIfUnset(false))
+ if (options_.dscp.value_or(false)) {
dscp = kAudioDscpValue;
+ }
if (MediaChannel::SetDscp(dscp) != 0) {
LOG(LS_WARNING) << "Failed to set DSCP settings for audio channel";
}
}
// TODO(solenberg): Don't recreate unless options changed.
- RecreateAudioReceiveStreams();
+ for (auto& it : recv_streams_) {
+ it.second->RecreateAudioReceiveStream(
+ options_.combined_audio_video_bwe.value_or(false));
+ }
LOG(LS_INFO) << "Set voice channel options. Current options: "
<< options_.ToString();
@@ -1531,7 +1399,7 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
bool WebRtcVoiceMediaChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Set the payload types to be used for incoming media.
LOG(LS_INFO) << "Setting receive voice codecs.";
@@ -1568,7 +1436,26 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
PausePlayout();
}
- bool result = SetRecvCodecsInternal(new_codecs);
+ bool result = true;
+ for (const AudioCodec& codec : new_codecs) {
+ webrtc::CodecInst voe_codec;
+ if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
+ LOG(LS_INFO) << ToString(codec);
+ voe_codec.pltype = codec.id;
+ for (const auto& ch : recv_streams_) {
+ if (engine()->voe()->codec()->SetRecPayloadType(
+ ch.second->channel(), voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, ch.second->channel(),
+ ToString(voe_codec));
+ result = false;
+ }
+ }
+ } else {
+ LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
+ result = false;
+ break;
+ }
+ }
if (result) {
recv_codecs_ = codecs;
}
@@ -1588,7 +1475,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
engine()->voe()->codec()->SetFECStatus(channel, false);
// Scan through the list to figure out the codec to use for sending, along
- // with the proper configuration for VAD and DTMF.
+ // with the proper configuration for VAD.
bool found_send_codec = false;
webrtc::CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
@@ -1603,7 +1490,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst voe_codec;
- if (!engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
continue;
}
@@ -1644,7 +1531,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Set packet size if the AudioCodec param kCodecParamPTime is set.
int ptime_ms = 0;
if (codec.GetParam(kCodecParamPTime, &ptime_ms)) {
- if (!SetPTimeAsPacketSize(&send_codec, ptime_ms)) {
+ if (!WebRtcVoiceCodecs::SetPTimeAsPacketSize(&send_codec, ptime_ms)) {
LOG(LS_WARNING) << "Failed to set packet size for codec "
<< send_codec.plname;
return false;
@@ -1687,7 +1574,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Set Opus internal DTX.
LOG(LS_INFO) << "Attempt to "
- << GetEnableString(enable_opus_dtx)
+ << (enable_opus_dtx ? "enable" : "disable")
<< " Opus DTX on channel "
<< channel;
if (engine()->voe()->codec()->SetOpusDtx(channel, enable_opus_dtx)) {
@@ -1717,25 +1604,17 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
SetSendBitrateInternal(send_bitrate_bps_);
}
- // Loop through the codecs list again to config the telephone-event/CN codec.
+ // Loop through the codecs list again to configure the CN codec.
for (const AudioCodec& codec : codecs) {
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst voe_codec;
- if (!engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
continue;
}
- // Find the DTMF telephone event "codec" and tell VoiceEngine channels
- // about it.
- if (IsCodec(codec, kDtmfCodecName)) {
- if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
- channel, codec.id) == -1) {
- LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, codec.id);
- return false;
- }
- } else if (IsCodec(codec, kCnCodecName)) {
+ if (IsCodec(codec, kCnCodecName)) {
// Turn voice activity detection/comfort noise on if supported.
// Set the wideband CN payload type appropriately.
// (narrowband always uses the static payload type 13).
@@ -1789,13 +1668,17 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
bool WebRtcVoiceMediaChannel::SetSendCodecs(
const std::vector<AudioCodec>& codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ // TODO(solenberg): Validate input - payload types must not overlap and must
+ // be within range; filter out codecs we don't support, redundant
+ // codecs, etc.
- dtmf_allowed_ = false;
+ // Find the DTMF telephone event "codec" payload type.
+ dtmf_payload_type_ = rtc::Optional<int>();
for (const AudioCodec& codec : codecs) {
- // Find the DTMF telephone event "codec".
if (IsCodec(codec, kDtmfCodecName)) {
- dtmf_allowed_ = true;
+ dtmf_payload_type_ = rtc::Optional<int>(codec.id);
+ break;
}
}
@@ -1808,7 +1691,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
// Set nack status on receive channels and update |nack_enabled_|.
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
SetNack(ch.second->channel(), nack_enabled_);
}
@@ -1844,106 +1727,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodec(
return true;
}
-bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (receive_extensions_ == extensions) {
- return true;
- }
-
- for (const auto& ch : receive_channels_) {
- if (!SetChannelRecvRtpHeaderExtensions(ch.second->channel(), extensions)) {
- return false;
- }
- }
-
- receive_extensions_ = extensions;
-
- // Recreate AudioReceiveStream:s.
- {
- std::vector<webrtc::RtpExtension> exts;
-
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
- if (audio_level_extension) {
- exts.push_back({
- kRtpAudioLevelHeaderExtension, audio_level_extension->id});
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (send_time_extension) {
- exts.push_back({
- kRtpAbsoluteSenderTimeHeaderExtension, send_time_extension->id});
- }
-
- recv_rtp_extensions_.swap(exts);
- RecreateAudioReceiveStreams();
- }
-
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetChannelRecvRtpHeaderExtensions(
- int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetReceiveAudioLevelIndicationStatus, channel_id,
- audio_level_extension)) {
- return false;
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetReceiveAbsoluteSenderTimeStatus, channel_id,
- send_time_extension)) {
- return false;
- }
-
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (send_extensions_ == extensions) {
- return true;
- }
-
- for (const auto& ch : send_streams_) {
- if (!SetChannelSendRtpHeaderExtensions(ch.second->channel(), extensions)) {
- return false;
- }
- }
-
- send_extensions_ = extensions;
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetChannelSendRtpHeaderExtensions(
- int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
-
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetSendAudioLevelIndicationStatus, channel_id,
- audio_level_extension)) {
- return false;
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetSendAbsoluteSenderTimeStatus, channel_id,
- send_time_extension)) {
- return false;
- }
-
- return true;
-}
-
bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
desired_playout_ = playout;
return ChangePlayout(desired_playout_);
@@ -1958,12 +1741,12 @@ bool WebRtcVoiceMediaChannel::ResumePlayout() {
}
bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (playout_ == playout) {
return true;
}
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
if (!SetPlayout(ch.second->channel(), playout)) {
LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
<< ch.second->channel() << " failed";
@@ -1995,7 +1778,7 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
return true;
}
- // Apply channel specific options.
+ // Apply channel specific options when channel is enabled for sending.
if (send == SEND_MICROPHONE) {
engine()->ApplyOptions(options_);
}
@@ -2007,13 +1790,6 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
}
}
- // Clear up the options after stopping sending. Since we may previously have
- // applied the channel specific options, now apply the original options stored
- // in WebRtcVoiceEngine.
- if (send == SEND_NOTHING) {
- engine()->ApplyOptions(engine()->GetOptions());
- }
-
send_ = send;
return true;
}
@@ -2039,7 +1815,7 @@ bool WebRtcVoiceMediaChannel::SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
AudioRenderer* renderer) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// TODO(solenberg): The state change should be fully rolled back if any one of
// these calls fail.
if (!SetLocalRenderer(ssrc, renderer)) {
@@ -2068,7 +1844,7 @@ int WebRtcVoiceMediaChannel::CreateVoEChannel() {
return id;
}
-bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
+bool WebRtcVoiceMediaChannel::DeleteVoEChannel(int channel) {
if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, channel);
}
@@ -2080,7 +1856,7 @@ bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
}
bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
uint32_t ssrc = sp.first_ssrc();
@@ -2097,33 +1873,12 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
return false;
}
- // Enable RTCP (for quality stats and feedback messages).
- if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
- LOG_RTCERR2(SetRTCPStatus, channel, 1);
- }
-
- SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
-
- // Set the local (send) SSRC.
- if (engine()->voe()->rtp()->SetLocalSSRC(channel, ssrc) == -1) {
- LOG_RTCERR2(SetLocalSSRC, channel, ssrc);
- DeleteChannel(channel);
- return false;
- }
-
- if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
- LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
- DeleteChannel(channel);
- return false;
- }
-
// Save the channel to send_streams_, so that RemoveSendStream() can still
// delete the channel in case failure happens below.
webrtc::AudioTransport* audio_transport =
engine()->voe()->base()->audio_transport();
- send_streams_.insert(
- std::make_pair(ssrc,
- new WebRtcAudioSendStream(channel, audio_transport, ssrc, call_)));
+ send_streams_.insert(std::make_pair(ssrc, new WebRtcAudioSendStream(
+ channel, audio_transport, ssrc, sp.cname, send_rtp_extensions_, call_)));
// Set the current codecs to be used for the new channel. We need to do this
// after adding the channel to send_channels_, because of how max bitrate is
@@ -2138,10 +1893,10 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
// with the same SSRC in order to send receiver reports.
if (send_streams_.size() == 1) {
receiver_reports_ssrc_ = ssrc;
- for (const auto& ch : receive_channels_) {
- int recv_channel = ch.second->channel();
+ for (const auto& stream : recv_streams_) {
+ int recv_channel = stream.second->channel();
if (engine()->voe()->rtp()->SetLocalSSRC(recv_channel, ssrc) != 0) {
- LOG_RTCERR2(SetLocalSSRC, ch.second->channel(), ssrc);
+ LOG_RTCERR2(SetLocalSSRC, recv_channel, ssrc);
return false;
}
engine()->voe()->base()->AssociateSendChannel(recv_channel, channel);
@@ -2154,7 +1909,9 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
}
bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "RemoveSendStream: " << ssrc;
+
auto it = send_streams_.find(ssrc);
if (it == send_streams_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
@@ -2165,15 +1922,12 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
int channel = it->second->channel();
ChangeSend(channel, SEND_NOTHING);
- // Delete the WebRtcVoiceChannelRenderer object connected to the channel,
- // this will disconnect the audio renderer with the send channel.
- delete it->second;
- send_streams_.erase(it);
-
- // Clean up and delete the send channel.
+ // Clean up and delete the send stream+channel.
LOG(LS_INFO) << "Removing audio send stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
- if (!DeleteChannel(channel)) {
+ delete it->second;
+ send_streams_.erase(it);
+ if (!DeleteVoEChannel(channel)) {
return false;
}
if (send_streams_.empty()) {
@@ -2183,14 +1937,14 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
}
bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "AddRecvStream: " << sp.ToString();
if (!ValidateStreamParams(sp)) {
return false;
}
- uint32_t ssrc = sp.first_ssrc();
+ const uint32_t ssrc = sp.first_ssrc();
if (ssrc == 0) {
LOG(LS_WARNING) << "AddRecvStream with ssrc==0 is not supported.";
return false;
@@ -2202,114 +1956,87 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
RemoveRecvStream(ssrc);
}
- if (receive_channels_.find(ssrc) != receive_channels_.end()) {
+ if (GetReceiveChannelId(ssrc) != -1) {
LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
return false;
}
- RTC_DCHECK(receive_stream_params_.find(ssrc) == receive_stream_params_.end());
// Create a new channel for receiving audio data.
- int channel = CreateVoEChannel();
+ const int channel = CreateVoEChannel();
if (channel == -1) {
return false;
}
- if (!ConfigureRecvChannel(channel)) {
- DeleteChannel(channel);
- return false;
- }
-
- WebRtcAudioReceiveStream* stream = new WebRtcAudioReceiveStream(channel);
- receive_channels_.insert(std::make_pair(ssrc, stream));
- receive_stream_params_[ssrc] = sp;
- AddAudioReceiveStream(ssrc);
-
- LOG(LS_INFO) << "New audio stream " << ssrc
- << " registered to VoiceEngine channel #"
- << channel << ".";
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::ConfigureRecvChannel(int channel) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
-
- int send_channel = GetSendChannelId(receiver_reports_ssrc_);
- if (send_channel != -1) {
- // Associate receive channel with first send channel (so the receive channel
- // can obtain RTT from the send channel)
- engine()->voe()->base()->AssociateSendChannel(channel, send_channel);
- LOG(LS_INFO) << "VoiceEngine channel #" << channel
- << " is associated with channel #" << send_channel << ".";
- }
- if (engine()->voe()->rtp()->SetLocalSSRC(channel,
- receiver_reports_ssrc_) == -1) {
- LOG_RTCERR1(SetLocalSSRC, channel);
- return false;
- }
// Turn off all supported codecs.
- int ncodecs = engine()->voe()->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
- voe_codec.pltype = -1;
- if (engine()->voe()->codec()->SetRecPayloadType(
- channel, voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
- return false;
- }
+ // TODO(solenberg): Remove once "no codecs" is the default state of a stream.
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ voe_codec.pltype = -1;
+ if (engine()->voe()->codec()->SetRecPayloadType(channel, voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ DeleteVoEChannel(channel);
+ return false;
}
}
// Only enable those configured for this channel.
for (const auto& codec : recv_codecs_) {
webrtc::CodecInst voe_codec;
- if (engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
voe_codec.pltype = codec.id;
if (engine()->voe()->codec()->SetRecPayloadType(
channel, voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ DeleteVoEChannel(channel);
return false;
}
}
}
- SetNack(channel, nack_enabled_);
-
- // Set RTP header extension for the new channel.
- if (!SetChannelRecvRtpHeaderExtensions(channel, receive_extensions_)) {
- return false;
+ const int send_channel = GetSendChannelId(receiver_reports_ssrc_);
+ if (send_channel != -1) {
+ // Associate the receive channel with the first send channel, so that the
+ // receive channel can obtain RTT from the send channel.
+ engine()->voe()->base()->AssociateSendChannel(channel, send_channel);
+ LOG(LS_INFO) << "VoiceEngine channel #" << channel
+ << " is associated with channel #" << send_channel << ".";
}
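+ // Create the stream wrapper; it creates the webrtc::AudioReceiveStream on
+ // |call_| using the first send SSRC for receiver reports and the channel's
+ // current BWE and header-extension settings.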
+ recv_streams_.insert(std::make_pair(ssrc, new WebRtcAudioReceiveStream(
+ channel, ssrc, receiver_reports_ssrc_,
+ options_.combined_audio_video_bwe.value_or(false), sp.sync_label,
+ recv_rtp_extensions_, call_)));
+
+ SetNack(channel, nack_enabled_);
SetPlayout(channel, playout_);
+
return true;
}
bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "RemoveRecvStream: " << ssrc;
- auto it = receive_channels_.find(ssrc);
- if (it == receive_channels_.end()) {
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
<< " which doesn't exist.";
return false;
}
- RemoveAudioReceiveStream(ssrc);
- receive_stream_params_.erase(ssrc);
-
- const int channel = it->second->channel();
- delete it->second;
- receive_channels_.erase(it);
-
// Deregister default channel, if that's the one being destroyed.
if (IsDefaultRecvStream(ssrc)) {
default_recv_ssrc_ = -1;
}
- LOG(LS_INFO) << "Removing audio stream " << ssrc
+ const int channel = it->second->channel();
+
+ // Clean up and delete the receive stream+channel.
+ LOG(LS_INFO) << "Removing audio receive stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
- return DeleteChannel(channel);
+ it->second->SetRawAudioSink(nullptr);
+ delete it->second;
+ recv_streams_.erase(it);
+ return DeleteVoEChannel(channel);
}
bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc,
@@ -2337,9 +2064,9 @@ bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc,
bool WebRtcVoiceMediaChannel::GetActiveStreams(
AudioInfo::StreamList* actives) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
actives->clear();
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
int level = GetOutputLevel(ch.second->channel());
if (level > 0) {
actives->push_back(std::make_pair(ch.first, level));
@@ -2349,9 +2076,9 @@ bool WebRtcVoiceMediaChannel::GetActiveStreams(
}
int WebRtcVoiceMediaChannel::GetOutputLevel() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
int highest = 0;
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
highest = std::max(GetOutputLevel(ch.second->channel()), highest);
}
return highest;
@@ -2383,7 +2110,7 @@ void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
}
bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (ssrc == 0) {
default_recv_volume_ = volume;
if (default_recv_ssrc_ == -1) {
@@ -2408,64 +2135,48 @@ bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) {
}
bool WebRtcVoiceMediaChannel::CanInsertDtmf() {
- return dtmf_allowed_;
+ return dtmf_payload_type_ ? true : false;
}
-bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc,
- int event,
- int duration,
- int flags) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (!dtmf_allowed_) {
+bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc, int event,
+ int duration) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf";
+ if (!dtmf_payload_type_) {
return false;
}
- // Send the event.
- if (flags & cricket::DF_SEND) {
- int channel = -1;
- if (ssrc == 0) {
- if (send_streams_.size() > 0) {
- channel = send_streams_.begin()->second->channel();
- }
- } else {
- channel = GetSendChannelId(ssrc);
- }
- if (channel == -1) {
- LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
- << ssrc << " is not in use.";
- return false;
- }
- // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
- if (engine()->voe()->dtmf()->SendTelephoneEvent(
- channel, event, true, duration) == -1) {
- LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
- return false;
- }
+ // Figure out which WebRtcAudioSendStream to send the event on.
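+ // An ssrc of 0 selects the first (default) send stream.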
+ auto it = ssrc != 0 ? send_streams_.find(ssrc) : send_streams_.begin();
+ if (it == send_streams_.end()) {
+ LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
+ return false;
}
-
- // Play the event.
- if (flags & cricket::DF_PLAY) {
- // Play DTMF tone locally.
- if (engine()->voe()->dtmf()->PlayDtmfTone(event, duration) == -1) {
- LOG_RTCERR2(PlayDtmfTone, event, duration);
- return false;
- }
+ if (event < kMinTelephoneEventCode ||
+ event > kMaxTelephoneEventCode) {
+ LOG(LS_WARNING) << "DTMF event code " << event << " out of range.";
+ return false;
}
-
- return true;
+ if (duration < kMinTelephoneEventDuration ||
+ duration > kMaxTelephoneEventDuration) {
+ LOG(LS_WARNING) << "DTMF event duration " << duration << " out of range.";
+ return false;
+ }
+ return it->second->SendTelephoneEvent(*dtmf_payload_type_, event, duration);
}
void WebRtcVoiceMediaChannel::OnPacketReceived(
rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
uint32_t ssrc = 0;
if (!GetRtpSsrc(packet->data(), packet->size(), &ssrc)) {
return;
}
- if (receive_channels_.empty()) {
- // Create new channel, which will be the default receive channel.
+ // If we don't have a default channel, and the SSRC is unknown, create a
+ // default channel.
+ if (default_recv_ssrc_ == -1 && GetReceiveChannelId(ssrc) == -1) {
StreamParams sp;
sp.ssrcs.push_back(ssrc);
LOG(LS_INFO) << "Creating default receive stream for SSRC=" << ssrc << ".";
@@ -2485,7 +2196,13 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(
reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
webrtc_packet_time);
if (webrtc::PacketReceiver::DELIVERY_OK != delivery_result) {
- return;
+ // If the SSRC is unknown here, route it to the default channel, if we have
+ // one. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208
+ if (default_recv_ssrc_ == -1) {
+ return;
+ } else {
+ ssrc = default_recv_ssrc_;
+ }
}
// Find the channel to send this packet to. It must exist since webrtc::Call
@@ -2500,7 +2217,7 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(
void WebRtcVoiceMediaChannel::OnRtcpReceived(
rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Forward packet to Call as well.
const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
@@ -2542,7 +2259,7 @@ void WebRtcVoiceMediaChannel::OnRtcpReceived(
}
bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
int channel = GetSendChannelId(ssrc);
if (channel == -1) {
LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
@@ -2601,7 +2318,7 @@ bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) {
return true;
webrtc::CodecInst codec = *send_codec_;
- bool is_multi_rate = IsCodecMultiRate(codec);
+ bool is_multi_rate = WebRtcVoiceCodecs::IsCodecMultiRate(codec);
if (is_multi_rate) {
// If codec is multi-rate then just set the bitrate.
@@ -2629,7 +2346,7 @@ bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) {
}
bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(info);
// Get SSRC and stats for each sender.
@@ -2652,15 +2369,14 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
sinfo.echo_delay_std_ms = stats.echo_delay_std_ms;
sinfo.echo_return_loss = stats.echo_return_loss;
sinfo.echo_return_loss_enhancement = stats.echo_return_loss_enhancement;
- sinfo.typing_noise_detected = typing_noise_detected_;
- // TODO(solenberg): Move to AudioSendStream.
- // sinfo.typing_noise_detected = stats.typing_noise_detected;
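+ // Only report typing noise while the channel is actually sending.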
+ sinfo.typing_noise_detected =
+ (send_ == SEND_NOTHING ? false : stats.typing_noise_detected);
info->senders.push_back(sinfo);
}
// Get SSRC and stats for each receiver.
RTC_DCHECK(info->receivers.size() == 0);
- for (const auto& stream : receive_streams_) {
+ for (const auto& stream : recv_streams_) {
webrtc::AudioReceiveStream::Stats stats = stream.second->GetStats();
VoiceReceiverInfo rinfo;
rinfo.add_ssrc(stats.remote_ssrc);
@@ -2694,15 +2410,17 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
return true;
}
-void WebRtcVoiceMediaChannel::OnError(int error) {
- if (send_ == SEND_NOTHING) {
+void WebRtcVoiceMediaChannel::SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink";
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ LOG(LS_WARNING) << "SetRawAudioSink: no recv stream " << ssrc;
return;
}
- if (error == VE_TYPING_NOISE_WARNING) {
- typing_noise_detected_ = true;
- } else if (error == VE_TYPING_NOISE_OFF_WARNING) {
- typing_noise_detected_ = false;
- }
+ it->second->SetRawAudioSink(std::move(sink));
}
int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
@@ -2712,16 +2430,16 @@ int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
}
int WebRtcVoiceMediaChannel::GetReceiveChannelId(uint32_t ssrc) const {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- const auto it = receive_channels_.find(ssrc);
- if (it != receive_channels_.end()) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ const auto it = recv_streams_.find(ssrc);
+ if (it != recv_streams_.end()) {
return it->second->channel();
}
return -1;
}
int WebRtcVoiceMediaChannel::GetSendChannelId(uint32_t ssrc) const {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
const auto it = send_streams_.find(ssrc);
if (it != send_streams_.end()) {
return it->second->channel();
@@ -2762,7 +2480,7 @@ bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
if (codec.id == red_pt) {
// If we find the right codec, that will be the codec we pass to
// SetSendCodec, with the desired payload type.
- if (engine()->FindWebRtcCodec(codec, send_codec)) {
+ if (WebRtcVoiceEngine::ToCodecInst(codec, send_codec)) {
return true;
} else {
break;
@@ -2786,117 +2504,6 @@ bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
}
return true;
}
-
-// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
-VoiceMediaChannel::Error
- WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
- switch (err_code) {
- case 0:
- return ERROR_NONE;
- case VE_CANNOT_START_RECORDING:
- case VE_MIC_VOL_ERROR:
- case VE_GET_MIC_VOL_ERROR:
- case VE_CANNOT_ACCESS_MIC_VOL:
- return ERROR_REC_DEVICE_OPEN_FAILED;
- case VE_SATURATION_WARNING:
- return ERROR_REC_DEVICE_SATURATION;
- case VE_REC_DEVICE_REMOVED:
- return ERROR_REC_DEVICE_REMOVED;
- case VE_RUNTIME_REC_WARNING:
- case VE_RUNTIME_REC_ERROR:
- return ERROR_REC_RUNTIME_ERROR;
- case VE_CANNOT_START_PLAYOUT:
- case VE_SPEAKER_VOL_ERROR:
- case VE_GET_SPEAKER_VOL_ERROR:
- case VE_CANNOT_ACCESS_SPEAKER_VOL:
- return ERROR_PLAY_DEVICE_OPEN_FAILED;
- case VE_RUNTIME_PLAY_WARNING:
- case VE_RUNTIME_PLAY_ERROR:
- return ERROR_PLAY_RUNTIME_ERROR;
- case VE_TYPING_NOISE_WARNING:
- return ERROR_REC_TYPING_NOISE_DETECTED;
- default:
- return VoiceMediaChannel::ERROR_OTHER;
- }
-}
-
-bool WebRtcVoiceMediaChannel::SetHeaderExtension(ExtensionSetterFunction setter,
- int channel_id, const RtpHeaderExtension* extension) {
- bool enable = false;
- int id = 0;
- std::string uri;
- if (extension) {
- enable = true;
- id = extension->id;
- uri = extension->uri;
- }
- if ((engine()->voe()->rtp()->*setter)(channel_id, enable, id) != 0) {
- LOG_RTCERR4(*setter, uri, channel_id, enable, id);
- return false;
- }
- return true;
-}
-
-void WebRtcVoiceMediaChannel::RecreateAudioReceiveStreams() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- for (const auto& it : receive_channels_) {
- RemoveAudioReceiveStream(it.first);
- }
- for (const auto& it : receive_channels_) {
- AddAudioReceiveStream(it.first);
- }
-}
-
-void WebRtcVoiceMediaChannel::AddAudioReceiveStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- WebRtcAudioReceiveStream* stream = receive_channels_[ssrc];
- RTC_DCHECK(stream != nullptr);
- RTC_DCHECK(receive_streams_.find(ssrc) == receive_streams_.end());
- webrtc::AudioReceiveStream::Config config;
- config.rtp.remote_ssrc = ssrc;
- // Only add RTP extensions if we support combined A/V BWE.
- config.rtp.extensions = recv_rtp_extensions_;
- config.combined_audio_video_bwe =
- options_.combined_audio_video_bwe.GetWithDefaultIfUnset(false);
- config.voe_channel_id = stream->channel();
- config.sync_group = receive_stream_params_[ssrc].sync_label;
- webrtc::AudioReceiveStream* s = call_->CreateAudioReceiveStream(config);
- receive_streams_.insert(std::make_pair(ssrc, s));
-}
-
-void WebRtcVoiceMediaChannel::RemoveAudioReceiveStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- auto stream_it = receive_streams_.find(ssrc);
- if (stream_it != receive_streams_.end()) {
- call_->DestroyAudioReceiveStream(stream_it->second);
- receive_streams_.erase(stream_it);
- }
-}
-
-bool WebRtcVoiceMediaChannel::SetRecvCodecsInternal(
- const std::vector<AudioCodec>& new_codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- for (const AudioCodec& codec : new_codecs) {
- webrtc::CodecInst voe_codec;
- if (engine()->FindWebRtcCodec(codec, &voe_codec)) {
- LOG(LS_INFO) << ToString(codec);
- voe_codec.pltype = codec.id;
- for (const auto& ch : receive_channels_) {
- if (engine()->voe()->codec()->SetRecPayloadType(
- ch.second->channel(), voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, ch.second->channel(),
- ToString(voe_codec));
- return false;
- }
- }
- } else {
- LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
- return false;
- }
- }
- return true;
-}
-
} // namespace cricket
#endif // HAVE_WEBRTC_VOICE