author    henrike@webrtc.org <henrike@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-07-26 19:17:59 +0000
committer henrike@webrtc.org <henrike@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-07-26 19:17:59 +0000
commit    1e09a711263dd105e6f7a03812250084c64e5fd8 (patch)
tree      0549edb6f2115cbfd763d8ccd6357cb9bc9eeea5 /talk
parent    367f640beac67a31a5146ce613302499b5c645df (diff)
download  webrtc-1e09a711263dd105e6f7a03812250084c64e5fd8.tar.gz
Update talk folder to revision=49952949
git-svn-id: http://webrtc.googlecode.com/svn/trunk@4413 4adac7df-926f-26a2-2b94-8c16560cd09d
Diffstat (limited to 'talk')
-rw-r--r--  talk/OWNERS | 2
-rw-r--r--  talk/app/webrtc/audiotrack.cc | 3
-rw-r--r--  talk/app/webrtc/audiotrack.h | 7
-rw-r--r--  talk/app/webrtc/audiotrackrenderer.cc | 9
-rw-r--r--  talk/app/webrtc/audiotrackrenderer.h | 12
-rw-r--r--  talk/app/webrtc/mediastreamhandler.cc | 11
-rw-r--r--  talk/app/webrtc/mediastreamhandler_unittest.cc | 30
-rw-r--r--  talk/app/webrtc/mediastreaminterface.h | 4
-rw-r--r--  talk/app/webrtc/mediastreamprovider.h | 9
-rw-r--r--  talk/app/webrtc/mediastreamtrackproxy.h | 2
-rw-r--r--  talk/app/webrtc/peerconnectionfactory_unittest.cc | 21
-rw-r--r--  talk/app/webrtc/portallocatorfactory.cc | 12
-rw-r--r--  talk/app/webrtc/webrtcsdp.cc | 4
-rw-r--r--  talk/app/webrtc/webrtcsdp_unittest.cc | 7
-rw-r--r--  talk/app/webrtc/webrtcsession.cc | 111
-rw-r--r--  talk/app/webrtc/webrtcsession.h | 19
-rw-r--r--  talk/app/webrtc/webrtcsession_unittest.cc | 67
-rw-r--r--  talk/base/firewallsocketserver.cc | 1
-rw-r--r--  talk/base/sslidentity_unittest.cc | 4
-rw-r--r--  talk/base/virtualsocketserver.cc | 3
-rw-r--r--  talk/media/base/audiorenderer.h | 13
-rw-r--r--  talk/media/base/capturerenderadapter.cc | 13
-rw-r--r--  talk/media/base/constants.cc | 4
-rw-r--r--  talk/media/base/constants.h | 1
-rw-r--r--  talk/media/base/fakemediaengine.h | 53
-rw-r--r--  talk/media/base/filemediaengine.cc | 5
-rw-r--r--  talk/media/base/filemediaengine.h | 5
-rw-r--r--  talk/media/base/mediachannel.h | 46
-rw-r--r--  talk/media/base/rtpdataengine.cc | 2
-rw-r--r--  talk/media/base/videoengine_unittest.h | 3
-rw-r--r--  talk/media/sctp/sctpdataengine.cc | 2
-rw-r--r--  talk/media/webrtc/fakewebrtcvideoengine.h | 4
-rw-r--r--  talk/media/webrtc/webrtcmediaengine.h | 9
-rw-r--r--  talk/media/webrtc/webrtcvideoengine.cc | 18
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine.cc | 385
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine.h | 47
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine_unittest.cc | 83
-rw-r--r--  talk/p2p/base/dtlstransportchannel_unittest.cc | 14
-rw-r--r--  talk/p2p/base/port_unittest.cc | 5
-rw-r--r--  talk/p2p/base/portallocator.cc | 1
-rw-r--r--  talk/p2p/base/pseudotcp_unittest.cc | 2
-rw-r--r--  talk/p2p/base/relayserver.cc | 4
-rw-r--r--  talk/p2p/base/relayserver.h | 15
-rw-r--r--  talk/p2p/base/session.cc | 2
-rw-r--r--  talk/p2p/base/session_unittest.cc | 37
-rw-r--r--  talk/p2p/base/transport.h | 13
-rw-r--r--  talk/p2p/base/transportdescription.h | 2
-rw-r--r--  talk/p2p/client/connectivitychecker.cc | 1
-rw-r--r--  talk/session/media/call.cc | 5
-rw-r--r--  talk/session/media/channel.cc | 25
-rw-r--r--  talk/session/media/channel.h | 5
-rw-r--r--  talk/session/media/channel_unittest.cc | 4
-rw-r--r--  talk/session/media/mediamessages_unittest.cc | 11
-rw-r--r--  talk/session/media/mediasession.cc | 2
-rw-r--r--  talk/session/media/mediasessionclient.cc | 31
-rw-r--r--  talk/session/media/mediasessionclient_unittest.cc | 72
-rw-r--r--  talk/site_scons/site_tools/talk_linux.py | 20
57 files changed, 910 insertions(+), 392 deletions(-)
diff --git a/talk/OWNERS b/talk/OWNERS
index 01c1f13319..159a05d554 100644
--- a/talk/OWNERS
+++ b/talk/OWNERS
@@ -6,4 +6,4 @@ mallinath@webrtc.org
perkj@webrtc.org
sergeyu@chromium.org
tommi@webrtc.org
-wu@webrtc.org
+wu@webrtc.org
\ No newline at end of file
diff --git a/talk/app/webrtc/audiotrack.cc b/talk/app/webrtc/audiotrack.cc
index 5bfca42682..a8d45baaca 100644
--- a/talk/app/webrtc/audiotrack.cc
+++ b/talk/app/webrtc/audiotrack.cc
@@ -35,8 +35,7 @@ static const char kAudioTrackKind[] = "audio";
AudioTrack::AudioTrack(const std::string& label,
AudioSourceInterface* audio_source)
: MediaStreamTrack<AudioTrackInterface>(label),
- audio_source_(audio_source),
- renderer_(new AudioTrackRenderer()) {
+ audio_source_(audio_source) {
}
std::string AudioTrack::kind() const {
diff --git a/talk/app/webrtc/audiotrack.h b/talk/app/webrtc/audiotrack.h
index 48098f5d82..fc09d2b9ef 100644
--- a/talk/app/webrtc/audiotrack.h
+++ b/talk/app/webrtc/audiotrack.h
@@ -28,7 +28,6 @@
#ifndef TALK_APP_WEBRTC_AUDIOTRACK_H_
#define TALK_APP_WEBRTC_AUDIOTRACK_H_
-#include "talk/app/webrtc/audiotrackrenderer.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/notifier.h"
@@ -46,8 +45,9 @@ class AudioTrack : public MediaStreamTrack<AudioTrackInterface> {
return audio_source_.get();
}
- virtual cricket::AudioRenderer* FrameInput() {
- return renderer_.get();
+ // This method is used for supporting multiple sources/sinks for AudioTracks.
+ virtual cricket::AudioRenderer* GetRenderer() {
+ return NULL;
}
// Implement MediaStreamTrack
@@ -58,7 +58,6 @@ class AudioTrack : public MediaStreamTrack<AudioTrackInterface> {
private:
talk_base::scoped_refptr<AudioSourceInterface> audio_source_;
- talk_base::scoped_ptr<AudioTrackRenderer> renderer_;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrackrenderer.cc b/talk/app/webrtc/audiotrackrenderer.cc
index c8ad522527..92d3449ac8 100644
--- a/talk/app/webrtc/audiotrackrenderer.cc
+++ b/talk/app/webrtc/audiotrackrenderer.cc
@@ -36,13 +36,14 @@ AudioTrackRenderer::AudioTrackRenderer() : channel_id_(-1) {
AudioTrackRenderer::~AudioTrackRenderer() {
}
-void AudioTrackRenderer::SetChannelId(int channel_id) {
- ASSERT(channel_id_ == -1);
+void AudioTrackRenderer::AddChannel(int channel_id) {
+ ASSERT(channel_id_ == -1 || channel_id_ == channel_id);
channel_id_ = channel_id;
}
-int AudioTrackRenderer::GetChannelId() const {
- return channel_id_;
+void AudioTrackRenderer::RemoveChannel(int channel_id) {
+ ASSERT(channel_id_ == -1 || channel_id_ == channel_id);
+ channel_id_ = -1;
}
} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrackrenderer.h b/talk/app/webrtc/audiotrackrenderer.h
index 55de04ea90..a4c58c4fc0 100644
--- a/talk/app/webrtc/audiotrackrenderer.h
+++ b/talk/app/webrtc/audiotrackrenderer.h
@@ -28,6 +28,7 @@
#ifndef TALK_APP_WEBRTC_AUDIOTRACKRENDERER_H_
#define TALK_APP_WEBRTC_AUDIOTRACKRENDERER_H_
+#include "talk/base/thread.h"
#include "talk/media/base/audiorenderer.h"
namespace webrtc {
@@ -35,16 +36,19 @@ namespace webrtc {
// Class used for AudioTrack to get the ID of WebRtc voice channel that
// the AudioTrack is connecting to.
// Each AudioTrack owns a AudioTrackRenderer instance.
-// SetChannelID() should be called only when a AudioTrack is added to a
-// MediaStream and should not be changed afterwards.
+// AddChannel() will be called when an AudioTrack is added to a MediaStream.
+// RemoveChannel() will be called when the AudioTrack or the WebRtc VoE channel
+// is going away.
+// This implementation only supports one channel, and it is only used by
+// Chrome for remote audio tracks.
class AudioTrackRenderer : public cricket::AudioRenderer {
public:
AudioTrackRenderer();
~AudioTrackRenderer();
// Implements cricket::AudioRenderer.
- virtual void SetChannelId(int channel_id);
- virtual int GetChannelId() const;
+ virtual void AddChannel(int channel_id) OVERRIDE;
+ virtual void RemoveChannel(int channel_id) OVERRIDE;
private:
int channel_id_;
diff --git a/talk/app/webrtc/mediastreamhandler.cc b/talk/app/webrtc/mediastreamhandler.cc
index a6a45b2149..d43c6d5a1d 100644
--- a/talk/app/webrtc/mediastreamhandler.cc
+++ b/talk/app/webrtc/mediastreamhandler.cc
@@ -75,7 +75,7 @@ void LocalAudioTrackHandler::OnStateChanged() {
void LocalAudioTrackHandler::Stop() {
cricket::AudioOptions options;
- provider_->SetAudioSend(ssrc(), false, options);
+ provider_->SetAudioSend(ssrc(), false, options, NULL);
}
void LocalAudioTrackHandler::OnEnabledChanged() {
@@ -84,7 +84,8 @@ void LocalAudioTrackHandler::OnEnabledChanged() {
options = static_cast<LocalAudioSource*>(
audio_track_->GetSource())->options();
}
- provider_->SetAudioSend(ssrc(), audio_track_->enabled(), options);
+ provider_->SetAudioSend(ssrc(), audio_track_->enabled(), options,
+ audio_track_->GetRenderer());
}
RemoteAudioTrackHandler::RemoteAudioTrackHandler(
@@ -95,21 +96,21 @@ RemoteAudioTrackHandler::RemoteAudioTrackHandler(
audio_track_(track),
provider_(provider) {
OnEnabledChanged();
- provider_->SetAudioRenderer(ssrc, audio_track_->FrameInput());
}
RemoteAudioTrackHandler::~RemoteAudioTrackHandler() {
}
void RemoteAudioTrackHandler::Stop() {
- provider_->SetAudioPlayout(ssrc(), false);
+ provider_->SetAudioPlayout(ssrc(), false, NULL);
}
void RemoteAudioTrackHandler::OnStateChanged() {
}
void RemoteAudioTrackHandler::OnEnabledChanged() {
- provider_->SetAudioPlayout(ssrc(), audio_track_->enabled());
+ provider_->SetAudioPlayout(ssrc(), audio_track_->enabled(),
+ audio_track_->GetRenderer());
}
LocalVideoTrackHandler::LocalVideoTrackHandler(
diff --git a/talk/app/webrtc/mediastreamhandler_unittest.cc b/talk/app/webrtc/mediastreamhandler_unittest.cc
index bc4189bf94..a874bde0b4 100644
--- a/talk/app/webrtc/mediastreamhandler_unittest.cc
+++ b/talk/app/webrtc/mediastreamhandler_unittest.cc
@@ -54,10 +54,11 @@ namespace webrtc {
class MockAudioProvider : public AudioProviderInterface {
public:
virtual ~MockAudioProvider() {}
- MOCK_METHOD2(SetAudioPlayout, void(uint32 ssrc, bool enable));
- MOCK_METHOD3(SetAudioSend, void(uint32 ssrc, bool enable,
- const cricket::AudioOptions& options));
- MOCK_METHOD2(SetAudioRenderer, bool(uint32, cricket::AudioRenderer*));
+ MOCK_METHOD3(SetAudioPlayout, void(uint32 ssrc, bool enable,
+ cricket::AudioRenderer* renderer));
+ MOCK_METHOD4(SetAudioSend, void(uint32 ssrc, bool enable,
+ const cricket::AudioOptions& options,
+ cricket::AudioRenderer* renderer));
};
// Helper class to test MediaStreamHandler.
@@ -114,7 +115,7 @@ class MediaStreamHandlerTest : public testing::Test {
}
void AddLocalAudioTrack() {
- EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _));
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
handlers_.AddLocalAudioTrack(stream_, stream_->GetAudioTracks()[0],
kAudioSsrc);
}
@@ -128,7 +129,7 @@ class MediaStreamHandlerTest : public testing::Test {
}
void RemoveLocalAudioTrack() {
- EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _))
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _))
.Times(1);
handlers_.RemoveLocalTrack(stream_, audio_track_);
}
@@ -142,8 +143,7 @@ class MediaStreamHandlerTest : public testing::Test {
}
void AddRemoteAudioTrack() {
- EXPECT_CALL(audio_provider_, SetAudioRenderer(kAudioSsrc, _));
- EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true, _));
handlers_.AddRemoteAudioTrack(stream_, stream_->GetAudioTracks()[0],
kAudioSsrc);
}
@@ -156,7 +156,7 @@ class MediaStreamHandlerTest : public testing::Test {
}
void RemoveRemoteAudioTrack() {
- EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false));
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false, _));
handlers_.RemoveRemoteTrack(stream_, stream_->GetAudioTracks()[0]);
}
@@ -203,7 +203,7 @@ TEST_F(MediaStreamHandlerTest, RemoveLocalStream) {
.Times(1);
EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _))
.Times(1);
- EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _))
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _))
.Times(1);
handlers_.RemoveLocalStream(stream_);
}
@@ -236,7 +236,7 @@ TEST_F(MediaStreamHandlerTest, RemoveRemoteStream) {
EXPECT_CALL(video_provider_, SetVideoPlayout(kVideoSsrc, false, NULL))
.Times(1);
- EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false))
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false, _))
.Times(1);
handlers_.RemoveRemoteStream(stream_);
}
@@ -244,10 +244,10 @@ TEST_F(MediaStreamHandlerTest, RemoveRemoteStream) {
TEST_F(MediaStreamHandlerTest, LocalAudioTrackDisable) {
AddLocalAudioTrack();
- EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _));
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _));
audio_track_->set_enabled(false);
- EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _));
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
audio_track_->set_enabled(true);
RemoveLocalAudioTrack();
@@ -257,10 +257,10 @@ TEST_F(MediaStreamHandlerTest, LocalAudioTrackDisable) {
TEST_F(MediaStreamHandlerTest, RemoteAudioTrackDisable) {
AddRemoteAudioTrack();
- EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false));
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, false, _));
audio_track_->set_enabled(false);
- EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
+ EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true, _));
audio_track_->set_enabled(true);
RemoveRemoteAudioTrack();
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
index 6f834d2743..0d3e39d892 100644
--- a/talk/app/webrtc/mediastreaminterface.h
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -155,11 +155,11 @@ class AudioTrackInterface : public MediaStreamTrackInterface {
// TODO(xians): Figure out if the following interface should be const or not.
virtual AudioSourceInterface* GetSource() const = 0;
- // Gets a pointer to the frame input of this AudioTrack.
+ // Gets a pointer to the audio renderer of this AudioTrack.
// The pointer is valid for the lifetime of this AudioTrack.
// TODO(xians): Make the following interface pure virtual once Chrome has its
// implementation.
- virtual cricket::AudioRenderer* FrameInput() { return NULL; }
+ virtual cricket::AudioRenderer* GetRenderer() { return NULL; }
protected:
virtual ~AudioTrackInterface() {}
diff --git a/talk/app/webrtc/mediastreamprovider.h b/talk/app/webrtc/mediastreamprovider.h
index 4c77fd0765..ae00b1de75 100644
--- a/talk/app/webrtc/mediastreamprovider.h
+++ b/talk/app/webrtc/mediastreamprovider.h
@@ -45,14 +45,13 @@ namespace webrtc {
class AudioProviderInterface {
public:
// Enable/disable the audio playout of a remote audio track with |ssrc|.
- virtual void SetAudioPlayout(uint32 ssrc, bool enable) = 0;
+ virtual void SetAudioPlayout(uint32 ssrc, bool enable,
+ cricket::AudioRenderer* renderer) = 0;
// Enable/disable sending audio on the local audio track with |ssrc|.
// When |enable| is true |options| should be applied to the audio track.
virtual void SetAudioSend(uint32 ssrc, bool enable,
- const cricket::AudioOptions& options) = 0;
- // Sets the renderer to be used for the specified |ssrc|.
- virtual bool SetAudioRenderer(uint32 ssrc,
- cricket::AudioRenderer* renderer) = 0;
+ const cricket::AudioOptions& options,
+ cricket::AudioRenderer* renderer) = 0;
protected:
virtual ~AudioProviderInterface() {}
diff --git a/talk/app/webrtc/mediastreamtrackproxy.h b/talk/app/webrtc/mediastreamtrackproxy.h
index 954874bf38..1efc8c616a 100644
--- a/talk/app/webrtc/mediastreamtrackproxy.h
+++ b/talk/app/webrtc/mediastreamtrackproxy.h
@@ -42,7 +42,7 @@ BEGIN_PROXY_MAP(AudioTrack)
PROXY_CONSTMETHOD0(TrackState, state)
PROXY_CONSTMETHOD0(bool, enabled)
PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource)
- PROXY_METHOD0(cricket::AudioRenderer*, FrameInput)
+ PROXY_METHOD0(cricket::AudioRenderer*, GetRenderer)
PROXY_METHOD1(bool, set_enabled, bool)
PROXY_METHOD1(bool, set_state, TrackState)
diff --git a/talk/app/webrtc/peerconnectionfactory_unittest.cc b/talk/app/webrtc/peerconnectionfactory_unittest.cc
index a77fed7735..d0e5df3ceb 100644
--- a/talk/app/webrtc/peerconnectionfactory_unittest.cc
+++ b/talk/app/webrtc/peerconnectionfactory_unittest.cc
@@ -157,6 +157,9 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServers) {
ice_server.uri = kTurnIceServer;
ice_server.password = kTurnPassword;
ice_servers.push_back(ice_server);
+ ice_server.uri = kTurnIceServerWithTransport;
+ ice_server.password = kTurnPassword;
+ ice_servers.push_back(ice_server);
talk_base::scoped_refptr<PeerConnectionInterface> pc(
factory_->CreatePeerConnection(ice_servers, NULL,
allocator_factory_.get(),
@@ -164,17 +167,23 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServers) {
&observer_));
EXPECT_TRUE(pc.get() != NULL);
StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun);
webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "test.com", 1234);
+ "stun.l.google.com", 19302);
stun_configs.push_back(stun1);
+ webrtc::PortAllocatorFactoryInterface::StunConfiguration stun2(
+ "test.com", 1234);
+ stun_configs.push_back(stun2);
+ webrtc::PortAllocatorFactoryInterface::StunConfiguration stun3(
+ "hello.com", kDefaultPort);
+ stun_configs.push_back(stun3);
VerifyStunConfigurations(stun_configs);
TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
+ webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
"test.com", 1234, "test@hello.com", kTurnPassword, "udp");
- turn_configs.push_back(turn);
+ turn_configs.push_back(turn1);
+ webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
+ "hello.com", kDefaultPort, "test", kTurnPassword, "tcp");
+ turn_configs.push_back(turn2);
VerifyTurnConfigurations(turn_configs);
}
diff --git a/talk/app/webrtc/portallocatorfactory.cc b/talk/app/webrtc/portallocatorfactory.cc
index 3ded85a5f8..71cd8358e6 100644
--- a/talk/app/webrtc/portallocatorfactory.cc
+++ b/talk/app/webrtc/portallocatorfactory.cc
@@ -71,18 +71,18 @@ cricket::PortAllocator* PortAllocatorFactory::CreatePortAllocator(
new cricket::BasicPortAllocator(
network_manager_.get(), socket_factory_.get(), stun_addr));
- if (turn.size() > 0) {
- cricket::RelayCredentials credentials(turn[0].username, turn[0].password);
+ for (size_t i = 0; i < turn.size(); ++i) {
+ cricket::RelayCredentials credentials(turn[i].username, turn[i].password);
cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
cricket::ProtocolType protocol;
- if (cricket::StringToProto(turn[0].transport_type.c_str(), &protocol)) {
+ if (cricket::StringToProto(turn[i].transport_type.c_str(), &protocol)) {
relay_server.ports.push_back(cricket::ProtocolAddress(
- turn[0].server, protocol));
+ turn[i].server, protocol));
relay_server.credentials = credentials;
allocator->AddRelay(relay_server);
} else {
- LOG(LS_WARNING) << "Ignoring TURN server " << turn[0].server << ". "
- << "Reason= Incorrect " << turn[0].transport_type
+ LOG(LS_WARNING) << "Ignoring TURN server " << turn[i].server << ". "
+ << "Reason= Incorrect " << turn[i].transport_type
<< " transport parameter.";
}
}
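With the loop above, every TURN configuration passed to CreatePortAllocator() is now added as a relay rather than only the first one. A minimal caller sketch, assuming a |factory| and |stun_configs| already exist in scope; the server name and credentials are placeholders, not values from this change:

  std::vector<webrtc::PortAllocatorFactoryInterface::TurnConfiguration> turn_configs;
  turn_configs.push_back(webrtc::PortAllocatorFactoryInterface::TurnConfiguration(
      "turn.example.com", 3478, "user", "secret", "udp"));
  turn_configs.push_back(webrtc::PortAllocatorFactoryInterface::TurnConfiguration(
      "turn.example.com", 3478, "user", "secret", "tcp"));
  // Both entries now reach allocator->AddRelay(); previously only turn_configs[0] did.
  cricket::PortAllocator* allocator =
      factory->CreatePortAllocator(stun_configs, turn_configs);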
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index 8d9fba17ba..9910a5101b 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -68,6 +68,7 @@ using cricket::kCodecParamStereo;
using cricket::kCodecParamUseInbandFec;
using cricket::kCodecParamSctpProtocol;
using cricket::kCodecParamSctpStreams;
+using cricket::kCodecParamMaxAverageBitrate;
using cricket::kWildcardPayloadType;
using cricket::MediaContentDescription;
using cricket::MediaType;
@@ -1439,7 +1440,8 @@ bool IsFmtpParam(const std::string& name) {
kCodecParamMinPTime, kCodecParamSPropStereo,
kCodecParamStereo, kCodecParamUseInbandFec,
kCodecParamMaxBitrate, kCodecParamMinBitrate, kCodecParamMaxQuantization,
- kCodecParamSctpProtocol, kCodecParamSctpStreams
+ kCodecParamSctpProtocol, kCodecParamSctpStreams,
+ kCodecParamMaxAverageBitrate
};
for (size_t i = 0; i < ARRAY_SIZE(kFmtpParams); ++i) {
if (_stricmp(name.c_str(), kFmtpParams[i]) == 0) {
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
index 4892f58d37..19697d703e 100644
--- a/talk/app/webrtc/webrtcsdp_unittest.cc
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -120,6 +120,7 @@ struct CodecParams {
int sprop_stereo;
int stereo;
int useinband;
+ int maxaveragebitrate;
};
// Reference sdp string
@@ -1121,7 +1122,8 @@ class WebRtcSdpTest : public testing::Test {
std::ostringstream os;
os << "minptime=" << params.min_ptime << " stereo=" << params.stereo
<< " sprop-stereo=" << params.sprop_stereo
- << " useinbandfec=" << params.useinband << "\r\n"
+ << " useinbandfec=" << params.useinband
+ << " maxaveragebitrate=" << params.maxaveragebitrate << "\r\n"
<< "a=ptime:" << params.ptime << "\r\n"
<< "a=maxptime:" << params.max_ptime << "\r\n";
sdp += os.str();
@@ -1142,6 +1144,8 @@ class WebRtcSdpTest : public testing::Test {
VerifyCodecParameter(opus.params, "stereo", params.stereo);
VerifyCodecParameter(opus.params, "sprop-stereo", params.sprop_stereo);
VerifyCodecParameter(opus.params, "useinbandfec", params.useinband);
+ VerifyCodecParameter(opus.params, "maxaveragebitrate",
+ params.maxaveragebitrate);
for (size_t i = 0; i < acd->codecs().size(); ++i) {
cricket::AudioCodec codec = acd->codecs()[i];
VerifyCodecParameter(codec.params, "ptime", params.ptime);
@@ -1854,6 +1858,7 @@ TEST_F(WebRtcSdpTest, DeserializeSerializeCodecParams) {
params.sprop_stereo = 1;
params.stereo = 1;
params.useinband = 1;
+ params.maxaveragebitrate = 128000;
TestDeserializeCodecParams(params, &jdesc_output);
TestSerialize(jdesc_output);
}
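For reference, an Opus fmtp line of the shape exercised by the test above now survives a deserialize/serialize round trip with maxaveragebitrate preserved; the payload type and parameter values here are illustrative only:

  a=fmtp:111 minptime=10 stereo=1 sprop-stereo=1 useinbandfec=1 maxaveragebitrate=128000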
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
index 840cb3d7ad..4f5b22be91 100644
--- a/talk/app/webrtc/webrtcsession.cc
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -81,6 +81,8 @@ const char kWebRTCIdentityPrefix[] = "WebRTC";
const char kSetLocalSdpFailed[] = "SetLocalDescription failed: ";
const char kSetRemoteSdpFailed[] = "SetRemoteDescription failed: ";
const char kCreateChannelFailed[] = "Failed to create channels.";
+const char kBundleWithoutRtcpMux[] = "RTCP-MUX must be enabled when BUNDLE "
+ "is enabled.";
const char kInvalidCandidates[] = "Description contains invalid candidates.";
const char kInvalidSdp[] = "Invalid session description.";
const char kMlineMismatch[] =
@@ -639,6 +641,11 @@ bool WebRtcSession::SetLocalDescription(SessionDescriptionInterface* desc,
if (!desc || !desc->description()) {
return BadLocalSdp(kInvalidSdp, err_desc);
}
+
+ if (!VerifyBundleSettings(desc->description())) {
+ return BadLocalSdp(kBundleWithoutRtcpMux, err_desc);
+ }
+
Action action = GetAction(desc->type());
if (!ExpectSetLocalDescription(action)) {
std::string type = desc->type();
@@ -706,6 +713,11 @@ bool WebRtcSession::SetRemoteDescription(SessionDescriptionInterface* desc,
if (!desc || !desc->description()) {
return BadRemoteSdp(kInvalidSdp, err_desc);
}
+
+ if (!VerifyBundleSettings(desc->description())) {
+ return BadRemoteSdp(kBundleWithoutRtcpMux, err_desc);
+ }
+
Action action = GetAction(desc->type());
if (!ExpectSetRemoteDescription(action)) {
std::string type = desc->type();
@@ -881,12 +893,18 @@ std::string WebRtcSession::BadStateErrMsg(
return desc.str();
}
-void WebRtcSession::SetAudioPlayout(uint32 ssrc, bool enable) {
+void WebRtcSession::SetAudioPlayout(uint32 ssrc, bool enable,
+ cricket::AudioRenderer* renderer) {
ASSERT(signaling_thread()->IsCurrent());
if (!voice_channel_) {
LOG(LS_ERROR) << "SetAudioPlayout: No audio channel exists.";
return;
}
+ if (!voice_channel_->SetRemoteRenderer(ssrc, renderer)) {
+ // SetRenderer() can fail if the ssrc does not match any playout channel.
+ LOG(LS_ERROR) << "SetAudioPlayout: ssrc is incorrect: " << ssrc;
+ return;
+ }
if (!voice_channel_->SetOutputScaling(ssrc, enable ? 1 : 0, enable ? 1 : 0)) {
// Allow that SetOutputScaling fail if |enable| is false but assert
// otherwise. This in the normal case when the underlying media channel has
@@ -896,12 +914,18 @@ void WebRtcSession::SetAudioPlayout(uint32 ssrc, bool enable) {
}
void WebRtcSession::SetAudioSend(uint32 ssrc, bool enable,
- const cricket::AudioOptions& options) {
+ const cricket::AudioOptions& options,
+ cricket::AudioRenderer* renderer) {
ASSERT(signaling_thread()->IsCurrent());
if (!voice_channel_) {
LOG(LS_ERROR) << "SetAudioSend: No audio channel exists.";
return;
}
+ if (!voice_channel_->SetLocalRenderer(ssrc, renderer)) {
+ // SetRenderer() can fail if the ssrc does not match any send channel.
+ LOG(LS_ERROR) << "SetAudioSend: ssrc is incorrect: " << ssrc;
+ return;
+ }
if (!voice_channel_->MuteStream(ssrc, !enable)) {
// Allow that MuteStream fail if |enable| is false but assert otherwise.
// This in the normal case when the underlying media channel has already
@@ -913,22 +937,6 @@ void WebRtcSession::SetAudioSend(uint32 ssrc, bool enable,
voice_channel_->SetChannelOptions(options);
}
-bool WebRtcSession::SetAudioRenderer(uint32 ssrc,
- cricket::AudioRenderer* renderer) {
- if (!voice_channel_) {
- LOG(LS_ERROR) << "SetAudioRenderer: No audio channel exists.";
- return false;
- }
-
- if (!voice_channel_->SetRenderer(ssrc, renderer)) {
- // SetRenderer() can fail if the ssrc is not mapping to the playout channel.
- LOG(LS_ERROR) << "SetAudioRenderer: ssrc is incorrect: " << ssrc;
- return false;
- }
-
- return true;
-}
-
bool WebRtcSession::SetCaptureDevice(uint32 ssrc,
cricket::VideoCapturer* camera) {
ASSERT(signaling_thread()->IsCurrent());
@@ -1339,9 +1347,12 @@ void WebRtcSession::RemoveUnusedChannelsAndTransports(
}
}
+// TODO(mallinath) - Add a correct error code if the channels are not created
+// because BUNDLE is enabled but rtcp-mux is disabled.
bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
// Disabling the BUNDLE flag in PortAllocator if offer disabled it.
- if (state() == STATE_INIT && !desc->HasGroup(cricket::GROUP_TYPE_BUNDLE)) {
+ bool bundle_enabled = desc->HasGroup(cricket::GROUP_TYPE_BUNDLE);
+ if (state() == STATE_INIT && !bundle_enabled) {
port_allocator()->set_flags(port_allocator()->flags() &
~cricket::PORTALLOCATOR_ENABLE_BUNDLE);
}
@@ -1349,7 +1360,7 @@ bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
// Creating the media channels and transport proxies.
const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
if (voice && !voice->rejected && !voice_channel_) {
- if (!CreateVoiceChannel(desc)) {
+ if (!CreateVoiceChannel(voice)) {
LOG(LS_ERROR) << "Failed to create voice channel.";
return false;
}
@@ -1357,7 +1368,7 @@ bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
if (video && !video->rejected && !video_channel_) {
- if (!CreateVideoChannel(desc)) {
+ if (!CreateVideoChannel(video)) {
LOG(LS_ERROR) << "Failed to create video channel.";
return false;
}
@@ -1366,7 +1377,7 @@ bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
if (data_channel_type_ != cricket::DCT_NONE &&
data && !data->rejected && !data_channel_.get()) {
- if (!CreateDataChannel(desc)) {
+ if (!CreateDataChannel(data)) {
LOG(LS_ERROR) << "Failed to create data channel.";
return false;
}
@@ -1375,29 +1386,23 @@ bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
return true;
}
-bool WebRtcSession::CreateVoiceChannel(const SessionDescription* desc) {
- const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
+bool WebRtcSession::CreateVoiceChannel(const cricket::ContentInfo* content) {
voice_channel_.reset(channel_manager_->CreateVoiceChannel(
- this, voice->name, true));
- return voice_channel_ ? true : false;
+ this, content->name, true));
+ return (voice_channel_ != NULL);
}
-bool WebRtcSession::CreateVideoChannel(const SessionDescription* desc) {
- const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
+bool WebRtcSession::CreateVideoChannel(const cricket::ContentInfo* content) {
video_channel_.reset(channel_manager_->CreateVideoChannel(
- this, video->name, true, voice_channel_.get()));
- return video_channel_ ? true : false;
+ this, content->name, true, voice_channel_.get()));
+ return (video_channel_ != NULL);
}
-bool WebRtcSession::CreateDataChannel(const SessionDescription* desc) {
- const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
+bool WebRtcSession::CreateDataChannel(const cricket::ContentInfo* content) {
bool rtcp = (data_channel_type_ == cricket::DCT_RTP);
data_channel_.reset(channel_manager_->CreateDataChannel(
- this, data->name, rtcp, data_channel_type_));
- if (!data_channel_.get()) {
- return false;
- }
- return true;
+ this, content->name, rtcp, data_channel_type_));
+ return (data_channel_ != NULL);
}
void WebRtcSession::CopySavedCandidates(
@@ -1434,4 +1439,36 @@ void WebRtcSession::UpdateSessionDescriptionSecurePolicy(
}
}
+// Returns false if bundle is enabled and rtcp_mux is disabled.
+bool WebRtcSession::VerifyBundleSettings(const SessionDescription* desc) {
+ bool bundle_enabled = desc->HasGroup(cricket::GROUP_TYPE_BUNDLE);
+ if (!bundle_enabled)
+ return true;
+
+ const cricket::ContentGroup* bundle_group =
+ desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
+ ASSERT(bundle_group != NULL);
+
+ const cricket::ContentInfos& contents = desc->contents();
+ for (cricket::ContentInfos::const_iterator citer = contents.begin();
+ citer != contents.end(); ++citer) {
+ const cricket::ContentInfo* content = (&*citer);
+ ASSERT(content != NULL);
+ if (bundle_group->HasContentName(content->name) &&
+ !content->rejected && content->type == cricket::NS_JINGLE_RTP) {
+ if (!HasRtcpMuxEnabled(content))
+ return false;
+ }
+ }
+ // RTCP-MUX is enabled in all the contents.
+ return true;
+}
+
+bool WebRtcSession::HasRtcpMuxEnabled(
+ const cricket::ContentInfo* content) {
+ const cricket::MediaContentDescription* description =
+ static_cast<cricket::MediaContentDescription*>(content->description);
+ return description->rtcp_mux();
+}
+
} // namespace webrtc
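The effect of VerifyBundleSettings() is that a session description shaped like the following sketch (illustrative, not taken from the tests) is rejected with kBundleWithoutRtcpMux, because the audio m-line belongs to the BUNDLE group but carries no a=rtcp-mux attribute:

  a=group:BUNDLE audio video
  m=audio 2345 RTP/SAVPF 111
  a=mid:audio
  ...                          (no a=rtcp-mux on this bundled m-line)
  m=video 2347 RTP/SAVPF 100
  a=mid:video
  a=rtcp-mux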
diff --git a/talk/app/webrtc/webrtcsession.h b/talk/app/webrtc/webrtcsession.h
index f4fe202bdf..d3997f3ff6 100644
--- a/talk/app/webrtc/webrtcsession.h
+++ b/talk/app/webrtc/webrtcsession.h
@@ -63,6 +63,7 @@ class MediaStreamSignaling;
extern const char kSetLocalSdpFailed[];
extern const char kSetRemoteSdpFailed[];
extern const char kCreateChannelFailed[];
+extern const char kBundleWithoutRtcpMux[];
extern const char kInvalidCandidates[];
extern const char kInvalidSdp[];
extern const char kMlineMismatch[];
@@ -158,11 +159,11 @@ class WebRtcSession : public cricket::BaseSession,
virtual bool GetTrackIdBySsrc(uint32 ssrc, std::string* id);
// AudioMediaProviderInterface implementation.
- virtual void SetAudioPlayout(uint32 ssrc, bool enable) OVERRIDE;
+ virtual void SetAudioPlayout(uint32 ssrc, bool enable,
+ cricket::AudioRenderer* renderer) OVERRIDE;
virtual void SetAudioSend(uint32 ssrc, bool enable,
- const cricket::AudioOptions& options) OVERRIDE;
- virtual bool SetAudioRenderer(uint32 ssrc,
- cricket::AudioRenderer* renderer) OVERRIDE;
+ const cricket::AudioOptions& options,
+ cricket::AudioRenderer* renderer) OVERRIDE;
// Implements VideoMediaProviderInterface.
virtual bool SetCaptureDevice(uint32 ssrc,
@@ -243,9 +244,10 @@ class WebRtcSession : public cricket::BaseSession,
bool CreateChannels(const cricket::SessionDescription* desc);
// Helper methods to create media channels.
- bool CreateVoiceChannel(const cricket::SessionDescription* desc);
- bool CreateVideoChannel(const cricket::SessionDescription* desc);
- bool CreateDataChannel(const cricket::SessionDescription* desc);
+ bool CreateVoiceChannel(const cricket::ContentInfo* content);
+ bool CreateVideoChannel(const cricket::ContentInfo* content);
+ bool CreateDataChannel(const cricket::ContentInfo* content);
+
// Copy the candidates from |saved_candidates_| to |dest_desc|.
// The |saved_candidates_| will be cleared after this function call.
void CopySavedCandidates(SessionDescriptionInterface* dest_desc);
@@ -262,6 +264,9 @@ class WebRtcSession : public cricket::BaseSession,
std::string BadStateErrMsg(const std::string& type, State state);
void SetIceConnectionState(PeerConnectionInterface::IceConnectionState state);
+ bool VerifyBundleSettings(const cricket::SessionDescription* desc);
+ bool HasRtcpMuxEnabled(const cricket::ContentInfo* content);
+
talk_base::scoped_ptr<cricket::VoiceChannel> voice_channel_;
talk_base::scoped_ptr<cricket::VideoChannel> video_channel_;
talk_base::scoped_ptr<cricket::DataChannel> data_channel_;
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index 48cccf43b2..71691df2f3 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -83,6 +83,7 @@ using webrtc::kSetLocalSdpFailed;
using webrtc::kSetRemoteSdpFailed;
using webrtc::kPushDownAnswerTDFailed;
using webrtc::kPushDownPranswerTDFailed;
+using webrtc::kBundleWithoutRtcpMux;
static const SocketAddress kClientAddr1("11.11.11.11", 0);
static const SocketAddress kClientAddr2("22.22.22.22", 0);
@@ -315,6 +316,24 @@ class FakeMediaStreamSignaling : public webrtc::MediaStreamSignaling,
cricket::MediaSessionOptions options_;
};
+class FakeAudioRenderer : public cricket::AudioRenderer {
+ public:
+ FakeAudioRenderer() : channel_id_(-1) {}
+
+ virtual void AddChannel(int channel_id) OVERRIDE {
+ ASSERT(channel_id_ == -1);
+ channel_id_ = channel_id;
+ }
+ virtual void RemoveChannel(int channel_id) OVERRIDE {
+ ASSERT(channel_id == channel_id_);
+ channel_id_ = -1;
+ }
+
+ int channel_id() const { return channel_id_; }
+ private:
+ int channel_id_;
+};
+
class WebRtcSessionTest : public testing::Test {
protected:
// TODO Investigate why ChannelManager crashes, if it's created
@@ -889,10 +908,6 @@ TEST_F(WebRtcSessionTest, TestSessionCandidatesWithRtcpMux) {
TestSessionCandidatesWithBundleRtcpMux(false, true);
}
-TEST_F(WebRtcSessionTest, TestSessionCandidatesWithBundle) {
- TestSessionCandidatesWithBundleRtcpMux(true, false);
-}
-
TEST_F(WebRtcSessionTest, TestSessionCandidatesWithBundleRtcpMux) {
TestSessionCandidatesWithBundleRtcpMux(true, true);
}
@@ -1954,6 +1969,36 @@ TEST_F(WebRtcSessionTest, TestDisabledBundleInAnswer) {
EXPECT_TRUE(kAudioTrack1 == voice_channel_->send_streams()[0].id);
}
+// This test verifies that SetLocalDescription and SetRemoteDescription fail
+// if BUNDLE is enabled but rtcp-mux is disabled in m-lines.
+TEST_F(WebRtcSessionTest, TestDisabledRtcpMuxWithBundleEnabled) {
+ WebRtcSessionTest::Init();
+ mediastream_signaling_.SendAudioVideoStream1();
+ EXPECT_TRUE((cricket::PORTALLOCATOR_ENABLE_BUNDLE & allocator_.flags()) ==
+ cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+ FakeConstraints constraints;
+ constraints.SetMandatoryUseRtpMux(true);
+ SessionDescriptionInterface* offer = session_->CreateOffer(&constraints);
+ std::string offer_str;
+ offer->ToString(&offer_str);
+ // Disable rtcp-mux
+ const std::string rtcp_mux = "rtcp-mux";
+ const std::string xrtcp_mux = "xrtcp-mux";
+ talk_base::replace_substrs(rtcp_mux.c_str(), rtcp_mux.length(),
+ xrtcp_mux.c_str(), xrtcp_mux.length(),
+ &offer_str);
+ JsepSessionDescription *local_offer =
+ new JsepSessionDescription(JsepSessionDescription::kOffer);
+ EXPECT_TRUE((local_offer)->Initialize(offer_str, NULL));
+ SetLocalDescriptionExpectError(kBundleWithoutRtcpMux, local_offer);
+ JsepSessionDescription *remote_offer =
+ new JsepSessionDescription(JsepSessionDescription::kOffer);
+ EXPECT_TRUE((remote_offer)->Initialize(offer_str, NULL));
+ SetRemoteDescriptionExpectError(kBundleWithoutRtcpMux, remote_offer);
+ // Trying unmodified SDP.
+ SetLocalDescriptionWithoutError(offer);
+}
+
TEST_F(WebRtcSessionTest, SetAudioPlayout) {
Init();
mediastream_signaling_.SendAudioVideoStream1();
@@ -1966,14 +2011,17 @@ TEST_F(WebRtcSessionTest, SetAudioPlayout) {
EXPECT_TRUE(channel->GetOutputScaling(receive_ssrc, &left_vol, &right_vol));
EXPECT_EQ(1, left_vol);
EXPECT_EQ(1, right_vol);
- session_->SetAudioPlayout(receive_ssrc, false);
+ talk_base::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
+ session_->SetAudioPlayout(receive_ssrc, false, renderer.get());
EXPECT_TRUE(channel->GetOutputScaling(receive_ssrc, &left_vol, &right_vol));
EXPECT_EQ(0, left_vol);
EXPECT_EQ(0, right_vol);
- session_->SetAudioPlayout(receive_ssrc, true);
+ EXPECT_EQ(0, renderer->channel_id());
+ session_->SetAudioPlayout(receive_ssrc, true, NULL);
EXPECT_TRUE(channel->GetOutputScaling(receive_ssrc, &left_vol, &right_vol));
EXPECT_EQ(1, left_vol);
EXPECT_EQ(1, right_vol);
+ EXPECT_EQ(-1, renderer->channel_id());
}
TEST_F(WebRtcSessionTest, SetAudioSend) {
@@ -1989,15 +2037,18 @@ TEST_F(WebRtcSessionTest, SetAudioSend) {
cricket::AudioOptions options;
options.echo_cancellation.Set(true);
- session_->SetAudioSend(send_ssrc, false, options);
+ talk_base::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
+ session_->SetAudioSend(send_ssrc, false, options, renderer.get());
EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
EXPECT_FALSE(channel->options().echo_cancellation.IsSet());
+ EXPECT_EQ(0, renderer->channel_id());
- session_->SetAudioSend(send_ssrc, true, options);
+ session_->SetAudioSend(send_ssrc, true, options, NULL);
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
bool value;
EXPECT_TRUE(channel->options().echo_cancellation.Get(&value));
EXPECT_TRUE(value);
+ EXPECT_EQ(-1, renderer->channel_id());
}
TEST_F(WebRtcSessionTest, SetVideoPlayout) {
diff --git a/talk/base/firewallsocketserver.cc b/talk/base/firewallsocketserver.cc
index ee2c22d48a..be8f2e1831 100644
--- a/talk/base/firewallsocketserver.cc
+++ b/talk/base/firewallsocketserver.cc
@@ -211,6 +211,7 @@ AsyncSocket* FirewallSocketServer::WrapSocket(AsyncSocket* sock, int type) {
(type == SOCK_STREAM && !tcp_sockets_enabled_) ||
(type == SOCK_DGRAM && !udp_sockets_enabled_)) {
LOG(LS_VERBOSE) << "FirewallSocketServer socket creation denied";
+ delete sock;
return NULL;
}
return new FirewallSocket(this, sock, type);
diff --git a/talk/base/sslidentity_unittest.cc b/talk/base/sslidentity_unittest.cc
index 3605c008af..2194517efd 100644
--- a/talk/base/sslidentity_unittest.cc
+++ b/talk/base/sslidentity_unittest.cc
@@ -65,6 +65,10 @@ class SSLIdentityTest : public testing::Test {
talk_base::InitializeSSL();
}
+ static void TearDownTestCase() {
+ talk_base::CleanupSSL();
+ }
+
virtual void SetUp() {
identity1_.reset(talk_base::SSLIdentity::Generate("test1"));
identity2_.reset(talk_base::SSLIdentity::Generate("test2"));
diff --git a/talk/base/virtualsocketserver.cc b/talk/base/virtualsocketserver.cc
index c8cac0e952..6d95c19d1d 100644
--- a/talk/base/virtualsocketserver.cc
+++ b/talk/base/virtualsocketserver.cc
@@ -114,7 +114,8 @@ class VirtualSocket : public AsyncSocket, public MessageHandler {
public:
VirtualSocket(VirtualSocketServer* server, int family, int type, bool async)
: server_(server), family_(family), type_(type), async_(async),
- state_(CS_CLOSED), listen_queue_(NULL), write_enabled_(false),
+ state_(CS_CLOSED), error_(0), listen_queue_(NULL),
+ write_enabled_(false),
network_size_(0), recv_buffer_size_(0), bound_(false), was_any_(false) {
ASSERT((type_ == SOCK_DGRAM) || (type_ == SOCK_STREAM));
ASSERT(async_ || (type_ != SOCK_STREAM)); // We only support async streams
diff --git a/talk/media/base/audiorenderer.h b/talk/media/base/audiorenderer.h
index 23b17da8bb..273312fab3 100644
--- a/talk/media/base/audiorenderer.h
+++ b/talk/media/base/audiorenderer.h
@@ -30,11 +30,18 @@
namespace cricket {
-// Abstract interface for holding the voice channel ID.
+// Abstract interface for holding the voice channel IDs.
class AudioRenderer {
public:
- virtual void SetChannelId(int channel_id) = 0;
- virtual int GetChannelId() const = 0;
+ // Add the WebRtc VoE channel to the renderer.
+ // For a local stream, multiple WebRtc VoE channels can be connected to the
+ // renderer, while for a remote stream only one WebRtc VoE channel can be
+ // connected to it.
+ virtual void AddChannel(int channel_id) = 0;
+
+ // Remove the WebRtc VoE channel from the renderer.
+ // This method is called when the VoE channel is going away.
+ virtual void RemoveChannel(int channel_id) = 0;
protected:
virtual ~AudioRenderer() {}
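A minimal sketch of an implementation of the revised interface, along the lines of the AudioTrackRenderer above and the FakeAudioRenderer used in webrtcsession_unittest.cc; the class name is illustrative, ASSERT is the talk_base assertion macro used elsewhere in this diff, and only a single channel is tracked, matching the remote-stream case described in the comment:

  class SingleChannelAudioRenderer : public cricket::AudioRenderer {
   public:
    SingleChannelAudioRenderer() : channel_id_(-1) {}
    virtual void AddChannel(int channel_id) {
      // For a remote stream only one VoE channel is expected at a time.
      ASSERT(channel_id_ == -1 || channel_id_ == channel_id);
      channel_id_ = channel_id;
    }
    virtual void RemoveChannel(int channel_id) {
      ASSERT(channel_id_ == -1 || channel_id_ == channel_id);
      channel_id_ = -1;
    }
   private:
    int channel_id_;
  };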
diff --git a/talk/media/base/capturerenderadapter.cc b/talk/media/base/capturerenderadapter.cc
index d7b3995d6e..a281e66a33 100644
--- a/talk/media/base/capturerenderadapter.cc
+++ b/talk/media/base/capturerenderadapter.cc
@@ -39,8 +39,17 @@ CaptureRenderAdapter::CaptureRenderAdapter(VideoCapturer* video_capturer)
}
CaptureRenderAdapter::~CaptureRenderAdapter() {
- // has_slots destructor will disconnect us from any signals we may be
- // connected to.
+ // Since the signal we're connecting to is multi-threaded,
+ // disconnect_all() will block until all calls are serviced, meaning any
+ // outstanding calls to OnVideoFrame will be done when this is done, and no
+ // more calls will be serviced by this.
+ // We do this explicitly instead of just letting the has_slots<> destructor
+ // take care of it because we need to do this *before* video_renderers_ is
+ // cleared by the destructor; otherwise we could mess with it while
+ // OnVideoFrame is running.
+ // We *don't* take capture_crit_ here since it could deadlock with the lock
+ // taken by the video frame signal.
+ disconnect_all();
}
CaptureRenderAdapter* CaptureRenderAdapter::Create(
diff --git a/talk/media/base/constants.cc b/talk/media/base/constants.cc
index 8fd1bdc33b..5529c2abcf 100644
--- a/talk/media/base/constants.cc
+++ b/talk/media/base/constants.cc
@@ -49,13 +49,15 @@ const char* kCodecParamAssociatedPayloadType = "apt";
const char* kOpusCodecName = "opus";
+// draft-spittka-payload-rtp-opus-03.txt
const char* kCodecParamPTime = "ptime";
const char* kCodecParamMaxPTime = "maxptime";
-
const char* kCodecParamMinPTime = "minptime";
const char* kCodecParamSPropStereo = "sprop-stereo";
const char* kCodecParamStereo = "stereo";
const char* kCodecParamUseInbandFec = "useinbandfec";
+const char* kCodecParamMaxAverageBitrate = "maxaveragebitrate";
+
const char* kCodecParamSctpProtocol = "protocol";
const char* kCodecParamSctpStreams = "streams";
diff --git a/talk/media/base/constants.h b/talk/media/base/constants.h
index 8df4805252..afcfb13ffc 100644
--- a/talk/media/base/constants.h
+++ b/talk/media/base/constants.h
@@ -59,6 +59,7 @@ extern const char* kCodecParamMinPTime;
extern const char* kCodecParamSPropStereo;
extern const char* kCodecParamStereo;
extern const char* kCodecParamUseInbandFec;
+extern const char* kCodecParamMaxAverageBitrate;
extern const char* kCodecParamSctpProtocol;
extern const char* kCodecParamSctpStreams;
diff --git a/talk/media/base/fakemediaengine.h b/talk/media/base/fakemediaengine.h
index f0f83a057d..7f4c9e3ac3 100644
--- a/talk/media/base/fakemediaengine.h
+++ b/talk/media/base/fakemediaengine.h
@@ -36,6 +36,7 @@
#include "talk/base/buffer.h"
#include "talk/base/stringutils.h"
+#include "talk/media/base/audiorenderer.h"
#include "talk/media/base/mediaengine.h"
#include "talk/media/base/rtputils.h"
#include "talk/media/base/streamparams.h"
@@ -69,18 +70,15 @@ template <class Base> class RtpHelper : public Base {
const std::list<std::string>& rtcp_packets() const { return rtcp_packets_; }
bool SendRtp(const void* data, int len) {
- if (!sending_ || !Base::network_interface_) {
+ if (!sending_) {
return false;
}
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return Base::network_interface_->SendPacket(&packet);
+ return Base::SendPacket(&packet);
}
bool SendRtcp(const void* data, int len) {
- if (!Base::network_interface_) {
- return false;
- }
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return Base::network_interface_->SendRtcp(&packet);
+ return Base::SendRtcp(&packet);
}
bool CheckRtp(const void* data, int len) {
@@ -294,9 +292,44 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
output_scalings_.erase(ssrc);
return true;
}
- virtual bool SetRenderer(uint32 ssrc, AudioRenderer* renderer) {
- // TODO(xians): Implement this.
- return false;
+ virtual bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ std::map<uint32, AudioRenderer*>::iterator it =
+ remote_renderers_.find(ssrc);
+ if (renderer) {
+ if (it != remote_renderers_.end()) {
+ ASSERT(it->second == renderer);
+ } else {
+ remote_renderers_.insert(std::make_pair(ssrc, renderer));
+ renderer->AddChannel(0);
+ }
+ } else {
+ if (it != remote_renderers_.end()) {
+ it->second->RemoveChannel(0);
+ remote_renderers_.erase(it);
+ } else {
+ return false;
+ }
+ }
+ return true;
+ }
+ virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ std::map<uint32, AudioRenderer*>::iterator it = local_renderers_.find(ssrc);
+ if (renderer) {
+ if (it != local_renderers_.end()) {
+ ASSERT(it->second == renderer);
+ } else {
+ local_renderers_.insert(std::make_pair(ssrc, renderer));
+ renderer->AddChannel(0);
+ }
+ } else {
+ if (it != local_renderers_.end()) {
+ it->second->RemoveChannel(0);
+ local_renderers_.erase(it);
+ } else {
+ return false;
+ }
+ }
+ return true;
}
virtual bool GetActiveStreams(AudioInfo::StreamList* streams) { return true; }
@@ -394,6 +427,8 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
bool ringback_tone_loop_;
int time_since_last_typing_;
AudioOptions options_;
+ std::map<uint32, AudioRenderer*> local_renderers_;
+ std::map<uint32, AudioRenderer*> remote_renderers_;
};
// A helper function to compare the FakeVoiceMediaChannel::DtmfInfo.
diff --git a/talk/media/base/filemediaengine.cc b/talk/media/base/filemediaengine.cc
index fe48311711..1a3547c0ce 100644
--- a/talk/media/base/filemediaengine.cc
+++ b/talk/media/base/filemediaengine.cc
@@ -245,12 +245,11 @@ bool RtpSenderReceiver::ReadNextPacket(RtpDumpPacket* packet) {
}
bool RtpSenderReceiver::SendRtpPacket(const void* data, size_t len) {
- if (!media_channel_ || !media_channel_->network_interface()) {
+ if (!media_channel_)
return false;
- }
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return media_channel_->network_interface()->SendPacket(&packet);
+ return media_channel_->SendPacket(&packet);
}
///////////////////////////////////////////////////////////////////////////
diff --git a/talk/media/base/filemediaengine.h b/talk/media/base/filemediaengine.h
index 70335de59d..2c8525d963 100644
--- a/talk/media/base/filemediaengine.h
+++ b/talk/media/base/filemediaengine.h
@@ -193,7 +193,10 @@ class FileVoiceChannel : public VoiceMediaChannel {
}
virtual bool SetPlayout(bool playout) { return true; }
virtual bool SetSend(SendFlags flag);
- virtual bool SetRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ virtual bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ return false;
+ }
+ virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
return false;
}
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) { return true; }
diff --git a/talk/media/base/mediachannel.h b/talk/media/base/mediachannel.h
index 82b9ddbeee..07d5f905ad 100644
--- a/talk/media/base/mediachannel.h
+++ b/talk/media/base/mediachannel.h
@@ -413,9 +413,9 @@ class MediaChannel : public sigslot::has_slots<> {
MediaChannel() : network_interface_(NULL) {}
virtual ~MediaChannel() {}
- // Gets/sets the abstract inteface class for sending RTP/RTCP data.
- NetworkInterface *network_interface() { return network_interface_; }
+ // Sets the abstract interface class for sending RTP/RTCP data.
virtual void SetInterface(NetworkInterface *iface) {
+ talk_base::CritScope cs(&network_interface_crit_);
network_interface_ = iface;
}
@@ -451,8 +451,40 @@ class MediaChannel : public sigslot::has_slots<> {
// Sets the rate control to use when sending data.
virtual bool SetSendBandwidth(bool autobw, int bps) = 0;
- protected:
- NetworkInterface *network_interface_;
+ // Base method to send packet using NetworkInterface.
+ bool SendPacket(talk_base::Buffer* packet) {
+ return DoSendPacket(packet, false);
+ }
+
+ bool SendRtcp(talk_base::Buffer* packet) {
+ return DoSendPacket(packet, true);
+ }
+
+ int SetOption(NetworkInterface::SocketType type,
+ talk_base::Socket::Option opt,
+ int option) {
+ talk_base::CritScope cs(&network_interface_crit_);
+ if (!network_interface_)
+ return -1;
+
+ return network_interface_->SetOption(type, opt, option);
+ }
+
+ private:
+ bool DoSendPacket(talk_base::Buffer* packet, bool rtcp) {
+ talk_base::CritScope cs(&network_interface_crit_);
+ if (!network_interface_)
+ return false;
+
+ return (!rtcp) ? network_interface_->SendPacket(packet) :
+ network_interface_->SendRtcp(packet);
+ }
+
+ // |network_interface_| can be accessed from the worker thread and from any
+ // MediaEngine thread. This critical section protects access to the
+ // |network_interface_| object.
+ talk_base::CriticalSection network_interface_crit_;
+ NetworkInterface* network_interface_;
};
enum SendFlags {
@@ -712,8 +744,10 @@ class VoiceMediaChannel : public MediaChannel {
virtual bool SetPlayout(bool playout) = 0;
// Starts or stops sending (and potentially capture) of local audio.
virtual bool SetSend(SendFlags flag) = 0;
- // Sets the renderer object to be used for the specified audio stream.
- virtual bool SetRenderer(uint32 ssrc, AudioRenderer* renderer) = 0;
+ // Sets the renderer object to be used for the specified remote audio stream.
+ virtual bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer) = 0;
+ // Sets the renderer object to be used for the specified local audio stream.
+ virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) = 0;
// Gets current energy levels for all incoming streams.
virtual bool GetActiveStreams(AudioInfo::StreamList* actives) = 0;
// Get the current energy level of the stream sent to the speaker.
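Since |network_interface_| is now private and guarded by |network_interface_crit_|, derived channels are expected to route traffic through the new protected helpers instead of dereferencing the interface pointer directly. A sketch under that assumption (the MyVoiceChannel name is illustrative):

  bool MyVoiceChannel::SendOutgoingRtp(const void* data, size_t len) {
    talk_base::Buffer packet(data, len, cricket::kMaxRtpPacketLen);
    // MediaChannel::SendPacket() takes the critical section, checks that a
    // NetworkInterface is still attached, and forwards the buffer to it.
    return MediaChannel::SendPacket(&packet);
  }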
diff --git a/talk/media/base/rtpdataengine.cc b/talk/media/base/rtpdataengine.cc
index 5a39a54f3f..2a23c10059 100644
--- a/talk/media/base/rtpdataengine.cc
+++ b/talk/media/base/rtpdataengine.cc
@@ -370,7 +370,7 @@ bool RtpDataMediaChannel::SendData(
// << ", timestamp=" << header.timestamp
// << ", len=" << data_len;
- network_interface()->SendPacket(&packet);
+ MediaChannel::SendPacket(&packet);
send_limiter_->Use(packet_len, now);
if (result) {
*result = SDR_SUCCESS;
diff --git a/talk/media/base/videoengine_unittest.h b/talk/media/base/videoengine_unittest.h
index 0679f6a6e8..e9f7612dfa 100644
--- a/talk/media/base/videoengine_unittest.h
+++ b/talk/media/base/videoengine_unittest.h
@@ -1238,7 +1238,8 @@ class VideoMediaChannelTest : public testing::Test,
// Wait until frame of right size is captured.
EXPECT_TRUE_WAIT(renderer_.num_rendered_frames() >= captured_frames &&
format.width == renderer_.width() &&
- format.height == renderer_.height(), kTimeout);
+ format.height == renderer_.height() &&
+ !renderer_.black_frame(), kTimeout);
EXPECT_GE(renderer_.num_rendered_frames(), captured_frames);
EXPECT_EQ(format.width, renderer_.width());
EXPECT_EQ(format.height, renderer_.height());
diff --git a/talk/media/sctp/sctpdataengine.cc b/talk/media/sctp/sctpdataengine.cc
index 935ccb8ba8..7fcb2398e4 100644
--- a/talk/media/sctp/sctpdataengine.cc
+++ b/talk/media/sctp/sctpdataengine.cc
@@ -649,7 +649,7 @@ void SctpDataMediaChannel::OnPacketFromSctpToNetwork(
<< "SCTP seems to have made a poacket that is bigger "
"than its official MTU.";
}
- network_interface()->SendPacket(buffer);
+ MediaChannel::SendPacket(buffer);
}
void SctpDataMediaChannel::OnMessage(talk_base::Message* msg) {
diff --git a/talk/media/webrtc/fakewebrtcvideoengine.h b/talk/media/webrtc/fakewebrtcvideoengine.h
index a64beb95c8..1c406ece6b 100644
--- a/talk/media/webrtc/fakewebrtcvideoengine.h
+++ b/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -597,6 +597,10 @@ class FakeWebRtcVideoEngine
channels_.erase(channel);
return 0;
}
+#ifdef USE_WEBRTC_DEV_BRANCH
+ WEBRTC_STUB(RegisterCpuOveruseObserver,
+ (int channel, webrtc::CpuOveruseObserver* observer));
+#endif
WEBRTC_STUB(ConnectAudioChannel, (const int, const int));
WEBRTC_STUB(DisconnectAudioChannel, (const int));
WEBRTC_FUNC(StartSend, (const int channel)) {
diff --git a/talk/media/webrtc/webrtcmediaengine.h b/talk/media/webrtc/webrtcmediaengine.h
index 1a2de8e1b5..a2ee658761 100644
--- a/talk/media/webrtc/webrtcmediaengine.h
+++ b/talk/media/webrtc/webrtcmediaengine.h
@@ -104,12 +104,6 @@ class WebRtcMediaEngine : public cricket::MediaEngineInterface {
const Device* in_device, const Device* out_device) OVERRIDE {
return delegate_->SetSoundDevices(in_device, out_device);
}
- virtual bool SetVideoCapturer(VideoCapturer* capturer) OVERRIDE {
- return delegate_->SetVideoCapturer(capturer);
- }
- virtual VideoCapturer* GetVideoCapturer() const {
- return delegate_->GetVideoCapturer();
- }
virtual bool GetOutputVolume(int* level) OVERRIDE {
return delegate_->GetOutputVolume(level);
}
@@ -125,9 +119,6 @@ class WebRtcMediaEngine : public cricket::MediaEngineInterface {
virtual bool SetLocalRenderer(VideoRenderer* renderer) OVERRIDE {
return delegate_->SetLocalRenderer(renderer);
}
- virtual bool SetVideoCapture(bool capture) OVERRIDE {
- return delegate_->SetVideoCapture(capture);
- }
virtual const std::vector<AudioCodec>& audio_codecs() OVERRIDE {
return delegate_->audio_codecs();
}
diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index e95e1422d7..19ccebfa05 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -2528,10 +2528,9 @@ bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
void WebRtcVideoMediaChannel::SetInterface(NetworkInterface* iface) {
MediaChannel::SetInterface(iface);
// Set the RTP recv/send buffer to a bigger size
- if (network_interface_) {
- network_interface_->SetOption(NetworkInterface::ST_RTP,
- talk_base::Socket::OPT_RCVBUF,
- kVideoRtpBufferSize);
+ MediaChannel::SetOption(NetworkInterface::ST_RTP,
+ talk_base::Socket::OPT_RCVBUF,
+ kVideoRtpBufferSize);
// TODO(sriniv): Remove or re-enable this.
// As part of b/8030474, send-buffer is size now controlled through
@@ -2539,7 +2538,6 @@ void WebRtcVideoMediaChannel::SetInterface(NetworkInterface* iface) {
// network_interface_->SetOption(NetworkInterface::ST_RTP,
// talk_base::Socket::OPT_SNDBUF,
// kVideoRtpBufferSize);
- }
}
void WebRtcVideoMediaChannel::UpdateAspectRatio(int ratio_w, int ratio_h) {
@@ -3350,21 +3348,15 @@ void WebRtcVideoMediaChannel::OnMessage(talk_base::Message* msg) {
int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
int len) {
- if (!network_interface_) {
- return -1;
- }
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return network_interface_->SendPacket(&packet) ? len : -1;
+ return MediaChannel::SendPacket(&packet) ? len : -1;
}
int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
const void* data,
int len) {
- if (!network_interface_) {
- return -1;
- }
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return network_interface_->SendRtcp(&packet) ? len : -1;
+ return MediaChannel::SendRtcp(&packet) ? len : -1;
}
void WebRtcVideoMediaChannel::QueueBlackFrame(uint32 ssrc, int64 timestamp,
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index c0522c0703..a974d0eb46 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -120,6 +120,24 @@ static const int kOpusMonoBitrate = 32000;
// This value is equivalent to 5 seconds of audio data at 20 ms per packet.
static const int kNackMaxPackets = 250;
static const int kOpusStereoBitrate = 64000;
+// draft-spittka-payload-rtp-opus-03
+// Opus bitrate should be in the range between 6000 and 510000.
+static const int kOpusMinBitrate = 6000;
+static const int kOpusMaxBitrate = 510000;
+
+#if defined(CHROMEOS)
+// Ensure we open the file in a writeable path on ChromeOS. This workaround
+// can be removed when it's possible to specify a filename for audio option
+// based AEC dumps.
+//
+// TODO(grunell): Use a string in the options instead of hardcoding it here
+// and let the embedder choose the filename (crbug.com/264223).
+//
+// NOTE(ajm): Don't use this hardcoded /tmp path on non-ChromeOS platforms.
+static const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump";
+#else
+static const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
+#endif
// Dumps an AudioCodec in RFC 2327-ish format.
static std::string ToString(const AudioCodec& codec) {
@@ -346,6 +364,25 @@ static bool IsOpusStereoEnabled(const AudioCodec& codec) {
return param->second == kParamValueTrue;
}
+static bool IsValidOpusBitrate(int bitrate) {
+ return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
+}
+
+// Returns 0 if params[kCodecParamMaxAverageBitrate] is not defined or invalid.
+// Returns the value of params[kCodecParamMaxAverageBitrate] otherwise.
+static int GetOpusBitrateFromParams(const AudioCodec& codec) {
+ int bitrate = 0;
+ if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) {
+ return 0;
+ }
+ if (!IsValidOpusBitrate(bitrate)) {
+ LOG(LS_WARNING) << "Codec parameter \"maxaveragebitrate\" has an "
+ << "invalid value: " << bitrate;
+ return 0;
+ }
+ return bitrate;
+}
+
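A minimal standalone sketch of the bitrate clamping added above: the "maxaveragebitrate" parameter is honored only when it falls inside the 6000 to 510000 range from draft-spittka-payload-rtp-opus-03. FakeCodec and its string-map params are hypothetical stand-ins for AudioCodec; the real GetParam signature is not reproduced here:

#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

static const int kOpusMinBitrate = 6000;    // draft-spittka-payload-rtp-opus-03
static const int kOpusMaxBitrate = 510000;

static bool IsValidOpusBitrate(int bitrate) {
  return bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate;
}

// Hypothetical stand-in for AudioCodec: payload parameters as a string map.
struct FakeCodec {
  std::map<std::string, std::string> params;
};

// Returns the "maxaveragebitrate" parameter, or 0 if absent or out of range.
static int GetOpusBitrateFromParams(const FakeCodec& codec) {
  std::map<std::string, std::string>::const_iterator it =
      codec.params.find("maxaveragebitrate");
  if (it == codec.params.end())
    return 0;
  int bitrate = std::atoi(it->second.c_str());
  return IsValidOpusBitrate(bitrate) ? bitrate : 0;
}

int main() {
  FakeCodec codec;
  codec.params["maxaveragebitrate"] = "200000";
  std::cout << GetOpusBitrateFromParams(codec) << std::endl;  // prints 200000
  codec.params["maxaveragebitrate"] = "5999";                 // below the minimum
  std::cout << GetOpusBitrateFromParams(codec) << std::endl;  // prints 0
  return 0;
}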
void WebRtcVoiceEngine::ConstructCodecs() {
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
@@ -713,10 +750,8 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
bool aec_dump;
if (options.aec_dump.Get(&aec_dump)) {
- // TODO(grunell): Use a string in the options instead and let the embedder
- // choose the filename.
if (aec_dump)
- StartAecDump("audio.aecdump");
+ StartAecDump(kAecDumpByAudioOptionFilename);
else
StopAecDump();
}
@@ -1430,6 +1465,19 @@ void WebRtcVoiceEngine::StopAecDump() {
}
}
+// This struct relies on the generated copy constructor and assignment operator
+// since it is used in a std::map.
+struct WebRtcVoiceMediaChannel::WebRtcVoiceChannelInfo {
+ WebRtcVoiceChannelInfo() : channel(-1), renderer(NULL) {}
+ WebRtcVoiceChannelInfo(int ch, AudioRenderer* r)
+ : channel(ch),
+ renderer(r) {}
+ ~WebRtcVoiceChannelInfo() {}
+
+ int channel;
+ AudioRenderer* renderer;
+};
+
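Because the struct is cheap to copy, it can be stored by value in the ssrc-keyed map. A minimal sketch of that layout, with hypothetical names (ChannelInfo, AudioRendererStub) standing in for the real types:

#include <cstddef>
#include <iostream>
#include <map>

class AudioRendererStub {};  // stand-in for cricket::AudioRenderer

struct ChannelInfo {
  ChannelInfo() : channel(-1), renderer(NULL) {}
  ChannelInfo(int ch, AudioRendererStub* r) : channel(ch), renderer(r) {}
  int channel;
  AudioRendererStub* renderer;  // not owned
};

typedef std::map<unsigned int, ChannelInfo> ChannelMap;

int main() {
  ChannelMap receive_channels;
  // Inserted by value; the compiler-generated copy constructor is used here.
  receive_channels.insert(std::make_pair(1234u, ChannelInfo(7, NULL)));
  std::cout << receive_channels[1234u].channel << std::endl;  // prints 7
  return 0;
}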
// WebRtcVoiceMediaChannel
WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
: WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
@@ -1443,6 +1491,7 @@ WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
desired_send_(SEND_NOTHING),
send_(SEND_NOTHING),
send_ssrc_(0),
+ local_renderer_(NULL),
default_receive_ssrc_(0) {
engine->RegisterChannel(this);
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
@@ -1480,8 +1529,8 @@ WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
// Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
// Remove any remaining streams.
- while (!mux_channels_.empty()) {
- RemoveRecvStream(mux_channels_.begin()->first);
+ while (!receive_channels_.empty()) {
+ RemoveRecvStream(receive_channels_.begin()->first);
}
// Delete the primary channel.
@@ -1517,7 +1566,6 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
bool WebRtcVoiceMediaChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
// Set the payload types to be used for incoming media.
- bool ret = true;
LOG(LS_INFO) << "Setting receive voice codecs:";
std::vector<AudioCodec> new_codecs;
@@ -1525,7 +1573,7 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
// the payload type of codecs that is already configured since we might
// already be receiving packets with that payload type.
for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
- it != codecs.end() && ret; ++it) {
+ it != codecs.end(); ++it) {
AudioCodec old_codec;
if (FindCodec(recv_codecs_, *it, &old_codec)) {
if (old_codec.id != it->id) {
@@ -1548,24 +1596,34 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
PausePlayout();
}
+ bool ret = true;
for (std::vector<AudioCodec>::const_iterator it = new_codecs.begin();
it != new_codecs.end() && ret; ++it) {
webrtc::CodecInst voe_codec;
if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
LOG(LS_INFO) << ToString(*it);
voe_codec.pltype = it->id;
- if (engine()->voe()->codec()->SetRecPayloadType(
- voe_channel(), voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
- ret = false;
+ if (default_receive_ssrc_ == 0) {
+ // Set the receive codecs on the default channel explicitly if the
+ // default channel is not used by |receive_channels_|; this happens in
+ // conference mode or in non-conference mode when there is no playout
+ // channel.
+ // TODO(xians): Figure out how we use the default channel in conference
+ // mode.
+ if (engine()->voe()->codec()->SetRecPayloadType(
+ voe_channel(), voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
+ ret = false;
+ }
}
// Set the receive codecs on all receiving channels.
- for (ChannelMap::iterator it = mux_channels_.begin();
- it != mux_channels_.end() && ret; ++it) {
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end() && ret; ++it) {
if (engine()->voe()->codec()->SetRecPayloadType(
- it->second, voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, it->second, ToString(voe_codec));
+ it->second.channel, voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, it->second.channel,
+ ToString(voe_codec));
ret = false;
}
}
@@ -1616,15 +1674,31 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
if (IsOpus(*it)) {
if (IsOpusStereoEnabled(*it)) {
voe_codec.channels = 2;
- if (it->bitrate == 0) {
+ if (!IsValidOpusBitrate(it->bitrate)) {
+ if (it->bitrate != 0) {
+ LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
+ << it->bitrate
+ << ") with default opus stereo bitrate: "
+ << kOpusStereoBitrate;
+ }
voe_codec.rate = kOpusStereoBitrate;
}
} else {
voe_codec.channels = 1;
- if (it->bitrate == 0) {
+ if (!IsValidOpusBitrate(it->bitrate)) {
+ if (it->bitrate != 0) {
+ LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
+ << it->bitrate
+ << ") with default opus mono bitrate: "
+ << kOpusMonoBitrate;
+ }
voe_codec.rate = kOpusMonoBitrate;
}
}
+ int bitrate_from_params = GetOpusBitrateFromParams(*it);
+ if (bitrate_from_params != 0) {
+ voe_codec.rate = bitrate_from_params;
+ }
}
// Find the DTMF telephone event "codec" and tell VoiceEngine about it.
@@ -1706,8 +1780,8 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
} else {
send_codec = voe_codec;
- nack_enabled_ = IsNackEnabled(*it);
- SetNack(send_ssrc_, voe_channel(), nack_enabled_);
+ nack_enabled_ = IsNackEnabled(*it);
+ SetNack(send_ssrc_, voe_channel(), nack_enabled_);
}
first = false;
// Set the codec immediately, since SetVADStatus() depends on whether
@@ -1716,9 +1790,9 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
return false;
}
}
- for (ChannelMap::iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- SetNack(it->first, it->second, nack_enabled_);
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ SetNack(it->first, it->second.channel, nack_enabled_);
}
@@ -1815,16 +1889,17 @@ bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
return true;
}
+ // Change the playout of all channels to the new state.
bool result = true;
- if (mux_channels_.empty()) {
+ if (receive_channels_.empty()) {
// Only toggle the default channel if we don't have any other channels.
result = SetPlayout(voe_channel(), playout);
}
- for (ChannelMap::iterator it = mux_channels_.begin();
- it != mux_channels_.end() && result; ++it) {
- if (!SetPlayout(it->second, playout)) {
- LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " << it->second
- << " failed";
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end() && result; ++it) {
+ if (!SetPlayout(it->second.channel, playout)) {
+ LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
+ << it->second.channel << " failed";
result = false;
}
}
@@ -1942,13 +2017,15 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
// Set the SSRC on the receive channels.
// Receive channels have to have the same SSRC in order to send receiver
// reports with this SSRC.
- for (ChannelMap::const_iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- int channel_id = it->second;
- if (engine()->voe()->rtp()->SetLocalSSRC(channel_id,
- sp.first_ssrc()) != 0) {
- LOG_RTCERR1(SetLocalSSRC, it->first);
- return false;
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ int channel_id = it->second.channel;
+ if (channel_id != voe_channel()) {
+ if (engine()->voe()->rtp()->SetLocalSSRC(channel_id,
+ sp.first_ssrc()) != 0) {
+ LOG_RTCERR1(SetLocalSSRC, it->first);
+ return false;
+ }
}
}
@@ -1961,6 +2038,10 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
send_ssrc_ = sp.first_ssrc();
if (desired_send_ != send_)
return ChangeSend(desired_send_);
+
+ if (local_renderer_)
+ local_renderer_->AddChannel(voe_channel());
+
return true;
}
@@ -1968,31 +2049,38 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
if (ssrc != send_ssrc_) {
return false;
}
+
+ if (local_renderer_)
+ local_renderer_->RemoveChannel(voe_channel());
+
send_ssrc_ = 0;
ChangeSend(SEND_NOTHING);
return true;
}
bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
- talk_base::CritScope lock(&mux_channels_cs_);
-
- // Reuse default channel for recv stream in 1:1 call.
- bool conference_mode;
- if (!options_.conference_mode.Get(&conference_mode) || !conference_mode) {
- LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
- << " reuse default channel";
- default_receive_ssrc_ = sp.first_ssrc();
- return true;
- }
+ talk_base::CritScope lock(&receive_channels_cs_);
if (!VERIFY(sp.ssrcs.size() == 1))
return false;
uint32 ssrc = sp.first_ssrc();
- if (mux_channels_.find(ssrc) != mux_channels_.end()) {
+ if (receive_channels_.find(ssrc) != receive_channels_.end()) {
+ LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
return false;
}
+ // Reuse the default channel for the recv stream in a non-conference call
+ // when the default channel is not being used.
+ if (!InConferenceMode() && default_receive_ssrc_ == 0) {
+ LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
+ << " reuse default channel";
+ default_receive_ssrc_ = sp.first_ssrc();
+ receive_channels_.insert(std::make_pair(
+ default_receive_ssrc_, WebRtcVoiceChannelInfo(voe_channel(), NULL)));
+ return SetPlayout(voe_channel(), playout_);
+ }
+
// Create a new channel for receiving audio data.
int channel = engine()->voe()->base()->CreateChannel();
if (channel == -1) {
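A minimal sketch of the decision AddRecvStream now makes before creating a new VoE channel: reuse the default channel only outside conference mode and only while it is unclaimed (default_receive_ssrc_ == 0). The state struct and function names are hypothetical, not part of the real class:

#include <iostream>

struct RecvStreamState {
  bool conference_mode;
  unsigned int default_receive_ssrc;  // 0 means the default channel is free
};

// Returns true if the incoming ssrc should reuse the default VoE channel
// instead of getting a freshly created channel.
bool ShouldReuseDefaultChannel(const RecvStreamState& s) {
  return !s.conference_mode && s.default_receive_ssrc == 0;
}

int main() {
  RecvStreamState s = { false, 0 };
  std::cout << ShouldReuseDefaultChannel(s) << std::endl;  // 1: 1:1 call, default free
  s.default_receive_ssrc = 1234;
  std::cout << ShouldReuseDefaultChannel(s) << std::endl;  // 0: default already taken
  s = RecvStreamState();
  s.conference_mode = true;
  std::cout << ShouldReuseDefaultChannel(s) << std::endl;  // 0: conference mode
  return 0;
}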
@@ -2040,19 +2128,24 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
}
}
- if (mux_channels_.empty() && playout_) {
- // This is the first stream in a multi user meeting. We can now
- // disable playback of the default stream. This since the default
- // stream will probably have received some initial packets before
- // the new stream was added. This will mean that the CN state from
- // the default channel will be mixed in with the other streams
- // throughout the whole meeting, which might be disturbing.
- LOG(LS_INFO) << "Disabling playback on the default voice channel";
- SetPlayout(voe_channel(), false);
+ if (InConferenceMode()) {
+ // To be on par with the video, voe_channel() is not used for receiving in
+ // a conference call.
+ if (receive_channels_.empty() && default_receive_ssrc_ == 0 && playout_) {
+ // This is the first stream in a multi user meeting. We can now
+ // disable playback of the default stream. This since the default
+ // stream will probably have received some initial packets before
+ // the new stream was added. This will mean that the CN state from
+ // the default channel will be mixed in with the other streams
+ // throughout the whole meeting, which might be disturbing.
+ LOG(LS_INFO) << "Disabling playback on the default voice channel";
+ SetPlayout(voe_channel(), false);
+ }
}
SetNack(ssrc, channel, nack_enabled_);
- mux_channels_[ssrc] = channel;
+ receive_channels_.insert(
+ std::make_pair(ssrc, WebRtcVoiceChannelInfo(channel, NULL)));
// TODO(juberti): We should rollback the add if SetPlayout fails.
LOG(LS_INFO) << "New audio stream " << ssrc
@@ -2062,53 +2155,131 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
}
bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
- talk_base::CritScope lock(&mux_channels_cs_);
- ChannelMap::iterator it = mux_channels_.find(ssrc);
+ talk_base::CritScope lock(&receive_channels_cs_);
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it == receive_channels_.end())
+ return false;
- if (it != mux_channels_.end()) {
- if (engine()->voe()->network()->DeRegisterExternalTransport(
- it->second) == -1) {
- LOG_RTCERR1(DeRegisterExternalTransport, it->second);
- }
+ if (ssrc == default_receive_ssrc_) {
+ ASSERT(voe_channel() == it->second.channel);
+ // Recycle the default channel for the recv stream.
+ if (playout_)
+ SetPlayout(voe_channel(), false);
+
+ if (it->second.renderer)
+ it->second.renderer->RemoveChannel(voe_channel());
+
+ default_receive_ssrc_ = 0;
+ receive_channels_.erase(it);
+ return true;
+ }
+
+ // Non default channel.
+ // Notify the renderer that channel is going away.
+ if (it->second.renderer)
+ it->second.renderer->RemoveChannel(it->second.channel);
+
+ if (engine()->voe()->network()->DeRegisterExternalTransport(
+ it->second.channel) == -1) {
+ LOG_RTCERR1(DeRegisterExternalTransport, it->second.channel);
+ }
+
+ LOG(LS_INFO) << "Removing audio stream " << ssrc
+ << " with VoiceEngine channel #"
+ << it->second.channel << ".";
+ if (engine()->voe()->base()->DeleteChannel(it->second.channel) == -1) {
+ LOG_RTCERR1(DeleteChannel, voe_channel());
+ // Erase the entry anyhow.
+ receive_channels_.erase(it);
+ return false;
+ }
+
+ receive_channels_.erase(it);
+ bool enable_default_channel_playout = false;
+ if (receive_channels_.empty()) {
+ // The last stream was removed. We can now enable the default
+ // channel for new channels to be played out immediately without
+ // waiting for AddStream messages.
+ // We do this for both conference mode and non-conference mode.
+ // TODO(oja): Does the default channel still have its CN state?
+ enable_default_channel_playout = true;
+ }
+ if (!InConferenceMode() && receive_channels_.size() == 1 &&
+ default_receive_ssrc_ != 0) {
+ // Only the default channel is active, enable the playout on default
+ // channel.
+ enable_default_channel_playout = true;
+ }
+ if (enable_default_channel_playout && playout_) {
+ LOG(LS_INFO) << "Enabling playback on the default voice channel";
+ SetPlayout(voe_channel(), true);
+ }
+
+ return true;
+}
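A minimal sketch of the playout decision encoded at the end of the new RemoveRecvStream: the default channel's playout is re-enabled either when the last receive channel goes away or when, outside conference mode, only the default channel itself remains. The helper name is hypothetical:

#include <cstddef>
#include <iostream>

// Re-enable playout on the default channel when no receive channels remain,
// or when (outside conference mode) only the default channel itself remains.
bool ShouldEnableDefaultPlayout(size_t receive_channel_count,
                                bool in_conference_mode,
                                unsigned int default_receive_ssrc) {
  if (receive_channel_count == 0)
    return true;
  return !in_conference_mode && receive_channel_count == 1 &&
         default_receive_ssrc != 0;
}

int main() {
  std::cout << ShouldEnableDefaultPlayout(0, true, 0) << std::endl;      // 1
  std::cout << ShouldEnableDefaultPlayout(1, false, 1234) << std::endl;  // 1
  std::cout << ShouldEnableDefaultPlayout(2, false, 1234) << std::endl;  // 0
  return 0;
}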
- LOG(LS_INFO) << "Removing audio stream " << ssrc
- << " with VoiceEngine channel #"
- << it->second << ".";
- if (engine()->voe()->base()->DeleteChannel(it->second) == -1) {
- LOG_RTCERR1(DeleteChannel, voe_channel());
+bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
+ AudioRenderer* renderer) {
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it == receive_channels_.end()) {
+ if (renderer) {
+ // Return an error if trying to set a valid renderer with an invalid ssrc.
+ LOG_RTCERR1(SetRemoteRenderer, ssrc);
return false;
}
- mux_channels_.erase(it);
- if (mux_channels_.empty() && playout_) {
- // The last stream was removed. We can now enable the default
- // channel for new channels to be played out immediately without
- // waiting for AddStream messages.
- // TODO(oja): Does the default channel still have it's CN state?
- LOG(LS_INFO) << "Enabling playback on the default voice channel";
- SetPlayout(voe_channel(), true);
+ // The channel has likely gone away; do nothing.
+ return true;
+ }
+
+ if (renderer) {
+ ASSERT(it->second.renderer == NULL || it->second.renderer == renderer);
+ if (!it->second.renderer) {
+ renderer->AddChannel(it->second.channel);
}
+ } else if (it->second.renderer) {
+ // |renderer| == NULL; remove the channel from the renderer.
+ it->second.renderer->RemoveChannel(it->second.channel);
}
+
+ it->second.renderer = renderer;
return true;
}
-bool WebRtcVoiceMediaChannel::SetRenderer(uint32 ssrc,
- AudioRenderer* renderer) {
- ASSERT(renderer != NULL);
- int channel = GetReceiveChannelNum(ssrc);
- if (channel == -1)
+bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
+ AudioRenderer* renderer) {
+ if (!renderer && !local_renderer_)
+ return true;
+
+ int channel = GetSendChannelNum(ssrc);
+ if (channel == -1) {
+ // Invalidate the |local_renderer_| before quitting.
+ if (!renderer)
+ local_renderer_ = NULL;
+
return false;
+ }
- renderer->SetChannelId(channel);
+ if (renderer) {
+ ASSERT(local_renderer_ == NULL || local_renderer_ == renderer);
+ if (!local_renderer_)
+ renderer->AddChannel(channel);
+ } else {
+ local_renderer_->RemoveChannel(channel);
+ }
+
+ local_renderer_ = renderer;
return true;
}
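The new renderer setters follow a simple attach/detach contract: register the VoE channel with the renderer when one is attached for the first time, and unregister it when the renderer is cleared. A minimal sketch of that contract with hypothetical stand-in types (FakeRenderer, RendererSlot):

#include <cassert>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for the AudioRenderer interface assumed by the diff.
class FakeRenderer {
 public:
  FakeRenderer() : channel(-1) {}
  void AddChannel(int ch) { channel = ch; }
  void RemoveChannel(int ch) { if (channel == ch) channel = -1; }
  int channel;
};

struct RendererSlot {
  RendererSlot() : channel(-1), renderer(NULL) {}
  int channel;
  FakeRenderer* renderer;
};

// Attach or detach a renderer for one channel, mirroring the diff's contract.
void SetRenderer(RendererSlot* slot, FakeRenderer* renderer) {
  if (renderer) {
    assert(slot->renderer == NULL || slot->renderer == renderer);
    if (!slot->renderer)
      renderer->AddChannel(slot->channel);
  } else if (slot->renderer) {
    slot->renderer->RemoveChannel(slot->channel);
  }
  slot->renderer = renderer;
}

int main() {
  RendererSlot slot;
  slot.channel = 3;
  FakeRenderer r;
  SetRenderer(&slot, &r);
  std::cout << r.channel << std::endl;  // 3: channel registered with renderer
  SetRenderer(&slot, NULL);
  std::cout << r.channel << std::endl;  // -1: channel removed again
  return 0;
}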
bool WebRtcVoiceMediaChannel::GetActiveStreams(
AudioInfo::StreamList* actives) {
+ // In conference mode, the default channel should not be in
+ // |receive_channels_|.
actives->clear();
- for (ChannelMap::iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- int level = GetOutputLevel(it->second);
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ int level = GetOutputLevel(it->second.channel);
if (level > 0) {
actives->push_back(std::make_pair(it->first, level));
}
@@ -2119,9 +2290,9 @@ bool WebRtcVoiceMediaChannel::GetActiveStreams(
int WebRtcVoiceMediaChannel::GetOutputLevel() {
// return the highest output level of all streams
int highest = GetOutputLevel(voe_channel());
- for (ChannelMap::iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- int level = GetOutputLevel(it->second);
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ int level = GetOutputLevel(it->second.channel);
highest = talk_base::_max(level, highest);
}
return highest;
@@ -2154,14 +2325,17 @@ void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
bool WebRtcVoiceMediaChannel::SetOutputScaling(
uint32 ssrc, double left, double right) {
- talk_base::CritScope lock(&mux_channels_cs_);
+ talk_base::CritScope lock(&receive_channels_cs_);
// Collect the channels to scale the output volume.
std::vector<int> channels;
if (0 == ssrc) { // Collect all channels, including the default one.
- channels.push_back(voe_channel());
- for (ChannelMap::const_iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- channels.push_back(it->second);
+ // Default channel is not in receive_channels_ if it is not being used for
+ // playout.
+ if (default_receive_ssrc_ == 0)
+ channels.push_back(voe_channel());
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ channels.push_back(it->second.channel);
}
} else { // Collect only the channel of the specified ssrc.
int channel = GetReceiveChannelNum(ssrc);
@@ -2203,7 +2377,7 @@ bool WebRtcVoiceMediaChannel::GetOutputScaling(
uint32 ssrc, double* left, double* right) {
if (!left || !right) return false;
- talk_base::CritScope lock(&mux_channels_cs_);
+ talk_base::CritScope lock(&receive_channels_cs_);
// Determine which channel based on ssrc.
int channel = (0 == ssrc) ? voe_channel() : GetReceiveChannelNum(ssrc);
if (channel == -1) {
@@ -2506,11 +2680,12 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
info->senders.push_back(sinfo);
- // Build the list of receivers, one for each mux channel, or 1 in a 1:1 call.
+ // Build the list of receivers, one for each receiving channel, or 1 in
+ // a 1:1 call.
std::vector<int> channels;
- for (ChannelMap::const_iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- channels.push_back(it->second);
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ channels.push_back(it->second.channel);
}
if (channels.empty()) {
channels.push_back(voe_channel());
@@ -2572,7 +2747,7 @@ void WebRtcVoiceMediaChannel::GetLastMediaError(
}
bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
- talk_base::CritScope lock(&mux_channels_cs_);
+ talk_base::CritScope lock(&receive_channels_cs_);
ASSERT(ssrc != NULL);
if (channel_num == voe_channel()) {
unsigned local_ssrc = 0;
@@ -2591,9 +2766,9 @@ bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
return true;
} else {
// Check whether this is a receiving channel.
- for (ChannelMap::const_iterator it = mux_channels_.begin();
- it != mux_channels_.end(); ++it) {
- if (it->second == channel_num) {
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ if (it->second.channel == channel_num) {
*ssrc = it->first;
return true;
}
@@ -2614,9 +2789,9 @@ int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
}
int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
- ChannelMap::iterator it = mux_channels_.find(ssrc);
- if (it != mux_channels_.end())
- return it->second;
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it != receive_channels_.end())
+ return it->second.channel;
return (ssrc == default_receive_ssrc_) ? voe_channel() : -1;
}
diff --git a/talk/media/webrtc/webrtcvoiceengine.h b/talk/media/webrtc/webrtcvoiceengine.h
index edf07f42bb..036117fea6 100644
--- a/talk/media/webrtc/webrtcvoiceengine.h
+++ b/talk/media/webrtc/webrtcvoiceengine.h
@@ -77,6 +77,7 @@ class WebRtcMonitorStream : public webrtc::OutStream {
};
class AudioDeviceModule;
+class AudioRenderer;
class VoETraceWrapper;
class VoEWrapper;
class VoiceProcessor;
@@ -282,10 +283,6 @@ class WebRtcMediaChannel : public T, public webrtc::Transport {
protected:
// implements Transport interface
virtual int SendPacket(int channel, const void *data, int len) {
- if (!T::network_interface_) {
- return -1;
- }
-
// We need to store the sequence number to be able to pick up
// the same sequence when the device is restarted.
// TODO(oja): Remove when WebRtc has fixed the problem.
@@ -297,19 +294,20 @@ class WebRtcMediaChannel : public T, public webrtc::Transport {
LOG(INFO) << "WebRtcVoiceMediaChannel sends first packet seqnum="
<< seq_num;
}
- sequence_number_ = seq_num;
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return T::network_interface_->SendPacket(&packet) ? len : -1;
- }
- virtual int SendRTCPPacket(int channel, const void *data, int len) {
- if (!T::network_interface_) {
+ if (!T::SendPacket(&packet)) {
return -1;
}
+ sequence_number_ = seq_num;
+ return len;
+ }
+ virtual int SendRTCPPacket(int channel, const void *data, int len) {
talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
- return T::network_interface_->SendRtcp(&packet) ? len : -1;
+ return T::SendRtcp(&packet) ? len : -1;
}
+
int sequence_number() const {
return sequence_number_;
}
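The reordered SendPacket above records sequence_number_ only after the base-class send succeeds, so a failed send no longer advances the remembered sequence. A minimal sketch of that ordering with hypothetical types (FakeTransport, SeqTrackingSender); only the big-endian sequence-number field layout of RTP is assumed:

#include <iostream>

// Hypothetical minimal transport used to illustrate the ordering only.
struct FakeTransport {
  bool should_fail;
  bool Send(const void* /*data*/, int /*len*/) { return !should_fail; }
};

class SeqTrackingSender {
 public:
  explicit SeqTrackingSender(FakeTransport* t)
      : transport_(t), sequence_number_(-1) {}
  // Returns len on success, -1 on failure; remembers the RTP sequence number
  // only for packets that were actually handed to the transport.
  int SendPacket(const void* data, int len) {
    const unsigned char* bytes = static_cast<const unsigned char*>(data);
    int seq_num = (bytes[2] << 8) | bytes[3];  // RTP sequence number field
    if (!transport_->Send(data, len))
      return -1;
    sequence_number_ = seq_num;
    return len;
  }
  int sequence_number() const { return sequence_number_; }

 private:
  FakeTransport* transport_;
  int sequence_number_;
};

int main() {
  FakeTransport transport;
  transport.should_fail = false;
  SeqTrackingSender sender(&transport);
  unsigned char rtp[12] = {0x80, 0, 0x12, 0x34};  // seq 0x1234
  std::cout << sender.SendPacket(rtp, 12) << " "
            << sender.sequence_number() << std::endl;  // 12 4660
  transport.should_fail = true;
  unsigned char rtp2[12] = {0x80, 0, 0x56, 0x78};
  std::cout << sender.SendPacket(rtp2, 12) << " "
            << sender.sequence_number() << std::endl;  // -1 4660 (unchanged)
  return 0;
}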
@@ -348,7 +346,8 @@ class WebRtcVoiceMediaChannel
virtual bool RemoveSendStream(uint32 ssrc);
virtual bool AddRecvStream(const StreamParams& sp);
virtual bool RemoveRecvStream(uint32 ssrc);
- virtual bool SetRenderer(uint32 ssrc, AudioRenderer* renderer);
+ virtual bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer);
+ virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer);
virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
virtual int GetOutputLevel();
virtual int GetTimeSinceLastTyping();
@@ -393,12 +392,17 @@ class WebRtcVoiceMediaChannel
static Error WebRtcErrorToChannelError(int err_code);
private:
+ struct WebRtcVoiceChannelInfo;
+
void SetNack(uint32 ssrc, int channel, bool nack_enabled);
bool SetSendCodec(const webrtc::CodecInst& send_codec);
bool ChangePlayout(bool playout);
bool ChangeSend(SendFlags send);
+ bool InConferenceMode() const {
+ return options_.conference_mode.GetWithDefaultIfUnset(false);
+ }
- typedef std::map<uint32, int> ChannelMap;
+ typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;
talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
std::set<int> ringback_channels_; // channels playing ringback
std::vector<AudioCodec> recv_codecs_;
@@ -411,16 +415,25 @@ class WebRtcVoiceMediaChannel
SendFlags desired_send_;
SendFlags send_;
+ // TODO(xians): Add support for multiple send channels.
uint32 send_ssrc_;
+ // Weak pointer to the renderer of the local audio track. It is owned by the
+ // track and will be set to NULL when the track goes away or the channel is
+ // deleted. Used to notify the audio track that the media channel is added
+ // or removed.
+ AudioRenderer* local_renderer_;
uint32 default_receive_ssrc_;
- ChannelMap mux_channels_; // for multiple sources
- // mux_channels_ can be read from WebRtc callback thread. Accesses off the
- // WebRtc thread must be synchronized with edits on the worker thread. Reads
- // on the worker thread are ok.
+ // Note that in non-conference mode the default channel (voe_channel()) can
+ // serve as both the send channel and a receive channel; it only appears in
+ // receive_channels_ when a non-zero default_receive_ssrc_ is set.
+ ChannelMap receive_channels_; // for multiple sources
+ // receive_channels_ can be read from WebRtc callback thread. Access from
+ // the WebRtc thread must be synchronized with edits on the worker thread.
+ // Reads on the worker thread are ok.
//
// Do not lock this on the VoE media processor thread; potential for deadlock
// exists.
- mutable talk_base::CriticalSection mux_channels_cs_;
+ mutable talk_base::CriticalSection receive_channels_cs_;
};
} // namespace cricket
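A minimal sketch of the locking discipline described in the comments above: the worker thread is the only writer, so it may read without the lock, while edits and reads from other threads take the same lock. std::mutex stands in for talk_base::CriticalSection here, and the class is hypothetical:

#include <map>
#include <mutex>

// Single-writer map: only the worker thread edits it, so worker-thread reads
// can skip the lock; edits and reads from other threads must take it.
class GuardedChannelMap {
 public:
  // Worker thread only: guarded so concurrent readers see a consistent map.
  void Insert(unsigned int ssrc, int channel) {
    std::lock_guard<std::mutex> lock(mutex_);
    channels_[ssrc] = channel;
  }
  // Callback (non-worker) thread: must hold the same lock as edits.
  int FindFromCallbackThread(unsigned int ssrc) const {
    std::lock_guard<std::mutex> lock(mutex_);
    return FindImpl(ssrc);
  }
  // Worker thread: an unlocked read is safe because the worker thread is the
  // only writer, so no edit can race with this call.
  int FindFromWorkerThread(unsigned int ssrc) const { return FindImpl(ssrc); }

 private:
  int FindImpl(unsigned int ssrc) const {
    std::map<unsigned int, int>::const_iterator it = channels_.find(ssrc);
    return it == channels_.end() ? -1 : it->second;
  }
  mutable std::mutex mutex_;
  std::map<unsigned int, int> channels_;
};

int main() {
  GuardedChannelMap channels;
  channels.Insert(1234, 7);
  return channels.FindFromWorkerThread(1234) == 7 ? 0 : 1;
}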
diff --git a/talk/media/webrtc/webrtcvoiceengine_unittest.cc b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
index 41b81fc0a5..b476b4dc54 100644
--- a/talk/media/webrtc/webrtcvoiceengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -702,6 +702,33 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
EXPECT_EQ(32000, gcodec.rate);
}
+// Test that with bitrate=invalid and stereo=0,
+// channels and bitrate are 1 and 32000.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kOpusCodec);
+ codecs[0].params["stereo"] = "0";
+ webrtc::CodecInst gcodec;
+
+ // A bitrate outside the range of 6000 to 510000 is considered invalid and
+ // ignored.
+ codecs[0].bitrate = 5999;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_STREQ("opus", gcodec.plname);
+ EXPECT_EQ(1, gcodec.channels);
+ EXPECT_EQ(32000, gcodec.rate);
+
+ codecs[0].bitrate = 510001;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_STREQ("opus", gcodec.plname);
+ EXPECT_EQ(1, gcodec.channels);
+ EXPECT_EQ(32000, gcodec.rate);
+}
+
// Test that with bitrate=0 and stereo=1,
// channels and bitrate are 2 and 64000.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
@@ -719,6 +746,33 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
EXPECT_EQ(64000, gcodec.rate);
}
+// Test that with bitrate=invalid and stereo=1,
+// channels and bitrate are 2 and 64000.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kOpusCodec);
+ codecs[0].params["stereo"] = "1";
+ webrtc::CodecInst gcodec;
+
+ // A bitrate outside the range of 6000 to 510000 is considered invalid and
+ // ignored.
+ codecs[0].bitrate = 5999;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_STREQ("opus", gcodec.plname);
+ EXPECT_EQ(2, gcodec.channels);
+ EXPECT_EQ(64000, gcodec.rate);
+
+ codecs[0].bitrate = 510001;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_STREQ("opus", gcodec.plname);
+ EXPECT_EQ(2, gcodec.channels);
+ EXPECT_EQ(64000, gcodec.rate);
+}
+
// Test that with bitrate=N and stereo unset,
// channels and bitrate are 1 and N.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
@@ -787,6 +841,35 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
EXPECT_STREQ("opus", gcodec.plname);
}
+// Test that bitrate will be overridden by the "maxaveragebitrate" parameter.
+// Also test that the "maxaveragebitrate" can't be set to values outside the
+// range of 6000 to 510000.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusMaxAverageBitrate) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kOpusCodec);
+ codecs[0].bitrate = 30000;
+ webrtc::CodecInst gcodec;
+
+ // Ignore if less than 6000.
+ codecs[0].params["maxaveragebitrate"] = "5999";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(30000, gcodec.rate);
+
+ // Ignore if larger than 510000.
+ codecs[0].params["maxaveragebitrate"] = "510001";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(30000, gcodec.rate);
+
+ codecs[0].params["maxaveragebitrate"] = "200000";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(200000, gcodec.rate);
+}
+
// Test that we can enable NACK with opus.
TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNack) {
EXPECT_TRUE(SetupEngine());
diff --git a/talk/p2p/base/dtlstransportchannel_unittest.cc b/talk/p2p/base/dtlstransportchannel_unittest.cc
index cbc3abb1ed..8d124b0854 100644
--- a/talk/p2p/base/dtlstransportchannel_unittest.cc
+++ b/talk/p2p/base/dtlstransportchannel_unittest.cc
@@ -149,13 +149,13 @@ class DtlsTestClient : public sigslot::has_slots<> {
cricket::NS_GINGLE_P2P : cricket::NS_JINGLE_ICE_UDP;
cricket::TransportDescription local_desc(
transport_type, std::vector<std::string>(), kIceUfrag1, kIcePwd1,
- cricket::ICEMODE_FULL, local_fingerprint.release(),
+ cricket::ICEMODE_FULL, local_fingerprint.get(),
cricket::Candidates());
ASSERT_TRUE(transport_->SetLocalTransportDescription(local_desc,
cricket::CA_OFFER));
cricket::TransportDescription remote_desc(
transport_type, std::vector<std::string>(), kIceUfrag1, kIcePwd1,
- cricket::ICEMODE_FULL, remote_fingerprint.release(),
+ cricket::ICEMODE_FULL, remote_fingerprint.get(),
cricket::Candidates());
ASSERT_TRUE(transport_->SetRemoteTransportDescription(remote_desc,
cricket::CA_ANSWER));
@@ -178,9 +178,9 @@ class DtlsTestClient : public sigslot::has_slots<> {
} else {
ASSERT_TRUE(received_dtls_client_hello_);
ASSERT_FALSE(received_dtls_server_hello_);
- }
+ }
}
-
+
void CheckSrtp(const std::string& expected_cipher) {
for (std::vector<cricket::DtlsTransportChannelWrapper*>::iterator it =
channels_.begin(); it != channels_.end(); ++it) {
@@ -306,7 +306,7 @@ class DtlsTestClient : public sigslot::has_slots<> {
ASSERT_TRUE(VerifyPacket(data, size, NULL));
}
}
- }
+ }
}
private:
@@ -332,6 +332,10 @@ class DtlsTransportChannelTest : public testing::Test {
talk_base::InitializeSSL();
}
+ static void TearDownTestCase() {
+ talk_base::CleanupSSL();
+ }
+
DtlsTransportChannelTest() :
client1_("P1", talk_base::Thread::Current(),
talk_base::Thread::Current()),
diff --git a/talk/p2p/base/port_unittest.cc b/talk/p2p/base/port_unittest.cc
index bb6b22f36d..1c6752b844 100644
--- a/talk/p2p/base/port_unittest.cc
+++ b/talk/p2p/base/port_unittest.cc
@@ -1061,7 +1061,8 @@ TEST_F(PortTest, TestLoopbackCallAsIce) {
ASSERT_TRUE_WAIT(lport->last_stun_msg() != NULL, 1000);
msg = lport->last_stun_msg();
EXPECT_EQ(STUN_BINDING_REQUEST, msg->type());
- IceMessage* modified_req = CreateStunMessage(STUN_BINDING_REQUEST);
+ talk_base::scoped_ptr<IceMessage> modified_req(
+ CreateStunMessage(STUN_BINDING_REQUEST));
const StunByteStringAttribute* username_attr = msg->GetByteString(
STUN_ATTR_USERNAME);
modified_req->AddAttribute(new StunByteStringAttribute(
@@ -1075,7 +1076,7 @@ TEST_F(PortTest, TestLoopbackCallAsIce) {
lport->Reset();
talk_base::scoped_ptr<ByteBuffer> buf(new ByteBuffer());
- WriteStunMessage(modified_req, buf.get());
+ WriteStunMessage(modified_req.get(), buf.get());
conn1->OnReadPacket(buf->Data(), buf->Length());
ASSERT_TRUE_WAIT(lport->last_stun_msg() != NULL, 1000);
msg = lport->last_stun_msg();
diff --git a/talk/p2p/base/portallocator.cc b/talk/p2p/base/portallocator.cc
index 00ba4d8ce2..369af863dc 100644
--- a/talk/p2p/base/portallocator.cc
+++ b/talk/p2p/base/portallocator.cc
@@ -39,6 +39,7 @@ PortAllocatorSession::PortAllocatorSession(const std::string& content_name,
: content_name_(content_name),
component_(component),
flags_(flags),
+ generation_(0),
// If PORTALLOCATOR_ENABLE_SHARED_UFRAG flag is not enabled, ignore the
// incoming ufrag and pwd, which will cause each Port to generate one
// by itself.
diff --git a/talk/p2p/base/pseudotcp_unittest.cc b/talk/p2p/base/pseudotcp_unittest.cc
index b9a672e87b..e18159ea9e 100644
--- a/talk/p2p/base/pseudotcp_unittest.cc
+++ b/talk/p2p/base/pseudotcp_unittest.cc
@@ -174,7 +174,7 @@ class PseudoTcpTestBase : public testing::Test,
void UpdateLocalClock() { UpdateClock(&local_, MSG_LCLOCK); }
void UpdateRemoteClock() { UpdateClock(&remote_, MSG_RCLOCK); }
void UpdateClock(PseudoTcp* tcp, uint32 message) {
- long interval; // NOLINT
+ long interval = 0; // NOLINT
tcp->GetNextClock(PseudoTcp::Now(), interval);
interval = talk_base::_max<int>(interval, 0L); // sometimes interval is < 0
talk_base::Thread::Current()->Clear(this, message);
diff --git a/talk/p2p/base/relayserver.cc b/talk/p2p/base/relayserver.cc
index 2c05f23db6..95aa08c39c 100644
--- a/talk/p2p/base/relayserver.cc
+++ b/talk/p2p/base/relayserver.cc
@@ -108,6 +108,8 @@ RelayServer::~RelayServer() {
delete internal_sockets_[i];
for (size_t i = 0; i < external_sockets_.size(); ++i)
delete external_sockets_[i];
+ for (size_t i = 0; i < removed_sockets_.size(); ++i)
+ delete removed_sockets_[i];
while (!server_sockets_.empty()) {
talk_base::AsyncSocket* socket = server_sockets_.begin()->first;
server_sockets_.erase(server_sockets_.begin()->first);
@@ -127,6 +129,7 @@ void RelayServer::RemoveInternalSocket(talk_base::AsyncPacketSocket* socket) {
std::find(internal_sockets_.begin(), internal_sockets_.end(), socket);
ASSERT(iter != internal_sockets_.end());
internal_sockets_.erase(iter);
+ removed_sockets_.push_back(socket);
socket->SignalReadPacket.disconnect(this);
}
@@ -142,6 +145,7 @@ void RelayServer::RemoveExternalSocket(talk_base::AsyncPacketSocket* socket) {
std::find(external_sockets_.begin(), external_sockets_.end(), socket);
ASSERT(iter != external_sockets_.end());
external_sockets_.erase(iter);
+ removed_sockets_.push_back(socket);
socket->SignalReadPacket.disconnect(this);
}
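The new removed_sockets_ list holds sockets that were unregistered during the server's lifetime and deletes them in the destructor, rather than leaking them or deleting them at removal time. A minimal sketch of that pattern with hypothetical types (FakeSocket, SocketRegistry):

#include <algorithm>
#include <cassert>
#include <vector>

class FakeSocket {};  // stand-in for talk_base::AsyncPacketSocket

class SocketRegistry {
 public:
  ~SocketRegistry() {
    // Sockets removed during the registry's lifetime are only deleted here.
    for (size_t i = 0; i < active_.size(); ++i) delete active_[i];
    for (size_t i = 0; i < removed_.size(); ++i) delete removed_[i];
  }
  void Add(FakeSocket* s) { active_.push_back(s); }
  void Remove(FakeSocket* s) {
    std::vector<FakeSocket*>::iterator it =
        std::find(active_.begin(), active_.end(), s);
    assert(it != active_.end());
    active_.erase(it);
    removed_.push_back(s);  // keep it alive until the registry is destroyed
  }

 private:
  std::vector<FakeSocket*> active_;
  std::vector<FakeSocket*> removed_;
};

int main() {
  SocketRegistry registry;
  FakeSocket* s = new FakeSocket();
  registry.Add(s);
  registry.Remove(s);  // not deleted yet; freed when registry goes out of scope
  return 0;
}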
diff --git a/talk/p2p/base/relayserver.h b/talk/p2p/base/relayserver.h
index a5fcc243dd..f3bee7eca8 100644
--- a/talk/p2p/base/relayserver.h
+++ b/talk/p2p/base/relayserver.h
@@ -2,26 +2,26 @@
* libjingle
* Copyright 2004--2005, Google Inc.
*
- * Redistribution and use in source and binary forms, with or without
+ * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
- * 1. Redistributions of source code must retain the above copyright notice,
+ * 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
+ * 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
@@ -96,6 +96,7 @@ class RelayServer : public talk_base::MessageHandler,
bool log_bindings_;
SocketList internal_sockets_;
SocketList external_sockets_;
+ SocketList removed_sockets_;
ServerSocketMap server_sockets_;
BindingMap bindings_;
ConnectionMap connections_;
diff --git a/talk/p2p/base/session.cc b/talk/p2p/base/session.cc
index 3478a33aa6..86428901cf 100644
--- a/talk/p2p/base/session.cc
+++ b/talk/p2p/base/session.cc
@@ -1374,7 +1374,7 @@ bool Session::OnDescriptionInfoMessage(const SessionMessage& msg,
return false;
}
- ContentInfos updated_contents = description_info.ClearContents();
+ ContentInfos& updated_contents = description_info.contents;
// TODO: Currently, reflector sends back
// video stream updates even for an audio-only call, which causes
diff --git a/talk/p2p/base/session_unittest.cc b/talk/p2p/base/session_unittest.cc
index 69028acdc0..ae0c383feb 100644
--- a/talk/p2p/base/session_unittest.cc
+++ b/talk/p2p/base/session_unittest.cc
@@ -872,6 +872,10 @@ class TestClient : public sigslot::has_slots<> {
}
delete session_manager;
delete client;
+ for (std::deque<buzz::XmlElement*>::iterator it = sent_stanzas.begin();
+ it != sent_stanzas.end(); ++it) {
+ delete *it;
+ }
}
void Construct(cricket::PortAllocator* pa,
@@ -899,11 +903,8 @@ class TestClient : public sigslot::has_slots<> {
new_remote_description = false;
last_content_action = cricket::CA_OFFER;
last_content_source = cricket::CS_LOCAL;
- last_expected_sent_stanza = NULL;
session = NULL;
last_session_state = cricket::BaseSession::STATE_INIT;
- chan_a = NULL;
- chan_b = NULL;
blow_up_on_error = true;
error_count = 0;
@@ -925,7 +926,7 @@ class TestClient : public sigslot::has_slots<> {
}
const buzz::XmlElement* stanza() const {
- return last_expected_sent_stanza;
+ return last_expected_sent_stanza.get();
}
cricket::BaseSession::State session_state() const {
@@ -964,7 +965,7 @@ class TestClient : public sigslot::has_slots<> {
EXPECT_TRUE(!sent_stanzas.empty()) <<
"Found no stanza when expected " << expected;
- last_expected_sent_stanza = sent_stanzas.front();
+ last_expected_sent_stanza.reset(sent_stanzas.front());
sent_stanzas.pop_front();
std::string actual = last_expected_sent_stanza->Str();
@@ -1094,12 +1095,12 @@ class TestClient : public sigslot::has_slots<> {
// multiple contents with single components, but not both.
int component_a = 1;
int component_b = (content_name_a == content_name_b) ? 2 : 1;
- chan_a = new ChannelHandler(
+ chan_a.reset(new ChannelHandler(
session->CreateChannel(content_name_a, channel_name_a, component_a),
- channel_name_a);
- chan_b = new ChannelHandler(
+ channel_name_a));
+ chan_b.reset(new ChannelHandler(
session->CreateChannel(content_name_b, channel_name_b, component_b),
- channel_name_b);
+ channel_name_b));
}
int* next_message_id;
@@ -1119,15 +1120,15 @@ class TestClient : public sigslot::has_slots<> {
cricket::ContentAction last_content_action;
cricket::ContentSource last_content_source;
std::deque<buzz::XmlElement*> sent_stanzas;
- buzz::XmlElement* last_expected_sent_stanza;
+ talk_base::scoped_ptr<buzz::XmlElement> last_expected_sent_stanza;
cricket::SessionManager* session_manager;
TestSessionClient* client;
cricket::PortAllocator* port_allocator_;
cricket::Session* session;
cricket::BaseSession::State last_session_state;
- ChannelHandler* chan_a;
- ChannelHandler* chan_b;
+ talk_base::scoped_ptr<ChannelHandler> chan_a;
+ talk_base::scoped_ptr<ChannelHandler> chan_b;
bool blow_up_on_error;
int error_count;
};
@@ -1409,8 +1410,8 @@ class SessionTest : public testing::Test {
EXPECT_TRUE(responder_proxy_chan_b->impl() != NULL);
EXPECT_EQ(responder_proxy_chan_a->impl(), responder_proxy_chan_b->impl());
}
- TestSendRecv(initiator->chan_a, initiator->chan_b,
- responder->chan_a, responder->chan_b);
+ TestSendRecv(initiator->chan_a.get(), initiator->chan_b.get(),
+ responder->chan_a.get(), responder->chan_b.get());
if (resulting_protocol == PROTOCOL_JINGLE) {
// Deliver a description-info message to the initiator and check if the
@@ -1855,8 +1856,8 @@ class SessionTest : public testing::Test {
initiator->session_state(), kEventTimeout);
EXPECT_EQ_WAIT(cricket::BaseSession::STATE_INPROGRESS,
responder->session_state(), kEventTimeout);
- TestSendRecv(initiator->chan_a, initiator->chan_b,
- responder->chan_a, responder->chan_b);
+ TestSendRecv(initiator->chan_a.get(), initiator->chan_b.get(),
+ responder->chan_a.get(), responder->chan_b.get());
}
void TestCandidatesInInitiateAndAccept(const std::string& test_name) {
@@ -1993,8 +1994,8 @@ class SessionTest : public testing::Test {
initiator->session_state(), kEventTimeout);
EXPECT_EQ_WAIT(cricket::BaseSession::STATE_INPROGRESS,
responder->session_state(), kEventTimeout);
- TestSendRecv(initiator->chan_a, initiator->chan_b,
- responder->chan_a, responder->chan_b);
+ TestSendRecv(initiator->chan_a.get(), initiator->chan_b.get(),
+ responder->chan_a.get(), responder->chan_b.get());
}
// Tests that when an initiator terminates right after initiate,
diff --git a/talk/p2p/base/transport.h b/talk/p2p/base/transport.h
index b93513e005..698ec321be 100644
--- a/talk/p2p/base/transport.h
+++ b/talk/p2p/base/transport.h
@@ -146,6 +146,19 @@ enum TransportState {
// Stats that we can return about the connections for a transport channel.
// TODO(hta): Rename to ConnectionStats
struct ConnectionInfo {
+ ConnectionInfo()
+ : best_connection(false),
+ writable(false),
+ readable(false),
+ timeout(false),
+ new_connection(false),
+ rtt(0),
+ sent_total_bytes(0),
+ sent_bytes_second(0),
+ recv_total_bytes(0),
+ recv_bytes_second(0),
+ key(NULL) {}
+
bool best_connection; // Is this the best connection we have?
bool writable; // Has this connection received a STUN response?
bool readable; // Has this connection received a STUN request?
diff --git a/talk/p2p/base/transportdescription.h b/talk/p2p/base/transportdescription.h
index bbca295096..3406118c7e 100644
--- a/talk/p2p/base/transportdescription.h
+++ b/talk/p2p/base/transportdescription.h
@@ -71,7 +71,7 @@ enum IceMode {
typedef std::vector<Candidate> Candidates;
struct TransportDescription {
- TransportDescription() {}
+ TransportDescription() : ice_mode(ICEMODE_FULL) {}
TransportDescription(const std::string& transport_type,
const std::vector<std::string>& transport_options,
diff --git a/talk/p2p/client/connectivitychecker.cc b/talk/p2p/client/connectivitychecker.cc
index 4cc73af76c..1075bd6947 100644
--- a/talk/p2p/client/connectivitychecker.cc
+++ b/talk/p2p/client/connectivitychecker.cc
@@ -61,6 +61,7 @@ class TestHttpPortAllocator : public HttpPortAllocator {
void TestHttpPortAllocatorSession::ConfigReady(PortConfiguration* config) {
SignalConfigReady(username(), password(), config, proxy_);
+ delete config;
}
void TestHttpPortAllocatorSession::OnRequestDone(
diff --git a/talk/session/media/call.cc b/talk/session/media/call.cc
index bd22aab854..7ba6b10916 100644
--- a/talk/session/media/call.cc
+++ b/talk/session/media/call.cc
@@ -591,17 +591,18 @@ VideoContentDescription* Call::CreateVideoStreamUpdate(
void Call::SendVideoStreamUpdate(
Session* session, VideoContentDescription* video) {
+ // Takes ownership of |video|.
+ talk_base::scoped_ptr<VideoContentDescription> description(video);
const ContentInfo* video_info =
GetFirstVideoContent(session->local_description());
if (video_info == NULL) {
LOG(LS_WARNING) << "Cannot send stream update for video.";
- delete video;
return;
}
std::vector<ContentInfo> contents;
contents.push_back(
- ContentInfo(video_info->name, video_info->type, video));
+ ContentInfo(video_info->name, video_info->type, description.get()));
session->SendDescriptionInfoMessage(contents);
}
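SendVideoStreamUpdate now adopts |video| into a scoped_ptr at function entry, so the early-return path no longer needs a manual delete. A minimal sketch of that ownership pattern; std::unique_ptr and the Payload/Consume names are stand-ins, not the real API:

#include <iostream>
#include <memory>

struct Payload { int id; };

// The caller's raw pointer is adopted up front, so every return path frees it.
void Consume(Payload* raw, bool have_destination) {
  std::unique_ptr<Payload> payload(raw);  // takes ownership of |raw|
  if (!have_destination) {
    std::cout << "no destination; payload " << payload->id
              << " freed automatically" << std::endl;
    return;  // unique_ptr frees the payload here
  }
  std::cout << "sending payload " << payload->id << std::endl;
}  // and here on the normal path

int main() {
  Payload* a = new Payload();
  a->id = 1;
  Consume(a, false);
  Payload* b = new Payload();
  b->id = 2;
  Consume(b, true);
  return 0;
}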
diff --git a/talk/session/media/channel.cc b/talk/session/media/channel.cc
index 66f4054018..948a02c7b4 100644
--- a/talk/session/media/channel.cc
+++ b/talk/session/media/channel.cc
@@ -190,10 +190,11 @@ struct PacketMessageData : public talk_base::MessageData {
};
struct AudioRenderMessageData: public talk_base::MessageData {
- AudioRenderMessageData(uint32 s, AudioRenderer* r)
- : ssrc(s), renderer(r), result(false) {}
+ AudioRenderMessageData(uint32 s, AudioRenderer* r, bool l)
+ : ssrc(s), renderer(r), is_local(l), result(false) {}
uint32 ssrc;
AudioRenderer* renderer;
+ bool is_local;
bool result;
};
@@ -1441,8 +1442,14 @@ bool VoiceChannel::Init() {
return true;
}
-bool VoiceChannel::SetRenderer(uint32 ssrc, AudioRenderer* renderer) {
- AudioRenderMessageData data(ssrc, renderer);
+bool VoiceChannel::SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ AudioRenderMessageData data(ssrc, renderer, false);
+ Send(MSG_SETRENDERER, &data);
+ return data.result;
+}
+
+bool VoiceChannel::SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
+ AudioRenderMessageData data(ssrc, renderer, true);
Send(MSG_SETRENDERER, &data);
return data.result;
}
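Both renderer setters post the same MSG_SETRENDERER message and let the worker-thread handler branch on the new is_local flag. A minimal sketch of that dispatch, with a synchronous Dispatch() standing in for the real Send()/OnMessage() machinery and all names hypothetical:

#include <iostream>

class AudioRendererStub {};

struct AudioRenderMessageData {
  AudioRenderMessageData(unsigned int s, AudioRendererStub* r, bool l)
      : ssrc(s), renderer(r), is_local(l), result(false) {}
  unsigned int ssrc;
  AudioRendererStub* renderer;
  bool is_local;
  bool result;
};

class VoiceChannelSketch {
 public:
  bool SetRemoteRenderer(unsigned int ssrc, AudioRendererStub* r) {
    AudioRenderMessageData data(ssrc, r, false);
    Dispatch(&data);  // the real code posts MSG_SETRENDERER to the worker thread
    return data.result;
  }
  bool SetLocalRenderer(unsigned int ssrc, AudioRendererStub* r) {
    AudioRenderMessageData data(ssrc, r, true);
    Dispatch(&data);
    return data.result;
  }

 private:
  // Synchronous stand-in for Send(MSG_SETRENDERER, &data) plus OnMessage().
  void Dispatch(AudioRenderMessageData* data) {
    data->result = data->is_local
                       ? SetLocalRenderer_w(data->ssrc, data->renderer)
                       : SetRemoteRenderer_w(data->ssrc, data->renderer);
  }
  bool SetLocalRenderer_w(unsigned int, AudioRendererStub*) { return true; }
  bool SetRemoteRenderer_w(unsigned int, AudioRendererStub*) { return true; }
};

int main() {
  VoiceChannelSketch ch;
  AudioRendererStub renderer;
  std::cout << ch.SetLocalRenderer(1, &renderer) << " "
            << ch.SetRemoteRenderer(2, &renderer) << std::endl;  // 1 1
  return 0;
}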
@@ -1736,8 +1743,12 @@ bool VoiceChannel::SetChannelOptions_w(const AudioOptions& options) {
return media_channel()->SetOptions(options);
}
-bool VoiceChannel::SetRenderer_w(uint32 ssrc, AudioRenderer* renderer) {
- return media_channel()->SetRenderer(ssrc, renderer);
+bool VoiceChannel::SetRenderer_w(uint32 ssrc, AudioRenderer* renderer,
+ bool is_local) {
+ if (is_local)
+ return media_channel()->SetLocalRenderer(ssrc, renderer);
+
+ return media_channel()->SetRemoteRenderer(ssrc, renderer);
}
void VoiceChannel::OnMessage(talk_base::Message *pmsg) {
@@ -1798,7 +1809,7 @@ void VoiceChannel::OnMessage(talk_base::Message *pmsg) {
case MSG_SETRENDERER: {
AudioRenderMessageData* data =
static_cast<AudioRenderMessageData*>(pmsg->pdata);
- data->result = SetRenderer_w(data->ssrc, data->renderer);
+ data->result = SetRenderer_w(data->ssrc, data->renderer, data->is_local);
break;
}
default:
diff --git a/talk/session/media/channel.h b/talk/session/media/channel.h
index b49d5a0535..9bac965882 100644
--- a/talk/session/media/channel.h
+++ b/talk/session/media/channel.h
@@ -363,7 +363,8 @@ class VoiceChannel : public BaseChannel {
const std::string& content_name, bool rtcp);
~VoiceChannel();
bool Init();
- bool SetRenderer(uint32 ssrc, AudioRenderer* renderer);
+ bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer);
+ bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer);
// downcasts a MediaChannel
virtual VoiceMediaChannel* media_channel() const {
@@ -454,7 +455,7 @@ class VoiceChannel : public BaseChannel {
void OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode, SrtpFilter::Error error);
// Configuration and setting.
bool SetChannelOptions_w(const AudioOptions& options);
- bool SetRenderer_w(uint32 ssrc, AudioRenderer* renderer);
+ bool SetRenderer_w(uint32 ssrc, AudioRenderer* renderer, bool is_local);
static const int kEarlyMediaTimeout = 1000;
bool received_media_;
diff --git a/talk/session/media/channel_unittest.cc b/talk/session/media/channel_unittest.cc
index 378f552bbc..ff03b49805 100644
--- a/talk/session/media/channel_unittest.cc
+++ b/talk/session/media/channel_unittest.cc
@@ -188,6 +188,10 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
talk_base::InitializeSSL();
}
+ static void TearDownTestCase() {
+ talk_base::CleanupSSL();
+ }
+
void CreateChannels(int flags1, int flags2) {
CreateChannels(new typename T::MediaChannel(NULL),
new typename T::MediaChannel(NULL),
diff --git a/talk/session/media/mediamessages_unittest.cc b/talk/session/media/mediamessages_unittest.cc
index 4c76be31ca..c7c81c3d2e 100644
--- a/talk/session/media/mediamessages_unittest.cc
+++ b/talk/session/media/mediamessages_unittest.cc
@@ -152,6 +152,15 @@ class MediaMessagesTest : public testing::Test {
return desc;
}
+ size_t ClearXmlElements(cricket::XmlElements* elements) {
+ size_t size = elements->size();
+ for (size_t i = 0; i < size; i++) {
+ delete elements->at(i);
+ }
+ elements->clear();
+ return size;
+ }
+
talk_base::scoped_ptr<cricket::SessionDescription> remote_description_;
};
@@ -177,6 +186,7 @@ TEST_F(MediaMessagesTest, ViewNoneToFromXml) {
ASSERT_EQ(1U, actual_view_elems.size());
EXPECT_EQ(expected_view_elem->Str(), actual_view_elems[0]->Str());
+ ClearXmlElements(&actual_view_elems);
cricket::ParseError parse_error;
EXPECT_TRUE(cricket::IsJingleViewRequest(action_elem.get()));
@@ -211,6 +221,7 @@ TEST_F(MediaMessagesTest, ViewVgaToFromXml) {
ASSERT_EQ(2U, actual_view_elems.size());
EXPECT_EQ(expected_view_elem1->Str(), actual_view_elems[0]->Str());
EXPECT_EQ(expected_view_elem2->Str(), actual_view_elems[1]->Str());
+ ClearXmlElements(&actual_view_elems);
view_request.static_video_views.clear();
cricket::ParseError parse_error;
diff --git a/talk/session/media/mediasession.cc b/talk/session/media/mediasession.cc
index 2c087e9cda..1215008b05 100644
--- a/talk/session/media/mediasession.cc
+++ b/talk/session/media/mediasession.cc
@@ -773,7 +773,7 @@ static void FindCodecsToOffer(
if (!FindMatchingCodec<C>(*offered_codecs, *it, NULL) && IsRtxCodec(*it)) {
C rtx_codec = *it;
int referenced_pl_type =
- talk_base::FromString<int>(
+ talk_base::FromString<int>(0,
rtx_codec.params[kCodecParamAssociatedPayloadType]);
new_rtx_codecs.insert(std::pair<int, C>(referenced_pl_type,
rtx_codec));
diff --git a/talk/session/media/mediasessionclient.cc b/talk/session/media/mediasessionclient.cc
index 18407ca739..584c342058 100644
--- a/talk/session/media/mediasessionclient.cc
+++ b/talk/session/media/mediasessionclient.cc
@@ -53,7 +53,8 @@ MediaSessionClient::MediaSessionClient(
focus_call_(NULL),
channel_manager_(new ChannelManager(session_manager_->worker_thread())),
desc_factory_(channel_manager_,
- session_manager_->transport_desc_factory()) {
+ session_manager_->transport_desc_factory()),
+ multisession_enabled_(false) {
Construct();
}
#endif
@@ -71,7 +72,8 @@ MediaSessionClient::MediaSessionClient(
device_manager, new CaptureManager(),
session_manager_->worker_thread())),
desc_factory_(channel_manager_,
- session_manager_->transport_desc_factory()) {
+ session_manager_->transport_desc_factory()),
+ multisession_enabled_(false) {
Construct();
}
@@ -533,7 +535,8 @@ bool ParseJingleStreamsOrLegacySsrc(const buzz::XmlElement* desc_elem,
bool ParseJingleAudioContent(const buzz::XmlElement* content_elem,
ContentDescription** content,
ParseError* error) {
- AudioContentDescription* audio = new AudioContentDescription();
+ talk_base::scoped_ptr<AudioContentDescription> audio(
+ new AudioContentDescription());
for (const buzz::XmlElement* payload_elem =
content_elem->FirstNamed(QN_JINGLE_RTP_PAYLOADTYPE);
@@ -545,11 +548,11 @@ bool ParseJingleAudioContent(const buzz::XmlElement* content_elem,
}
}
- if (!ParseJingleStreamsOrLegacySsrc(content_elem, audio, error)) {
+ if (!ParseJingleStreamsOrLegacySsrc(content_elem, audio.get(), error)) {
return false;
}
- if (!ParseJingleEncryption(content_elem, audio, error)) {
+ if (!ParseJingleEncryption(content_elem, audio.get(), error)) {
return false;
}
@@ -561,14 +564,15 @@ bool ParseJingleAudioContent(const buzz::XmlElement* content_elem,
}
audio->set_rtp_header_extensions(hdrexts);
- *content = audio;
+ *content = audio.release();
return true;
}
bool ParseJingleVideoContent(const buzz::XmlElement* content_elem,
ContentDescription** content,
ParseError* error) {
- VideoContentDescription* video = new VideoContentDescription();
+ talk_base::scoped_ptr<VideoContentDescription> video(
+ new VideoContentDescription());
for (const buzz::XmlElement* payload_elem =
content_elem->FirstNamed(QN_JINGLE_RTP_PAYLOADTYPE);
@@ -580,12 +584,12 @@ bool ParseJingleVideoContent(const buzz::XmlElement* content_elem,
}
}
- if (!ParseJingleStreamsOrLegacySsrc(content_elem, video, error)) {
+ if (!ParseJingleStreamsOrLegacySsrc(content_elem, video.get(), error)) {
return false;
}
- ParseBandwidth(content_elem, video);
+ ParseBandwidth(content_elem, video.get());
- if (!ParseJingleEncryption(content_elem, video, error)) {
+ if (!ParseJingleEncryption(content_elem, video.get(), error)) {
return false;
}
@@ -597,14 +601,15 @@ bool ParseJingleVideoContent(const buzz::XmlElement* content_elem,
}
video->set_rtp_header_extensions(hdrexts);
- *content = video;
+ *content = video.release();
return true;
}
bool ParseJingleSctpDataContent(const buzz::XmlElement* content_elem,
ContentDescription** content,
ParseError* error) {
- DataContentDescription* data = new DataContentDescription();
+ talk_base::scoped_ptr<DataContentDescription> data(
+ new DataContentDescription());
data->set_protocol(kMediaProtocolSctp);
for (const buzz::XmlElement* stream_elem =
@@ -626,7 +631,7 @@ bool ParseJingleSctpDataContent(const buzz::XmlElement* content_elem,
data->mutable_streams().push_back(stream);
}
- *content = data;
+ *content = data.release();
return true;
}
diff --git a/talk/session/media/mediasessionclient_unittest.cc b/talk/session/media/mediasessionclient_unittest.cc
index f9b747bc3f..644e1a61c1 100644
--- a/talk/session/media/mediasessionclient_unittest.cc
+++ b/talk/session/media/mediasessionclient_unittest.cc
@@ -1213,10 +1213,9 @@ class MediaSessionTestParser {
class JingleSessionTestParser : public MediaSessionTestParser {
public:
- JingleSessionTestParser() : action_(NULL) {}
+ JingleSessionTestParser() {}
~JingleSessionTestParser() {
- delete action_;
}
buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) {
@@ -1227,7 +1226,7 @@ class JingleSessionTestParser : public MediaSessionTestParser {
// We need to be able to use multiple contents, but the action
// gets deleted before we can call NextContent, so we need to
// stash away a copy.
- action_ = CopyElement(action);
+ action_.reset(CopyElement(action));
return action_->FirstNamed(cricket::QN_JINGLE_CONTENT);
}
@@ -1351,7 +1350,7 @@ class JingleSessionTestParser : public MediaSessionTestParser {
}
private:
- buzz::XmlElement* action_;
+ talk_base::scoped_ptr<buzz::XmlElement> action_;
};
class GingleSessionTestParser : public MediaSessionTestParser {
@@ -1520,6 +1519,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
delete pa_;
delete nm_;
delete parser_;
+ ClearStanzas();
}
buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) {
@@ -1700,8 +1700,9 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
buzz::XmlElement** element) {
*element = NULL;
- buzz::XmlElement* el = buzz::XmlElement::ForStr(initiate_string);
- client_->session_manager()->OnIncomingMessage(el);
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(initiate_string));
+ client_->session_manager()->OnIncomingMessage(el.get());
ASSERT_TRUE(call_ != NULL);
ASSERT_TRUE(call_->sessions()[0] != NULL);
ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
@@ -1710,8 +1711,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
CheckVideoBandwidth(expected_video_bandwidth_,
call_->sessions()[0]->remote_description());
CheckVideoRtcpMux(expected_video_rtcp_mux_,
@@ -1734,8 +1734,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(ContentFromAction(e) != NULL);
*element = CopyElement(ContentFromAction(e));
ASSERT_TRUE(*element != NULL);
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
if (expect_outgoing_crypto_) {
CheckCryptoForGoodOutgoingAccept(call_->sessions()[0]);
}
@@ -1756,8 +1755,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
e = ActionFromStanza(stanzas_[0]);
ASSERT_TRUE(e != NULL);
ASSERT_TRUE(parser_->ActionIsTerminate(e));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
}
void TestRejectOffer(const std::string &initiate_string,
@@ -1765,8 +1763,9 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
buzz::XmlElement** element) {
*element = NULL;
- buzz::XmlElement* el = buzz::XmlElement::ForStr(initiate_string);
- client_->session_manager()->OnIncomingMessage(el);
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(initiate_string));
+ client_->session_manager()->OnIncomingMessage(el.get());
ASSERT_TRUE(call_ != NULL);
ASSERT_TRUE(call_->sessions()[0] != NULL);
ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
@@ -1775,8 +1774,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
call_->AcceptSession(call_->sessions()[0], options);
ASSERT_EQ(cricket::Session::STATE_SENTACCEPT,
@@ -1791,8 +1789,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(ContentFromAction(e) != NULL);
*element = CopyElement(ContentFromAction(e));
ASSERT_TRUE(*element != NULL);
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
buzz::XmlElement* content = *element;
// The NextContent method actually returns the second content. So we
@@ -1826,13 +1823,13 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
e = ActionFromStanza(stanzas_[0]);
ASSERT_TRUE(e != NULL);
ASSERT_TRUE(parser_->ActionIsTerminate(e));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
}
void TestBadIncomingInitiate(const std::string& initiate_string) {
- buzz::XmlElement* el = buzz::XmlElement::ForStr(initiate_string);
- client_->session_manager()->OnIncomingMessage(el);
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(initiate_string));
+ client_->session_manager()->OnIncomingMessage(el.get());
ASSERT_TRUE(call_ != NULL);
ASSERT_TRUE(call_->sessions()[0] != NULL);
ASSERT_EQ(cricket::Session::STATE_SENTREJECT,
@@ -1841,8 +1838,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
ASSERT_TRUE(stanzas_[1]->HasAttr(buzz::QN_TYPE));
ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[1]->Attr(buzz::QN_TYPE));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
}
void TestGoodOutgoingInitiate(const cricket::CallOptions& options) {
@@ -2089,8 +2085,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
CheckSctpDataContent(content);
}
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
}
void TestHasAllSupportedAudioCodecs(buzz::XmlElement* e) {
@@ -2379,25 +2374,23 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
buzz::XmlElement** element) {
*element = NULL;
- buzz::XmlElement* el = buzz::XmlElement::ForStr(initiate_string);
- client_->session_manager()->OnIncomingMessage(el);
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(initiate_string));
+ client_->session_manager()->OnIncomingMessage(el.get());
ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
call_->sessions()[0]->state());
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
CheckBadCryptoFromIncomingInitiate(call_->sessions()[0]);
call_->AcceptSession(call_->sessions()[0], cricket::CallOptions());
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
CheckNoCryptoForOutgoingAccept(call_->sessions()[0]);
call_->Terminate();
ASSERT_EQ(cricket::Session::STATE_SENTTERMINATE,
call_->sessions()[0]->state());
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
}
void TestIncomingAcceptWithSsrcs(
@@ -2425,11 +2418,11 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(content_desc != NULL);
ASSERT_EQ("", content_desc->Attr(cricket::QN_SSRC));
}
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
// We need to insert the session ID into the session accept message.
- buzz::XmlElement* el = buzz::XmlElement::ForStr(accept_string);
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(accept_string));
const std::string sid = call_->sessions()[0]->id();
if (initial_protocol_ == cricket::PROTOCOL_JINGLE) {
buzz::XmlElement* jingle = el->FirstNamed(cricket::QN_JINGLE);
@@ -2439,7 +2432,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
session->SetAttr(cricket::QN_ID, sid);
}
- client_->session_manager()->OnIncomingMessage(el);
+ client_->session_manager()->OnIncomingMessage(el.get());
ASSERT_EQ(cricket::Session::STATE_RECEIVEDACCEPT,
call_->sessions()[0]->state());
@@ -2447,8 +2440,7 @@ class MediaSessionClientTest : public sigslot::has_slots<> {
ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
- delete stanzas_[0];
- stanzas_.clear();
+ ClearStanzas();
CheckAudioSsrcForIncomingAccept(call_->sessions()[0]);
CheckVideoSsrcForIncomingAccept(call_->sessions()[0]);
diff --git a/talk/site_scons/site_tools/talk_linux.py b/talk/site_scons/site_tools/talk_linux.py
index 1ceb94a764..acba92120f 100644
--- a/talk/site_scons/site_tools/talk_linux.py
+++ b/talk/site_scons/site_tools/talk_linux.py
@@ -50,11 +50,11 @@ def _InternalBuildDebianPackage(env, debian_files, package_files,
# generated files.
control_file = None
changelog_file = None
- for file in debian_files:
- if os.path.basename(file) == 'control':
- control_file = env.File(file).srcnode().abspath
- elif os.path.basename(file) == 'changelog':
- changelog_file = env.File(file).srcnode().abspath
+ for file_name in debian_files:
+ if os.path.basename(file_name) == 'control':
+ control_file = env.File(file_name).srcnode().abspath
+ elif os.path.basename(file_name) == 'changelog':
+ changelog_file = env.File(file_name).srcnode().abspath
if not control_file:
raise Exception('Need to have a control file')
if not changelog_file:
@@ -87,18 +87,18 @@ def _InternalBuildDebianPackage(env, debian_files, package_files,
# Path to where we will construct the debian build tree.
deb_build_tree = os.path.join(source_dir_name, 'deb_build_tree')
# First copy the files.
- for file in package_files:
- env.Command(os.path.join(deb_build_tree, file[0]), file[1],
+ for file_name in package_files:
+ env.Command(os.path.join(deb_build_tree, file_name[0]), file_name[1],
SCons.Defaults.Copy('$TARGET', '$SOURCE'))
- env.Depends(targets, os.path.join(deb_build_tree, file[0]))
+ env.Depends(targets, os.path.join(deb_build_tree, file_name[0]))
# Now copy the Debian metadata sources. We have to do this all at once so
# that we can remove the target directory before copying, because there
# can't be any other stale files there or else dpkg-buildpackage may use
# them and give incorrect build output.
copied_debian_files_paths = []
- for file in debian_files:
+ for file_name in debian_files:
copied_debian_files_paths.append(os.path.join(deb_build_tree, 'debian',
- os.path.basename(file)))
+ os.path.basename(file_name)))
copy_commands = [
"""dir=$$(dirname $TARGET) && \
rm -Rf $$dir && \