Diffstat (limited to 'talk')
-rw-r--r--  talk/app/webrtc/OWNERS | 6
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java | 29
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java | 180
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java | 152
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java | 6
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java | 117
-rw-r--r--  talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java | 214
-rw-r--r--  talk/app/webrtc/androidvideocapturer.cc | 22
-rw-r--r--  talk/app/webrtc/androidvideocapturer.h | 2
-rw-r--r--  talk/app/webrtc/audiotrack.cc | 77
-rw-r--r--  talk/app/webrtc/audiotrack.h | 43
-rw-r--r--  talk/app/webrtc/dtlsidentitystore.cc | 16
-rw-r--r--  talk/app/webrtc/dtlsidentitystore.h | 3
-rw-r--r--  talk/app/webrtc/fakeportallocatorfactory.h | 76
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java | 3
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java | 75
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java | 13
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/EglBase.java | 288
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/EglBase10.java | 299
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/EglBase14.java | 254
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java | 146
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java | 22
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/RendererCommon.java | 74
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java | 283
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java | 281
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java | 51
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java | 395
-rw-r--r--  talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java | 81
-rw-r--r--  talk/app/webrtc/java/jni/androidmediacodeccommon.h | 2
-rw-r--r--  talk/app/webrtc/java/jni/androidmediadecoder_jni.cc | 303
-rw-r--r--  talk/app/webrtc/java/jni/androidmediaencoder_jni.cc | 473
-rw-r--r--  talk/app/webrtc/java/jni/androidmediaencoder_jni.h | 3
-rw-r--r--  talk/app/webrtc/java/jni/androidvideocapturer_jni.cc | 100
-rw-r--r--  talk/app/webrtc/java/jni/androidvideocapturer_jni.h | 18
-rw-r--r--  talk/app/webrtc/java/jni/classreferenceholder.cc | 5
-rw-r--r--  talk/app/webrtc/java/jni/jni_helpers.cc | 25
-rw-r--r--  talk/app/webrtc/java/jni/jni_onload.cc (renamed from talk/media/base/fakemediaprocessor.h) | 32
-rw-r--r--  talk/app/webrtc/java/jni/native_handle_impl.cc | 163
-rw-r--r--  talk/app/webrtc/java/jni/native_handle_impl.h | 52
-rw-r--r--  talk/app/webrtc/java/jni/peerconnection_jni.cc | 169
-rw-r--r--  talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc | 31
-rw-r--r--  talk/app/webrtc/java/jni/surfacetexturehelper_jni.h | 18
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java | 368
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java | 221
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/PeerConnection.java | 13
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java | 61
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/RtpSender.java | 25
-rw-r--r--  talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java | 23
-rw-r--r--  talk/app/webrtc/jsepsessiondescription.cc | 3
-rw-r--r--  talk/app/webrtc/localaudiosource.cc | 4
-rw-r--r--  talk/app/webrtc/localaudiosource.h | 15
-rw-r--r--  talk/app/webrtc/localaudiosource_unittest.cc | 40
-rw-r--r--  talk/app/webrtc/mediacontroller.cc | 14
-rw-r--r--  talk/app/webrtc/mediastream_unittest.cc | 22
-rw-r--r--  talk/app/webrtc/mediastreaminterface.h | 43
-rw-r--r--  talk/app/webrtc/mediastreamobserver.cc | 101
-rw-r--r--  talk/app/webrtc/mediastreamobserver.h | 65
-rw-r--r--  talk/app/webrtc/mediastreamprovider.h | 19
-rw-r--r--  talk/app/webrtc/objc/README | 54
-rw-r--r--  talk/app/webrtc/objc/RTCFileLogger.mm | 41
-rw-r--r--  talk/app/webrtc/objc/RTCPeerConnection.mm | 6
-rw-r--r--  talk/app/webrtc/objc/RTCPeerConnectionInterface.mm | 10
-rw-r--r--  talk/app/webrtc/objc/avfoundationvideocapturer.h | 1
-rw-r--r--  talk/app/webrtc/objc/avfoundationvideocapturer.mm | 26
-rw-r--r--  talk/app/webrtc/objc/public/RTCFileLogger.h | 23
-rw-r--r--  talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h | 4
-rw-r--r--  talk/app/webrtc/peerconnection.cc | 645
-rw-r--r--  talk/app/webrtc/peerconnection.h | 83
-rw-r--r--  talk/app/webrtc/peerconnection_unittest.cc | 714
-rw-r--r--  talk/app/webrtc/peerconnectionendtoend_unittest.cc | 92
-rw-r--r--  talk/app/webrtc/peerconnectionfactory.cc | 50
-rw-r--r--  talk/app/webrtc/peerconnectionfactory.h | 22
-rw-r--r--  talk/app/webrtc/peerconnectionfactory_unittest.cc | 300
-rw-r--r--  talk/app/webrtc/peerconnectionfactoryproxy.h | 13
-rw-r--r--  talk/app/webrtc/peerconnectioninterface.h | 116
-rw-r--r--  talk/app/webrtc/peerconnectioninterface_unittest.cc | 175
-rw-r--r--  talk/app/webrtc/peerconnectionproxy.h | 4
-rw-r--r--  talk/app/webrtc/portallocatorfactory.cc | 68
-rw-r--r--  talk/app/webrtc/portallocatorfactory.h | 43
-rw-r--r--  talk/app/webrtc/remoteaudiosource.cc | 132
-rw-r--r--  talk/app/webrtc/remoteaudiosource.h | 42
-rw-r--r--  talk/app/webrtc/remoteaudiotrack.cc (renamed from talk/app/webrtc/mediastreamsignaling.h) | 4
-rw-r--r-- [-rwxr-xr-x]  talk/app/webrtc/remoteaudiotrack.h (renamed from talk/media/base/voiceprocessor.h) | 5
-rw-r--r--  talk/app/webrtc/rtpreceiver.cc | 2
-rw-r--r--  talk/app/webrtc/rtpreceiver.h | 8
-rw-r--r--  talk/app/webrtc/rtpsender.cc | 211
-rw-r--r--  talk/app/webrtc/rtpsender.h | 71
-rw-r--r--  talk/app/webrtc/rtpsenderinterface.h | 20
-rw-r--r--  talk/app/webrtc/rtpsenderreceiver_unittest.cc | 251
-rw-r--r--  talk/app/webrtc/statscollector.cc | 71
-rw-r--r--  talk/app/webrtc/statscollector.h | 1
-rw-r--r--  talk/app/webrtc/statscollector_unittest.cc | 47
-rw-r--r--  talk/app/webrtc/statstypes.cc | 5
-rw-r--r--  talk/app/webrtc/statstypes.h | 1
-rw-r--r--  talk/app/webrtc/test/DEPS | 5
-rw-r--r--  talk/app/webrtc/test/androidtestinitializer.cc | 74
-rw-r--r--  talk/app/webrtc/test/androidtestinitializer.h (renamed from talk/app/webrtc/mediastreamsignaling.cc) | 13
-rw-r--r--  talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc | 4
-rw-r--r--  talk/app/webrtc/test/fakedtlsidentitystore.h | 113
-rw-r--r--  talk/app/webrtc/test/fakemediastreamsignaling.h | 140
-rw-r--r--  talk/app/webrtc/test/peerconnectiontestwrapper.cc | 20
-rw-r--r--  talk/app/webrtc/test/peerconnectiontestwrapper.h | 7
-rw-r--r--  talk/app/webrtc/videosource.cc | 28
-rw-r--r--  talk/app/webrtc/videosource.h | 11
-rw-r--r--  talk/app/webrtc/videosource_unittest.cc | 41
-rw-r--r--  talk/app/webrtc/videosourceproxy.h | 1
-rw-r--r--  talk/app/webrtc/videotrack.cc | 4
-rw-r--r--  talk/app/webrtc/videotrack_unittest.cc | 2
-rw-r--r--  talk/app/webrtc/videotrackrenderers.cc | 4
-rw-r--r--  talk/app/webrtc/videotrackrenderers.h | 1
-rw-r--r--  talk/app/webrtc/webrtcsdp.cc | 69
-rw-r--r--  talk/app/webrtc/webrtcsdp_unittest.cc | 89
-rw-r--r--  talk/app/webrtc/webrtcsession.cc | 119
-rw-r--r--  talk/app/webrtc/webrtcsession.h | 10
-rw-r--r--  talk/app/webrtc/webrtcsession_unittest.cc | 198
-rw-r--r--  talk/app/webrtc/webrtcsessiondescriptionfactory.cc | 37
-rw-r--r--  talk/build/common.gypi | 12
-rw-r--r--  talk/build/merge_ios_libs.gyp | 2
-rw-r--r--  talk/codereview.settings | 7
-rwxr-xr-x  talk/libjingle.gyp | 115
-rwxr-xr-x  talk/libjingle_tests.gyp | 64
-rw-r--r--  talk/media/base/audiorenderer.h | 2
-rw-r--r--  talk/media/base/capturemanager_unittest.cc | 3
-rw-r--r--  talk/media/base/codec.cc | 45
-rw-r--r--  talk/media/base/codec.h | 63
-rw-r--r--  talk/media/base/codec_unittest.cc | 49
-rw-r--r--  talk/media/base/constants.cc | 1
-rw-r--r--  talk/media/base/constants.h | 3
-rw-r--r--  talk/media/base/cryptoparams.h | 6
-rw-r--r--  talk/media/base/executablehelpers.h | 14
-rw-r--r--  talk/media/base/fakemediaengine.h | 94
-rw-r--r--  talk/media/base/mediachannel.h | 342
-rw-r--r--  talk/media/base/mediaengine.h | 133
-rw-r--r--  talk/media/base/streamparams_unittest.cc | 23
-rw-r--r--  talk/media/base/testutils.cc | 4
-rw-r--r--  talk/media/base/testutils.h | 3
-rw-r--r--  talk/media/base/videocapturer.cc | 27
-rw-r--r--  talk/media/base/videocapturer.h | 22
-rw-r--r--  talk/media/base/videocapturer_unittest.cc | 33
-rw-r--r--  talk/media/base/videocommon.cc | 8
-rw-r--r--  talk/media/base/videoengine_unittest.h | 347
-rw-r--r--  talk/media/base/videoframe.cc | 3
-rw-r--r--  talk/media/base/videoframe.h | 2
-rw-r--r--  talk/media/base/videoframefactory.cc | 4
-rw-r--r--  talk/media/base/videorenderer.h | 11
-rw-r--r--  talk/media/devices/carbonvideorenderer.cc | 1
-rw-r--r--  talk/media/devices/carbonvideorenderer.h | 1
-rw-r--r--  talk/media/devices/devicemanager.cc | 2
-rw-r--r--  talk/media/devices/devicemanager_unittest.cc | 17
-rw-r--r--  talk/media/devices/fakedevicemanager.h | 2
-rw-r--r--  talk/media/devices/mobiledevicemanager.cc | 2
-rw-r--r--  talk/media/devices/v4llookup.h | 4
-rw-r--r--  talk/media/devices/videorendererfactory.h | 9
-rw-r--r--  talk/media/devices/win32devicemanager.cc | 3
-rw-r--r--  talk/media/sctp/sctpdataengine.cc | 13
-rw-r--r--  talk/media/sctp/sctpdataengine_unittest.cc | 18
-rw-r--r--  talk/media/webrtc/fakewebrtccall.cc | 26
-rw-r--r--  talk/media/webrtc/fakewebrtccall.h | 28
-rw-r--r--  talk/media/webrtc/fakewebrtcvideoengine.h | 2
-rw-r--r--  talk/media/webrtc/fakewebrtcvoiceengine.h | 269
-rwxr-xr-x  talk/media/webrtc/simulcast.cc | 6
-rw-r--r--  talk/media/webrtc/webrtcmediaengine.cc | 106
-rw-r--r--  talk/media/webrtc/webrtcmediaengine.h | 19
-rw-r--r--  talk/media/webrtc/webrtcmediaengine_unittest.cc | 205
-rw-r--r--  talk/media/webrtc/webrtcvideocapturer.cc | 14
-rw-r--r--  talk/media/webrtc/webrtcvideocapturer.h | 2
-rw-r--r--  talk/media/webrtc/webrtcvideocapturer_unittest.cc | 1
-rw-r--r--  talk/media/webrtc/webrtcvideoengine2.cc | 476
-rw-r--r--  talk/media/webrtc/webrtcvideoengine2.h | 40
-rw-r--r--  talk/media/webrtc/webrtcvideoengine2_unittest.cc | 258
-rw-r--r--  talk/media/webrtc/webrtcvideoframe.cc | 19
-rw-r--r--  talk/media/webrtc/webrtcvideoframe.h | 11
-rw-r--r--  talk/media/webrtc/webrtcvoe.h | 24
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine.cc | 1823
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine.h | 145
-rw-r--r--  talk/media/webrtc/webrtcvoiceengine_unittest.cc | 724
-rwxr-xr-x  talk/session/media/bundlefilter.cc | 65
-rwxr-xr-x  talk/session/media/bundlefilter.h | 25
-rwxr-xr-x  talk/session/media/bundlefilter_unittest.cc | 148
-rw-r--r--  talk/session/media/channel.cc | 302
-rw-r--r--  talk/session/media/channel.h | 43
-rw-r--r--  talk/session/media/channel_unittest.cc | 78
-rw-r--r--  talk/session/media/channelmanager.cc | 93
-rw-r--r--  talk/session/media/channelmanager.h | 14
-rw-r--r--  talk/session/media/channelmanager_unittest.cc | 59
-rw-r--r--  talk/session/media/mediasession.cc | 161
-rw-r--r--  talk/session/media/mediasession.h | 61
-rw-r--r--  talk/session/media/mediasession_unittest.cc | 37
-rw-r--r--  talk/session/media/srtpfilter.cc | 37
-rw-r--r--  talk/session/media/srtpfilter.h | 17
-rw-r--r--  talk/session/media/srtpfilter_unittest.cc | 102
191 files changed, 9530 insertions(+), 7414 deletions(-)
diff --git a/talk/app/webrtc/OWNERS b/talk/app/webrtc/OWNERS
index ffd78e1777..20a1fdf80d 100644
--- a/talk/app/webrtc/OWNERS
+++ b/talk/app/webrtc/OWNERS
@@ -1,5 +1,5 @@
glaznev@webrtc.org
-juberti@google.com
-perkj@google.com
+juberti@webrtc.org
+perkj@webrtc.org
tkchin@webrtc.org
-tommi@google.com
+tommi@webrtc.org
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
index 1c01ffa0b8..63c05fb616 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
@@ -28,7 +28,6 @@ package org.webrtc;
import android.graphics.SurfaceTexture;
import android.opengl.GLES20;
-import android.opengl.Matrix;
import android.test.ActivityTestCase;
import android.test.suitebuilder.annotation.MediumTest;
import android.test.suitebuilder.annotation.SmallTest;
@@ -36,9 +35,6 @@ import android.test.suitebuilder.annotation.SmallTest;
import java.nio.ByteBuffer;
import java.util.Random;
-import javax.microedition.khronos.egl.EGL10;
-import javax.microedition.khronos.egl.EGLContext;
-
public final class GlRectDrawerTest extends ActivityTestCase {
// Resolution of the test image.
private static final int WIDTH = 16;
@@ -46,7 +42,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Seed for random pixel creation.
private static final int SEED = 42;
// When comparing pixels, allow some slack for float arithmetic and integer rounding.
- private static final float MAX_DIFF = 1.0f;
+ private static final float MAX_DIFF = 1.5f;
private static float normalizedByte(byte b) {
return (b & 0xFF) / 255.0f;
@@ -100,7 +96,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
@SmallTest
public void testRgbRendering() {
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
eglBase.makeCurrent();
@@ -119,7 +115,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the RGB frame onto the pixel buffer.
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix());
+ drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer rgbaData = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
@@ -137,7 +133,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
@SmallTest
public void testYuvRendering() {
// Create EGL base with a pixel buffer as display output.
- EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
eglBase.makeCurrent();
@@ -166,7 +162,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the YUV frame onto the pixel buffer.
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawYuv(yuvTextures, RendererCommon.identityMatrix());
+ drawer.drawYuv(yuvTextures, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer data = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
@@ -231,8 +227,9 @@ public final class GlRectDrawerTest extends ActivityTestCase {
private final int rgbTexture;
public StubOesTextureProducer(
- EGLContext sharedContext, SurfaceTexture surfaceTexture, int width, int height) {
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PLAIN);
+ EglBase.Context sharedContext, SurfaceTexture surfaceTexture, int width,
+ int height) {
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PLAIN);
surfaceTexture.setDefaultBufferSize(width, height);
eglBase.createSurface(surfaceTexture);
assertEquals(eglBase.surfaceWidth(), width);
@@ -253,7 +250,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGB, WIDTH,
HEIGHT, 0, GLES20.GL_RGB, GLES20.GL_UNSIGNED_BYTE, rgbPlane);
// Draw the RGB data onto the SurfaceTexture.
- drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix());
+ drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
eglBase.swapBuffers();
}
@@ -266,14 +263,14 @@ public final class GlRectDrawerTest extends ActivityTestCase {
}
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
// Create resources for generating OES textures.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final StubOesTextureProducer oesProducer = new StubOesTextureProducer(
- eglBase.getContext(), surfaceTextureHelper.getSurfaceTexture(), WIDTH, HEIGHT);
+ eglBase.getEglBaseContext(), surfaceTextureHelper.getSurfaceTexture(), WIDTH, HEIGHT);
final SurfaceTextureHelperTest.MockTextureListener listener =
new SurfaceTextureHelperTest.MockTextureListener();
surfaceTextureHelper.setListener(listener);
@@ -291,7 +288,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the OES texture on the pixel buffer.
eglBase.makeCurrent();
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer rgbaData = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
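Note: the GlRectDrawerTest changes track this patch's EglBase refactor: the public constructor taking an EGL10 EGLContext and a ConfigType enum is replaced by a static EglBase.create() factory with config-attribute constants, shared contexts travel as EglBase.Context via getEglBaseContext(), and the GlRectDrawer draw calls gain explicit viewport arguments. A minimal sketch of the new pattern, using only calls that appear in this patch; rgbTexture stands in for a GL texture id created elsewhere:

    // Pixel-buffer-backed EGL context; a null shared context means standalone.
    final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
    eglBase.createPbufferSurface(16 /* width */, 16 /* height */);
    eglBase.makeCurrent();

    final GlRectDrawer drawer = new GlRectDrawer();
    // Draw calls now take an explicit viewport (x, y, width, height).
    drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, 16, 16);

    // Shared contexts are passed as EglBase.Context, not EGL10 EGLContext.
    final EglBase shared = EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);

    shared.release();
    drawer.release();
    eglBase.release();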
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java
new file mode 100644
index 0000000000..b1ec5dda0e
--- /dev/null
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java
@@ -0,0 +1,180 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.webrtc;
+
+import android.annotation.TargetApi;
+import android.opengl.GLES11Ext;
+import android.opengl.GLES20;
+import android.os.Build;
+import android.test.ActivityTestCase;
+import android.test.suitebuilder.annotation.SmallTest;
+import android.util.Log;
+
+import org.webrtc.MediaCodecVideoEncoder.OutputBufferInfo;
+
+import java.nio.ByteBuffer;
+
+@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
+public final class MediaCodecVideoEncoderTest extends ActivityTestCase {
+ final static String TAG = "MediaCodecVideoEncoderTest";
+
+ @SmallTest
+ public static void testInitializeUsingByteBuffer() {
+ if (!MediaCodecVideoEncoder.isVp8HwSupported()) {
+ Log.i(TAG,
+ "Hardware does not support VP8 encoding, skipping testInitReleaseUsingByteBuffer");
+ return;
+ }
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30, null));
+ encoder.release();
+ }
+
+ @SmallTest
+ public static void testInitilizeUsingTextures() {
+ if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+ Log.i(TAG, "hardware does not support VP8 encoding, skipping testEncoderUsingTextures");
+ return;
+ }
+ EglBase14 eglBase = new EglBase14(null, EglBase.CONFIG_PLAIN);
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ eglBase.getEglBaseContext()));
+ encoder.release();
+ eglBase.release();
+ }
+
+ @SmallTest
+ public static void testInitializeUsingByteBufferReInitilizeUsingTextures() {
+ if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+ Log.i(TAG, "hardware does not support VP8 encoding, skipping testEncoderUsingTextures");
+ return;
+ }
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ null));
+ encoder.release();
+ EglBase14 eglBase = new EglBase14(null, EglBase.CONFIG_PLAIN);
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ eglBase.getEglBaseContext()));
+ encoder.release();
+ eglBase.release();
+ }
+
+ @SmallTest
+ public static void testEncoderUsingByteBuffer() throws InterruptedException {
+ if (!MediaCodecVideoEncoder.isVp8HwSupported()) {
+ Log.i(TAG, "Hardware does not support VP8 encoding, skipping testEncoderUsingByteBuffer");
+ return;
+ }
+
+ final int width = 640;
+ final int height = 480;
+ final int min_size = width * height * 3 / 2;
+ final long presentationTimestampUs = 2;
+
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, width, height, 300, 30, null));
+ ByteBuffer[] inputBuffers = encoder.getInputBuffers();
+ assertNotNull(inputBuffers);
+ assertTrue(min_size <= inputBuffers[0].capacity());
+
+ int bufferIndex;
+ do {
+ Thread.sleep(10);
+ bufferIndex = encoder.dequeueInputBuffer();
+ } while (bufferIndex == -1); // |-1| is returned when there is no buffer available yet.
+
+ assertTrue(bufferIndex >= 0);
+ assertTrue(bufferIndex < inputBuffers.length);
+ assertTrue(encoder.encodeBuffer(true, bufferIndex, min_size, presentationTimestampUs));
+
+ OutputBufferInfo info;
+ do {
+ info = encoder.dequeueOutputBuffer();
+ Thread.sleep(10);
+ } while (info == null);
+ assertTrue(info.index >= 0);
+ assertEquals(presentationTimestampUs, info.presentationTimestampUs);
+ assertTrue(info.buffer.capacity() > 0);
+ encoder.releaseOutputBuffer(info.index);
+
+ encoder.release();
+ }
+
+ @SmallTest
+ public static void testEncoderUsingTextures() throws InterruptedException {
+ if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+ Log.i(TAG, "Hardware does not support VP8 encoding, skipping testEncoderUsingTextures");
+ return;
+ }
+
+ final int width = 640;
+ final int height = 480;
+ final long presentationTs = 2;
+
+ final EglBase14 eglOesBase = new EglBase14(null, EglBase.CONFIG_PIXEL_BUFFER);
+ eglOesBase.createDummyPbufferSurface();
+ eglOesBase.makeCurrent();
+ int oesTextureId = GlUtil.generateTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
+
+ // TODO(perkj): This test is weak since we don't fill the texture with valid data with correct
+ // width and height and verify the encoded data. Fill the OES texture and figure out a way to
+ // verify that the output makes sense.
+
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, width, height, 300, 30,
+ eglOesBase.getEglBaseContext()));
+ assertTrue(encoder.encodeTexture(true, oesTextureId, RendererCommon.identityMatrix(),
+ presentationTs));
+ GlUtil.checkNoGLES2Error("encodeTexture");
+
+ // It should be Ok to delete the texture after calling encodeTexture.
+ GLES20.glDeleteTextures(1, new int[] {oesTextureId}, 0);
+
+ OutputBufferInfo info = encoder.dequeueOutputBuffer();
+ while (info == null) {
+ info = encoder.dequeueOutputBuffer();
+ Thread.sleep(20);
+ }
+ assertTrue(info.index != -1);
+ assertTrue(info.buffer.capacity() > 0);
+ assertEquals(presentationTs, info.presentationTimestampUs);
+ encoder.releaseOutputBuffer(info.index);
+
+ encoder.release();
+ eglOesBase.release();
+ }
+}
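Note: the new test file relies on MediaCodecVideoEncoder's non-blocking dequeue calls: dequeueInputBuffer() returns -1 and dequeueOutputBuffer() returns null while nothing is ready, so the tests poll with a short sleep. A condensed sketch of that contract, assuming an encoder already initialized with initEncode() as above:

    int inputIndex;
    do {
      Thread.sleep(10);
      inputIndex = encoder.dequeueInputBuffer();  // -1 until an input buffer is free.
    } while (inputIndex == -1);

    OutputBufferInfo info;
    do {
      Thread.sleep(10);
      info = encoder.dequeueOutputBuffer();       // null until encoded output is ready.
    } while (info == null);
    encoder.releaseOutputBuffer(info.index);      // Hand the output buffer back.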
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
index 882fde1875..9e0164d4b8 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
@@ -37,8 +37,6 @@ import android.test.suitebuilder.annotation.SmallTest;
import java.nio.ByteBuffer;
-import javax.microedition.khronos.egl.EGL10;
-
public final class SurfaceTextureHelperTest extends ActivityTestCase {
/**
* Mock texture listener with blocking wait functionality.
@@ -99,6 +97,14 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
}
}
+ /** Assert that two integers are close, with difference at most
+ * {@code threshold}. */
+ public static void assertClose(int threshold, int expected, int actual) {
+ if (Math.abs(expected - actual) <= threshold)
+ return;
+ failNotEquals("Not close enough, threshold " + threshold, expected, actual);
+ }
+
/**
* Test normal use by receiving three uniform texture frames. Texture frames are returned as early
* as possible. The texture pixel values are inspected by drawing the texture frame to a pixel
@@ -109,20 +115,21 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
final int width = 16;
final int height = 16;
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(width, height);
final GlRectDrawer drawer = new GlRectDrawer();
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
// |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(eglBase.getContext(), EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase =
+ EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
assertEquals(eglOesBase.surfaceWidth(), width);
assertEquals(eglOesBase.surfaceHeight(), height);
@@ -142,7 +149,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Wait for an OES texture to arrive and draw it onto the pixel buffer.
listener.waitForNewFrame();
eglBase.makeCurrent();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, width, height);
surfaceTextureHelper.returnTextureFrame();
@@ -176,19 +183,20 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
final int width = 16;
final int height = 16;
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(width, height);
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
// |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(eglBase.getContext(), EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase =
+ EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
assertEquals(eglOesBase.surfaceWidth(), width);
assertEquals(eglOesBase.surfaceHeight(), height);
@@ -212,7 +220,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Draw the pending texture frame onto the pixel buffer.
eglBase.makeCurrent();
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, width, height);
drawer.release();
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
@@ -240,11 +248,11 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
public static void testDisconnect() throws InterruptedException {
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT);
+ SurfaceTextureHelper.create(null);
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
// Create EglBase with the SurfaceTexture as target EGLSurface.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PLAIN);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
eglBase.makeCurrent();
// Assert no frame has been received yet.
@@ -276,7 +284,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
@SmallTest
public static void testDisconnectImmediately() {
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT);
+ SurfaceTextureHelper.create(null);
surfaceTextureHelper.disconnect();
}
@@ -292,14 +300,14 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT, handler);
+ SurfaceTextureHelper.create(null, handler);
// Create a mock listener and expect frames to be delivered on |thread|.
final MockTextureListener listener = new MockTextureListener(thread);
surfaceTextureHelper.setListener(listener);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the
// SurfaceTexture in |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
eglOesBase.makeCurrent();
// Draw a frame onto the SurfaceTexture.
@@ -313,7 +321,119 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Return the frame from this thread.
surfaceTextureHelper.returnTextureFrame();
+ surfaceTextureHelper.disconnect(handler);
+ }
+
+ /**
+ * Test using SurfaceTextureHelper on a separate thread. A uniform texture frame is created and
+ * received on a thread separate from the test thread and returned after disconnect.
+ */
+ @MediumTest
+ public static void testLateReturnFrameOnSeparateThread() throws InterruptedException {
+ final HandlerThread thread = new HandlerThread("SurfaceTextureHelperTestThread");
+ thread.start();
+ final Handler handler = new Handler(thread.getLooper());
+
+ // Create SurfaceTextureHelper and listener.
+ final SurfaceTextureHelper surfaceTextureHelper =
+ SurfaceTextureHelper.create(null, handler);
+ // Create a mock listener and expect frames to be delivered on |thread|.
+ final MockTextureListener listener = new MockTextureListener(thread);
+ surfaceTextureHelper.setListener(listener);
+
+ // Create resources for stubbing an OES texture producer. |eglOesBase| has the
+ // SurfaceTexture in |surfaceTextureHelper| as the target EGLSurface.
+ final EglBase eglOesBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
+ eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
+ eglOesBase.makeCurrent();
+ // Draw a frame onto the SurfaceTexture.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ // swapBuffers() will ultimately trigger onTextureFrameAvailable().
+ eglOesBase.swapBuffers();
+ eglOesBase.release();
+
+ // Wait for an OES texture to arrive.
+ listener.waitForNewFrame();
+
+ surfaceTextureHelper.disconnect(handler);
+
+ surfaceTextureHelper.returnTextureFrame();
+ }
+
+ @MediumTest
+ public static void testTexturetoYUV() throws InterruptedException {
+ final int width = 16;
+ final int height = 16;
+
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
+
+ // Create SurfaceTextureHelper and listener.
+ final SurfaceTextureHelper surfaceTextureHelper =
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
+ final MockTextureListener listener = new MockTextureListener();
+ surfaceTextureHelper.setListener(listener);
+ surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
+
+ // Create resources for stubbing an OES texture producer. |eglBase| has the SurfaceTexture in
+ // |surfaceTextureHelper| as the target EGLSurface.
+
+ eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
+ assertEquals(eglBase.surfaceWidth(), width);
+ assertEquals(eglBase.surfaceHeight(), height);
+
+ final int red[] = new int[] {79, 144, 185};
+ final int green[] = new int[] {66, 210, 162};
+ final int blue[] = new int[] {161, 117, 158};
+
+ final int ref_y[] = new int[] {81, 180, 168};
+ final int ref_u[] = new int[] {173, 93, 122};
+ final int ref_v[] = new int[] {127, 103, 140};
+
+ // Draw three frames.
+ for (int i = 0; i < 3; ++i) {
+ // Draw a constant color frame onto the SurfaceTexture.
+ eglBase.makeCurrent();
+ GLES20.glClearColor(red[i] / 255.0f, green[i] / 255.0f, blue[i] / 255.0f, 1.0f);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ // swapBuffers() will ultimately trigger onTextureFrameAvailable().
+ eglBase.swapBuffers();
+
+ // Wait for an OES texture to arrive.
+ listener.waitForNewFrame();
+
+ // Memory layout: Lines are 16 bytes. First 16 lines are
+ // the Y data. These are followed by 8 lines with 8 bytes of U
+ // data on the left and 8 bytes of V data on the right.
+ //
+ // Offset
+ // 0 YYYYYYYY YYYYYYYY
+ // 16 YYYYYYYY YYYYYYYY
+ // ...
+ // 240 YYYYYYYY YYYYYYYY
+ // 256 UUUUUUUU VVVVVVVV
+ // 272 UUUUUUUU VVVVVVVV
+ // ...
+ // 368 UUUUUUUU VVVVVVVV
+ // 384 buffer end
+ ByteBuffer buffer = ByteBuffer.allocateDirect(width * height * 3 / 2);
+ surfaceTextureHelper.textureToYUV(buffer, width, height, width,
+ listener.oesTextureId, listener.transformMatrix);
+
+ surfaceTextureHelper.returnTextureFrame();
+
+ // Allow off-by-one differences due to different rounding.
+ while (buffer.position() < width*height) {
+ assertClose(1, buffer.get() & 0xff, ref_y[i]);
+ }
+ while (buffer.hasRemaining()) {
+ if (buffer.position() % width < width/2)
+ assertClose(1, buffer.get() & 0xff, ref_u[i]);
+ else
+ assertClose(1, buffer.get() & 0xff, ref_v[i]);
+ }
+ }
+
surfaceTextureHelper.disconnect();
- thread.quitSafely();
+ eglBase.release();
}
}
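Note: the memory-layout comment in testTexturetoYUV amounts to a simple indexing rule for the buffer filled by SurfaceTextureHelper.textureToYUV(): a Y plane of width * height bytes, followed by chroma lines of stride width holding width / 2 bytes of U on the left and width / 2 bytes of V on the right. A sketch with illustrative helper names (not part of the API), assuming the tightly packed case stride == width:

    // Byte offsets for a tightly packed frame (stride == width); total size
    // is width * height * 3 / 2, e.g. 384 bytes for a 16x16 frame.
    static int yOffset(int x, int y, int width) {
      return y * width + x;                               // Y plane.
    }
    static int uOffset(int x, int y, int width, int height) {
      return width * height + y * width + x;              // Left half of a chroma line.
    }
    static int vOffset(int x, int y, int width, int height) {
      return width * height + y * width + width / 2 + x;  // Right half of a chroma line.
    }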
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
index 47fe780124..341c632b58 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
@@ -36,8 +36,6 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
-import javax.microedition.khronos.egl.EGL10;
-
public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
/**
* List with all possible scaling types.
@@ -111,7 +109,7 @@ public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
}
// Test behaviour after SurfaceViewRenderer.init() is called, but still no frame.
- surfaceViewRenderer.init(EGL10.EGL_NO_CONTEXT, null);
+ surfaceViewRenderer.init((EglBase.Context) null, null);
for (RendererCommon.ScalingType scalingType : scalingTypes) {
for (int measureSpecMode : measureSpecModes) {
final int zeroMeasureSize = MeasureSpec.makeMeasureSpec(0, measureSpecMode);
@@ -134,7 +132,7 @@ public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
public void testFrame1280x720() {
final SurfaceViewRenderer surfaceViewRenderer =
new SurfaceViewRenderer(getInstrumentation().getContext());
- surfaceViewRenderer.init(EGL10.EGL_NO_CONTEXT, null);
+ surfaceViewRenderer.init((EglBase.Context) null, null);
// Test different rotation degress, but same rotated size.
for (int rotationDegree : new int[] {0, 90, 180, 270}) {
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
index dbbe5963cd..1b97201a0a 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
@@ -29,7 +29,6 @@ package org.webrtc;
import android.test.ActivityTestCase;
import android.test.suitebuilder.annotation.MediumTest;
import android.test.suitebuilder.annotation.SmallTest;
-import android.util.Log;
import android.util.Size;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
@@ -37,8 +36,6 @@ import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
import java.util.HashSet;
import java.util.Set;
-import javax.microedition.khronos.egl.EGL10;
-
@SuppressWarnings("deprecation")
public class VideoCapturerAndroidTest extends ActivityTestCase {
static final String TAG = "VideoCapturerAndroidTest";
@@ -87,8 +84,10 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testCreateAndReleaseUsingTextures() {
+ EglBase eglBase = EglBase.create();
VideoCapturerAndroidTestFixtures.release(
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT));
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext()));
+ eglBase.release();
}
@SmallTest
@@ -108,12 +107,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.startCapturerAndRender(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testStartVideoCapturerUsingTextures() throws InterruptedException {
+ public void testStartVideoCapturerUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.startCapturerAndRender(capturer);
+ eglBase.release();
}
@SmallTest
@@ -151,11 +151,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.switchCamera(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testSwitchVideoCapturerUsingTextures() throws InterruptedException {
- VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ public void testSwitchVideoCapturerUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.switchCamera(capturer);
+ eglBase.release();
}
@MediumTest
@@ -179,12 +181,14 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@MediumTest
public void testCameraCallsAfterStopUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
final String deviceName = CameraEnumerationAndroid.getDeviceName(0);
final VideoCapturerAndroid capturer = VideoCapturerAndroid.create(deviceName, null,
- EGL10.EGL_NO_CONTEXT);
+ eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.cameraCallsAfterStop(capturer,
getInstrumentation().getContext());
+ eglBase.release();
}
@SmallTest
@@ -195,11 +199,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.stopRestartVideoSource(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testStopRestartVideoSourceUsingTextures() throws InterruptedException {
- VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ public void testStopRestartVideoSourceUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.stopRestartVideoSource(capturer);
+ eglBase.release();
}
@SmallTest
@@ -215,13 +221,50 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testStartStopWithDifferentResolutionsUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
String deviceName = CameraEnumerationAndroid.getDeviceName(0);
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create(deviceName, null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create(deviceName, null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.startStopWithDifferentResolutions(capturer,
getInstrumentation().getContext());
+ eglBase.release();
+ }
+
+ @SmallTest
+ // This tests that an error is reported if the camera is already open
+ // when VideoCapturerAndroid is started.
+ public void testStartWhileCameraAlreadyOpened() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpen(
+ capturer, getInstrumentation().getContext());
+ }
+
+ @SmallTest
+ // This tests that VideoCapturerAndroid can be started even if the camera is already open,
+ // provided the camera is closed while VideoCapturerAndroid is retrying to start.
+ public void testStartWhileCameraIsAlreadyOpenAndCloseCamera() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpenAndCloseCamera(
+ capturer, getInstrumentation().getContext());
+ }
+
+ @SmallTest
+ // This tests that VideoCapturerAndroid.stop can be called while VideoCapturerAndroid is
+ // retrying to start.
+ public void testStartWhileCameraIsAlreadyOpenAndStop() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpenAndStop(
+ capturer, getInstrumentation().getContext());
}
+
+
@SmallTest
// This tests what happens if buffers are returned after the capturer has
// been stopped and restarted. It does not test or use the C++ layer.
@@ -235,11 +278,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testReturnBufferLateUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
String deviceName = CameraEnumerationAndroid.getDeviceName(0);
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create(deviceName, null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create(deviceName, null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.returnBufferLate(capturer,
getInstrumentation().getContext());
+ eglBase.release();
}
@MediumTest
@@ -251,11 +296,45 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.returnBufferLateEndToEnd(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@MediumTest
- public void DISABLED_testReturnBufferLateEndToEndUsingTextures() throws InterruptedException {
+ public void testReturnBufferLateEndToEndUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
final VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.returnBufferLateEndToEnd(capturer);
+ eglBase.release();
+ }
+
+ @MediumTest
+ // This tests that CameraEventsHandler.onCameraFreezed is triggered if video buffers are not
+ // returned to the capturer.
+ public void testCameraFreezedEventOnBufferStarvationUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroidTestFixtures.CameraEvents cameraEvents =
+ VideoCapturerAndroidTestFixtures.createCameraEvents();
+ VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", cameraEvents,
+ eglBase.getEglBaseContext());
+ VideoCapturerAndroidTestFixtures.cameraFreezedEventOnBufferStarvationUsingTextures(capturer,
+ cameraEvents, getInstrumentation().getContext());
+ eglBase.release();
+ }
+
+ @MediumTest
+ // This tests that frames forwarded to a renderer are scaled if onOutputFormatRequest is
+ // called. This tests both the Java and C++ parts of the stack.
+ public void testScaleCameraOutput() throws InterruptedException {
+ VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null);
+ VideoCapturerAndroidTestFixtures.scaleCameraOutput(capturer);
+ }
+
+ @MediumTest
+ // This tests that frames forwarded to a renderer are scaled if onOutputFormatRequest is
+ // called. This tests both the Java and C++ parts of the stack.
+ public void testScaleCameraOutputUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
+ VideoCapturerAndroidTestFixtures.scaleCameraOutput(capturer);
+ eglBase.release();
}
}
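Note: every texture-path test above follows the same shape: create an EglBase, hand its context to VideoCapturerAndroid.create(), run the fixture, and release the EglBase afterwards. A sketch of that recurring setup and teardown; the fixture call is a placeholder for any VideoCapturerAndroidTestFixtures method:

    public void testSomethingUsingTextures() throws InterruptedException {
      EglBase eglBase = EglBase.create();
      VideoCapturerAndroid capturer = VideoCapturerAndroid.create(
          "" /* empty name selects the default camera */, null /* events handler */,
          eglBase.getEglBaseContext());
      VideoCapturerAndroidTestFixtures.startCapturerAndRender(capturer);
      eglBase.release();
    }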
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
index 11b3ce98a0..0b42e33785 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
@@ -29,6 +29,7 @@ package org.webrtc;
import android.content.Context;
import android.hardware.Camera;
+import org.webrtc.VideoCapturerAndroidTestFixtures;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
import org.webrtc.VideoRenderer.I420Frame;
@@ -42,16 +43,32 @@ public class VideoCapturerAndroidTestFixtures {
static class RendererCallbacks implements VideoRenderer.Callbacks {
private int framesRendered = 0;
private Object frameLock = 0;
+ private int width = 0;
+ private int height = 0;
@Override
public void renderFrame(I420Frame frame) {
synchronized (frameLock) {
++framesRendered;
+ width = frame.rotatedWidth();
+ height = frame.rotatedHeight();
frameLock.notify();
}
VideoRenderer.renderFrameDone(frame);
}
+ public int frameWidth() {
+ synchronized (frameLock) {
+ return width;
+ }
+ }
+
+ public int frameHeight() {
+ synchronized (frameLock) {
+ return height;
+ }
+ }
+
public int WaitForNextFrameToRender() throws InterruptedException {
synchronized (frameLock) {
frameLock.wait();
@@ -102,11 +119,11 @@ public class VideoCapturerAndroidTestFixtures {
}
@Override
- public void onByteBufferFrameCaptured(byte[] frame, int length, int width, int height,
- int rotation, long timeStamp) {
+ public void onByteBufferFrameCaptured(byte[] frame, int width, int height, int rotation,
+ long timeStamp) {
synchronized (frameLock) {
++framesCaptured;
- frameSize = length;
+ frameSize = frame.length;
frameWidth = width;
frameHeight = height;
timestamps.add(timeStamp);
@@ -115,7 +132,8 @@ public class VideoCapturerAndroidTestFixtures {
}
@Override
public void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timeStamp) {
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timeStamp) {
synchronized (frameLock) {
++framesCaptured;
frameWidth = width;
@@ -174,9 +192,20 @@ public class VideoCapturerAndroidTestFixtures {
VideoCapturerAndroid.CameraEventsHandler {
public boolean onCameraOpeningCalled;
public boolean onFirstFrameAvailableCalled;
+ public final Object onCameraFreezedLock = new Object();
+ private String onCameraFreezedDescription;
@Override
- public void onCameraError(String errorDescription) { }
+ public void onCameraError(String errorDescription) {
+ }
+
+ @Override
+ public void onCameraFreezed(String errorDescription) {
+ synchronized (onCameraFreezedLock) {
+ onCameraFreezedDescription = errorDescription;
+ onCameraFreezedLock.notifyAll();
+ }
+ }
@Override
public void onCameraOpening(int cameraId) {
@@ -190,6 +219,13 @@ public class VideoCapturerAndroidTestFixtures {
@Override
public void onCameraClosed() { }
+
+ public String WaitForCameraFreezed() throws InterruptedException {
+ synchronized (onCameraFreezedLock) {
+ onCameraFreezedLock.wait();
+ return onCameraFreezedDescription;
+ }
+ }
}
static public CameraEvents createCameraEvents() {
@@ -275,8 +311,8 @@ public class VideoCapturerAndroidTestFixtures {
assertTrue(observer.WaitForCapturerToStart());
observer.WaitForNextCapturedFrame();
capturer.stopCapture();
- for (long timeStamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
capturer.dispose();
@@ -296,9 +332,10 @@ public class VideoCapturerAndroidTestFixtures {
// Make sure camera is started and then stop it.
assertTrue(observer.WaitForCapturerToStart());
capturer.stopCapture();
- for (long timeStamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
+
// We can't change |capturer| at this point, but we should not crash.
capturer.switchCamera(null);
capturer.onOutputFormatRequest(640, 480, 15);
@@ -357,17 +394,90 @@ public class VideoCapturerAndroidTestFixtures {
if (capturer.isCapturingToTexture()) {
assertEquals(0, observer.frameSize());
} else {
- assertEquals(format.frameSize(), observer.frameSize());
+ assertTrue(format.frameSize() <= observer.frameSize());
}
capturer.stopCapture();
- for (long timestamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timestamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
}
capturer.dispose();
assertTrue(capturer.isReleased());
}
+ static void waitUntilIdle(VideoCapturerAndroid capturer) throws InterruptedException {
+ final CountDownLatch barrier = new CountDownLatch(1);
+ capturer.getCameraThreadHandler().post(new Runnable() {
+ @Override public void run() {
+ barrier.countDown();
+ }
+ });
+ barrier.await();
+ }
+
+ static public void startWhileCameraIsAlreadyOpen(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+
+ if (android.os.Build.VERSION.SDK_INT > android.os.Build.VERSION_CODES.LOLLIPOP_MR1) {
+ // The first opened camera client will be evicted.
+ assertTrue(observer.WaitForCapturerToStart());
+ capturer.stopCapture();
+ } else {
+ assertFalse(observer.WaitForCapturerToStart());
+ }
+
+ capturer.dispose();
+ camera.release();
+ }
+
+ static public void startWhileCameraIsAlreadyOpenAndCloseCamera(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ waitUntilIdle(capturer);
+
+ camera.release();
+
+ // Make sure camera is started and first frame is received and then stop it.
+ assertTrue(observer.WaitForCapturerToStart());
+ observer.WaitForNextCapturedFrame();
+ capturer.stopCapture();
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
+ }
+ capturer.dispose();
+ assertTrue(capturer.isReleased());
+ }
+
+ static public void startWhileCameraIsAlreadyOpenAndStop(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ capturer.stopCapture();
+ capturer.dispose();
+ assertTrue(capturer.isReleased());
+ camera.release();
+ }
+
static public void returnBufferLate(VideoCapturerAndroid capturer,
Context appContext) throws InterruptedException {
FakeCapturerObserver observer = new FakeCapturerObserver();
@@ -387,9 +497,8 @@ public class VideoCapturerAndroidTestFixtures {
capturer.startCapture(format.width, format.height, format.maxFramerate,
appContext, observer);
observer.WaitForCapturerToStart();
-
- for (Long timeStamp : listOftimestamps) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
observer.WaitForNextCapturedFrame();
@@ -397,9 +506,10 @@ public class VideoCapturerAndroidTestFixtures {
listOftimestamps = observer.getCopyAndResetListOftimeStamps();
assertTrue(listOftimestamps.size() >= 1);
- for (Long timeStamp : listOftimestamps) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
+
capturer.dispose();
assertTrue(capturer.isReleased());
}
@@ -410,6 +520,7 @@ public class VideoCapturerAndroidTestFixtures {
final VideoSource source = factory.createVideoSource(capturer, new MediaConstraints());
final VideoTrack track = factory.createVideoTrack("dummy", source);
final FakeAsyncRenderer renderer = new FakeAsyncRenderer();
+
track.addRenderer(new VideoRenderer(renderer));
// Wait for at least one frame that has not been returned.
assertFalse(renderer.waitForPendingFrames().isEmpty());
@@ -420,9 +531,7 @@ public class VideoCapturerAndroidTestFixtures {
track.dispose();
source.dispose();
factory.dispose();
-
- // The pending frames should keep the JNI parts and |capturer| alive.
- assertFalse(capturer.isReleased());
+ assertTrue(capturer.isReleased());
// Return the frame(s), on a different thread out of spite.
final List<I420Frame> pendingFrames = renderer.waitForPendingFrames();
@@ -436,8 +545,71 @@ public class VideoCapturerAndroidTestFixtures {
});
returnThread.start();
returnThread.join();
+ }
+
+ static public void cameraFreezedEventOnBufferStarvationUsingTextures(
+ VideoCapturerAndroid capturer,
+ CameraEvents events, Context appContext) throws InterruptedException {
+ assertTrue("Not capturing to textures.", capturer.isCapturingToTexture());
- // Check that frames have successfully returned. This will cause |capturer| to be released.
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ // Make sure camera is started.
+ assertTrue(observer.WaitForCapturerToStart());
+ // Since we don't return the buffer, we should get a starvation message if we are
+ // capturing to a texture.
+ assertEquals("Camera failure. Client must return video buffers.",
+ events.WaitForCameraFreezed());
+
+ capturer.stopCapture();
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
+ }
+
+ capturer.dispose();
assertTrue(capturer.isReleased());
}
+
+ static public void scaleCameraOutput(VideoCapturerAndroid capturer) throws InterruptedException {
+ PeerConnectionFactory factory = new PeerConnectionFactory();
+ VideoSource source =
+ factory.createVideoSource(capturer, new MediaConstraints());
+ VideoTrack track = factory.createVideoTrack("dummy", source);
+ RendererCallbacks renderer = new RendererCallbacks();
+ track.addRenderer(new VideoRenderer(renderer));
+ assertTrue(renderer.WaitForNextFrameToRender() > 0);
+
+ final int startWidth = renderer.frameWidth();
+ final int startHeight = renderer.frameHeight();
+ final int frameRate = 30;
+ final int scaledWidth = startWidth / 2;
+ final int scaledHeight = startHeight / 2;
+
+ // Request the captured frames to be scaled.
+ capturer.onOutputFormatRequest(scaledWidth, scaledHeight, frameRate);
+
+ boolean gotExpectedResolution = false;
+ int numberOfInspectedFrames = 0;
+
+ do {
+ renderer.WaitForNextFrameToRender();
+ ++numberOfInspectedFrames;
+
+ gotExpectedResolution = (renderer.frameWidth() == scaledWidth
+ && renderer.frameHeight() == scaledHeight);
+ } while (!gotExpectedResolution && numberOfInspectedFrames < 30);
+
+ source.stop();
+ track.dispose();
+ source.dispose();
+ factory.dispose();
+ assertTrue(capturer.isReleased());
+
+ assertTrue(gotExpectedResolution);
+ }
+
}
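
The texture-capture fixtures above all follow one lifecycle: start capture, wait for a frame, hand texture frames back through the capturer's SurfaceTextureHelper, then stop and dispose. A minimal sketch of that pattern, assuming the FakeCapturerObserver helper and the org.webrtc test types used throughout this file are on the classpath:

  // Sketch only: mirrors the capture/return-buffer pattern of the fixtures above.
  static void captureAndReturnOneFrame(VideoCapturerAndroid capturer, Context appContext)
      throws InterruptedException {
    final CameraEnumerationAndroid.CaptureFormat format =
        capturer.getSupportedFormats().get(0);
    final FakeCapturerObserver observer = new FakeCapturerObserver();
    capturer.startCapture(format.width, format.height, format.maxFramerate,
        appContext, observer);
    observer.WaitForCapturerToStart();
    observer.WaitForNextCapturedFrame();
    // Texture frames are owned by the capturer's SurfaceTextureHelper and must be
    // returned, otherwise capture starves and a camera-freeze event fires.
    if (capturer.isCapturingToTexture()) {
      capturer.surfaceHelper.returnTextureFrame();
    }
    capturer.stopCapture();
    capturer.dispose();
    assertTrue(capturer.isReleased());
  }
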
diff --git a/talk/app/webrtc/androidvideocapturer.cc b/talk/app/webrtc/androidvideocapturer.cc
index afcfb5bb7c..d8f12174db 100644
--- a/talk/app/webrtc/androidvideocapturer.cc
+++ b/talk/app/webrtc/androidvideocapturer.cc
@@ -26,6 +26,7 @@
*/
#include "talk/app/webrtc/androidvideocapturer.h"
+#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "talk/media/webrtc/webrtcvideoframe.h"
#include "webrtc/base/common.h"
#include "webrtc/base/json.h"
@@ -57,11 +58,13 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
int rotation,
int64_t time_stamp_in_ns) {
+ RTC_DCHECK(rotation == 0 || rotation == 90 || rotation == 180 ||
+ rotation == 270);
buffer_ = buffer;
captured_frame_.width = buffer->width();
captured_frame_.height = buffer->height();
captured_frame_.time_stamp = time_stamp_in_ns;
- captured_frame_.rotation = rotation;
+ captured_frame_.rotation = static_cast<webrtc::VideoRotation>(rotation);
}
void ClearCapturedFrame() {
@@ -85,7 +88,7 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
rtc::scoped_ptr<cricket::VideoFrame> frame(new cricket::WebRtcVideoFrame(
ShallowCenterCrop(buffer_, dst_width, dst_height),
- captured_frame->time_stamp, captured_frame->GetRotation()));
+ captured_frame->time_stamp, captured_frame->rotation));
// Caller takes ownership.
// TODO(magjed): Change CreateAliasedFrame() to return a rtc::scoped_ptr.
return apply_rotation_ ? frame->GetCopyWithRotationApplied()->Copy()
@@ -99,10 +102,17 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
int output_width,
int output_height) const override {
if (buffer_->native_handle() != nullptr) {
- // TODO(perkj): Implement CreateAliasedFrame properly for textures.
- rtc::scoped_ptr<cricket::VideoFrame> frame(new cricket::WebRtcVideoFrame(
- buffer_, input_frame->time_stamp, input_frame->GetRotation()));
- return frame.release();
+ // TODO(perkj): Implement cropping.
+ RTC_CHECK_EQ(cropped_input_width, buffer_->width());
+ RTC_CHECK_EQ(cropped_input_height, buffer_->height());
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> scaled_buffer(
+ static_cast<webrtc_jni::AndroidTextureBuffer*>(buffer_.get())
+ ->ScaleAndRotate(output_width, output_height,
+ apply_rotation_ ? input_frame->rotation :
+ webrtc::kVideoRotation_0));
+ return new cricket::WebRtcVideoFrame(
+ scaled_buffer, input_frame->time_stamp,
+ apply_rotation_ ? webrtc::kVideoRotation_0 : input_frame->rotation);
}
return VideoFrameFactory::CreateAliasedFrame(input_frame,
cropped_input_width,
diff --git a/talk/app/webrtc/androidvideocapturer.h b/talk/app/webrtc/androidvideocapturer.h
index df783bdf6f..c665eabd91 100644
--- a/talk/app/webrtc/androidvideocapturer.h
+++ b/talk/app/webrtc/androidvideocapturer.h
@@ -32,7 +32,7 @@
#include "talk/media/base/videocapturer.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
namespace webrtc {
diff --git a/talk/app/webrtc/audiotrack.cc b/talk/app/webrtc/audiotrack.cc
index b0c91296f9..b3223cd29f 100644
--- a/talk/app/webrtc/audiotrack.cc
+++ b/talk/app/webrtc/audiotrack.cc
@@ -27,27 +27,82 @@
#include "talk/app/webrtc/audiotrack.h"
-#include <string>
+#include "webrtc/base/checks.h"
+
+using rtc::scoped_refptr;
namespace webrtc {
-static const char kAudioTrackKind[] = "audio";
+const char MediaStreamTrackInterface::kAudioKind[] = "audio";
+
+// static
+scoped_refptr<AudioTrack> AudioTrack::Create(
+ const std::string& id,
+ const scoped_refptr<AudioSourceInterface>& source) {
+ return new rtc::RefCountedObject<AudioTrack>(id, source);
+}
AudioTrack::AudioTrack(const std::string& label,
- AudioSourceInterface* audio_source)
- : MediaStreamTrack<AudioTrackInterface>(label),
- audio_source_(audio_source) {
+ const scoped_refptr<AudioSourceInterface>& source)
+ : MediaStreamTrack<AudioTrackInterface>(label), audio_source_(source) {
+ if (audio_source_) {
+ audio_source_->RegisterObserver(this);
+ OnChanged();
+ }
+}
+
+AudioTrack::~AudioTrack() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ set_state(MediaStreamTrackInterface::kEnded);
+ if (audio_source_)
+ audio_source_->UnregisterObserver(this);
}
std::string AudioTrack::kind() const {
- return kAudioTrackKind;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return kAudioKind;
+}
+
+AudioSourceInterface* AudioTrack::GetSource() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return audio_source_.get();
}
-rtc::scoped_refptr<AudioTrack> AudioTrack::Create(
- const std::string& id, AudioSourceInterface* source) {
- rtc::RefCountedObject<AudioTrack>* track =
- new rtc::RefCountedObject<AudioTrack>(id, source);
- return track;
+void AudioTrack::AddSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_source_)
+ audio_source_->AddSink(sink);
+}
+
+void AudioTrack::RemoveSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_source_)
+ audio_source_->RemoveSink(sink);
+}
+
+void AudioTrack::OnChanged() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (state() == kFailed)
+ return; // We can't recover from this state (do we ever set it?).
+
+ TrackState new_state = kInitializing;
+
+ // |audio_source_| must be non-null if we ever get here.
+ switch (audio_source_->state()) {
+ case MediaSourceInterface::kLive:
+ case MediaSourceInterface::kMuted:
+ new_state = kLive;
+ break;
+ case MediaSourceInterface::kEnded:
+ new_state = kEnded;
+ break;
+ case MediaSourceInterface::kInitializing:
+ default:
+ // use kInitializing.
+ break;
+ }
+
+ set_state(new_state);
}
} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrack.h b/talk/app/webrtc/audiotrack.h
index 750f272ba2..55f4837714 100644
--- a/talk/app/webrtc/audiotrack.h
+++ b/talk/app/webrtc/audiotrack.h
@@ -28,40 +28,47 @@
#ifndef TALK_APP_WEBRTC_AUDIOTRACK_H_
#define TALK_APP_WEBRTC_AUDIOTRACK_H_
+#include <string>
+
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/notifier.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/thread_checker.h"
namespace webrtc {
-class AudioTrack : public MediaStreamTrack<AudioTrackInterface> {
+class AudioTrack : public MediaStreamTrack<AudioTrackInterface>,
+ public ObserverInterface {
+ protected:
+ // Protected ctor to force use of factory method.
+ AudioTrack(const std::string& label,
+ const rtc::scoped_refptr<AudioSourceInterface>& source);
+ ~AudioTrack() override;
+
public:
static rtc::scoped_refptr<AudioTrack> Create(
- const std::string& id, AudioSourceInterface* source);
-
- // AudioTrackInterface implementation.
- AudioSourceInterface* GetSource() const override {
- return audio_source_.get();
- }
- // TODO(xians): Implement these methods.
- void AddSink(AudioTrackSinkInterface* sink) override {}
- void RemoveSink(AudioTrackSinkInterface* sink) override {}
- bool GetSignalLevel(int* level) override { return false; }
- rtc::scoped_refptr<AudioProcessorInterface> GetAudioProcessor() override {
- return NULL;
- }
- cricket::AudioRenderer* GetRenderer() override { return NULL; }
+ const std::string& id,
+ const rtc::scoped_refptr<AudioSourceInterface>& source);
+ private:
// MediaStreamTrack implementation.
std::string kind() const override;
- protected:
- AudioTrack(const std::string& label, AudioSourceInterface* audio_source);
+ // AudioTrackInterface implementation.
+ AudioSourceInterface* GetSource() const override;
+
+ void AddSink(AudioTrackSinkInterface* sink) override;
+ void RemoveSink(AudioTrackSinkInterface* sink) override;
+
+ // ObserverInterface implementation.
+ void OnChanged() override;
private:
- rtc::scoped_refptr<AudioSourceInterface> audio_source_;
+ const rtc::scoped_refptr<AudioSourceInterface> audio_source_;
+ rtc::ThreadChecker thread_checker_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTrack);
};
} // namespace webrtc
diff --git a/talk/app/webrtc/dtlsidentitystore.cc b/talk/app/webrtc/dtlsidentitystore.cc
index 27587796bc..390ec0d0b7 100644
--- a/talk/app/webrtc/dtlsidentitystore.cc
+++ b/talk/app/webrtc/dtlsidentitystore.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/dtlsidentitystore.h"
+#include <utility>
+
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
#include "webrtc/base/logging.h"
@@ -72,7 +74,7 @@ class DtlsIdentityStoreImpl::WorkerTask : public sigslot::has_slots<>,
// Posting to |this| avoids touching |store_| on threads other than
// |signaling_thread_| and thus avoids having to use locks.
IdentityResultMessageData* msg = new IdentityResultMessageData(
- new IdentityResult(key_type_, identity.Pass()));
+ new IdentityResult(key_type_, std::move(identity)));
signaling_thread_->Post(this, MSG_GENERATE_IDENTITY_RESULT, msg);
}
@@ -93,7 +95,7 @@ class DtlsIdentityStoreImpl::WorkerTask : public sigslot::has_slots<>,
static_cast<IdentityResultMessageData*>(msg->pdata));
if (store_) {
store_->OnIdentityGenerated(pdata->data()->key_type_,
- pdata->data()->identity_.Pass());
+ std::move(pdata->data()->identity_));
}
}
break;
@@ -152,7 +154,7 @@ void DtlsIdentityStoreImpl::OnMessage(rtc::Message* msg) {
rtc::scoped_ptr<IdentityResultMessageData> pdata(
static_cast<IdentityResultMessageData*>(msg->pdata));
OnIdentityGenerated(pdata->data()->key_type_,
- pdata->data()->identity_.Pass());
+ std::move(pdata->data()->identity_));
break;
}
}
@@ -178,9 +180,9 @@ void DtlsIdentityStoreImpl::GenerateIdentity(
// Return identity async - post even though we are on |signaling_thread_|.
LOG(LS_VERBOSE) << "Using a free DTLS identity.";
++request_info_[key_type].gen_in_progress_counts_;
- IdentityResultMessageData* msg = new IdentityResultMessageData(
- new IdentityResult(key_type,
- request_info_[key_type].free_identity_.Pass()));
+ IdentityResultMessageData* msg =
+ new IdentityResultMessageData(new IdentityResult(
+ key_type, std::move(request_info_[key_type].free_identity_)));
signaling_thread_->Post(this, MSG_GENERATE_IDENTITY_RESULT, msg);
return;
}
@@ -228,7 +230,7 @@ void DtlsIdentityStoreImpl::OnIdentityGenerated(
// Return the result to the observer.
if (identity.get()) {
LOG(LS_VERBOSE) << "A DTLS identity is returned to an observer.";
- observer->OnSuccess(identity.Pass());
+ observer->OnSuccess(std::move(identity));
} else {
LOG(LS_WARNING) << "Failed to generate DTLS identity.";
observer->OnFailure(0);
diff --git a/talk/app/webrtc/dtlsidentitystore.h b/talk/app/webrtc/dtlsidentitystore.h
index a0eef98e1b..2a5309d34b 100644
--- a/talk/app/webrtc/dtlsidentitystore.h
+++ b/talk/app/webrtc/dtlsidentitystore.h
@@ -30,6 +30,7 @@
#include <queue>
#include <string>
+#include <utility>
#include "webrtc/base/messagehandler.h"
#include "webrtc/base/messagequeue.h"
@@ -129,7 +130,7 @@ class DtlsIdentityStoreImpl : public DtlsIdentityStoreInterface,
struct IdentityResult {
IdentityResult(rtc::KeyType key_type,
rtc::scoped_ptr<rtc::SSLIdentity> identity)
- : key_type_(key_type), identity_(identity.Pass()) {}
+ : key_type_(key_type), identity_(std::move(identity)) {}
rtc::KeyType key_type_;
rtc::scoped_ptr<rtc::SSLIdentity> identity_;
diff --git a/talk/app/webrtc/fakeportallocatorfactory.h b/talk/app/webrtc/fakeportallocatorfactory.h
deleted file mode 100644
index f326b62043..0000000000
--- a/talk/app/webrtc/fakeportallocatorfactory.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * libjingle
- * Copyright 2011 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// This file defines a fake port allocator factory used for testing.
-// This implementation creates instances of cricket::FakePortAllocator.
-
-#ifndef TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
-#define TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
-
-#include "talk/app/webrtc/peerconnectioninterface.h"
-#include "webrtc/p2p/client/fakeportallocator.h"
-
-namespace webrtc {
-
-class FakePortAllocatorFactory : public PortAllocatorFactoryInterface {
- public:
- static FakePortAllocatorFactory* Create() {
- rtc::RefCountedObject<FakePortAllocatorFactory>* allocator =
- new rtc::RefCountedObject<FakePortAllocatorFactory>();
- return allocator;
- }
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun_configurations,
- const std::vector<TurnConfiguration>& turn_configurations) {
- stun_configs_ = stun_configurations;
- turn_configs_ = turn_configurations;
- return new cricket::FakePortAllocator(rtc::Thread::Current(), NULL);
- }
-
- const std::vector<StunConfiguration>& stun_configs() const {
- return stun_configs_;
- }
-
- const std::vector<TurnConfiguration>& turn_configs() const {
- return turn_configs_;
- }
-
- void SetNetworkIgnoreMask(int network_ignore_mask) {}
-
- protected:
- FakePortAllocatorFactory() {}
- ~FakePortAllocatorFactory() {}
-
- private:
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_configs_;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_configs_;
-};
-
-} // namespace webrtc
-
-#endif // TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java b/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
index 097d1cd906..3444529596 100644
--- a/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
+++ b/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
@@ -27,7 +27,9 @@
package org.webrtc;
+import android.annotation.TargetApi;
import android.content.Context;
+
import android.graphics.ImageFormat;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraManager;
@@ -45,6 +47,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+@TargetApi(21)
public class Camera2Enumerator implements CameraEnumerationAndroid.Enumerator {
private final static String TAG = "Camera2Enumerator";
private final static double NANO_SECONDS_PER_SECOND = 1.0e9;
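
Note that @TargetApi(21) only silences lint; it adds no runtime guard, so callers must still check the device API level before touching the android.hardware.camera2 classes this file imports. A hedged sketch of such a guard — both constructor signatures below are assumptions for illustration, not confirmed by this diff:

  import android.content.Context;
  import android.os.Build;

  final class EnumeratorChooser {
    // Picks the camera2-based enumerator only on API 21+, where
    // android.hardware.camera2 exists; otherwise falls back to the
    // android.hardware.Camera-based CameraEnumerator.
    static CameraEnumerationAndroid.Enumerator create(Context context) {
      return (Build.VERSION.SDK_INT >= 21)
          ? new Camera2Enumerator(context)  // Assumed ctor.
          : new CameraEnumerator();         // Assumed ctor.
    }
  }
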
diff --git a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
index 3e37f6afdc..5f68c3759e 100644
--- a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
+++ b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
@@ -29,7 +29,6 @@ package org.webrtc;
import static java.lang.Math.abs;
import static java.lang.Math.ceil;
-import android.hardware.Camera;
import android.graphics.ImageFormat;
import org.json.JSONArray;
@@ -72,7 +71,7 @@ public class CameraEnumerationAndroid {
// other image formats then this needs to be updated and
// VideoCapturerAndroid.getSupportedFormats need to return CaptureFormats of
// all imageFormats.
- public final int imageFormat = ImageFormat.YV12;
+ public final int imageFormat = ImageFormat.NV21;
public CaptureFormat(int width, int height, int minFramerate,
int maxFramerate) {
@@ -88,25 +87,15 @@ public class CameraEnumerationAndroid {
}
// Calculates the frame size of the specified image format. Currently only
- // supporting ImageFormat.YV12. The YV12's stride is the closest rounded up
- // multiple of 16 of the width and width and height are always even.
- // Android guarantees this:
- // http://developer.android.com/reference/android/hardware/Camera.Parameters.html#setPreviewFormat%28int%29
+ // supporting ImageFormat.NV21.
+ // The size is width * height * number of bytes per pixel.
+ // http://developer.android.com/reference/android/hardware/Camera.html#addCallbackBuffer(byte[])
public static int frameSize(int width, int height, int imageFormat) {
- if (imageFormat != ImageFormat.YV12) {
+ if (imageFormat != ImageFormat.NV21) {
throw new UnsupportedOperationException("Don't know how to calculate "
- + "the frame size of non-YV12 image formats.");
+ + "the frame size of non-NV21 image formats.");
}
- int yStride = roundUp(width, 16);
- int uvStride = roundUp(yStride / 2, 16);
- int ySize = yStride * height;
- int uvSize = uvStride * height / 2;
- return ySize + uvSize * 2;
- }
-
- // Rounds up |x| to the closest value that is a multiple of |alignment|.
- private static int roundUp(int x, int alignment) {
- return (int)ceil(x / (double)alignment) * alignment;
+ return (width * height * ImageFormat.getBitsPerPixel(imageFormat)) / 8;
}
@Override
@@ -114,21 +103,19 @@ public class CameraEnumerationAndroid {
return width + "x" + height + "@[" + minFramerate + ":" + maxFramerate + "]";
}
- @Override
- public boolean equals(Object that) {
- if (!(that instanceof CaptureFormat)) {
+ public boolean isSameFormat(final CaptureFormat that) {
+ if (that == null) {
return false;
}
- final CaptureFormat c = (CaptureFormat) that;
- return width == c.width && height == c.height && maxFramerate == c.maxFramerate
- && minFramerate == c.minFramerate;
+ return width == that.width && height == that.height && maxFramerate == that.maxFramerate
+ && minFramerate == that.minFramerate;
}
}
// Returns device names that can be used to create a new VideoCapturerAndroid.
public static String[] getDeviceNames() {
- String[] names = new String[Camera.getNumberOfCameras()];
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ String[] names = new String[android.hardware.Camera.getNumberOfCameras()];
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
names[i] = getDeviceName(i);
}
return names;
@@ -136,22 +123,22 @@ public class CameraEnumerationAndroid {
// Returns number of cameras on device.
public static int getDeviceCount() {
- return Camera.getNumberOfCameras();
+ return android.hardware.Camera.getNumberOfCameras();
}
// Returns the name of the camera with camera index. Returns null if the
// camera can not be used.
public static String getDeviceName(int index) {
- Camera.CameraInfo info = new Camera.CameraInfo();
+ android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
try {
- Camera.getCameraInfo(index, info);
+ android.hardware.Camera.getCameraInfo(index, info);
} catch (Exception e) {
 Logging.e(TAG, "getCameraInfo failed on index " + index, e);
return null;
}
String facing =
- (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) ? "front" : "back";
+ (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT) ? "front" : "back";
return "Camera " + index + ", Facing " + facing
+ ", Orientation " + info.orientation;
}
@@ -159,13 +146,13 @@ public class CameraEnumerationAndroid {
// Returns the name of the front facing camera. Returns null if the
// camera can not be used or does not exist.
public static String getNameOfFrontFacingDevice() {
- return getNameOfDevice(Camera.CameraInfo.CAMERA_FACING_FRONT);
+ return getNameOfDevice(android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT);
}
// Returns the name of the back facing camera. Returns null if the
// camera can not be used or does not exist.
public static String getNameOfBackFacingDevice() {
- return getNameOfDevice(Camera.CameraInfo.CAMERA_FACING_BACK);
+ return getNameOfDevice(android.hardware.Camera.CameraInfo.CAMERA_FACING_BACK);
}
public static String getSupportedFormatsAsJson(int id) throws JSONException {
@@ -194,7 +181,8 @@ public class CameraEnumerationAndroid {
}
}
- public static int[] getFramerateRange(Camera.Parameters parameters, final int framerate) {
+ public static int[] getFramerateRange(android.hardware.Camera.Parameters parameters,
+ final int framerate) {
List<int[]> listFpsRange = parameters.getSupportedPreviewFpsRange();
if (listFpsRange.isEmpty()) {
Logging.w(TAG, "No supported preview fps range");
@@ -203,27 +191,30 @@ public class CameraEnumerationAndroid {
return Collections.min(listFpsRange,
new ClosestComparator<int[]>() {
@Override int diff(int[] range) {
- return abs(framerate - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX])
- + abs(framerate - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
+ final int maxFpsWeight = 10;
+ return range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX]
+ + maxFpsWeight * abs(framerate
+ - range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
}
});
}
- public static Camera.Size getClosestSupportedSize(
- List<Camera.Size> supportedSizes, final int requestedWidth, final int requestedHeight) {
+ public static android.hardware.Camera.Size getClosestSupportedSize(
+ List<android.hardware.Camera.Size> supportedSizes, final int requestedWidth,
+ final int requestedHeight) {
return Collections.min(supportedSizes,
- new ClosestComparator<Camera.Size>() {
- @Override int diff(Camera.Size size) {
+ new ClosestComparator<android.hardware.Camera.Size>() {
+ @Override int diff(android.hardware.Camera.Size size) {
return abs(requestedWidth - size.width) + abs(requestedHeight - size.height);
}
});
}
private static String getNameOfDevice(int facing) {
- final Camera.CameraInfo info = new Camera.CameraInfo();
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ final android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
try {
- Camera.getCameraInfo(i, info);
+ android.hardware.Camera.getCameraInfo(i, info);
if (info.facing == facing) {
return getDeviceName(i);
}
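
The new frameSize() arithmetic is easy to sanity-check: NV21 carries 12 bits per pixel (an 8-bit Y plane plus a 2x2-subsampled interleaved VU plane), so ImageFormat.getBitsPerPixel(ImageFormat.NV21) returns 12 and a 640x480 buffer needs 640 * 480 * 12 / 8 = 460800 bytes, with no stride rounding involved. A small check (the import is Android-only, so this is illustrative):

  import android.graphics.ImageFormat;

  final class FrameSizeCheck {
    public static void main(String[] args) {
      final int width = 640;
      final int height = 480;
      // getBitsPerPixel(NV21) == 12: a full-resolution Y plane plus a
      // quarter-resolution, interleaved VU plane.
      final int size = width * height * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8;
      System.out.println(size); // 460800, i.e. width * height * 3 / 2.
    }
  }
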
diff --git a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
index 2f35dc3493..54469cc341 100644
--- a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
+++ b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
@@ -27,7 +27,6 @@
package org.webrtc;
-import android.hardware.Camera;
import android.os.SystemClock;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
@@ -60,11 +59,11 @@ public class CameraEnumerator implements CameraEnumerationAndroid.Enumerator {
private List<CaptureFormat> enumerateFormats(int cameraId) {
Logging.d(TAG, "Get supported formats for camera index " + cameraId + ".");
final long startTimeMs = SystemClock.elapsedRealtime();
- final Camera.Parameters parameters;
- Camera camera = null;
+ final android.hardware.Camera.Parameters parameters;
+ android.hardware.Camera camera = null;
try {
Logging.d(TAG, "Opening camera with index " + cameraId);
- camera = Camera.open(cameraId);
+ camera = android.hardware.Camera.open(cameraId);
parameters = camera.getParameters();
} catch (RuntimeException e) {
Logging.e(TAG, "Open camera failed on camera index " + cameraId, e);
@@ -84,10 +83,10 @@ public class CameraEnumerator implements CameraEnumerationAndroid.Enumerator {
// getSupportedPreviewFpsRange() returns a sorted list. Take the fps range
// corresponding to the highest fps.
final int[] range = listFpsRange.get(listFpsRange.size() - 1);
- minFps = range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX];
- maxFps = range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX];
+ minFps = range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX];
+ maxFps = range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX];
}
- for (Camera.Size size : parameters.getSupportedPreviewSizes()) {
+ for (android.hardware.Camera.Size size : parameters.getSupportedPreviewSizes()) {
formatList.add(new CaptureFormat(size.width, size.height, minFps, maxFps));
}
} catch (Exception e) {
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase.java b/talk/app/webrtc/java/android/org/webrtc/EglBase.java
index 2ee36882e8..035645bdd1 100644
--- a/talk/app/webrtc/java/android/org/webrtc/EglBase.java
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase.java
@@ -28,244 +28,108 @@
package org.webrtc;
import android.graphics.SurfaceTexture;
-import android.view.SurfaceHolder;
-
-import org.webrtc.Logging;
+import android.view.Surface;
import javax.microedition.khronos.egl.EGL10;
-import javax.microedition.khronos.egl.EGLConfig;
-import javax.microedition.khronos.egl.EGLContext;
-import javax.microedition.khronos.egl.EGLDisplay;
-import javax.microedition.khronos.egl.EGLSurface;
+
/**
- * Holds EGL state and utility methods for handling an EGLContext, an EGLDisplay, and an EGLSurface.
+ * Holds EGL state and utility methods for handling an EGL 1.0 or 1.4 EGLContext, an EGLDisplay,
+ * and an EGLSurface.
*/
-public final class EglBase {
- private static final String TAG = "EglBase";
+public abstract class EglBase {
+ // EGL wrapper for an actual EGLContext.
+ public static class Context {
+ }
+
// These constants are taken from EGL14.EGL_OPENGL_ES2_BIT and EGL14.EGL_CONTEXT_CLIENT_VERSION.
// https://android.googlesource.com/platform/frameworks/base/+/master/opengl/java/android/opengl/EGL14.java
// This is similar to how GlSurfaceView does:
// http://grepcode.com/file/repository.grepcode.com/java/ext/com.google.android/android/5.1.1_r1/android/opengl/GLSurfaceView.java#760
private static final int EGL_OPENGL_ES2_BIT = 4;
- private static final int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
// Android-specific extension.
private static final int EGL_RECORDABLE_ANDROID = 0x3142;
- private final EGL10 egl;
- private EGLContext eglContext;
- private ConfigType configType;
- private EGLConfig eglConfig;
- private EGLDisplay eglDisplay;
- private EGLSurface eglSurface = EGL10.EGL_NO_SURFACE;
-
- // EGLConfig constructor type. Influences eglChooseConfig arguments.
- public static enum ConfigType {
- // No special parameters.
- PLAIN,
- // Configures with EGL_SURFACE_TYPE = EGL_PBUFFER_BIT.
- PIXEL_BUFFER,
- // Configures with EGL_RECORDABLE_ANDROID = 1.
- // Discourages EGL from using pixel formats that cannot efficiently be
- // converted to something usable by the video encoder.
- RECORDABLE
- }
-
- // Create root context without any EGLSurface or parent EGLContext. This can be used for branching
- // new contexts that share data.
- public EglBase() {
- this(EGL10.EGL_NO_CONTEXT, ConfigType.PLAIN);
- }
-
- // Create a new context with the specified config type, sharing data with sharedContext.
- public EglBase(EGLContext sharedContext, ConfigType configType) {
- this.egl = (EGL10) EGLContext.getEGL();
- this.configType = configType;
- eglDisplay = getEglDisplay();
- eglConfig = getEglConfig(eglDisplay, configType);
- eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
- }
-
- // Create EGLSurface from the Android SurfaceHolder.
- public void createSurface(SurfaceHolder surfaceHolder) {
- createSurfaceInternal(surfaceHolder);
- }
+ public static final int[] CONFIG_PLAIN = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_RGBA = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_ALPHA_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_PIXEL_BUFFER = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_SURFACE_TYPE, EGL10.EGL_PBUFFER_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_PIXEL_RGBA_BUFFER = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_ALPHA_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_SURFACE_TYPE, EGL10.EGL_PBUFFER_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_RECORDABLE = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL_RECORDABLE_ANDROID, 1,
+ EGL10.EGL_NONE
+ };
+
+ // Create a new context with the specified config attributes, sharing data with sharedContext.
+ // |sharedContext| can be null.
+ public static EglBase create(Context sharedContext, int[] configAttributes) {
+ return (EglBase14.isEGL14Supported()
+ && (sharedContext == null || sharedContext instanceof EglBase14.Context))
+ ? new EglBase14((EglBase14.Context) sharedContext, configAttributes)
+ : new EglBase10((EglBase10.Context) sharedContext, configAttributes);
+ }
+
+ public static EglBase create() {
+ return create(null, CONFIG_PLAIN);
+ }
+
+ public abstract void createSurface(Surface surface);
// Create EGLSurface from the Android SurfaceTexture.
- public void createSurface(SurfaceTexture surfaceTexture) {
- createSurfaceInternal(surfaceTexture);
- }
-
- // Create EGLSurface from either a SurfaceHolder or a SurfaceTexture.
- private void createSurfaceInternal(Object nativeWindow) {
- if (!(nativeWindow instanceof SurfaceHolder) && !(nativeWindow instanceof SurfaceTexture)) {
- throw new IllegalStateException("Input must be either a SurfaceHolder or SurfaceTexture");
- }
- checkIsNotReleased();
- if (configType == ConfigType.PIXEL_BUFFER) {
- Logging.w(TAG, "This EGL context is configured for PIXEL_BUFFER, but uses regular Surface");
- }
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Already has an EGLSurface");
- }
- int[] surfaceAttribs = {EGL10.EGL_NONE};
- eglSurface = egl.eglCreateWindowSurface(eglDisplay, eglConfig, nativeWindow, surfaceAttribs);
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Failed to create window surface");
- }
- }
+ public abstract void createSurface(SurfaceTexture surfaceTexture);
// Create dummy 1x1 pixel buffer surface so the context can be made current.
- public void createDummyPbufferSurface() {
- createPbufferSurface(1, 1);
- }
-
- public void createPbufferSurface(int width, int height) {
- checkIsNotReleased();
- if (configType != ConfigType.PIXEL_BUFFER) {
- throw new RuntimeException(
- "This EGL context is not configured to use a pixel buffer: " + configType);
- }
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Already has an EGLSurface");
- }
- int[] surfaceAttribs = {EGL10.EGL_WIDTH, width, EGL10.EGL_HEIGHT, height, EGL10.EGL_NONE};
- eglSurface = egl.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs);
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Failed to create pixel buffer surface");
- }
- }
+ public abstract void createDummyPbufferSurface();
- public EGLContext getContext() {
- return eglContext;
- }
+ public abstract void createPbufferSurface(int width, int height);
- public boolean hasSurface() {
- return eglSurface != EGL10.EGL_NO_SURFACE;
- }
+ public abstract Context getEglBaseContext();
- public int surfaceWidth() {
- final int widthArray[] = new int[1];
- egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_WIDTH, widthArray);
- return widthArray[0];
- }
+ public abstract boolean hasSurface();
- public int surfaceHeight() {
- final int heightArray[] = new int[1];
- egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_HEIGHT, heightArray);
- return heightArray[0];
- }
+ public abstract int surfaceWidth();
- public void releaseSurface() {
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- egl.eglDestroySurface(eglDisplay, eglSurface);
- eglSurface = EGL10.EGL_NO_SURFACE;
- }
- }
+ public abstract int surfaceHeight();
- private void checkIsNotReleased() {
- if (eglDisplay == EGL10.EGL_NO_DISPLAY || eglContext == EGL10.EGL_NO_CONTEXT
- || eglConfig == null) {
- throw new RuntimeException("This object has been released");
- }
- }
+ public abstract void releaseSurface();
- public void release() {
- checkIsNotReleased();
- releaseSurface();
- detachCurrent();
- egl.eglDestroyContext(eglDisplay, eglContext);
- egl.eglTerminate(eglDisplay);
- eglContext = EGL10.EGL_NO_CONTEXT;
- eglDisplay = EGL10.EGL_NO_DISPLAY;
- eglConfig = null;
- }
+ public abstract void release();
- public void makeCurrent() {
- checkIsNotReleased();
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("No EGLSurface - can't make current");
- }
- if (!egl.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
- throw new RuntimeException("eglMakeCurrent failed");
- }
- }
+ public abstract void makeCurrent();
// Detach the current EGL context, so that it can be made current on another thread.
- public void detachCurrent() {
- if (!egl.eglMakeCurrent(
- eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT)) {
- throw new RuntimeException("eglMakeCurrent failed");
- }
- }
+ public abstract void detachCurrent();
- public void swapBuffers() {
- checkIsNotReleased();
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("No EGLSurface - can't swap buffers");
- }
- egl.eglSwapBuffers(eglDisplay, eglSurface);
- }
-
- // Return an EGLDisplay, or die trying.
- private EGLDisplay getEglDisplay() {
- EGLDisplay eglDisplay = egl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY);
- if (eglDisplay == EGL10.EGL_NO_DISPLAY) {
- throw new RuntimeException("Unable to get EGL10 display");
- }
- int[] version = new int[2];
- if (!egl.eglInitialize(eglDisplay, version)) {
- throw new RuntimeException("Unable to initialize EGL10");
- }
- return eglDisplay;
- }
-
- // Return an EGLConfig, or die trying.
- private EGLConfig getEglConfig(EGLDisplay eglDisplay, ConfigType configType) {
- // Always RGB888, GLES2.
- int[] configAttributes = {
- EGL10.EGL_RED_SIZE, 8,
- EGL10.EGL_GREEN_SIZE, 8,
- EGL10.EGL_BLUE_SIZE, 8,
- EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
- EGL10.EGL_NONE, 0, // Allocate dummy fields for specific options.
- EGL10.EGL_NONE
- };
-
- // Fill in dummy fields based on configType.
- switch (configType) {
- case PLAIN:
- break;
- case PIXEL_BUFFER:
- configAttributes[configAttributes.length - 3] = EGL10.EGL_SURFACE_TYPE;
- configAttributes[configAttributes.length - 2] = EGL10.EGL_PBUFFER_BIT;
- break;
- case RECORDABLE:
- configAttributes[configAttributes.length - 3] = EGL_RECORDABLE_ANDROID;
- configAttributes[configAttributes.length - 2] = 1;
- break;
- default:
- throw new IllegalArgumentException();
- }
-
- EGLConfig[] configs = new EGLConfig[1];
- int[] numConfigs = new int[1];
- if (!egl.eglChooseConfig(
- eglDisplay, configAttributes, configs, configs.length, numConfigs)) {
- throw new RuntimeException("Unable to find RGB888 " + configType + " EGL config");
- }
- return configs[0];
- }
-
- // Return an EGLConfig, or die trying.
- private EGLContext createEglContext(
- EGLContext sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
- int[] contextAttributes = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL10.EGL_NONE};
- EGLContext eglContext =
- egl.eglCreateContext(eglDisplay, eglConfig, sharedContext, contextAttributes);
- if (eglContext == EGL10.EGL_NO_CONTEXT) {
- throw new RuntimeException("Failed to create EGL context");
- }
- return eglContext;
- }
+ public abstract void swapBuffers();
}
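
The abstract class above replaces direct construction with EglBase.create(), which prefers the EGL14 implementation when available and falls back to EGL10, so callers no longer care which backend they get. A minimal usage sketch built only from the methods and constants declared above:

  final class EglUsageSketch {
    static void run() {
      // Root context: pbuffer-capable so it can be made current off-screen.
      EglBase rootEglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
      rootEglBase.createDummyPbufferSurface();
      rootEglBase.makeCurrent();

      // A second context sharing textures with the root, e.g. for a render thread.
      EglBase drawEglBase =
          EglBase.create(rootEglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
      // drawEglBase.createSurface(someSurface); makeCurrent(); swapBuffers(); ...

      drawEglBase.release();
      rootEglBase.release();
    }
  }
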
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase10.java b/talk/app/webrtc/java/android/org/webrtc/EglBase10.java
new file mode 100644
index 0000000000..f2aa9857fa
--- /dev/null
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase10.java
@@ -0,0 +1,299 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import android.graphics.Canvas;
+import android.graphics.SurfaceTexture;
+import android.graphics.Rect;
+import android.view.Surface;
+import android.view.SurfaceHolder;
+
+import javax.microedition.khronos.egl.EGL10;
+import javax.microedition.khronos.egl.EGLConfig;
+import javax.microedition.khronos.egl.EGLContext;
+import javax.microedition.khronos.egl.EGLDisplay;
+import javax.microedition.khronos.egl.EGLSurface;
+
+/**
+ * Holds EGL state and utility methods for handling an egl 1.0 EGLContext, an EGLDisplay,
+ * and an EGLSurface.
+ */
+final class EglBase10 extends EglBase {
+ // This constant is taken from EGL14.EGL_CONTEXT_CLIENT_VERSION.
+ private static final int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
+
+ private final EGL10 egl;
+ private EGLContext eglContext;
+ private EGLConfig eglConfig;
+ private EGLDisplay eglDisplay;
+ private EGLSurface eglSurface = EGL10.EGL_NO_SURFACE;
+
+ // EGL wrapper for an actual EGLContext.
+ public static class Context extends EglBase.Context {
+ private final EGLContext eglContext;
+
+ public Context(EGLContext eglContext) {
+ this.eglContext = eglContext;
+ }
+ }
+
+ // Create a new context with the specified config type, sharing data with sharedContext.
+ EglBase10(Context sharedContext, int[] configAttributes) {
+ this.egl = (EGL10) EGLContext.getEGL();
+ eglDisplay = getEglDisplay();
+ eglConfig = getEglConfig(eglDisplay, configAttributes);
+ eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
+ }
+
+ @Override
+ public void createSurface(Surface surface) {
+ /**
+ * We have to wrap Surface in a SurfaceHolder because for some reason eglCreateWindowSurface
+ * couldn't actually take a Surface object until API 17. Older versions fortunately just call
+ * SurfaceHolder.getSurface(), so we'll do that. No other methods are relevant.
+ */
+ class FakeSurfaceHolder implements SurfaceHolder {
+ private final Surface surface;
+
+ FakeSurfaceHolder(Surface surface) {
+ this.surface = surface;
+ }
+
+ @Override
+ public void addCallback(Callback callback) {}
+
+ @Override
+ public void removeCallback(Callback callback) {}
+
+ @Override
+ public boolean isCreating() {
+ return false;
+ }
+
+ @Deprecated
+ @Override
+ public void setType(int i) {}
+
+ @Override
+ public void setFixedSize(int i, int i2) {}
+
+ @Override
+ public void setSizeFromLayout() {}
+
+ @Override
+ public void setFormat(int i) {}
+
+ @Override
+ public void setKeepScreenOn(boolean b) {}
+
+ @Override
+ public Canvas lockCanvas() {
+ return null;
+ }
+
+ @Override
+ public Canvas lockCanvas(Rect rect) {
+ return null;
+ }
+
+ @Override
+ public void unlockCanvasAndPost(Canvas canvas) {}
+
+ @Override
+ public Rect getSurfaceFrame() {
+ return null;
+ }
+
+ @Override
+ public Surface getSurface() {
+ return surface;
+ }
+ }
+
+ createSurfaceInternal(new FakeSurfaceHolder(surface));
+ }
+
+ // Create EGLSurface from the Android SurfaceTexture.
+ @Override
+ public void createSurface(SurfaceTexture surfaceTexture) {
+ createSurfaceInternal(surfaceTexture);
+ }
+
+ // Create EGLSurface from either a SurfaceHolder or a SurfaceTexture.
+ private void createSurfaceInternal(Object nativeWindow) {
+ if (!(nativeWindow instanceof SurfaceHolder) && !(nativeWindow instanceof SurfaceTexture)) {
+ throw new IllegalStateException("Input must be either a SurfaceHolder or SurfaceTexture");
+ }
+ checkIsNotReleased();
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL10.EGL_NONE};
+ eglSurface = egl.eglCreateWindowSurface(eglDisplay, eglConfig, nativeWindow, surfaceAttribs);
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create window surface");
+ }
+ }
+
+ // Create dummy 1x1 pixel buffer surface so the context can be made current.
+ @Override
+ public void createDummyPbufferSurface() {
+ createPbufferSurface(1, 1);
+ }
+
+ @Override
+ public void createPbufferSurface(int width, int height) {
+ checkIsNotReleased();
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL10.EGL_WIDTH, width, EGL10.EGL_HEIGHT, height, EGL10.EGL_NONE};
+ eglSurface = egl.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs);
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create pixel buffer surface");
+ }
+ }
+
+ @Override
+ public org.webrtc.EglBase.Context getEglBaseContext() {
+ return new EglBase10.Context(eglContext);
+ }
+
+ @Override
+ public boolean hasSurface() {
+ return eglSurface != EGL10.EGL_NO_SURFACE;
+ }
+
+ @Override
+ public int surfaceWidth() {
+ final int widthArray[] = new int[1];
+ egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_WIDTH, widthArray);
+ return widthArray[0];
+ }
+
+ @Override
+ public int surfaceHeight() {
+ final int heightArray[] = new int[1];
+ egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_HEIGHT, heightArray);
+ return heightArray[0];
+ }
+
+ @Override
+ public void releaseSurface() {
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ egl.eglDestroySurface(eglDisplay, eglSurface);
+ eglSurface = EGL10.EGL_NO_SURFACE;
+ }
+ }
+
+ private void checkIsNotReleased() {
+ if (eglDisplay == EGL10.EGL_NO_DISPLAY || eglContext == EGL10.EGL_NO_CONTEXT
+ || eglConfig == null) {
+ throw new RuntimeException("This object has been released");
+ }
+ }
+
+ @Override
+ public void release() {
+ checkIsNotReleased();
+ releaseSurface();
+ detachCurrent();
+ egl.eglDestroyContext(eglDisplay, eglContext);
+ egl.eglTerminate(eglDisplay);
+ eglContext = EGL10.EGL_NO_CONTEXT;
+ eglDisplay = EGL10.EGL_NO_DISPLAY;
+ eglConfig = null;
+ }
+
+ @Override
+ public void makeCurrent() {
+ checkIsNotReleased();
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't make current");
+ }
+ if (!egl.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ // Detach the current EGL context, so that it can be made current on another thread.
+ @Override
+ public void detachCurrent() {
+ if (!egl.eglMakeCurrent(
+ eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ @Override
+ public void swapBuffers() {
+ checkIsNotReleased();
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ egl.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ // Return an EGLDisplay, or die trying.
+ private EGLDisplay getEglDisplay() {
+ EGLDisplay eglDisplay = egl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY);
+ if (eglDisplay == EGL10.EGL_NO_DISPLAY) {
+ throw new RuntimeException("Unable to get EGL10 display");
+ }
+ int[] version = new int[2];
+ if (!egl.eglInitialize(eglDisplay, version)) {
+ throw new RuntimeException("Unable to initialize EGL10");
+ }
+ return eglDisplay;
+ }
+
+ // Return an EGLConfig, or die trying.
+ private EGLConfig getEglConfig(EGLDisplay eglDisplay, int[] configAttributes) {
+ EGLConfig[] configs = new EGLConfig[1];
+ int[] numConfigs = new int[1];
+ if (!egl.eglChooseConfig(
+ eglDisplay, configAttributes, configs, configs.length, numConfigs)) {
+ throw new RuntimeException("Unable to find any matching EGL config");
+ }
+ return configs[0];
+ }
+
+ // Return an EGLContext, or die trying.
+ private EGLContext createEglContext(
+ Context sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
+ int[] contextAttributes = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL10.EGL_NONE};
+ EGLContext rootContext =
+ sharedContext == null ? EGL10.EGL_NO_CONTEXT : sharedContext.eglContext;
+ EGLContext eglContext =
+ egl.eglCreateContext(eglDisplay, eglConfig, rootContext, contextAttributes);
+ if (eglContext == EGL10.EGL_NO_CONTEXT) {
+ throw new RuntimeException("Failed to create EGL context");
+ }
+ return eglContext;
+ }
+}
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase14.java b/talk/app/webrtc/java/android/org/webrtc/EglBase14.java
new file mode 100644
index 0000000000..c6f98c3b31
--- /dev/null
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase14.java
@@ -0,0 +1,254 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import android.annotation.TargetApi;
+import android.graphics.SurfaceTexture;
+import android.opengl.EGL14;
+import android.opengl.EGLConfig;
+import android.opengl.EGLContext;
+import android.opengl.EGLDisplay;
+import android.opengl.EGLExt;
+import android.opengl.EGLSurface;
+import android.view.Surface;
+
+import org.webrtc.Logging;
+
+/**
+ * Holds EGL state and utility methods for handling an EGL14 EGLContext, an EGLDisplay,
+ * and an EGLSurface.
+ */
+@TargetApi(18)
+final class EglBase14 extends EglBase {
+ private static final String TAG = "EglBase14";
+ private static final int EGLExt_SDK_VERSION = android.os.Build.VERSION_CODES.JELLY_BEAN_MR2;
+ private static final int CURRENT_SDK_VERSION = android.os.Build.VERSION.SDK_INT;
+ private EGLContext eglContext;
+ private EGLConfig eglConfig;
+ private EGLDisplay eglDisplay;
+ private EGLSurface eglSurface = EGL14.EGL_NO_SURFACE;
+
+ // EGL 1.4 is supported from API 17, but EGLExt, which is used to set the presentation
+ // time stamp on a surface, is supported only from API 18, so we require 18.
+ public static boolean isEGL14Supported() {
+ Logging.d(TAG, "SDK version: " + CURRENT_SDK_VERSION
+ + ". isEGL14Supported: " + (CURRENT_SDK_VERSION >= EGLExt_SDK_VERSION));
+ return (CURRENT_SDK_VERSION >= EGLExt_SDK_VERSION);
+ }
+
+ public static class Context extends EglBase.Context {
+ private final android.opengl.EGLContext egl14Context;
+
+ Context(android.opengl.EGLContext eglContext) {
+ this.egl14Context = eglContext;
+ }
+ }
+
+ // Create a new context with the specified config type, sharing data with sharedContext.
+ // |sharedContext| may be null.
+ EglBase14(EglBase14.Context sharedContext, int[] configAttributes) {
+ eglDisplay = getEglDisplay();
+ eglConfig = getEglConfig(eglDisplay, configAttributes);
+ eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
+ }
+
+ // Create EGLSurface from the Android Surface.
+ @Override
+ public void createSurface(Surface surface) {
+ createSurfaceInternal(surface);
+ }
+
+ // Create EGLSurface from the Android SurfaceTexture.
+ @Override
+ public void createSurface(SurfaceTexture surfaceTexture) {
+ createSurfaceInternal(surfaceTexture);
+ }
+
+ // Create EGLSurface from either Surface or SurfaceTexture.
+ private void createSurfaceInternal(Object surface) {
+ if (!(surface instanceof Surface) && !(surface instanceof SurfaceTexture)) {
+ throw new IllegalStateException("Input must be either a Surface or SurfaceTexture");
+ }
+ checkIsNotReleased();
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL14.EGL_NONE};
+ eglSurface = EGL14.eglCreateWindowSurface(eglDisplay, eglConfig, surface, surfaceAttribs, 0);
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create window surface");
+ }
+ }
+
+ @Override
+ public void createDummyPbufferSurface() {
+ createPbufferSurface(1, 1);
+ }
+
+ @Override
+ public void createPbufferSurface(int width, int height) {
+ checkIsNotReleased();
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL14.EGL_WIDTH, width, EGL14.EGL_HEIGHT, height, EGL14.EGL_NONE};
+ eglSurface = EGL14.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs, 0);
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create pixel buffer surface");
+ }
+ }
+
+ @Override
+ public Context getEglBaseContext() {
+ return new EglBase14.Context(eglContext);
+ }
+
+ @Override
+ public boolean hasSurface() {
+ return eglSurface != EGL14.EGL_NO_SURFACE;
+ }
+
+ @Override
+ public int surfaceWidth() {
+ final int widthArray[] = new int[1];
+ EGL14.eglQuerySurface(eglDisplay, eglSurface, EGL14.EGL_WIDTH, widthArray, 0);
+ return widthArray[0];
+ }
+
+ @Override
+ public int surfaceHeight() {
+ final int heightArray[] = new int[1];
+ EGL14.eglQuerySurface(eglDisplay, eglSurface, EGL14.EGL_HEIGHT, heightArray, 0);
+ return heightArray[0];
+ }
+
+ @Override
+ public void releaseSurface() {
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ EGL14.eglDestroySurface(eglDisplay, eglSurface);
+ eglSurface = EGL14.EGL_NO_SURFACE;
+ }
+ }
+
+ private void checkIsNotReleased() {
+ if (eglDisplay == EGL14.EGL_NO_DISPLAY || eglContext == EGL14.EGL_NO_CONTEXT
+ || eglConfig == null) {
+ throw new RuntimeException("This object has been released");
+ }
+ }
+
+ @Override
+ public void release() {
+ checkIsNotReleased();
+ releaseSurface();
+ detachCurrent();
+ EGL14.eglDestroyContext(eglDisplay, eglContext);
+ EGL14.eglReleaseThread();
+ EGL14.eglTerminate(eglDisplay);
+ eglContext = EGL14.EGL_NO_CONTEXT;
+ eglDisplay = EGL14.EGL_NO_DISPLAY;
+ eglConfig = null;
+ }
+
+ @Override
+ public void makeCurrent() {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't make current");
+ }
+ if (!EGL14.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ // Detach the current EGL context, so that it can be made current on another thread.
+ @Override
+ public void detachCurrent() {
+ if (!EGL14.eglMakeCurrent(
+ eglDisplay, EGL14.EGL_NO_SURFACE, EGL14.EGL_NO_SURFACE, EGL14.EGL_NO_CONTEXT)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ @Override
+ public void swapBuffers() {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ EGL14.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ public void swapBuffers(long timeStampNs) {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ // See https://android.googlesource.com/platform/frameworks/native/+/tools_r22.2/opengl/specs/EGL_ANDROID_presentation_time.txt
+ EGLExt.eglPresentationTimeANDROID(eglDisplay, eglSurface, timeStampNs);
+ EGL14.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ // Return an EGLDisplay, or die trying.
+ private static EGLDisplay getEglDisplay() {
+ EGLDisplay eglDisplay = EGL14.eglGetDisplay(EGL14.EGL_DEFAULT_DISPLAY);
+ if (eglDisplay == EGL14.EGL_NO_DISPLAY) {
+ throw new RuntimeException("Unable to get EGL14 display");
+ }
+ int[] version = new int[2];
+ if (!EGL14.eglInitialize(eglDisplay, version, 0, version, 1)) {
+ throw new RuntimeException("Unable to initialize EGL14");
+ }
+ return eglDisplay;
+ }
+
+ // Return an EGLConfig, or die trying.
+ private static EGLConfig getEglConfig(EGLDisplay eglDisplay, int[] configAttributes) {
+ EGLConfig[] configs = new EGLConfig[1];
+ int[] numConfigs = new int[1];
+ if (!EGL14.eglChooseConfig(
+ eglDisplay, configAttributes, 0, configs, 0, configs.length, numConfigs, 0)) {
+ throw new RuntimeException("Unable to find any matching EGL config");
+ }
+ return configs[0];
+ }
+
+ // Return an EGLContext, or die trying.
+ private static EGLContext createEglContext(
+ EglBase14.Context sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
+ int[] contextAttributes = {EGL14.EGL_CONTEXT_CLIENT_VERSION, 2, EGL14.EGL_NONE};
+ EGLContext rootContext =
+ sharedContext == null ? EGL14.EGL_NO_CONTEXT : sharedContext.egl14Context;
+ EGLContext eglContext =
+ EGL14.eglCreateContext(eglDisplay, eglConfig, rootContext, contextAttributes, 0);
+ if (eglContext == EGL14.EGL_NO_CONTEXT) {
+ throw new RuntimeException("Failed to create EGL context");
+ }
+ return eglContext;
+ }
+}
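
swapBuffers(long timeStampNs) is the EGL14-only addition here: it stamps the frame with eglPresentationTimeANDROID before swapping, which is how MediaCodec input surfaces receive encoder timestamps. A sketch, noting that EglBase14 is package-private (so this would live in org.webrtc) and that a downcast is needed because the base class only declares the no-argument swapBuffers():

  import android.view.Surface;

  final class EncoderSurfaceSketch {
    static void drawOneFrame(Surface encoderInputSurface, long timeStampNs) {
      EglBase eglBase = EglBase.create(null, EglBase.CONFIG_RECORDABLE);
      if (!(eglBase instanceof EglBase14)) {
        throw new IllegalStateException("Presentation timestamps need the EGL14 path.");
      }
      eglBase.createSurface(encoderInputSurface);
      eglBase.makeCurrent();
      // ... GL draw calls for one frame go here ...
      // Stamps eglPresentationTimeANDROID, then swaps.
      ((EglBase14) eglBase).swapBuffers(timeStampNs);
      eglBase.release();
    }
  }
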
diff --git a/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java b/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
index 2cb8af754d..6d3d5d2563 100644
--- a/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
+++ b/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
@@ -40,13 +40,13 @@ import java.util.IdentityHashMap;
import java.util.Map;
/**
- * Helper class to draw a quad that covers the entire viewport. Rotation, mirror, and cropping is
- * specified using a 4x4 texture coordinate transform matrix. The frame input can either be an OES
- * texture or YUV textures in I420 format. The GL state must be preserved between draw calls, this
- * is intentional to maximize performance. The function release() must be called manually to free
- * the resources held by this object.
+ * Helper class to draw an opaque quad on the target viewport location. Rotation, mirroring, and
+ * cropping are specified using a 4x4 texture coordinate transform matrix. The frame input can
+ * either be an OES texture or YUV textures in I420 format. The GL state must be preserved between
+ * draw calls; this is intentional to maximize performance. The function release() must be called
+ * manually to free the resources held by this object.
*/
-public class GlRectDrawer {
+public class GlRectDrawer implements RendererCommon.GlDrawer {
// Simple vertex shader, used for both YUV and OES.
private static final String VERTEX_SHADER_STRING =
"varying vec2 interp_tc;\n"
@@ -118,67 +118,31 @@ public class GlRectDrawer {
1.0f, 1.0f // Top right.
});
- // The keys are one of the fragments shaders above.
- private final Map<String, GlShader> shaders = new IdentityHashMap<String, GlShader>();
- private GlShader currentShader;
- private float[] currentTexMatrix;
- private int texMatrixLocation;
- // Intermediate copy buffer for uploading yuv frames that are not packed, i.e. stride > width.
- // TODO(magjed): Investigate when GL_UNPACK_ROW_LENGTH is available, or make a custom shader that
- // handles stride and compare performance with intermediate copy.
- private ByteBuffer copyBuffer;
+ private static class Shader {
+ public final GlShader glShader;
+ public final int texMatrixLocation;
- /**
- * Upload |planes| into |outputYuvTextures|, taking stride into consideration. |outputYuvTextures|
- * must have been generated in advance.
- */
- public void uploadYuvData(
- int[] outputYuvTextures, int width, int height, int[] strides, ByteBuffer[] planes) {
- // Make a first pass to see if we need a temporary copy buffer.
- int copyCapacityNeeded = 0;
- for (int i = 0; i < 3; ++i) {
- final int planeWidth = (i == 0) ? width : width / 2;
- final int planeHeight = (i == 0) ? height : height / 2;
- if (strides[i] > planeWidth) {
- copyCapacityNeeded = Math.max(copyCapacityNeeded, planeWidth * planeHeight);
- }
- }
- // Allocate copy buffer if necessary.
- if (copyCapacityNeeded > 0
- && (copyBuffer == null || copyBuffer.capacity() < copyCapacityNeeded)) {
- copyBuffer = ByteBuffer.allocateDirect(copyCapacityNeeded);
- }
- // Upload each plane.
- for (int i = 0; i < 3; ++i) {
- GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
- GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, outputYuvTextures[i]);
- final int planeWidth = (i == 0) ? width : width / 2;
- final int planeHeight = (i == 0) ? height : height / 2;
- // GLES only accepts packed data, i.e. stride == planeWidth.
- final ByteBuffer packedByteBuffer;
- if (strides[i] == planeWidth) {
- // Input is packed already.
- packedByteBuffer = planes[i];
- } else {
- VideoRenderer.nativeCopyPlane(
- planes[i], planeWidth, planeHeight, strides[i], copyBuffer, planeWidth);
- packedByteBuffer = copyBuffer;
- }
- GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, planeWidth, planeHeight, 0,
- GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, packedByteBuffer);
+ public Shader(String fragmentShader) {
+ this.glShader = new GlShader(VERTEX_SHADER_STRING, fragmentShader);
+ this.texMatrixLocation = glShader.getUniformLocation("texMatrix");
}
}
+ // The keys are the fragment shaders above.
+ private final Map<String, Shader> shaders = new IdentityHashMap<String, Shader>();
+
/**
* Draw an OES texture frame with specified texture transformation matrix. Required resources are
* allocated at the first call to this function.
*/
- public void drawOes(int oesTextureId, float[] texMatrix) {
- prepareShader(OES_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawOes(int oesTextureId, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(OES_FRAGMENT_SHADER_STRING, texMatrix);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
// updateTexImage() may be called from another thread in another EGL context, so we need to
// bind/unbind the texture in each draw call so that GLES understands it's a new texture.
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, oesTextureId);
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, 0);
}
@@ -186,10 +150,12 @@ public class GlRectDrawer {
* Draw a RGB(A) texture frame with specified texture transformation matrix. Required resources
* are allocated at the first call to this function.
*/
- public void drawRgb(int textureId, float[] texMatrix) {
- prepareShader(RGB_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawRgb(int textureId, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(RGB_FRAGMENT_SHADER_STRING, texMatrix);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureId);
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
// Unbind the texture as a precaution.
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0);
}
@@ -198,14 +164,15 @@ public class GlRectDrawer {
* Draw a YUV frame with specified texture transformation matrix. Required resources are
* allocated at the first call to this function.
*/
- public void drawYuv(int[] yuvTextures, float[] texMatrix) {
- prepareShader(YUV_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawYuv(int[] yuvTextures, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(YUV_FRAGMENT_SHADER_STRING, texMatrix);
// Bind the textures.
for (int i = 0; i < 3; ++i) {
GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, yuvTextures[i]);
}
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
// Unbind the textures as a precaution.
for (int i = 0; i < 3; ++i) {
GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
@@ -213,60 +180,51 @@ public class GlRectDrawer {
}
}
- private void drawRectangle(float[] texMatrix) {
- // Try avoid uploading the texture if possible.
- if (!Arrays.equals(currentTexMatrix, texMatrix)) {
- currentTexMatrix = texMatrix.clone();
- // Copy the texture transformation matrix over.
- GLES20.glUniformMatrix4fv(texMatrixLocation, 1, false, texMatrix, 0);
- }
+ private void drawRectangle(int x, int y, int width, int height) {
// Draw quad.
+ GLES20.glViewport(x, y, width, height);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
}
- private void prepareShader(String fragmentShader) {
- // Lazy allocation.
- if (!shaders.containsKey(fragmentShader)) {
- final GlShader shader = new GlShader(VERTEX_SHADER_STRING, fragmentShader);
+ private void prepareShader(String fragmentShader, float[] texMatrix) {
+ final Shader shader;
+ if (shaders.containsKey(fragmentShader)) {
+ shader = shaders.get(fragmentShader);
+ } else {
+ // Lazy allocation.
+ shader = new Shader(fragmentShader);
shaders.put(fragmentShader, shader);
- shader.useProgram();
+ shader.glShader.useProgram();
// Initialize fragment shader uniform values.
if (fragmentShader == YUV_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("y_tex"), 0);
- GLES20.glUniform1i(shader.getUniformLocation("u_tex"), 1);
- GLES20.glUniform1i(shader.getUniformLocation("v_tex"), 2);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("y_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("u_tex"), 1);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("v_tex"), 2);
} else if (fragmentShader == RGB_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("rgb_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("rgb_tex"), 0);
} else if (fragmentShader == OES_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("oes_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("oes_tex"), 0);
} else {
throw new IllegalStateException("Unknown fragment shader: " + fragmentShader);
}
GlUtil.checkNoGLES2Error("Initialize fragment shader uniform values.");
// Initialize vertex shader attributes.
- shader.setVertexAttribArray("in_pos", 2, FULL_RECTANGLE_BUF);
- shader.setVertexAttribArray("in_tc", 2, FULL_RECTANGLE_TEX_BUF);
- }
-
- // Update GLES state if shader is not already current.
- final GlShader shader = shaders.get(fragmentShader);
- if (currentShader != shader) {
- currentShader = shader;
- shader.useProgram();
- GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
- currentTexMatrix = null;
- texMatrixLocation = shader.getUniformLocation("texMatrix");
+ shader.glShader.setVertexAttribArray("in_pos", 2, FULL_RECTANGLE_BUF);
+ shader.glShader.setVertexAttribArray("in_tc", 2, FULL_RECTANGLE_TEX_BUF);
}
+ shader.glShader.useProgram();
+ // Copy the texture transformation matrix over.
+ GLES20.glUniformMatrix4fv(shader.texMatrixLocation, 1, false, texMatrix, 0);
}
/**
* Release all GLES resources. This needs to be done manually, otherwise the resources are leaked.
*/
+ @Override
public void release() {
- for (GlShader shader : shaders.values()) {
- shader.release();
+ for (Shader shader : shaders.values()) {
+ shader.glShader.release();
}
shaders.clear();
- copyBuffer = null;
}
}
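Since the viewport is now an explicit argument, a single drawer instance can target different regions of one surface. A minimal sketch, assuming a current EGL context; |oesTextureId|, |surfaceWidth| and |surfaceHeight| are hypothetical caller-provided values:

    // Sketch: draw the same OES frame into the left and right halves.
    void drawSideBySide(int oesTextureId, int surfaceWidth, int surfaceHeight) {
      RendererCommon.GlDrawer drawer = new GlRectDrawer();
      float[] texMatrix = new float[16];
      android.opengl.Matrix.setIdentityM(texMatrix, 0);
      drawer.drawOes(oesTextureId, texMatrix, 0, 0, surfaceWidth / 2, surfaceHeight);
      drawer.drawOes(oesTextureId, texMatrix, surfaceWidth / 2, 0,
          surfaceWidth / 2, surfaceHeight);
      drawer.release(); // GL resources must be freed manually.
    }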
diff --git a/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java b/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
index e3a7850db4..950dcdfa44 100644
--- a/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
+++ b/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
@@ -55,7 +55,7 @@ import android.util.Log;
* ACCESS_NETWORK_STATE permission.
*/
public class NetworkMonitorAutoDetect extends BroadcastReceiver {
- static enum ConnectionType {
+ public static enum ConnectionType {
CONNECTION_UNKNOWN,
CONNECTION_ETHERNET,
CONNECTION_WIFI,
@@ -96,6 +96,10 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
/** Queries the ConnectivityManager for information about the current connection. */
static class ConnectivityManagerDelegate {
+ /**
+ * Note: On some rare Android systems, connectivityManager is null. We handle that
+ * gracefully below.
+ */
private final ConnectivityManager connectivityManager;
ConnectivityManagerDelegate(Context context) {
@@ -114,6 +118,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
* default network.
*/
NetworkState getNetworkState() {
+ if (connectivityManager == null) {
+ return new NetworkState(false, -1, -1);
+ }
return getNetworkState(connectivityManager.getActiveNetworkInfo());
}
@@ -123,6 +130,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
NetworkState getNetworkState(Network network) {
+ if (connectivityManager == null) {
+ return new NetworkState(false, -1, -1);
+ }
return getNetworkState(connectivityManager.getNetworkInfo(network));
}
@@ -142,6 +152,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
Network[] getAllNetworks() {
+ if (connectivityManager == null) {
+ return new Network[0];
+ }
return connectivityManager.getAllNetworks();
}
@@ -152,6 +165,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
int getDefaultNetId() {
+ if (connectivityManager == null) {
+ return INVALID_NET_ID;
+ }
// Android Lollipop had no API to get the default network; only an
// API to return the NetworkInfo for the default network. To
// determine the default network one can find the network with
@@ -188,6 +204,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
boolean hasInternetCapability(Network network) {
+ if (connectivityManager == null) {
+ return false;
+ }
final NetworkCapabilities capabilities =
connectivityManager.getNetworkCapabilities(network);
return capabilities != null && capabilities.hasCapability(NET_CAPABILITY_INTERNET);
@@ -240,7 +259,6 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
static final int INVALID_NET_ID = -1;
private static final String TAG = "NetworkMonitorAutoDetect";
- private static final int UNKNOWN_LINK_SPEED = -1;
private final IntentFilter intentFilter;
// Observer for the connection type change.
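Each delegate method above now checks the possibly-null ConnectivityManager and falls back to a safe default. The same pattern in isolation, as a sketch against the standard Android SDK (|context| is a hypothetical android.content.Context):

    // Sketch: null-tolerant connectivity query.
    static boolean isConnected(android.content.Context context) {
      android.net.ConnectivityManager cm = (android.net.ConnectivityManager)
          context.getSystemService(android.content.Context.CONNECTIVITY_SERVICE);
      if (cm == null) {
        return false; // Rare systems without a ConnectivityManager.
      }
      android.net.NetworkInfo active = cm.getActiveNetworkInfo();
      return active != null && active.isConnected();
    }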
diff --git a/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java b/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
index 94d180da5a..5ada4cc416 100644
--- a/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
+++ b/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
@@ -28,8 +28,11 @@
package org.webrtc;
import android.graphics.Point;
+import android.opengl.GLES20;
import android.opengl.Matrix;
+import java.nio.ByteBuffer;
+
/**
* Static helper functions for renderer implementations.
*/
@@ -47,6 +50,73 @@ public class RendererCommon {
public void onFrameResolutionChanged(int videoWidth, int videoHeight, int rotation);
}
+ /** Interface for rendering frames on an EGLSurface. */
+ public static interface GlDrawer {
+ /**
+ * Functions for drawing frames with different sources. The rendering surface target is
+ * implied by the current EGL context of the calling thread and requires no explicit argument.
+ * The coordinates specify the viewport location on the surface target.
+ */
+ void drawOes(int oesTextureId, float[] texMatrix, int x, int y, int width, int height);
+ void drawRgb(int textureId, float[] texMatrix, int x, int y, int width, int height);
+ void drawYuv(int[] yuvTextures, float[] texMatrix, int x, int y, int width, int height);
+
+ /**
+ * Release all GL resources. This needs to be done manually, otherwise resources may leak.
+ */
+ void release();
+ }
+
+ /**
+ * Helper class for uploading YUV ByteBuffer frames to textures, handling stride > width. This
+ * class keeps an internal ByteBuffer to avoid unnecessary allocations for intermediate copies.
+ */
+ public static class YuvUploader {
+ // Intermediate copy buffer for uploading yuv frames that are not packed, i.e. stride > width.
+ // TODO(magjed): Investigate when GL_UNPACK_ROW_LENGTH is available, or make a custom shader
+ // that handles stride and compare performance with intermediate copy.
+ private ByteBuffer copyBuffer;
+
+ /**
+ * Upload |planes| into |outputYuvTextures|, taking stride into consideration.
+ * |outputYuvTextures| must have been generated in advance.
+ */
+ public void uploadYuvData(
+ int[] outputYuvTextures, int width, int height, int[] strides, ByteBuffer[] planes) {
+ final int[] planeWidths = new int[] {width, width / 2, width / 2};
+ final int[] planeHeights = new int[] {height, height / 2, height / 2};
+ // Make a first pass to see if we need a temporary copy buffer.
+ int copyCapacityNeeded = 0;
+ for (int i = 0; i < 3; ++i) {
+ if (strides[i] > planeWidths[i]) {
+ copyCapacityNeeded = Math.max(copyCapacityNeeded, planeWidths[i] * planeHeights[i]);
+ }
+ }
+ // Allocate copy buffer if necessary.
+ if (copyCapacityNeeded > 0
+ && (copyBuffer == null || copyBuffer.capacity() < copyCapacityNeeded)) {
+ copyBuffer = ByteBuffer.allocateDirect(copyCapacityNeeded);
+ }
+ // Upload each plane.
+ for (int i = 0; i < 3; ++i) {
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, outputYuvTextures[i]);
+ // GLES only accepts packed data, i.e. stride == planeWidth.
+ final ByteBuffer packedByteBuffer;
+ if (strides[i] == planeWidths[i]) {
+ // Input is packed already.
+ packedByteBuffer = planes[i];
+ } else {
+ VideoRenderer.nativeCopyPlane(
+ planes[i], planeWidths[i], planeHeights[i], strides[i], copyBuffer, planeWidths[i]);
+ packedByteBuffer = copyBuffer;
+ }
+ GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, planeWidths[i],
+ planeHeights[i], 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, packedByteBuffer);
+ }
+ }
+ }
+
// Types of video scaling:
// SCALE_ASPECT_FIT - video frame is scaled to fit the size of the view by
// maintaining the aspect ratio (black borders may be displayed).
@@ -182,9 +252,9 @@ public class RendererCommon {
}
// Each dimension is constrained on max display size and how much we are allowed to crop.
final int width = Math.min(maxDisplayWidth,
- (int) (maxDisplayHeight / minVisibleFraction * videoAspectRatio));
+ Math.round(maxDisplayHeight / minVisibleFraction * videoAspectRatio));
final int height = Math.min(maxDisplayHeight,
- (int) (maxDisplayWidth / minVisibleFraction / videoAspectRatio));
+ Math.round(maxDisplayWidth / minVisibleFraction / videoAspectRatio));
return new Point(width, height);
}
}
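The YuvUploader moved here from GlRectDrawer is independent of any drawer. A minimal sketch of uploading one I420 frame, assuming a current EGL context and the GlUtil helper used elsewhere in this patch; |strides| and |planes| are hypothetical Y/U/V frame data in that order:

    // Sketch: allocate three textures and upload one I420 frame,
    // tolerating stride > width.
    int[] uploadFrame(int width, int height, int[] strides, java.nio.ByteBuffer[] planes) {
      RendererCommon.YuvUploader uploader = new RendererCommon.YuvUploader();
      int[] yuvTextures = new int[3];
      for (int i = 0; i < 3; ++i) {
        yuvTextures[i] = GlUtil.generateTexture(GLES20.GL_TEXTURE_2D);
      }
      uploader.uploadYuvData(yuvTextures, width, height, strides, planes);
      return yuvTextures; // Ready for RendererCommon.GlDrawer.drawYuv().
    }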
diff --git a/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java b/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
index b9c158f848..b001d2a101 100644
--- a/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
+++ b/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
@@ -35,12 +35,12 @@ import android.os.Handler;
import android.os.HandlerThread;
import android.os.SystemClock;
+import java.nio.ByteBuffer;
+import java.nio.FloatBuffer;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import javax.microedition.khronos.egl.EGLContext;
-
/**
* Helper class to create and synchronize access to a SurfaceTexture. The caller will get notified
* of new frames in onTextureFrameAvailable(), and should call returnTextureFrame() when done with
@@ -51,7 +51,7 @@ import javax.microedition.khronos.egl.EGLContext;
* wrapping texture frames into webrtc::VideoFrames and also handles calling returnTextureFrame()
* when the webrtc::VideoFrame is no longer used.
*/
-final class SurfaceTextureHelper {
+class SurfaceTextureHelper {
private static final String TAG = "SurfaceTextureHelper";
/**
* Callback interface for being notified that a new texture frame is available. The calls will be
@@ -65,7 +65,7 @@ final class SurfaceTextureHelper {
int oesTextureId, float[] transformMatrix, long timestampNs);
}
- public static SurfaceTextureHelper create(EGLContext sharedContext) {
+ public static SurfaceTextureHelper create(EglBase.Context sharedContext) {
return create(sharedContext, null);
}
@@ -74,7 +74,8 @@ final class SurfaceTextureHelper {
* |handler| is non-null, the callback will be executed on that handler's thread. If |handler| is
* null, a dedicated private thread is created for the callbacks.
*/
- public static SurfaceTextureHelper create(final EGLContext sharedContext, final Handler handler) {
+ public static SurfaceTextureHelper create(final EglBase.Context sharedContext,
+ final Handler handler) {
final Handler finalHandler;
if (handler != null) {
finalHandler = handler;
@@ -94,25 +95,240 @@ final class SurfaceTextureHelper {
});
}
+ // State for YUV conversion, instantiated on demand.
+ private static class YuvConverter {
+ private final EglBase eglBase;
+ private final GlShader shader;
+ private boolean released = false;
+
+ // Vertex coordinates in Normalized Device Coordinates, i.e.
+ // (-1, -1) is bottom-left and (1, 1) is top-right.
+ private static final FloatBuffer DEVICE_RECTANGLE =
+ GlUtil.createFloatBuffer(new float[] {
+ -1.0f, -1.0f, // Bottom left.
+ 1.0f, -1.0f, // Bottom right.
+ -1.0f, 1.0f, // Top left.
+ 1.0f, 1.0f, // Top right.
+ });
+
+ // Texture coordinates - (0, 0) is bottom-left and (1, 1) is top-right.
+ private static final FloatBuffer TEXTURE_RECTANGLE =
+ GlUtil.createFloatBuffer(new float[] {
+ 0.0f, 0.0f, // Bottom left.
+ 1.0f, 0.0f, // Bottom right.
+ 0.0f, 1.0f, // Top left.
+ 1.0f, 1.0f // Top right.
+ });
+
+ private static final String VERTEX_SHADER =
+ "varying vec2 interp_tc;\n"
+ + "attribute vec4 in_pos;\n"
+ + "attribute vec4 in_tc;\n"
+ + "\n"
+ + "uniform mat4 texMatrix;\n"
+ + "\n"
+ + "void main() {\n"
+ + " gl_Position = in_pos;\n"
+ + " interp_tc = (texMatrix * in_tc).xy;\n"
+ + "}\n";
+
+ private static final String FRAGMENT_SHADER =
+ "#extension GL_OES_EGL_image_external : require\n"
+ + "precision mediump float;\n"
+ + "varying vec2 interp_tc;\n"
+ + "\n"
+ + "uniform samplerExternalOES oesTex;\n"
+ // Difference in texture coordinate corresponding to one
+ // sub-pixel in the x direction.
+ + "uniform vec2 xUnit;\n"
+ // Color conversion coefficients, including constant term
+ + "uniform vec4 coeffs;\n"
+ + "\n"
+ + "void main() {\n"
+ // Since the alpha read from the texture is always 1, this could
+ // be written as a mat4 x vec4 multiply. However, that seems to
+ // give a worse framerate, possibly because the additional
+ // multiplies by 1.0 consume resources. TODO(nisse): Could also
+ // try to do it as a vec3 x mat3x4, followed by an add in of a
+ // constant vector.
+ + " gl_FragColor.r = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc - 1.5 * xUnit).rgb);\n"
+ + " gl_FragColor.g = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc - 0.5 * xUnit).rgb);\n"
+ + " gl_FragColor.b = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc + 0.5 * xUnit).rgb);\n"
+ + " gl_FragColor.a = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc + 1.5 * xUnit).rgb);\n"
+ + "}\n";
+
+ private int texMatrixLoc;
+ private int xUnitLoc;
+ private int coeffsLoc;
+
+ YuvConverter(EglBase.Context sharedContext) {
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PIXEL_RGBA_BUFFER);
+ eglBase.createDummyPbufferSurface();
+ eglBase.makeCurrent();
+
+ shader = new GlShader(VERTEX_SHADER, FRAGMENT_SHADER);
+ shader.useProgram();
+ texMatrixLoc = shader.getUniformLocation("texMatrix");
+ xUnitLoc = shader.getUniformLocation("xUnit");
+ coeffsLoc = shader.getUniformLocation("coeffs");
+ GLES20.glUniform1i(shader.getUniformLocation("oesTex"), 0);
+ GlUtil.checkNoGLES2Error("Initialize fragment shader uniform values.");
+ // Initialize vertex shader attributes.
+ shader.setVertexAttribArray("in_pos", 2, DEVICE_RECTANGLE);
+ // If the width is not a multiple of 4 pixels, the texture
+ // will be scaled up slightly and clipped at the right border.
+ shader.setVertexAttribArray("in_tc", 2, TEXTURE_RECTANGLE);
+ eglBase.detachCurrent();
+ }
+
+ synchronized void convert(ByteBuffer buf,
+ int width, int height, int stride, int textureId, float[] transformMatrix) {
+ if (released) {
+ throw new IllegalStateException(
+ "YuvConverter.convert called on released object");
+ }
+
+ // We draw into a buffer laid out like
+ //
+ // +---------+
+ // | |
+ // | Y |
+ // | |
+ // | |
+ // +----+----+
+ // | U | V |
+ // | | |
+ // +----+----+
+ //
+ // In memory, we use the same stride for all of Y, U and V. The
+ // U data starts at offset |height| * |stride| from the Y data,
+ // and the V data starts at offset |stride/2| from the U
+ // data, with rows of U and V data alternating.
+ //
+ // Now, it would have made sense to allocate a pixel buffer with
+ // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
+ // EGL10.EGL_LUMINANCE_BUFFER), but that seems to be
+ // unsupported by devices. So do the following hack: Allocate an
+ // RGBA buffer, of width |stride|/4. To render each of these
+ // large pixels, sample the texture at 4 different x coordinates
+ // and store the results in the four components.
+ //
+ // Since the V data needs to start on a boundary of such a
+ // larger pixel, it is not sufficient that |stride| is even; it
+ // has to be a multiple of 8 pixels.
+
+ if (stride % 8 != 0) {
+ throw new IllegalArgumentException(
+ "Invalid stride, must be a multiple of 8");
+ }
+ if (stride < width) {
+ throw new IllegalArgumentException(
+ "Invalid stride, must be >= width");
+ }
+
+ int y_width = (width+3) / 4;
+ int uv_width = (width+7) / 8;
+ int uv_height = (height+1)/2;
+ int total_height = height + uv_height;
+ int size = stride * total_height;
+
+ if (buf.capacity() < size) {
+ throw new IllegalArgumentException("YuvConverter.convert called with too small buffer");
+ }
+ // Produce a frame buffer starting at top-left corner, not
+ // bottom-left.
+ transformMatrix =
+ RendererCommon.multiplyMatrices(transformMatrix,
+ RendererCommon.verticalFlipMatrix());
+
+ // Create a new pbuffer surface with the correct size if needed.
+ if (eglBase.hasSurface()) {
+ if (eglBase.surfaceWidth() != stride/4 ||
+ eglBase.surfaceHeight() != total_height) {
+ eglBase.releaseSurface();
+ eglBase.createPbufferSurface(stride/4, total_height);
+ }
+ } else {
+ eglBase.createPbufferSurface(stride/4, total_height);
+ }
+
+ eglBase.makeCurrent();
+
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, textureId);
+ GLES20.glUniformMatrix4fv(texMatrixLoc, 1, false, transformMatrix, 0);
+
+ // Draw Y
+ GLES20.glViewport(0, 0, y_width, height);
+ // Matrix * (1;0;0;0) / width. Note that OpenGL uses column-major order.
+ GLES20.glUniform2f(xUnitLoc,
+ transformMatrix[0] / width,
+ transformMatrix[1] / width);
+ // Y'UV444 to RGB888, see
+ // https://en.wikipedia.org/wiki/YUV#Y.27UV444_to_RGB888_conversion.
+ // We use the ITU-R coefficients for U and V.
+ GLES20.glUniform4f(coeffsLoc, 0.299f, 0.587f, 0.114f, 0.0f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ // Draw U
+ GLES20.glViewport(0, height, uv_width, uv_height);
+ // Matrix * (1;0;0;0) / (2*width). Note that OpenGL uses column-major order.
+ GLES20.glUniform2f(xUnitLoc,
+ transformMatrix[0] / (2.0f*width),
+ transformMatrix[1] / (2.0f*width));
+ GLES20.glUniform4f(coeffsLoc, -0.169f, -0.331f, 0.499f, 0.5f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ // Draw V
+ GLES20.glViewport(stride/8, height, uv_width, uv_height);
+ GLES20.glUniform4f(coeffsLoc, 0.499f, -0.418f, -0.0813f, 0.5f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ GLES20.glReadPixels(0, 0, stride/4, total_height, GLES20.GL_RGBA,
+ GLES20.GL_UNSIGNED_BYTE, buf);
+
+ GlUtil.checkNoGLES2Error("YuvConverter.convert");
+
+ // Unbind texture. Reportedly needed on some devices to get
+ // the texture updated from the camera.
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, 0);
+ eglBase.detachCurrent();
+ }
+
+ synchronized void release() {
+ released = true;
+ eglBase.makeCurrent();
+ shader.release();
+ eglBase.release();
+ }
+ }
+
private final Handler handler;
- private final boolean isOwningThread;
+ private boolean isOwningThread;
private final EglBase eglBase;
private final SurfaceTexture surfaceTexture;
private final int oesTextureId;
+ private volatile YuvConverter yuvConverter;
+
private OnTextureFrameAvailableListener listener;
// The possible states of this class.
private boolean hasPendingTexture = false;
- private boolean isTextureInUse = false;
+ private volatile boolean isTextureInUse = false;
private boolean isQuitting = false;
- private SurfaceTextureHelper(EGLContext sharedContext, Handler handler, boolean isOwningThread) {
+ private SurfaceTextureHelper(EglBase.Context sharedContext,
+ Handler handler, boolean isOwningThread) {
if (handler.getLooper().getThread() != Thread.currentThread()) {
throw new IllegalStateException("SurfaceTextureHelper must be created on the handler thread");
}
this.handler = handler;
this.isOwningThread = isOwningThread;
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PIXEL_BUFFER);
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createDummyPbufferSurface();
eglBase.makeCurrent();
@@ -120,6 +336,18 @@ final class SurfaceTextureHelper {
surfaceTexture = new SurfaceTexture(oesTextureId);
}
+ private YuvConverter getYuvConverter() {
+ // |yuvConverter| is assigned at most once; volatile makes this double-checked locking safe.
+ if (yuvConverter != null)
+ return yuvConverter;
+
+ synchronized (this) {
+ if (yuvConverter == null)
+ yuvConverter = new YuvConverter(eglBase.getEglBaseContext());
+ return yuvConverter;
+ }
+ }
+
/**
* Start to stream textures to the given |listener|.
* A Listener can only be set once.
@@ -164,12 +392,19 @@ final class SurfaceTextureHelper {
});
}
+ public boolean isTextureInUse() {
+ return isTextureInUse;
+ }
+
/**
* Call disconnect() to stop receiving frames. Resources are released when the texture frame has
* been returned by a call to returnTextureFrame(). You are guaranteed to not receive any more
* onTextureFrameAvailable() after this function returns.
*/
public void disconnect() {
+ if (!isOwningThread) {
+ throw new IllegalStateException("Must call disconnect(handler).");
+ }
if (handler.getLooper().getThread() == Thread.currentThread()) {
isQuitting = true;
if (!isTextureInUse) {
@@ -190,6 +425,28 @@ final class SurfaceTextureHelper {
ThreadUtils.awaitUninterruptibly(barrier);
}
+ /**
+ * Call disconnect(handler) to stop receiving frames and quit the looper used by |handler|.
+ * Resources are released when the texture frame has been returned by a call to
+ * returnTextureFrame(). You are guaranteed to not receive any more
+ * onTextureFrameAvailable() after this function returns.
+ */
+ public void disconnect(Handler handler) {
+ if (this.handler != handler) {
+ throw new IllegalStateException("Wrong handler.");
+ }
+ isOwningThread = true;
+ disconnect();
+ }
+
+ public void textureToYUV(ByteBuffer buf,
+ int width, int height, int stride, int textureId, float[] transformMatrix) {
+ if (textureId != oesTextureId)
+ throw new IllegalStateException("textureToByteBuffer called with unexpected textureId");
+
+ getYuvConverter().convert(buf, width, height, stride, textureId, transformMatrix);
+ }
+
private void tryDeliverTextureFrame() {
if (handler.getLooper().getThread() != Thread.currentThread()) {
throw new IllegalStateException("Wrong thread.");
@@ -218,12 +475,14 @@ final class SurfaceTextureHelper {
if (isTextureInUse || !isQuitting) {
throw new IllegalStateException("Unexpected release.");
}
+ synchronized (this) {
+ if (yuvConverter != null)
+ yuvConverter.release();
+ }
eglBase.makeCurrent();
GLES20.glDeleteTextures(1, new int[] {oesTextureId}, 0);
surfaceTexture.release();
eglBase.release();
- if (isOwningThread) {
- handler.getLooper().quit();
- }
+ handler.getLooper().quit();
}
}
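The stride rules in YuvConverter.convert() determine how a caller must size the output buffer for textureToYUV(). A minimal sketch, where |width| and |height| are the frame dimensions and |helper|, |oesTextureId| and |transformMatrix| are the values delivered to onTextureFrameAvailable():

    // Sketch: stride must be a multiple of 8 and >= width; the buffer holds
    // the Y rows plus (height + 1) / 2 rows of interleaved U/V data.
    int stride = ((width + 7) / 8) * 8;
    int totalHeight = height + (height + 1) / 2;
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocateDirect(stride * totalHeight);
    helper.textureToYUV(buf, width, height, stride, oesTextureId, transformMatrix);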
diff --git a/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java b/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
index d7c9e2af0a..fa199b33c8 100644
--- a/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
+++ b/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
@@ -28,10 +28,9 @@
package org.webrtc;
import android.content.Context;
+import android.content.res.Resources.NotFoundException;
import android.graphics.Point;
-import android.graphics.SurfaceTexture;
import android.opengl.GLES20;
-import android.opengl.Matrix;
import android.os.Handler;
import android.os.HandlerThread;
import android.util.AttributeSet;
@@ -67,7 +66,8 @@ public class SurfaceViewRenderer extends SurfaceView
// EGL and GL resources for drawing YUV/OES textures. After initialization, these are only accessed
// from the render thread.
private EglBase eglBase;
- private GlRectDrawer drawer;
+ private final RendererCommon.YuvUploader yuvUploader = new RendererCommon.YuvUploader();
+ private RendererCommon.GlDrawer drawer;
// Texture ids for YUV frames. Allocated on first arrival of a YUV frame.
private int[] yuvTextures = null;
@@ -77,23 +77,22 @@ public class SurfaceViewRenderer extends SurfaceView
// These variables are synchronized on |layoutLock|.
private final Object layoutLock = new Object();
- // These three different dimension values are used to keep track of the state in these functions:
- // requestLayout() -> onMeasure() -> onLayout() -> surfaceChanged().
- // requestLayout() is triggered internally by frame size changes, but can also be triggered
- // externally by layout update requests.
- // Most recent measurement specification from onMeasure().
- private int widthSpec;
- private int heightSpec;
- // Current size on screen in pixels. Updated in onLayout(), and should be consistent with
- // |widthSpec|/|heightSpec| after that.
- private int layoutWidth;
- private int layoutHeight;
- // Current surface size of the underlying Surface. Updated in surfaceChanged(), and should be
- // consistent with |layoutWidth|/|layoutHeight| after that.
+ // These dimension values are used to keep track of the state in these functions: onMeasure(),
+ // onLayout(), and surfaceChanged(). A new layout is triggered with requestLayout(). This happens
+ // internally when the incoming frame size changes. requestLayout() can also be triggered
+ // externally. The layout change is a two-pass process: first onMeasure() is called in a top-down
+ // traversal of the View tree, followed by an onLayout() pass that is also top-down. During the
+ // onLayout() pass, each parent is responsible for positioning its children using the sizes
+ // computed in the measure pass.
+ // |desiredLayoutSize| is the layout size we have requested in onMeasure() and are waiting to
+ // take effect.
+ private Point desiredLayoutSize = new Point();
+ // |layoutSize|/|surfaceSize| is the actual current layout/surface size. They are updated in
+ // onLayout() and surfaceChanged() respectively.
+ private final Point layoutSize = new Point();
// TODO(magjed): Enable hardware scaler with SurfaceHolder.setFixedSize(). This will decouple
// layout and surface size.
- private int surfaceWidth;
- private int surfaceHeight;
+ private final Point surfaceSize = new Point();
// |isSurfaceCreated| keeps track of the current status in surfaceCreated()/surfaceDestroyed().
private boolean isSurfaceCreated;
// Last rendered frame dimensions, or 0 if no frame has been rendered yet.
@@ -121,12 +120,18 @@ public class SurfaceViewRenderer extends SurfaceView
// Time in ns spent in renderFrameOnRenderThread() function.
private long renderTimeNs;
- // Runnable for posting frames to render thread..
+ // Runnable for posting frames to render thread.
private final Runnable renderFrameRunnable = new Runnable() {
@Override public void run() {
renderFrameOnRenderThread();
}
};
+ // Runnable for clearing Surface to black.
+ private final Runnable makeBlackRunnable = new Runnable() {
+ @Override public void run() {
+ makeBlack();
+ }
+ };
/**
* Standard View constructor. In order to render something, you must first call init().
@@ -149,17 +154,28 @@ public class SurfaceViewRenderer extends SurfaceView
* reinitialize the renderer after a previous init()/release() cycle.
*/
public void init(
- EGLContext sharedContext, RendererCommon.RendererEvents rendererEvents) {
+ EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents) {
+ init(sharedContext, rendererEvents, EglBase.CONFIG_PLAIN, new GlRectDrawer());
+ }
+
+ /**
+ * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+ * for drawing frames on the EGLSurface. This class is responsible for calling release() on
+ * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+ * init()/release() cycle.
+ */
+ public void init(EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents,
+ int[] configAttributes, RendererCommon.GlDrawer drawer) {
synchronized (handlerLock) {
if (renderThreadHandler != null) {
- throw new IllegalStateException("Already initialized");
+ throw new IllegalStateException(getResourceName() + "Already initialized");
}
- Logging.d(TAG, "Initializing");
+ Logging.d(TAG, getResourceName() + "Initializing.");
this.rendererEvents = rendererEvents;
+ this.drawer = drawer;
renderThread = new HandlerThread(TAG);
renderThread.start();
- drawer = new GlRectDrawer();
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PLAIN);
+ eglBase = EglBase.create(sharedContext, configAttributes);
renderThreadHandler = new Handler(renderThread.getLooper());
}
tryCreateEglSurface();
@@ -174,8 +190,8 @@ public class SurfaceViewRenderer extends SurfaceView
runOnRenderThread(new Runnable() {
@Override public void run() {
synchronized (layoutLock) {
- if (isSurfaceCreated) {
- eglBase.createSurface(getHolder());
+ if (isSurfaceCreated && !eglBase.hasSurface()) {
+ eglBase.createSurface(getHolder().getSurface());
eglBase.makeCurrent();
// Necessary for YUV frames with odd width.
GLES20.glPixelStorei(GLES20.GL_UNPACK_ALIGNMENT, 1);
@@ -195,7 +211,7 @@ public class SurfaceViewRenderer extends SurfaceView
final CountDownLatch eglCleanupBarrier = new CountDownLatch(1);
synchronized (handlerLock) {
if (renderThreadHandler == null) {
- Logging.d(TAG, "Already released");
+ Logging.d(TAG, getResourceName() + "Already released");
return;
}
// Release EGL and GL resources on render thread.
@@ -210,11 +226,8 @@ public class SurfaceViewRenderer extends SurfaceView
GLES20.glDeleteTextures(3, yuvTextures, 0);
yuvTextures = null;
}
- if (eglBase.hasSurface()) {
- // Clear last rendered image to black.
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
- }
+ // Clear last rendered image to black.
+ makeBlack();
eglBase.release();
eglBase = null;
eglCleanupBarrier.countDown();
@@ -242,6 +255,14 @@ public class SurfaceViewRenderer extends SurfaceView
frameRotation = 0;
rendererEvents = null;
}
+ resetStatistics();
+ }
+
+ /**
+ * Reset statistics. This will reset the logged statistics in logStatistics(), and
+ * RendererEvents.onFirstFrameRendered() will be called for the next frame.
+ */
+ public void resetStatistics() {
synchronized (statisticsLock) {
framesReceived = 0;
framesDropped = 0;
@@ -277,27 +298,28 @@ public class SurfaceViewRenderer extends SurfaceView
}
synchronized (handlerLock) {
if (renderThreadHandler == null) {
- Logging.d(TAG, "Dropping frame - SurfaceViewRenderer not initialized or already released.");
- } else {
- synchronized (frameLock) {
- if (pendingFrame == null) {
- updateFrameDimensionsAndReportEvents(frame);
- pendingFrame = frame;
- renderThreadHandler.post(renderFrameRunnable);
- return;
+ Logging.d(TAG, getResourceName()
+ + "Dropping frame - Not initialized or already released.");
+ VideoRenderer.renderFrameDone(frame);
+ return;
+ }
+ synchronized (frameLock) {
+ if (pendingFrame != null) {
+ // Drop old frame.
+ synchronized (statisticsLock) {
+ ++framesDropped;
}
+ VideoRenderer.renderFrameDone(pendingFrame);
}
+ pendingFrame = frame;
+ updateFrameDimensionsAndReportEvents(frame);
+ renderThreadHandler.post(renderFrameRunnable);
}
}
- // Drop frame.
- synchronized (statisticsLock) {
- ++framesDropped;
- }
- VideoRenderer.renderFrameDone(frame);
}
// Returns desired layout size given current measure specification and video aspect ratio.
- private Point getDesiredLayoutSize() {
+ private Point getDesiredLayoutSize(int widthSpec, int heightSpec) {
synchronized (layoutLock) {
final int maxWidth = getDefaultSize(Integer.MAX_VALUE, widthSpec);
final int maxHeight = getDefaultSize(Integer.MAX_VALUE, heightSpec);
@@ -317,18 +339,30 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
protected void onMeasure(int widthSpec, int heightSpec) {
synchronized (layoutLock) {
- this.widthSpec = widthSpec;
- this.heightSpec = heightSpec;
- final Point size = getDesiredLayoutSize();
- setMeasuredDimension(size.x, size.y);
+ if (frameWidth == 0 || frameHeight == 0) {
+ super.onMeasure(widthSpec, heightSpec);
+ return;
+ }
+ desiredLayoutSize = getDesiredLayoutSize(widthSpec, heightSpec);
+ if (desiredLayoutSize.x != getMeasuredWidth() || desiredLayoutSize.y != getMeasuredHeight()) {
+ // Clear the surface ASAP before the layout change to avoid stretched video and other
+ // render artifacts. Don't wait for it to finish, because the IO thread should never be
+ // blocked; this is a best-effort attempt.
+ synchronized (handlerLock) {
+ if (renderThreadHandler != null) {
+ renderThreadHandler.postAtFrontOfQueue(makeBlackRunnable);
+ }
+ }
+ }
+ setMeasuredDimension(desiredLayoutSize.x, desiredLayoutSize.y);
}
}
@Override
protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
synchronized (layoutLock) {
- layoutWidth = right - left;
- layoutHeight = bottom - top;
+ layoutSize.x = right - left;
+ layoutSize.y = bottom - top;
}
// Might have a pending frame waiting for a layout of correct size.
runOnRenderThread(renderFrameRunnable);
@@ -337,7 +371,7 @@ public class SurfaceViewRenderer extends SurfaceView
// SurfaceHolder.Callback interface.
@Override
public void surfaceCreated(final SurfaceHolder holder) {
- Logging.d(TAG, "Surface created");
+ Logging.d(TAG, getResourceName() + "Surface created.");
synchronized (layoutLock) {
isSurfaceCreated = true;
}
@@ -346,11 +380,11 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
- Logging.d(TAG, "Surface destroyed");
+ Logging.d(TAG, getResourceName() + "Surface destroyed.");
synchronized (layoutLock) {
isSurfaceCreated = false;
- surfaceWidth = 0;
- surfaceHeight = 0;
+ surfaceSize.x = 0;
+ surfaceSize.y = 0;
}
runOnRenderThread(new Runnable() {
@Override public void run() {
@@ -361,10 +395,10 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
- Logging.d(TAG, "Surface changed: " + width + "x" + height);
+ Logging.d(TAG, getResourceName() + "Surface changed: " + width + "x" + height);
synchronized (layoutLock) {
- surfaceWidth = width;
- surfaceHeight = height;
+ surfaceSize.x = width;
+ surfaceSize.y = height;
}
// Might have a pending frame waiting for a surface of correct size.
runOnRenderThread(renderFrameRunnable);
@@ -381,26 +415,35 @@ public class SurfaceViewRenderer extends SurfaceView
}
}
+ private String getResourceName() {
+ try {
+ return getResources().getResourceEntryName(getId()) + ": ";
+ } catch (NotFoundException e) {
+ return "";
+ }
+ }
+
+ private void makeBlack() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
+ if (eglBase != null && eglBase.hasSurface()) {
+ GLES20.glClearColor(0, 0, 0, 0);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ eglBase.swapBuffers();
+ }
+ }
+
/**
* Requests new layout if necessary. Returns true if layout and surface size are consistent.
*/
private boolean checkConsistentLayout() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
synchronized (layoutLock) {
- final Point desiredLayoutSize = getDesiredLayoutSize();
- if (desiredLayoutSize.x != layoutWidth || desiredLayoutSize.y != layoutHeight) {
- Logging.d(TAG, "Requesting new layout with size: "
- + desiredLayoutSize.x + "x" + desiredLayoutSize.y);
- // Request layout update on UI thread.
- post(new Runnable() {
- @Override public void run() {
- requestLayout();
- }
- });
- return false;
- }
- // Wait for requestLayout() to propagate through this sequence before returning true:
- // requestLayout() -> onMeasure() -> onLayout() -> surfaceChanged().
- return surfaceWidth == layoutWidth && surfaceHeight == layoutHeight;
+ // Return false while we are in the middle of a layout change.
+ return layoutSize.equals(desiredLayoutSize) && surfaceSize.equals(layoutSize);
}
}
@@ -408,61 +451,51 @@ public class SurfaceViewRenderer extends SurfaceView
* Renders and releases |pendingFrame|.
*/
private void renderFrameOnRenderThread() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
+ // Fetch and render |pendingFrame|.
+ final VideoRenderer.I420Frame frame;
+ synchronized (frameLock) {
+ if (pendingFrame == null) {
+ return;
+ }
+ frame = pendingFrame;
+ pendingFrame = null;
+ }
if (eglBase == null || !eglBase.hasSurface()) {
- Logging.d(TAG, "No surface to draw on");
+ Logging.d(TAG, getResourceName() + "No surface to draw on");
+ VideoRenderer.renderFrameDone(frame);
return;
}
if (!checkConsistentLayout()) {
// Output intermediate black frames while the layout is updated.
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
+ makeBlack();
+ VideoRenderer.renderFrameDone(frame);
return;
}
// After a surface size change, the EGLSurface might still have a buffer of the old size in the
// pipeline. Querying the EGLSurface will show if the underlying buffer dimensions haven't yet
// changed. Such a buffer will be rendered incorrectly, so flush it with a black frame.
synchronized (layoutLock) {
- if (eglBase.surfaceWidth() != surfaceWidth || eglBase.surfaceHeight() != surfaceHeight) {
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
+ if (eglBase.surfaceWidth() != surfaceSize.x || eglBase.surfaceHeight() != surfaceSize.y) {
+ makeBlack();
}
}
- // Fetch and render |pendingFrame|.
- final VideoRenderer.I420Frame frame;
- synchronized (frameLock) {
- if (pendingFrame == null) {
- return;
- }
- frame = pendingFrame;
- pendingFrame = null;
- }
final long startTimeNs = System.nanoTime();
- final float[] samplingMatrix;
- if (frame.yuvFrame) {
- // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
- // top-left corner of the image, but in glTexImage2D() the first element corresponds to the
- // bottom-left corner. We correct this discrepancy by setting a vertical flip as sampling
- // matrix.
- samplingMatrix = RendererCommon.verticalFlipMatrix();
- } else {
- // TODO(magjed): Move updateTexImage() to the video source instead.
- SurfaceTexture surfaceTexture = (SurfaceTexture) frame.textureObject;
- surfaceTexture.updateTexImage();
- samplingMatrix = new float[16];
- surfaceTexture.getTransformMatrix(samplingMatrix);
- }
-
final float[] texMatrix;
synchronized (layoutLock) {
final float[] rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, frame.rotationDegree);
+ RendererCommon.rotateTextureMatrix(frame.samplingMatrix, frame.rotationDegree);
final float[] layoutMatrix = RendererCommon.getLayoutMatrix(
- mirror, frameAspectRatio(), (float) layoutWidth / layoutHeight);
+ mirror, frameAspectRatio(), (float) layoutSize.x / layoutSize.y);
texMatrix = RendererCommon.multiplyMatrices(rotatedSamplingMatrix, layoutMatrix);
}
- GLES20.glViewport(0, 0, surfaceWidth, surfaceHeight);
+ // TODO(magjed): glClear() shouldn't be necessary since every pixel is covered anyway, but it's
+ // a workaround for bug 5147. Performance will be slightly worse.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
if (frame.yuvFrame) {
// Make sure YUV textures are allocated.
if (yuvTextures == null) {
@@ -471,11 +504,11 @@ public class SurfaceViewRenderer extends SurfaceView
yuvTextures[i] = GlUtil.generateTexture(GLES20.GL_TEXTURE_2D);
}
}
- drawer.uploadYuvData(
+ yuvUploader.uploadYuvData(
yuvTextures, frame.width, frame.height, frame.yuvStrides, frame.yuvPlanes);
- drawer.drawYuv(yuvTextures, texMatrix);
+ drawer.drawYuv(yuvTextures, texMatrix, 0, 0, surfaceSize.x, surfaceSize.y);
} else {
- drawer.drawOes(frame.textureId, texMatrix);
+ drawer.drawOes(frame.textureId, texMatrix, 0, 0, surfaceSize.x, surfaceSize.y);
}
eglBase.swapBuffers();
@@ -483,6 +516,12 @@ public class SurfaceViewRenderer extends SurfaceView
synchronized (statisticsLock) {
if (framesRendered == 0) {
firstFrameTimeNs = startTimeNs;
+ synchronized (layoutLock) {
+ Logging.d(TAG, getResourceName() + "Reporting first rendered frame.");
+ if (rendererEvents != null) {
+ rendererEvents.onFirstFrameRendered();
+ }
+ }
}
++framesRendered;
renderTimeNs += (System.nanoTime() - startTimeNs);
@@ -508,32 +547,32 @@ public class SurfaceViewRenderer extends SurfaceView
synchronized (layoutLock) {
if (frameWidth != frame.width || frameHeight != frame.height
|| frameRotation != frame.rotationDegree) {
+ Logging.d(TAG, getResourceName() + "Reporting frame resolution changed to "
+ + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
if (rendererEvents != null) {
- final String id = getResources().getResourceEntryName(getId());
- if (frameWidth == 0 || frameHeight == 0) {
- Logging.d(TAG, "ID: " + id + ". Reporting first rendered frame.");
- rendererEvents.onFirstFrameRendered();
- }
- Logging.d(TAG, "ID: " + id + ". Reporting frame resolution changed to "
- + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
rendererEvents.onFrameResolutionChanged(frame.width, frame.height, frame.rotationDegree);
}
frameWidth = frame.width;
frameHeight = frame.height;
frameRotation = frame.rotationDegree;
+ post(new Runnable() {
+ @Override public void run() {
+ requestLayout();
+ }
+ });
}
}
}
private void logStatistics() {
synchronized (statisticsLock) {
- Logging.d(TAG, "ID: " + getResources().getResourceEntryName(getId()) + ". Frames received: "
+ Logging.d(TAG, getResourceName() + "Frames received: "
+ framesReceived + ". Dropped: " + framesDropped + ". Rendered: " + framesRendered);
if (framesReceived > 0 && framesRendered > 0) {
final long timeSinceFirstFrameNs = System.nanoTime() - firstFrameTimeNs;
- Logging.d(TAG, "Duration: " + (int) (timeSinceFirstFrameNs / 1e6) +
- " ms. FPS: " + (float) framesRendered * 1e9 / timeSinceFirstFrameNs);
- Logging.d(TAG, "Average render time: "
+ Logging.d(TAG, getResourceName() + "Duration: " + (int) (timeSinceFirstFrameNs / 1e6) +
+ " ms. FPS: " + framesRendered * 1e9 / timeSinceFirstFrameNs);
+ Logging.d(TAG, getResourceName() + "Average render time: "
+ (int) (renderTimeNs / (1000 * framesRendered)) + " us.");
}
}
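The new init() overload lets applications plug in a custom EGL config and drawer. A minimal sketch inside an Activity, where |sharedEglContext| is a hypothetical EglBase.Context and R.id.remote_view a hypothetical layout id:

    // Sketch: initialize with the defaults made explicit.
    SurfaceViewRenderer renderer = (SurfaceViewRenderer) findViewById(R.id.remote_view);
    renderer.init(sharedEglContext, null /* rendererEvents */,
        EglBase.CONFIG_PLAIN, new GlRectDrawer());
    // ... attach to a VideoRenderer and stream frames ...
    renderer.release(); // Also releases the supplied drawer.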
diff --git a/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java b/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
index 0d8968aba9..e60ead9f00 100644
--- a/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
+++ b/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
@@ -28,11 +28,13 @@
package org.webrtc;
import android.os.Handler;
+import android.os.SystemClock;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
-final class ThreadUtils {
+public class ThreadUtils {
/**
* Utility class to be used for checking that a method is called on the correct thread.
*/
@@ -86,6 +88,29 @@ final class ThreadUtils {
}
}
+ public static boolean joinUninterruptibly(final Thread thread, long timeoutMs) {
+ final long startTimeMs = SystemClock.elapsedRealtime();
+ long timeRemainingMs = timeoutMs;
+ boolean wasInterrupted = false;
+ while (timeRemainingMs > 0) {
+ try {
+ thread.join(timeRemainingMs);
+ break;
+ } catch (InterruptedException e) {
+ // Someone is asking us to return early at our convenience. We can't cancel this operation,
+ // but we should preserve the information and pass it along.
+ wasInterrupted = true;
+ final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+ timeRemainingMs = timeoutMs - elapsedTimeMs;
+ }
+ }
+ // Pass interruption information along.
+ if (wasInterrupted) {
+ Thread.currentThread().interrupt();
+ }
+ return !thread.isAlive();
+ }
+
public static void joinUninterruptibly(final Thread thread) {
executeUninterruptibly(new BlockingOperation() {
@Override
@@ -104,6 +129,30 @@ final class ThreadUtils {
});
}
+ public static boolean awaitUninterruptibly(CountDownLatch barrier, long timeoutMs) {
+ final long startTimeMs = SystemClock.elapsedRealtime();
+ long timeRemainingMs = timeoutMs;
+ boolean wasInterrupted = false;
+ boolean result = false;
+ do {
+ try {
+ result = barrier.await(timeRemainingMs, TimeUnit.MILLISECONDS);
+ break;
+ } catch (InterruptedException e) {
+ // Someone is asking us to return early at our convenience. We can't cancel this operation,
+ // but we should preserve the information and pass it along.
+ wasInterrupted = true;
+ final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+ timeRemainingMs = timeoutMs - elapsedTimeMs;
+ }
+ } while (timeRemainingMs > 0);
+ // Pass interruption information along.
+ if (wasInterrupted) {
+ Thread.currentThread().interrupt();
+ }
+ return result;
+ }
+
/**
* Post |callable| to |handler| and wait for the result.
*/
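The two timeout variants above follow the same template: loop on the blocking call, track the remaining time across InterruptedExceptions, and restore the interrupt flag before returning. A minimal sketch of calling them, assuming only the standard library:

    // Sketch: wait up to 500 ms for a worker thread to signal and exit.
    final java.util.concurrent.CountDownLatch done =
        new java.util.concurrent.CountDownLatch(1);
    Thread worker = new Thread(new Runnable() {
      @Override public void run() {
        // ... do work ...
        done.countDown();
      }
    });
    worker.start();
    boolean signaled = ThreadUtils.awaitUninterruptibly(done, 500);
    boolean finished = ThreadUtils.joinUninterruptibly(worker, 500);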
diff --git a/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java b/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
index 4caefc513d..36f60edd5c 100644
--- a/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
+++ b/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
@@ -28,9 +28,6 @@
package org.webrtc;
import android.content.Context;
-import android.graphics.SurfaceTexture;
-import android.hardware.Camera;
-import android.hardware.Camera.PreviewCallback;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.SystemClock;
@@ -53,9 +50,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import javax.microedition.khronos.egl.EGLContext;
-import javax.microedition.khronos.egl.EGL10;
-
// Android specific implementation of VideoCapturer.
// An instance of this class can be created by an application using
// VideoCapturerAndroid.create();
@@ -68,21 +62,22 @@ import javax.microedition.khronos.egl.EGL10;
// camera thread. The internal *OnCameraThread() methods must check |camera| for null to check if
// the camera has been stopped.
@SuppressWarnings("deprecation")
-public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallback,
+public class VideoCapturerAndroid extends VideoCapturer implements
+ android.hardware.Camera.PreviewCallback,
SurfaceTextureHelper.OnTextureFrameAvailableListener {
private final static String TAG = "VideoCapturerAndroid";
private final static int CAMERA_OBSERVER_PERIOD_MS = 2000;
+ private final static int CAMERA_FREEZE_REPORT_TIMEOUT_MS = 6000;
- private Camera camera; // Only non-null while capturing.
+ private android.hardware.Camera camera; // Only non-null while capturing.
private HandlerThread cameraThread;
private final Handler cameraThreadHandler;
private Context applicationContext;
// Synchronization lock for |id|.
private final Object cameraIdLock = new Object();
private int id;
- private Camera.CameraInfo info;
- private final FramePool videoBuffers;
- private final CameraStatistics cameraStatistics = new CameraStatistics();
+ private android.hardware.Camera.CameraInfo info;
+ private final CameraStatistics cameraStatistics;
// Remember the requested format in case we want to switch cameras.
private int requestedWidth;
private int requestedHeight;
@@ -94,17 +89,28 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private CapturerObserver frameObserver = null;
private final CameraEventsHandler eventsHandler;
private boolean firstFrameReported;
+ // Arbitrary queue depth. Higher number means more memory allocated & held,
+ // lower number means more sensitivity to processing time in the client (and
+ // potentially stalling the capturer if it runs out of buffers to write to).
+ private static final int NUMBER_OF_CAPTURE_BUFFERS = 3;
+ private final Set<byte[]> queuedBuffers = new HashSet<byte[]>();
private final boolean isCapturingToTexture;
- private final SurfaceTextureHelper surfaceHelper;
+ final SurfaceTextureHelper surfaceHelper; // Package visible for testing purposes.
// The camera API can output one old frame after the camera has been switched or the resolution
// has been changed. This flag is used for dropping the first frame after camera restart.
private boolean dropNextFrame = false;
+ // |openCameraOnCodecThreadRunner| is used for retrying to open the camera if it is in use by
+ // another application when startCaptureOnCameraThread is called.
+ private Runnable openCameraOnCodecThreadRunner;
+ private final static int MAX_OPEN_CAMERA_ATTEMPTS = 3;
+ private final static int OPEN_CAMERA_DELAY_MS = 500;
+ private int openCameraAttempts;
// Camera error callback.
- private final Camera.ErrorCallback cameraErrorCallback =
- new Camera.ErrorCallback() {
+ private final android.hardware.Camera.ErrorCallback cameraErrorCallback =
+ new android.hardware.Camera.ErrorCallback() {
@Override
- public void onError(int error, Camera camera) {
+ public void onError(int error, android.hardware.Camera camera) {
String errorMessage;
if (error == android.hardware.Camera.CAMERA_ERROR_SERVER_DIED) {
errorMessage = "Camera server died!";
@@ -120,47 +126,45 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Camera observer - monitors camera framerate. Observer is executed on camera thread.
private final Runnable cameraObserver = new Runnable() {
+ private int freezePeriodCount;
@Override
public void run() {
int cameraFramesCount = cameraStatistics.getAndResetFrameCount();
int cameraFps = (cameraFramesCount * 1000 + CAMERA_OBSERVER_PERIOD_MS / 2)
/ CAMERA_OBSERVER_PERIOD_MS;
- Logging.d(TAG, "Camera fps: " + cameraFps +
- ". Pending buffers: " + cameraStatistics.pendingFramesTimeStamps());
+ Logging.d(TAG, "Camera fps: " + cameraFps +".");
if (cameraFramesCount == 0) {
- Logging.e(TAG, "Camera freezed.");
- if (eventsHandler != null) {
- eventsHandler.onCameraError("Camera failure.");
+ ++freezePeriodCount;
+ if (CAMERA_OBSERVER_PERIOD_MS * freezePeriodCount > CAMERA_FREEZE_REPORT_TIMEOUT_MS
+ && eventsHandler != null) {
+ Logging.e(TAG, "Camera freezed.");
+ if (surfaceHelper.isTextureInUse()) {
+ // This can only happen if we are capturing to textures.
+ eventsHandler.onCameraFreezed("Camera failure. Client must return video buffers.");
+ } else {
+ eventsHandler.onCameraFreezed("Camera failure.");
+ }
+ return;
}
} else {
- cameraThreadHandler.postDelayed(this, CAMERA_OBSERVER_PERIOD_MS);
+ freezePeriodCount = 0;
}
+ cameraThreadHandler.postDelayed(this, CAMERA_OBSERVER_PERIOD_MS);
}
};
private static class CameraStatistics {
private int frameCount = 0;
private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
- private final Set<Long> timeStampsNs = new HashSet<Long>();
CameraStatistics() {
threadChecker.detachThread();
}
- public void addPendingFrame(long timestamp) {
+ public void addFrame() {
threadChecker.checkIsOnValidThread();
++frameCount;
- timeStampsNs.add(timestamp);
- }
-
- public void frameReturned(long timestamp) {
- threadChecker.checkIsOnValidThread();
- if (!timeStampsNs.contains(timestamp)) {
- throw new IllegalStateException(
- "CameraStatistics.frameReturned called with unknown timestamp " + timestamp);
- }
- timeStampsNs.remove(timestamp);
}
public int getAndResetFrameCount() {
@@ -169,28 +173,16 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
frameCount = 0;
return count;
}
-
- // Return number of pending frames that have not been returned.
- public int pendingFramesCount() {
- threadChecker.checkIsOnValidThread();
- return timeStampsNs.size();
- }
-
- public String pendingFramesTimeStamps() {
- threadChecker.checkIsOnValidThread();
- List<Long> timeStampsMs = new ArrayList<Long>();
- for (long ts : timeStampsNs) {
- timeStampsMs.add(TimeUnit.NANOSECONDS.toMillis(ts));
- }
- return timeStampsMs.toString();
- }
}
public static interface CameraEventsHandler {
- // Camera error handler - invoked when camera stops receiving frames
+ // Camera error handler - invoked when the camera cannot be opened
// or any camera exception happens on camera thread.
void onCameraError(String errorDescription);
+ // Invoked when the camera stops receiving frames.
+ void onCameraFreezed(String errorDescription);
+
// Callback invoked when camera is opening.
void onCameraOpening(int cameraId);
@@ -216,7 +208,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
public static VideoCapturerAndroid create(String name,
- CameraEventsHandler eventsHandler, EGLContext sharedEglContext) {
+ CameraEventsHandler eventsHandler, EglBase.Context sharedEglContext) {
final int cameraId = lookupDeviceName(name);
if (cameraId == -1) {
return null;
@@ -224,7 +216,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
final VideoCapturerAndroid capturer = new VideoCapturerAndroid(cameraId, eventsHandler,
sharedEglContext);
- capturer.setNativeCapturer(nativeCreateVideoCapturer(capturer));
+ capturer.setNativeCapturer(
+ nativeCreateVideoCapturer(capturer, capturer.surfaceHelper));
return capturer;
}
@@ -243,7 +236,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Switch camera to the next valid camera id. This can only be called while
// the camera is running.
public void switchCamera(final CameraSwitchHandler handler) {
- if (Camera.getNumberOfCameras() < 2) {
+ if (android.hardware.Camera.getNumberOfCameras() < 2) {
if (handler != null) {
handler.onCameraSwitchError("No camera to switch to.");
}
@@ -274,7 +267,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
pendingCameraSwitch = false;
}
if (handler != null) {
- handler.onCameraSwitchDone(info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT);
+ handler.onCameraSwitchDone(
+ info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT);
}
}
});
@@ -282,6 +276,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Requests a new output format from the video capturer. Captured frames
// by the camera will be scaled and/or dropped by the video capturer.
+ // It does not matter if width and height are flipped. E.g., |width| = 640, |height| = 480 produces
+ // the same result as |width| = 480, |height| = 640.
// TODO(magjed/perkj): Document what this function does. Change name?
public void onOutputFormatRequest(final int width, final int height, final int framerate) {
cameraThreadHandler.post(new Runnable() {
@@ -303,7 +299,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Helper function to retrieve the current camera id synchronously. Note that the camera id might
// change at any point by switchCamera() calls.
- private int getCurrentCameraId() {
+ int getCurrentCameraId() {
synchronized (cameraIdLock) {
return id;
}
@@ -329,20 +325,19 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
private VideoCapturerAndroid(int cameraId, CameraEventsHandler eventsHandler,
- EGLContext sharedContext) {
- Logging.d(TAG, "VideoCapturerAndroid");
+ EglBase.Context sharedContext) {
this.id = cameraId;
this.eventsHandler = eventsHandler;
cameraThread = new HandlerThread(TAG);
cameraThread.start();
cameraThreadHandler = new Handler(cameraThread.getLooper());
- videoBuffers = new FramePool(cameraThread);
isCapturingToTexture = (sharedContext != null);
- surfaceHelper = SurfaceTextureHelper.create(
- isCapturingToTexture ? sharedContext : EGL10.EGL_NO_CONTEXT, cameraThreadHandler);
+ cameraStatistics = new CameraStatistics();
+ surfaceHelper = SurfaceTextureHelper.create(sharedContext, cameraThreadHandler);
if (isCapturingToTexture) {
surfaceHelper.setListener(this);
}
+ Logging.d(TAG, "VideoCapturerAndroid isCapturingToTexture : " + isCapturingToTexture);
}
private void checkIsOnCameraThread() {
@@ -355,13 +350,13 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// found. If |deviceName| is empty, the first available device is used.
private static int lookupDeviceName(String deviceName) {
Logging.d(TAG, "lookupDeviceName: " + deviceName);
- if (deviceName == null || Camera.getNumberOfCameras() == 0) {
+ if (deviceName == null || android.hardware.Camera.getNumberOfCameras() == 0) {
return -1;
}
if (deviceName.isEmpty()) {
return 0;
}
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
if (deviceName.equals(CameraEnumerationAndroid.getDeviceName(i))) {
return i;
}
@@ -382,14 +377,9 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
if (camera != null) {
throw new IllegalStateException("Release called while camera is running");
}
- if (cameraStatistics.pendingFramesCount() != 0) {
- throw new IllegalStateException("Release called with pending frames left");
- }
}
});
- surfaceHelper.disconnect();
- cameraThread.quit();
- ThreadUtils.joinUninterruptibly(cameraThread);
+ surfaceHelper.disconnect(cameraThreadHandler);
cameraThread = null;
}
@@ -413,6 +403,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
if (frameObserver == null) {
throw new RuntimeException("frameObserver not set.");
}
+
cameraThreadHandler.post(new Runnable() {
@Override public void run() {
startCaptureOnCameraThread(width, height, framerate, frameObserver,
@@ -422,8 +413,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
private void startCaptureOnCameraThread(
- int width, int height, int framerate, CapturerObserver frameObserver,
- Context applicationContext) {
+ final int width, final int height, final int framerate, final CapturerObserver frameObserver,
+ final Context applicationContext) {
Throwable error = null;
checkIsOnCameraThread();
if (camera != null) {
@@ -431,17 +422,36 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
this.applicationContext = applicationContext;
this.frameObserver = frameObserver;
+ this.firstFrameReported = false;
+
try {
- synchronized (cameraIdLock) {
- Logging.d(TAG, "Opening camera " + id);
- firstFrameReported = false;
- if (eventsHandler != null) {
- eventsHandler.onCameraOpening(id);
+ try {
+ synchronized (cameraIdLock) {
+ Logging.d(TAG, "Opening camera " + id);
+ if (eventsHandler != null) {
+ eventsHandler.onCameraOpening(id);
+ }
+ camera = android.hardware.Camera.open(id);
+ info = new android.hardware.Camera.CameraInfo();
+ android.hardware.Camera.getCameraInfo(id, info);
+ }
+ } catch (RuntimeException e) {
+ openCameraAttempts++;
+ if (openCameraAttempts < MAX_OPEN_CAMERA_ATTEMPTS) {
+ Logging.e(TAG, "Camera.open failed, retrying", e);
+ openCameraOnCodecThreadRunner = new Runnable() {
+ @Override public void run() {
+ startCaptureOnCameraThread(width, height, framerate, frameObserver,
+ applicationContext);
+ }
+ };
+ cameraThreadHandler.postDelayed(openCameraOnCodecThreadRunner, OPEN_CAMERA_DELAY_MS);
+ return;
}
- camera = Camera.open(id);
- info = new Camera.CameraInfo();
- Camera.getCameraInfo(id, info);
+ openCameraAttempts = 0;
+ throw e;
}
+
try {
camera.setPreviewTexture(surfaceHelper.getSurfaceTexture());
} catch (IOException e) {
@@ -485,17 +495,18 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
requestedFramerate = framerate;
// Find closest supported format for |width| x |height| @ |framerate|.
- final Camera.Parameters parameters = camera.getParameters();
+ final android.hardware.Camera.Parameters parameters = camera.getParameters();
final int[] range = CameraEnumerationAndroid.getFramerateRange(parameters, framerate * 1000);
- final Camera.Size previewSize = CameraEnumerationAndroid.getClosestSupportedSize(
- parameters.getSupportedPreviewSizes(), width, height);
+ final android.hardware.Camera.Size previewSize =
+ CameraEnumerationAndroid.getClosestSupportedSize(
+ parameters.getSupportedPreviewSizes(), width, height);
final CaptureFormat captureFormat = new CaptureFormat(
previewSize.width, previewSize.height,
- range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX],
- range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
+ range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX],
+ range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
// If we are already using this capture format, we don't need to do anything.
- if (captureFormat.equals(this.captureFormat)) {
+ if (captureFormat.isSameFormat(this.captureFormat)) {
return;
}
@@ -511,11 +522,15 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
parameters.setPreviewFpsRange(captureFormat.minFramerate, captureFormat.maxFramerate);
}
parameters.setPreviewSize(captureFormat.width, captureFormat.height);
- parameters.setPreviewFormat(captureFormat.imageFormat);
+
+ if (!isCapturingToTexture) {
+ parameters.setPreviewFormat(captureFormat.imageFormat);
+ }
// Picture size is for taking pictures and not for preview/video, but we need to set it anyway
// as a workaround for an aspect ratio problem on Nexus 7.
- final Camera.Size pictureSize = CameraEnumerationAndroid.getClosestSupportedSize(
- parameters.getSupportedPictureSizes(), width, height);
+ final android.hardware.Camera.Size pictureSize =
+ CameraEnumerationAndroid.getClosestSupportedSize(
+ parameters.getSupportedPictureSizes(), width, height);
parameters.setPictureSize(pictureSize.width, pictureSize.height);
// Temporarily stop preview if it's already running.
@@ -532,13 +547,19 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
this.captureFormat = captureFormat;
List<String> focusModes = parameters.getSupportedFocusModes();
- if (focusModes.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
- parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
+ if (focusModes.contains(android.hardware.Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ parameters.setFocusMode(android.hardware.Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
}
camera.setParameters(parameters);
if (!isCapturingToTexture) {
- videoBuffers.queueCameraBuffers(captureFormat.frameSize(), camera);
+ queuedBuffers.clear();
+ final int frameSize = captureFormat.frameSize();
+ for (int i = 0; i < NUMBER_OF_CAPTURE_BUFFERS; ++i) {
+ final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
+ queuedBuffers.add(buffer.array());
+ camera.addCallbackBuffer(buffer.array());
+ }
camera.setPreviewCallbackWithBuffer(this);
}
camera.startPreview();
@@ -561,6 +582,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private void stopCaptureOnCameraThread() {
checkIsOnCameraThread();
Logging.d(TAG, "stopCaptureOnCameraThread");
+ if (openCameraOnCodecThreadRunner != null) {
+ cameraThreadHandler.removeCallbacks(openCameraOnCodecThreadRunner);
+ }
+ openCameraAttempts = 0;
if (camera == null) {
Logging.e(TAG, "Calling stopCapture() for already stopped camera.");
return;
@@ -571,13 +596,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
Logging.d(TAG, "Stop preview.");
camera.stopPreview();
camera.setPreviewCallbackWithBuffer(null);
- if (!isCapturingToTexture()) {
- videoBuffers.stopReturnBuffersToCamera();
- Logging.d(TAG, "stopReturnBuffersToCamera called."
- + (cameraStatistics.pendingFramesCount() == 0?
- " All buffers have been returned."
- : " Pending buffers: " + cameraStatistics.pendingFramesTimeStamps() + "."));
- }
+ queuedBuffers.clear();
captureFormat = null;
Logging.d(TAG, "Release camera.");
@@ -593,7 +612,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
Logging.d(TAG, "switchCameraOnCameraThread");
stopCaptureOnCameraThread();
synchronized (cameraIdLock) {
- id = (id + 1) % Camera.getNumberOfCameras();
+ id = (id + 1) % android.hardware.Camera.getNumberOfCameras();
}
dropNextFrame = true;
startCaptureOnCameraThread(requestedWidth, requestedHeight, requestedFramerate, frameObserver,
@@ -612,17 +631,9 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
frameObserver.onOutputFormatRequest(width, height, framerate);
}
- public void returnBuffer(final long timeStamp) {
- cameraThreadHandler.post(new Runnable() {
- @Override public void run() {
- cameraStatistics.frameReturned(timeStamp);
- if (isCapturingToTexture) {
- surfaceHelper.returnTextureFrame();
- } else {
- videoBuffers.returnBuffer(timeStamp);
- }
- }
- });
+ // Exposed for testing purposes only.
+ Handler getCameraThreadHandler() {
+ return cameraThreadHandler;
}
private int getDeviceOrientation() {
@@ -650,7 +661,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private int getFrameOrientation() {
int rotation = getDeviceOrientation();
- if (info.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
+ if (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_BACK) {
rotation = 360 - rotation;
}
return (info.orientation + rotation) % 360;
@@ -658,9 +669,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Called on cameraThread, so it must not be "synchronized".
@Override
- public void onPreviewFrame(byte[] data, Camera callbackCamera) {
+ public void onPreviewFrame(byte[] data, android.hardware.Camera callbackCamera) {
checkIsOnCameraThread();
- if (camera == null) {
+ if (camera == null || !queuedBuffers.contains(data)) {
+ // The camera has been stopped or |data| is an old invalid buffer.
return;
}
if (camera != callbackCamera) {
@@ -675,16 +687,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
firstFrameReported = true;
}
- // Mark the frame owning |data| as used.
- // Note that since data is directBuffer,
- // data.length >= videoBuffers.frameSize.
- if (videoBuffers.reserveByteBuffer(data, captureTimeNs)) {
- cameraStatistics.addPendingFrame(captureTimeNs);
- frameObserver.onByteBufferFrameCaptured(data, videoBuffers.frameSize, captureFormat.width,
- captureFormat.height, getFrameOrientation(), captureTimeNs);
- } else {
- Logging.w(TAG, "reserveByteBuffer failed - dropping frame.");
- }
+ cameraStatistics.addFrame();
+ frameObserver.onByteBufferFrameCaptured(data, captureFormat.width, captureFormat.height,
+ getFrameOrientation(), captureTimeNs);
+ camera.addCallbackBuffer(data);
}
@Override
@@ -696,135 +702,22 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
surfaceHelper.returnTextureFrame();
return;
}
- if (!dropNextFrame) {
+ if (dropNextFrame) {
surfaceHelper.returnTextureFrame();
- dropNextFrame = true;
+ dropNextFrame = false;
return;
}
int rotation = getFrameOrientation();
- if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
+ if (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT) {
// Undo the mirror that the OS "helps" us with.
// http://developer.android.com/reference/android/hardware/Camera.html#setDisplayOrientation(int)
transformMatrix =
RendererCommon.multiplyMatrices(transformMatrix, RendererCommon.horizontalFlipMatrix());
}
- transformMatrix = RendererCommon.rotateTextureMatrix(transformMatrix, rotation);
-
- final int rotatedWidth = (rotation % 180 == 0) ? captureFormat.width : captureFormat.height;
- final int rotatedHeight = (rotation % 180 == 0) ? captureFormat.height : captureFormat.width;
- cameraStatistics.addPendingFrame(timestampNs);
- frameObserver.onTextureFrameCaptured(rotatedWidth, rotatedHeight, oesTextureId,
- transformMatrix, timestampNs);
- }
-
- // Class used for allocating and bookkeeping video frames. All buffers are
- // direct allocated so that they can be directly used from native code. This class is
- // not thread-safe, and enforces single thread use.
- private static class FramePool {
- // Thread that all calls should be made on.
- private final Thread thread;
- // Arbitrary queue depth. Higher number means more memory allocated & held,
- // lower number means more sensitivity to processing time in the client (and
- // potentially stalling the capturer if it runs out of buffers to write to).
- private static final int numCaptureBuffers = 3;
- // This container tracks the buffers added as camera callback buffers. It is needed for finding
- // the corresponding ByteBuffer given a byte[].
- private final Map<byte[], ByteBuffer> queuedBuffers = new IdentityHashMap<byte[], ByteBuffer>();
- // This container tracks the frames that have been sent but not returned. It is needed for
- // keeping the buffers alive and for finding the corresponding ByteBuffer given a timestamp.
- private final Map<Long, ByteBuffer> pendingBuffers = new HashMap<Long, ByteBuffer>();
- private int frameSize = 0;
- private Camera camera;
-
- public FramePool(Thread thread) {
- this.thread = thread;
- }
-
- private void checkIsOnValidThread() {
- if (Thread.currentThread() != thread) {
- throw new IllegalStateException("Wrong thread");
- }
- }
-
- // Discards previous queued buffers and adds new callback buffers to camera.
- public void queueCameraBuffers(int frameSize, Camera camera) {
- checkIsOnValidThread();
- this.camera = camera;
- this.frameSize = frameSize;
-
- queuedBuffers.clear();
- for (int i = 0; i < numCaptureBuffers; ++i) {
- final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
- camera.addCallbackBuffer(buffer.array());
- queuedBuffers.put(buffer.array(), buffer);
- }
- Logging.d(TAG, "queueCameraBuffers enqueued " + numCaptureBuffers
- + " buffers of size " + frameSize + ".");
- }
-
- public void stopReturnBuffersToCamera() {
- checkIsOnValidThread();
- this.camera = null;
- queuedBuffers.clear();
- // Frames in |pendingBuffers| need to be kept alive until they are returned.
- }
-
- public boolean reserveByteBuffer(byte[] data, long timeStamp) {
- checkIsOnValidThread();
- final ByteBuffer buffer = queuedBuffers.remove(data);
- if (buffer == null) {
- // Frames might be posted to |onPreviewFrame| with the previous format while changing
- // capture format in |startPreviewOnCameraThread|. Drop these old frames.
- Logging.w(TAG, "Received callback buffer from previous configuration with length: "
- + (data == null ? "null" : data.length));
- return false;
- }
- if (buffer.capacity() != frameSize) {
- throw new IllegalStateException("Callback buffer has unexpected frame size");
- }
- if (pendingBuffers.containsKey(timeStamp)) {
- Logging.e(TAG, "Timestamp already present in pending buffers - they need to be unique");
- return false;
- }
- pendingBuffers.put(timeStamp, buffer);
- if (queuedBuffers.isEmpty()) {
- Logging.d(TAG, "Camera is running out of capture buffers.");
- }
- return true;
- }
-
- public void returnBuffer(long timeStamp) {
- checkIsOnValidThread();
- final ByteBuffer returnedFrame = pendingBuffers.remove(timeStamp);
- if (returnedFrame == null) {
- throw new RuntimeException("unknown data buffer with time stamp "
- + timeStamp + "returned?!?");
- }
-
- if (camera != null && returnedFrame.capacity() == frameSize) {
- camera.addCallbackBuffer(returnedFrame.array());
- if (queuedBuffers.isEmpty()) {
- Logging.d(TAG, "Frame returned when camera is running out of capture"
- + " buffers for TS " + TimeUnit.NANOSECONDS.toMillis(timeStamp));
- }
- queuedBuffers.put(returnedFrame.array(), returnedFrame);
- return;
- }
-
- if (returnedFrame.capacity() != frameSize) {
- Logging.d(TAG, "returnBuffer with time stamp "
- + TimeUnit.NANOSECONDS.toMillis(timeStamp)
- + " called with old frame size, " + returnedFrame.capacity() + ".");
- // Since this frame has the wrong size, don't requeue it. Frames with the correct size are
- // created in queueCameraBuffers so this must be an old buffer.
- return;
- }
-
- Logging.d(TAG, "returnBuffer with time stamp "
- + TimeUnit.NANOSECONDS.toMillis(timeStamp)
- + " called after camera has been stopped.");
- }
+ cameraStatistics.addFrame();
+ frameObserver.onTextureFrameCaptured(captureFormat.width, captureFormat.height, oesTextureId,
+ transformMatrix, rotation, timestampNs);
}
// Interface used for providing callbacks to an observer.
@@ -835,13 +728,14 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Delivers a captured frame. Called on a Java thread owned by
// VideoCapturerAndroid.
- abstract void onByteBufferFrameCaptured(byte[] data, int length, int width, int height,
- int rotation, long timeStamp);
+ abstract void onByteBufferFrameCaptured(byte[] data, int width, int height, int rotation,
+ long timeStamp);
// Delivers a captured frame in a texture with id |oesTextureId|. Called on a Java thread
// owned by VideoCapturerAndroid.
abstract void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timestamp);
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timestamp);
// Requests an output format from the video capturer. Captured frames
// by the camera will be scaled and/or dropped by the video capturer.
@@ -864,17 +758,18 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
@Override
- public void onByteBufferFrameCaptured(byte[] data, int length, int width, int height,
+ public void onByteBufferFrameCaptured(byte[] data, int width, int height,
int rotation, long timeStamp) {
- nativeOnByteBufferFrameCaptured(nativeCapturer, data, length, width, height, rotation,
+ nativeOnByteBufferFrameCaptured(nativeCapturer, data, data.length, width, height, rotation,
timeStamp);
}
@Override
public void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timestamp) {
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timestamp) {
nativeOnTextureFrameCaptured(nativeCapturer, width, height, oesTextureId, transformMatrix,
- timestamp);
+ rotation, timestamp);
}
@Override
@@ -887,10 +782,12 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private native void nativeOnByteBufferFrameCaptured(long nativeCapturer,
byte[] data, int length, int width, int height, int rotation, long timeStamp);
private native void nativeOnTextureFrameCaptured(long nativeCapturer, int width, int height,
- int oesTextureId, float[] transformMatrix, long timestamp);
+ int oesTextureId, float[] transformMatrix, int rotation, long timestamp);
private native void nativeOnOutputFormatRequest(long nativeCapturer,
int width, int height, int framerate);
}
- private static native long nativeCreateVideoCapturer(VideoCapturerAndroid videoCapturer);
+ private static native long nativeCreateVideoCapturer(
+ VideoCapturerAndroid videoCapturer,
+ SurfaceTextureHelper surfaceHelper);
}
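
Two minimal sketches to illustrate the capturer changes above; neither is part of the patch, and all client-side names are illustrative.

First, the round-to-nearest integer division used by cameraObserver to compute fps, shown standalone (the 2000 ms period is a hypothetical value; CAMERA_OBSERVER_PERIOD_MS is defined elsewhere in this file):

    // Round-to-nearest integer fps: e.g. 29 frames over a 2000 ms period gives
    // (29 * 1000 + 1000) / 2000 = 15 fps rather than truncating 14.5 down to 14.
    static int roundedFps(int frameCount, int periodMs) {
      return (frameCount * 1000 + periodMs / 2) / periodMs;
    }

Second, a usage sketch for the new EglBase.Context-based create() overload. It assumes a renderer has already been initialized so that VideoRendererGui.getEglBaseContext() returns a usable context, and it passes a null CameraEventsHandler for brevity (a real client would supply one to receive onCameraError/onCameraFreezed):

    // Passing a non-null shared context enables capture-to-texture; passing
    // null falls back to byte-buffer capture using the callback-buffer pool.
    EglBase.Context sharedContext = VideoRendererGui.getEglBaseContext();
    VideoCapturerAndroid capturer = VideoCapturerAndroid.create(
        "" /* empty name: use the first available camera */,
        null /* CameraEventsHandler */,
        sharedContext);
    if (capturer == null) {
      // No camera matched the requested device name.
    }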
diff --git a/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java b/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
index bacd0cf11f..bb6f01cea2 100644
--- a/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
+++ b/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
@@ -38,7 +38,7 @@ import javax.microedition.khronos.opengles.GL10;
import android.annotation.SuppressLint;
import android.graphics.Point;
import android.graphics.Rect;
-import android.graphics.SurfaceTexture;
+import android.opengl.EGL14;
import android.opengl.GLES20;
import android.opengl.GLSurfaceView;
@@ -59,7 +59,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private static Runnable eglContextReady = null;
private static final String TAG = "VideoRendererGui";
private GLSurfaceView surface;
- private static EGLContext eglContext = null;
+ private static EglBase.Context eglContext = null;
// Indicates if SurfaceView.Renderer.onSurfaceCreated was called.
// If true then for every newly created yuv image renderer createTexture()
// should be called. The variable is accessed on multiple threads and
@@ -69,8 +69,6 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private int screenHeight;
// List of yuv renderers.
private final ArrayList<YuvImageRenderer> yuvImageRenderers;
- // |drawer| is synchronized on |yuvImageRenderers|.
- private GlRectDrawer drawer;
// Render and draw threads.
private static Thread renderFrameThread;
private static Thread drawThread;
@@ -99,6 +97,8 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
// currently leaking resources to avoid a rare crash in release() where the EGLContext has
// become invalid beforehand.
private int[] yuvTextures = { 0, 0, 0 };
+ private final RendererCommon.YuvUploader yuvUploader = new RendererCommon.YuvUploader();
+ private final RendererCommon.GlDrawer drawer;
// Resources for making a deep copy of incoming OES texture frame.
private GlTextureFrameBuffer textureCopy;
@@ -157,12 +157,13 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private YuvImageRenderer(
GLSurfaceView surface, int id,
int x, int y, int width, int height,
- RendererCommon.ScalingType scalingType, boolean mirror) {
+ RendererCommon.ScalingType scalingType, boolean mirror, RendererCommon.GlDrawer drawer) {
Logging.d(TAG, "YuvImageRenderer.Create id: " + id);
this.surface = surface;
this.id = id;
this.scalingType = scalingType;
this.mirror = mirror;
+ this.drawer = drawer;
layoutInPercentage = new Rect(x, y, Math.min(100, x + width), Math.min(100, y + height));
updateLayoutProperties = false;
rotationDegree = 0;
@@ -174,6 +175,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private synchronized void release() {
surface = null;
+ drawer.release();
synchronized (pendingFrameLock) {
if (pendingFrame != null) {
VideoRenderer.renderFrameDone(pendingFrame);
@@ -226,7 +228,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
}
- private void draw(GlRectDrawer drawer) {
+ private void draw() {
if (!seenFrame) {
// No frame received yet - nothing to render.
return;
@@ -241,29 +243,15 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
if (isNewFrame) {
+ rotatedSamplingMatrix = RendererCommon.rotateTextureMatrix(
+ pendingFrame.samplingMatrix, pendingFrame.rotationDegree);
if (pendingFrame.yuvFrame) {
rendererType = RendererType.RENDERER_YUV;
- drawer.uploadYuvData(yuvTextures, pendingFrame.width, pendingFrame.height,
+ yuvUploader.uploadYuvData(yuvTextures, pendingFrame.width, pendingFrame.height,
pendingFrame.yuvStrides, pendingFrame.yuvPlanes);
- // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
- // top-left corner of the image, but in glTexImage2D() the first element corresponds to
- // the bottom-left corner. We correct this discrepancy by setting a vertical flip as
- // sampling matrix.
- final float[] samplingMatrix = RendererCommon.verticalFlipMatrix();
- rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, pendingFrame.rotationDegree);
} else {
rendererType = RendererType.RENDERER_TEXTURE;
- // External texture rendering. Update texture image to latest and make a deep copy of
- // the external texture.
- // TODO(magjed): Move updateTexImage() to the video source instead.
- final SurfaceTexture surfaceTexture = (SurfaceTexture) pendingFrame.textureObject;
- surfaceTexture.updateTexImage();
- final float[] samplingMatrix = new float[16];
- surfaceTexture.getTransformMatrix(samplingMatrix);
- rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, pendingFrame.rotationDegree);
-
+ // External texture rendering. Make a deep copy of the external texture.
// Reallocate offscreen texture if necessary.
textureCopy.setSize(pendingFrame.rotatedWidth(), pendingFrame.rotatedHeight());
@@ -272,12 +260,13 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
GlUtil.checkNoGLES2Error("glBindFramebuffer");
// Copy the OES texture content. This will also normalize the sampling matrix.
- GLES20.glViewport(0, 0, textureCopy.getWidth(), textureCopy.getHeight());
- drawer.drawOes(pendingFrame.textureId, rotatedSamplingMatrix);
+ drawer.drawOes(pendingFrame.textureId, rotatedSamplingMatrix,
+ 0, 0, textureCopy.getWidth(), textureCopy.getHeight());
rotatedSamplingMatrix = RendererCommon.identityMatrix();
// Restore normal framebuffer.
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
+ GLES20.glFinish();
}
copyTimeNs += (System.nanoTime() - now);
VideoRenderer.renderFrameDone(pendingFrame);
@@ -285,17 +274,17 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
}
- // OpenGL defaults to lower left origin - flip vertically.
- GLES20.glViewport(displayLayout.left, screenHeight - displayLayout.bottom,
- displayLayout.width(), displayLayout.height());
-
updateLayoutMatrix();
final float[] texMatrix =
RendererCommon.multiplyMatrices(rotatedSamplingMatrix, layoutMatrix);
+ // OpenGL defaults to lower left origin - flip viewport position vertically.
+ final int viewportY = screenHeight - displayLayout.bottom;
if (rendererType == RendererType.RENDERER_YUV) {
- drawer.drawYuv(yuvTextures, texMatrix);
+ drawer.drawYuv(yuvTextures, texMatrix,
+ displayLayout.left, viewportY, displayLayout.width(), displayLayout.height());
} else {
- drawer.drawRgb(textureCopy.getTextureId(), texMatrix);
+ drawer.drawRgb(textureCopy.getTextureId(), texMatrix,
+ displayLayout.left, viewportY, displayLayout.width(), displayLayout.height());
}
if (isNewFrame) {
@@ -314,7 +303,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
". Dropped: " + framesDropped + ". Rendered: " + framesRendered);
if (framesReceived > 0 && framesRendered > 0) {
Logging.d(TAG, "Duration: " + (int)(timeSinceFirstFrameNs / 1e6) +
- " ms. FPS: " + (float)framesRendered * 1e9 / timeSinceFirstFrameNs);
+ " ms. FPS: " + framesRendered * 1e9 / timeSinceFirstFrameNs);
Logging.d(TAG, "Draw time: " +
(int) (drawTimeNs / (1000 * framesRendered)) + " us. Copy time: " +
(int) (copyTimeNs / (1000 * framesReceived)) + " us");
@@ -429,7 +418,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
eglContextReady = eglContextReadyCallback;
}
- public static synchronized EGLContext getEGLContext() {
+ public static synchronized EglBase.Context getEglBaseContext() {
return eglContext;
}
@@ -477,6 +466,16 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
*/
public static synchronized YuvImageRenderer create(int x, int y, int width, int height,
RendererCommon.ScalingType scalingType, boolean mirror) {
+ return create(x, y, width, height, scalingType, mirror, new GlRectDrawer());
+ }
+
+ /**
+ * Creates VideoRenderer.Callbacks with top left corner at (x, y) and resolution (width, height).
+ * All parameters are in percentage of screen resolution. The custom |drawer| will be used for
+ * drawing frames on the EGLSurface. This class is responsible for calling release() on |drawer|.
+ */
+ public static synchronized YuvImageRenderer create(int x, int y, int width, int height,
+ RendererCommon.ScalingType scalingType, boolean mirror, RendererCommon.GlDrawer drawer) {
// Check display region parameters.
if (x < 0 || x > 100 || y < 0 || y > 100 ||
width < 0 || width > 100 || height < 0 || height > 100 ||
@@ -490,7 +489,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
final YuvImageRenderer yuvImageRenderer = new YuvImageRenderer(
instance.surface, instance.yuvImageRenderers.size(),
- x, y, width, height, scalingType, mirror);
+ x, y, width, height, scalingType, mirror, drawer);
synchronized (instance.yuvImageRenderers) {
if (instance.onSurfaceCreatedCalled) {
// onSurfaceCreated has already been called for VideoRendererGui -
@@ -498,6 +497,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
// rendering list.
final CountDownLatch countDownLatch = new CountDownLatch(1);
instance.surface.queueEvent(new Runnable() {
+ @Override
public void run() {
yuvImageRenderer.createTextures();
yuvImageRenderer.setScreenSize(
@@ -608,13 +608,16 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
Logging.d(TAG, "VideoRendererGui.onSurfaceCreated");
// Store render EGL context.
synchronized (VideoRendererGui.class) {
- eglContext = ((EGL10) EGLContext.getEGL()).eglGetCurrentContext();
+ if (EglBase14.isEGL14Supported()) {
+ eglContext = new EglBase14.Context(EGL14.eglGetCurrentContext());
+ } else {
+ eglContext = new EglBase10.Context(((EGL10) EGLContext.getEGL()).eglGetCurrentContext());
+ }
+
Logging.d(TAG, "VideoRendererGui EGL Context: " + eglContext);
}
synchronized (yuvImageRenderers) {
- // Create drawer for YUV/OES frames.
- drawer = new GlRectDrawer();
// Create textures for all images.
for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
yuvImageRenderer.createTextures();
@@ -655,7 +658,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
synchronized (yuvImageRenderers) {
for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
- yuvImageRenderer.draw(drawer);
+ yuvImageRenderer.draw();
}
}
}
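
A short sketch of the new create() overload with an explicit drawer (illustrative only; SCALE_ASPECT_FILL is assumed to be one of the RendererCommon.ScalingType constants):

    // Fullscreen renderer with an explicitly supplied GlDrawer. Per the javadoc
    // above, VideoRendererGui is responsible for calling release() on |drawer|,
    // so the caller should not release it separately.
    YuvImageRenderer remoteRender = VideoRendererGui.create(
        0, 0, 100, 100,                                // percent of screen
        RendererCommon.ScalingType.SCALE_ASPECT_FILL,  // assumed constant
        false /* mirror */,
        new GlRectDrawer());                           // or any custom GlDrawer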
diff --git a/talk/app/webrtc/java/jni/androidmediacodeccommon.h b/talk/app/webrtc/java/jni/androidmediacodeccommon.h
index 348a716496..92ea135f12 100644
--- a/talk/app/webrtc/java/jni/androidmediacodeccommon.h
+++ b/talk/app/webrtc/java/jni/androidmediacodeccommon.h
@@ -72,6 +72,8 @@ enum { kMediaCodecTimeoutMs = 1000 };
enum { kMediaCodecStatisticsIntervalMs = 3000 };
// Maximum amount of pending frames for VP8 decoder.
enum { kMaxPendingFramesVp8 = 1 };
+// Maximum amount of pending frames for VP9 decoder.
+enum { kMaxPendingFramesVp9 = 1 };
// Maximum amount of pending frames for H.264 decoder.
enum { kMaxPendingFramesH264 = 30 };
// Maximum amount of decoded frames for which per-frame logging is enabled.
diff --git a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
index b664f16e2e..c3d287ce0d 100644
--- a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
@@ -33,14 +33,15 @@
#include "talk/app/webrtc/java/jni/androidmediacodeccommon.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/surfacetexturehelper_jni.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/logcat_trace_context.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "third_party/libyuv/include/libyuv/convert.h"
@@ -62,6 +63,7 @@ using webrtc::VideoCodec;
using webrtc::VideoCodecType;
using webrtc::kVideoCodecH264;
using webrtc::kVideoCodecVP8;
+using webrtc::kVideoCodecVP9;
namespace webrtc_jni {
@@ -87,9 +89,14 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
int32_t Release() override;
int32_t Reset() override;
+
+ bool PrefersLateDecoding() const override { return true; }
+
// rtc::MessageHandler implementation.
void OnMessage(rtc::Message* msg) override;
+ const char* ImplementationName() const override;
+
private:
// CHECK-fail if not running on |codec_thread_|.
void CheckOnCodecThread();
@@ -105,13 +112,17 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
// Type of video codec.
VideoCodecType codecType_;
+ // Render EGL context - owned by factory, should not be allocated/destroyed
+ // by VideoDecoder.
+ jobject render_egl_context_;
+
bool key_frame_required_;
bool inited_;
bool sw_fallback_required_;
bool use_surface_;
VideoCodec codec_;
webrtc::I420BufferPool decoded_frame_pool_;
- NativeHandleImpl native_handle_;
+ rtc::scoped_refptr<SurfaceTextureHelper> surface_texture_helper_;
DecodedImageCallback* callback_;
int frames_received_; // Number of frames received by decoder.
int frames_decoded_; // Number of frames decoded by decoder.
@@ -120,10 +131,6 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
int current_bytes_; // Encoded bytes in the current statistics interval.
int current_decoding_time_ms_; // Overall decoding time in the current second
uint32_t max_pending_frames_; // Maximum number of pending input frames
- std::vector<int32_t> timestamps_;
- std::vector<int64_t> ntp_times_ms_;
- std::vector<int64_t> frame_rtc_times_ms_; // Time when video frame is sent to
- // decoder input.
// State that is constant for the lifetime of this object once the ctor
// returns.
@@ -134,7 +141,8 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
jmethodID j_release_method_;
jmethodID j_dequeue_input_buffer_method_;
jmethodID j_queue_input_buffer_method_;
- jmethodID j_dequeue_output_buffer_method_;
+ jmethodID j_dequeue_byte_buffer_method_;
+ jmethodID j_dequeue_texture_buffer_method_;
jmethodID j_return_decoded_byte_buffer_method_;
// MediaCodecVideoDecoder fields.
jfieldID j_input_buffers_field_;
@@ -144,24 +152,23 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
jfieldID j_height_field_;
jfieldID j_stride_field_;
jfieldID j_slice_height_field_;
- jfieldID j_surface_texture_field_;
// MediaCodecVideoDecoder.DecodedTextureBuffer fields.
- jfieldID j_textureID_field_;
- jfieldID j_texture_presentation_timestamp_us_field_;
- // MediaCodecVideoDecoder.DecodedByteBuffer fields.
+ jfieldID j_texture_id_field_;
+ jfieldID j_transform_matrix_field_;
+ jfieldID j_texture_timestamp_ms_field_;
+ jfieldID j_texture_ntp_timestamp_ms_field_;
+ jfieldID j_texture_decode_time_ms_field_;
+ jfieldID j_texture_frame_delay_ms_field_;
+ // MediaCodecVideoDecoder.DecodedOutputBuffer fields.
jfieldID j_info_index_field_;
jfieldID j_info_offset_field_;
jfieldID j_info_size_field_;
- jfieldID j_info_presentation_timestamp_us_field_;
+ jfieldID j_info_timestamp_ms_field_;
+ jfieldID j_info_ntp_timestamp_ms_field_;
+ jfieldID j_byte_buffer_decode_time_ms_field_;
// Global references; must be deleted in Release().
std::vector<jobject> input_buffers_;
- jobject surface_texture_;
- jobject previous_surface_texture_;
-
- // Render EGL context - owned by factory, should not be allocated/destroyed
- // by VideoDecoder.
- jobject render_egl_context_;
};
MediaCodecVideoDecoder::MediaCodecVideoDecoder(
@@ -171,8 +178,6 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
key_frame_required_(true),
inited_(false),
sw_fallback_required_(false),
- surface_texture_(NULL),
- previous_surface_texture_(NULL),
codec_thread_(new Thread()),
j_media_codec_video_decoder_class_(
jni,
@@ -191,19 +196,22 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
j_init_decode_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "initDecode",
"(Lorg/webrtc/MediaCodecVideoDecoder$VideoCodecType;"
- "IILjavax/microedition/khronos/egl/EGLContext;)Z");
+ "IILorg/webrtc/SurfaceTextureHelper;)Z");
j_release_method_ =
GetMethodID(jni, *j_media_codec_video_decoder_class_, "release", "()V");
j_dequeue_input_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "dequeueInputBuffer", "()I");
j_queue_input_buffer_method_ = GetMethodID(
- jni, *j_media_codec_video_decoder_class_, "queueInputBuffer", "(IIJ)Z");
- j_dequeue_output_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_decoder_class_, "queueInputBuffer", "(IIJJJ)Z");
+ j_dequeue_byte_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "dequeueOutputBuffer",
- "(I)Ljava/lang/Object;");
+ "(I)Lorg/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer;");
+ j_dequeue_texture_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_decoder_class_, "dequeueTextureBuffer",
+ "(I)Lorg/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer;");
j_return_decoded_byte_buffer_method_ =
GetMethodID(jni, *j_media_codec_video_decoder_class_,
- "returnDecodedByteBuffer", "(I)V");
+ "returnDecodedOutputBuffer", "(I)V");
j_input_buffers_field_ = GetFieldID(
jni, *j_media_codec_video_decoder_class_,
@@ -221,28 +229,36 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
jni, *j_media_codec_video_decoder_class_, "stride", "I");
j_slice_height_field_ = GetFieldID(
jni, *j_media_codec_video_decoder_class_, "sliceHeight", "I");
- j_surface_texture_field_ = GetFieldID(
- jni, *j_media_codec_video_decoder_class_, "surfaceTexture",
- "Landroid/graphics/SurfaceTexture;");
- jclass j_decoder_decoded_texture_buffer_class = FindClass(jni,
+ jclass j_decoded_texture_buffer_class = FindClass(jni,
"org/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer");
- j_textureID_field_ = GetFieldID(
- jni, j_decoder_decoded_texture_buffer_class, "textureID", "I");
- j_texture_presentation_timestamp_us_field_ =
- GetFieldID(jni, j_decoder_decoded_texture_buffer_class,
- "presentationTimestampUs", "J");
-
- jclass j_decoder_decoded_byte_buffer_class = FindClass(jni,
- "org/webrtc/MediaCodecVideoDecoder$DecodedByteBuffer");
+ j_texture_id_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "textureID", "I");
+ j_transform_matrix_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "transformMatrix", "[F");
+ j_texture_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "timeStampMs", "J");
+ j_texture_ntp_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "ntpTimeStampMs", "J");
+ j_texture_decode_time_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "decodeTimeMs", "J");
+ j_texture_frame_delay_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "frameDelayMs", "J");
+
+ jclass j_decoded_output_buffer_class = FindClass(jni,
+ "org/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer");
j_info_index_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "index", "I");
+ jni, j_decoded_output_buffer_class, "index", "I");
j_info_offset_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "offset", "I");
+ jni, j_decoded_output_buffer_class, "offset", "I");
j_info_size_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "size", "I");
- j_info_presentation_timestamp_us_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "presentationTimestampUs", "J");
+ jni, j_decoded_output_buffer_class, "size", "I");
+ j_info_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "timeStampMs", "J");
+ j_info_ntp_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "ntpTimeStampMs", "J");
+ j_byte_buffer_decode_time_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "decodeTimeMs", "J");
CHECK_EXCEPTION(jni) << "MediaCodecVideoDecoder ctor failed";
use_surface_ = (render_egl_context_ != NULL);
@@ -254,14 +270,6 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
// Call Release() to ensure no more callbacks to us after we are deleted.
Release();
- // Delete global references.
- JNIEnv* jni = AttachCurrentThreadIfNeeded();
- if (previous_surface_texture_ != NULL) {
- jni->DeleteGlobalRef(previous_surface_texture_);
- }
- if (surface_texture_ != NULL) {
- jni->DeleteGlobalRef(surface_texture_);
- }
}
int32_t MediaCodecVideoDecoder::InitDecode(const VideoCodec* inst,
@@ -312,6 +320,21 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
frames_received_ = 0;
frames_decoded_ = 0;
+ jobject java_surface_texture_helper_ = nullptr;
+ if (use_surface_) {
+ java_surface_texture_helper_ = jni->CallStaticObjectMethod(
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ GetStaticMethodID(jni,
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ "create",
+ "(Lorg/webrtc/EglBase$Context;)"
+ "Lorg/webrtc/SurfaceTextureHelper;"),
+ render_egl_context_);
+ RTC_CHECK(java_surface_texture_helper_ != nullptr);
+ surface_texture_helper_ = new rtc::RefCountedObject<SurfaceTextureHelper>(
+ jni, java_surface_texture_helper_);
+ }
+
jobject j_video_codec_enum = JavaEnumFromIndex(
jni, "MediaCodecVideoDecoder$VideoCodecType", codecType_);
bool success = jni->CallBooleanMethod(
@@ -320,7 +343,7 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
j_video_codec_enum,
codec_.width,
codec_.height,
- use_surface_ ? render_egl_context_ : nullptr);
+ java_surface_texture_helper_);
if (CheckException(jni) || !success) {
ALOGE << "Codec initialization error - fallback to SW codec.";
sw_fallback_required_ = true;
@@ -332,6 +355,9 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
case kVideoCodecVP8:
max_pending_frames_ = kMaxPendingFramesVp8;
break;
+ case kVideoCodecVP9:
+ max_pending_frames_ = kMaxPendingFramesVp9;
+ break;
case kVideoCodecH264:
max_pending_frames_ = kMaxPendingFramesH264;
break;
@@ -342,9 +368,6 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
current_frames_ = 0;
current_bytes_ = 0;
current_decoding_time_ms_ = 0;
- timestamps_.clear();
- ntp_times_ms_.clear();
- frame_rtc_times_ms_.clear();
jobjectArray input_buffers = (jobjectArray)GetObjectField(
jni, *j_media_codec_video_decoder_, j_input_buffers_field_);
@@ -361,15 +384,6 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
}
}
- if (use_surface_) {
- jobject surface_texture = GetObjectField(
- jni, *j_media_codec_video_decoder_, j_surface_texture_field_);
- if (previous_surface_texture_ != NULL) {
- jni->DeleteGlobalRef(previous_surface_texture_);
- }
- previous_surface_texture_ = surface_texture_;
- surface_texture_ = jni->NewGlobalRef(surface_texture);
- }
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
return WEBRTC_VIDEO_CODEC_OK;
@@ -395,6 +409,7 @@ int32_t MediaCodecVideoDecoder::ReleaseOnCodecThread() {
}
input_buffers_.clear();
jni->CallVoidMethod(*j_media_codec_video_decoder_, j_release_method_);
+ surface_texture_helper_ = nullptr;
inited_ = false;
rtc::MessageQueueManager::Clear(this);
if (CheckException(jni)) {
@@ -501,19 +516,21 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
// Try to drain the decoder and wait until output is not too
// much behind the input.
- if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+ const int64 drain_start = GetCurrentTimeMs();
+ while ((frames_received_ > frames_decoded_ + max_pending_frames_) &&
+ (GetCurrentTimeMs() - drain_start) < kMediaCodecTimeoutMs) {
ALOGV("Received: %d. Decoded: %d. Wait for output...",
frames_received_, frames_decoded_);
- if (!DeliverPendingOutputs(jni, kMediaCodecTimeoutMs * 1000)) {
+ if (!DeliverPendingOutputs(jni, kMediaCodecPollMs)) {
ALOGE << "DeliverPendingOutputs error. Frames received: " <<
frames_received_ << ". Frames decoded: " << frames_decoded_;
return ProcessHWErrorOnCodecThread();
}
- if (frames_received_ > frames_decoded_ + max_pending_frames_) {
- ALOGE << "Output buffer dequeue timeout. Frames received: " <<
- frames_received_ << ". Frames decoded: " << frames_decoded_;
- return ProcessHWErrorOnCodecThread();
- }
+ }
+ if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+ ALOGE << "Output buffer dequeue timeout. Frames received: " <<
+ frames_received_ << ". Frames decoded: " << frames_decoded_;
+ return ProcessHWErrorOnCodecThread();
}
// Get input buffer.
@@ -535,11 +552,14 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
" is bigger than buffer size " << buffer_capacity;
return ProcessHWErrorOnCodecThread();
}
- jlong timestamp_us = (frames_received_ * 1000000) / codec_.maxFramerate;
+ jlong presentation_timestamp_us =
+ (frames_received_ * 1000000) / codec_.maxFramerate;
if (frames_decoded_ < kMaxDecodedLogFrames) {
ALOGD << "Decoder frame in # " << frames_received_ << ". Type: "
<< inputImage._frameType << ". Buffer # " <<
- j_input_buffer_index << ". TS: " << (int)(timestamp_us / 1000)
+ j_input_buffer_index << ". pTS: "
+ << (int)(presentation_timestamp_us / 1000)
+ << ". TS: " << inputImage._timeStamp
<< ". Size: " << inputImage._length;
}
memcpy(buffer, inputImage._buffer, inputImage._length);
@@ -547,16 +567,16 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
// Save input image timestamps for later output.
frames_received_++;
current_bytes_ += inputImage._length;
- timestamps_.push_back(inputImage._timeStamp);
- ntp_times_ms_.push_back(inputImage.ntp_time_ms_);
- frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
// Feed input to decoder.
- bool success = jni->CallBooleanMethod(*j_media_codec_video_decoder_,
- j_queue_input_buffer_method_,
- j_input_buffer_index,
- inputImage._length,
- timestamp_us);
+ bool success = jni->CallBooleanMethod(
+ *j_media_codec_video_decoder_,
+ j_queue_input_buffer_method_,
+ j_input_buffer_index,
+ inputImage._length,
+ presentation_timestamp_us,
+ static_cast<int64_t> (inputImage._timeStamp),
+ inputImage.ntp_time_ms_);
if (CheckException(jni) || !success) {
ALOGE << "queueInputBuffer error";
return ProcessHWErrorOnCodecThread();
@@ -572,16 +592,18 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
}
bool MediaCodecVideoDecoder::DeliverPendingOutputs(
- JNIEnv* jni, int dequeue_timeout_us) {
+ JNIEnv* jni, int dequeue_timeout_ms) {
if (frames_received_ <= frames_decoded_) {
// No need to query for output buffers - decoder is drained.
return true;
}
// Get decoder output.
- jobject j_decoder_output_buffer = jni->CallObjectMethod(
- *j_media_codec_video_decoder_,
- j_dequeue_output_buffer_method_,
- dequeue_timeout_us);
+ jobject j_decoder_output_buffer =
+ jni->CallObjectMethod(*j_media_codec_video_decoder_,
+ use_surface_ ? j_dequeue_texture_buffer_method_
+ : j_dequeue_byte_buffer_method_,
+ dequeue_timeout_ms);
+
if (CheckException(jni)) {
ALOGE << "dequeueOutputBuffer() error";
return false;
@@ -601,19 +623,35 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
j_slice_height_field_);
rtc::scoped_refptr<webrtc::VideoFrameBuffer> frame_buffer;
- long output_timestamps_ms = 0;
+ int64_t output_timestamps_ms = 0;
+ int64_t output_ntp_timestamps_ms = 0;
+ int decode_time_ms = 0;
+ int64_t frame_delayed_ms = 0;
if (use_surface_) {
// Extract data from Java DecodedTextureBuffer.
const int texture_id =
- GetIntField(jni, j_decoder_output_buffer, j_textureID_field_);
- const int64_t timestamp_us =
- GetLongField(jni, j_decoder_output_buffer,
- j_texture_presentation_timestamp_us_field_);
- output_timestamps_ms = timestamp_us / rtc::kNumMicrosecsPerMillisec;
- // Create webrtc::VideoFrameBuffer with native texture handle.
- native_handle_.SetTextureObject(surface_texture_, texture_id);
- frame_buffer = new rtc::RefCountedObject<JniNativeHandleBuffer>(
- &native_handle_, width, height);
+ GetIntField(jni, j_decoder_output_buffer, j_texture_id_field_);
+ if (texture_id != 0) { // |texture_id| == 0 represents a dropped frame.
+ const jfloatArray j_transform_matrix =
+ reinterpret_cast<jfloatArray>(GetObjectField(
+ jni, j_decoder_output_buffer, j_transform_matrix_field_));
+ output_timestamps_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_timestamp_ms_field_);
+ output_ntp_timestamps_ms =
+ GetLongField(jni, j_decoder_output_buffer,
+ j_texture_ntp_timestamp_ms_field_);
+ decode_time_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_decode_time_ms_field_);
+ frame_delayed_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_frame_delay_ms_field_);
+
+ // Create webrtc::VideoFrameBuffer with native texture handle.
+ frame_buffer = surface_texture_helper_->CreateTextureFrame(
+ width, height, NativeHandleImpl(jni, texture_id, j_transform_matrix));
+ }
} else {
// Extract data from Java ByteBuffer and create output yuv420 frame -
// for non surface decoding only.
@@ -623,9 +661,14 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
GetIntField(jni, j_decoder_output_buffer, j_info_offset_field_);
const int output_buffer_size =
GetIntField(jni, j_decoder_output_buffer, j_info_size_field_);
- const int64_t timestamp_us = GetLongField(
- jni, j_decoder_output_buffer, j_info_presentation_timestamp_us_field_);
- output_timestamps_ms = timestamp_us / rtc::kNumMicrosecsPerMillisec;
+ output_timestamps_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_info_timestamp_ms_field_);
+ output_ntp_timestamps_ms =
+ GetLongField(jni, j_decoder_output_buffer,
+ j_info_ntp_timestamp_ms_field_);
+
+ decode_time_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_byte_buffer_decode_time_ms_field_);
if (output_buffer_size < width * height * 3 / 2) {
ALOGE << "Insufficient output buffer size: " << output_buffer_size;
@@ -683,41 +726,31 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
j_return_decoded_byte_buffer_method_,
output_buffer_index);
if (CheckException(jni)) {
- ALOGE << "returnDecodedByteBuffer error";
+ ALOGE << "returnDecodedOutputBuffer error";
return false;
}
}
VideoFrame decoded_frame(frame_buffer, 0, 0, webrtc::kVideoRotation_0);
+ decoded_frame.set_timestamp(output_timestamps_ms);
+ decoded_frame.set_ntp_time_ms(output_ntp_timestamps_ms);
- // Get frame timestamps from a queue.
- if (timestamps_.size() > 0) {
- decoded_frame.set_timestamp(timestamps_.front());
- timestamps_.erase(timestamps_.begin());
- }
- if (ntp_times_ms_.size() > 0) {
- decoded_frame.set_ntp_time_ms(ntp_times_ms_.front());
- ntp_times_ms_.erase(ntp_times_ms_.begin());
- }
- int64_t frame_decoding_time_ms = 0;
- if (frame_rtc_times_ms_.size() > 0) {
- frame_decoding_time_ms = GetCurrentTimeMs() - frame_rtc_times_ms_.front();
- frame_rtc_times_ms_.erase(frame_rtc_times_ms_.begin());
- }
if (frames_decoded_ < kMaxDecodedLogFrames) {
ALOGD << "Decoder frame out # " << frames_decoded_ << ". " << width <<
" x " << height << ". " << stride << " x " << slice_height <<
- ". Color: " << color_format << ". TS:" << (int)output_timestamps_ms <<
- ". DecTime: " << (int)frame_decoding_time_ms;
+ ". Color: " << color_format << ". TS:" << decoded_frame.timestamp() <<
+ ". DecTime: " << (int)decode_time_ms <<
+ ". DelayTime: " << (int)frame_delayed_ms;
}
// Calculate and print decoding statistics - every 3 seconds.
frames_decoded_++;
current_frames_++;
- current_decoding_time_ms_ += frame_decoding_time_ms;
+ current_decoding_time_ms_ += decode_time_ms;
int statistic_time_ms = GetCurrentTimeMs() - start_time_ms_;
if (statistic_time_ms >= kMediaCodecStatisticsIntervalMs &&
current_frames_ > 0) {
- ALOGD << "Decoded frames: " << frames_decoded_ << ". Bitrate: " <<
+ ALOGD << "Decoded frames: " << frames_decoded_ << ". Received frames: "
+ << frames_received_ << ". Bitrate: " <<
(current_bytes_ * 8 / statistic_time_ms) << " kbps, fps: " <<
((current_frames_ * 1000 + statistic_time_ms / 2) / statistic_time_ms)
<< ". decTime: " << (current_decoding_time_ms_ / current_frames_) <<
@@ -728,12 +761,15 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
current_decoding_time_ms_ = 0;
}
- // Callback - output decoded frame.
- const int32_t callback_status = callback_->Decoded(decoded_frame);
- if (callback_status > 0) {
- ALOGE << "callback error";
+ // |IsZeroSize()| returns true when a frame has been dropped.
+ if (!decoded_frame.IsZeroSize()) {
+ // Callback - output decoded frame.
+ const int32_t callback_status =
+ callback_->Decoded(decoded_frame, decode_time_ms);
+ if (callback_status > 0) {
+ ALOGE << "callback error";
+ }
}
-
return true;
}
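The bitrate and fps figures in the periodic log line above are plain integer arithmetic over the interval counters. A minimal standalone sketch of the same computation, using hypothetical counter values (90000 bytes and 90 frames over a 3 second interval):

// Sketch of the decoder statistics math above (hypothetical counters).
#include <cstdint>
#include <cstdio>

int main() {
  int64_t current_bytes = 90000;   // bytes decoded in the interval
  int current_frames = 90;         // frames decoded in the interval
  int statistic_time_ms = 3000;    // ~kMediaCodecStatisticsIntervalMs

  // bytes * 8 bits / ms == kilobits per second.
  int64_t bitrate_kbps = current_bytes * 8 / statistic_time_ms;
  // Adding statistic_time_ms / 2 before dividing rounds to the nearest fps.
  int fps = (current_frames * 1000 + statistic_time_ms / 2) / statistic_time_ms;

  // Prints: Bitrate: 240 kbps, fps: 30
  printf("Bitrate: %lld kbps, fps: %d\n", (long long)bitrate_kbps, fps);
  return 0;
}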
@@ -790,6 +826,17 @@ MediaCodecVideoDecoderFactory::MediaCodecVideoDecoderFactory() :
supported_codec_types_.push_back(kVideoCodecVP8);
}
+ bool is_vp9_hw_supported = jni->CallStaticBooleanMethod(
+ j_decoder_class,
+ GetStaticMethodID(jni, j_decoder_class, "isVp9HwSupported", "()Z"));
+ if (CheckException(jni)) {
+ is_vp9_hw_supported = false;
+ }
+ if (is_vp9_hw_supported) {
+ ALOGD << "VP9 HW Decoder supported.";
+ supported_codec_types_.push_back(kVideoCodecVP9);
+ }
+
bool is_h264_hw_supported = jni->CallStaticBooleanMethod(
j_decoder_class,
GetStaticMethodID(jni, j_decoder_class, "isH264HwSupported", "()Z"));
@@ -825,7 +872,7 @@ void MediaCodecVideoDecoderFactory::SetEGLContext(
render_egl_context_ = NULL;
} else {
jclass j_egl_context_class =
- FindClass(jni, "javax/microedition/khronos/egl/EGLContext");
+ FindClass(jni, "org/webrtc/EglBase$Context");
if (!jni->IsInstanceOf(render_egl_context_, j_egl_context_class)) {
ALOGE << "Wrong EGL Context.";
jni->DeleteGlobalRef(render_egl_context_);
@@ -841,7 +888,7 @@ void MediaCodecVideoDecoderFactory::SetEGLContext(
webrtc::VideoDecoder* MediaCodecVideoDecoderFactory::CreateVideoDecoder(
VideoCodecType type) {
if (supported_codec_types_.empty()) {
- ALOGE << "No HW video decoder for type " << (int)type;
+ ALOGW << "No HW video decoder for type " << (int)type;
return NULL;
}
for (VideoCodecType codec_type : supported_codec_types_) {
@@ -851,7 +898,7 @@ webrtc::VideoDecoder* MediaCodecVideoDecoderFactory::CreateVideoDecoder(
AttachCurrentThreadIfNeeded(), type, render_egl_context_);
}
}
- ALOGE << "Can not find HW video decoder for type " << (int)type;
+ ALOGW << "Can not find HW video decoder for type " << (int)type;
return NULL;
}
@@ -861,5 +908,9 @@ void MediaCodecVideoDecoderFactory::DestroyVideoDecoder(
delete decoder;
}
+const char* MediaCodecVideoDecoder::ImplementationName() const {
+ return "MediaCodec";
+}
+
} // namespace webrtc_jni
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
index ac349e7faf..64831c3174 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
@@ -29,14 +29,16 @@
#include "talk/app/webrtc/java/jni/androidmediaencoder_jni.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/androidmediacodeccommon.h"
+#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
#include "webrtc/system_wrappers/include/field_trial.h"
#include "webrtc/system_wrappers/include/logcat_trace_context.h"
#include "third_party/libyuv/include/libyuv/convert.h"
@@ -56,6 +58,7 @@ using webrtc::VideoCodec;
using webrtc::VideoCodecType;
using webrtc::kVideoCodecH264;
using webrtc::kVideoCodecVP8;
+using webrtc::kVideoCodecVP9;
namespace webrtc_jni {
@@ -79,7 +82,9 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
public rtc::MessageHandler {
public:
virtual ~MediaCodecVideoEncoder();
- explicit MediaCodecVideoEncoder(JNIEnv* jni, VideoCodecType codecType);
+ MediaCodecVideoEncoder(JNIEnv* jni,
+ VideoCodecType codecType,
+ jobject egl_context);
// webrtc::VideoEncoder implementation. Everything trampolines to
// |codec_thread_| for execution.
@@ -103,13 +108,18 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
int GetTargetFramerate() override;
+ bool SupportsNativeHandle() const override { return true; }
+ const char* ImplementationName() const override;
+
private:
// CHECK-fail if not running on |codec_thread_|.
void CheckOnCodecThread();
- // Release() and InitEncode() in an attempt to restore the codec to an
+ private:
+ // ResetCodecOnCodecThread() calls ReleaseOnCodecThread() and
+ // InitEncodeOnCodecThread() in an attempt to restore the codec to an
// operable state. Necessary after all manner of OMX-layer errors.
- void ResetCodec();
+ bool ResetCodecOnCodecThread();
// Implementation of webrtc::VideoEncoder methods above, all running on the
// codec thread exclusively.
@@ -117,10 +127,20 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// If width==0 then this is assumed to be a re-initialization and the
// previously-current values are reused instead of the passed parameters
// (makes it easier to reason about thread-safety).
- int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps);
+ int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps,
+ bool use_surface);
+ // Reconfigure to match |frame| in width, height. Also reconfigures the
+ // encoder if |frame| is a texture/byte buffer and the encoder is initialized
+ // for byte buffer/texture. Returns false if reconfiguring fails.
+ bool MaybeReconfigureEncoderOnCodecThread(const webrtc::VideoFrame& frame);
int32_t EncodeOnCodecThread(
const webrtc::VideoFrame& input_image,
const std::vector<webrtc::FrameType>* frame_types);
+ bool EncodeByteBufferOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame, int input_buffer_index);
+ bool EncodeTextureOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame);
+
int32_t RegisterEncodeCompleteCallbackOnCodecThread(
webrtc::EncodedImageCallback* callback);
int32_t ReleaseOnCodecThread();
@@ -150,11 +170,14 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// State that is constant for the lifetime of this object once the ctor
// returns.
scoped_ptr<Thread> codec_thread_; // Thread on which to operate MediaCodec.
+ rtc::ThreadChecker codec_thread_checker_;
ScopedGlobalRef<jclass> j_media_codec_video_encoder_class_;
ScopedGlobalRef<jobject> j_media_codec_video_encoder_;
jmethodID j_init_encode_method_;
+ jmethodID j_get_input_buffers_method_;
jmethodID j_dequeue_input_buffer_method_;
- jmethodID j_encode_method_;
+ jmethodID j_encode_buffer_method_;
+ jmethodID j_encode_texture_method_;
jmethodID j_release_method_;
jmethodID j_set_rates_method_;
jmethodID j_dequeue_output_buffer_method_;
@@ -170,6 +193,7 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
int width_; // Frame width in pixels.
int height_; // Frame height in pixels.
bool inited_;
+ bool use_surface_;
uint16_t picture_id_;
enum libyuv::FourCC encoder_fourcc_; // Encoder color space format.
int last_set_bitrate_kbps_; // Last-requested bitrate in kbps.
@@ -205,6 +229,16 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// H264 bitstream parser, used to extract QP from encoded bitstreams.
webrtc::H264BitstreamParser h264_bitstream_parser_;
+
+ // VP9 variables to populate codec specific structure.
+ webrtc::GofInfoVP9 gof_; // Contains each frame's temporal information for
+ // non-flexible VP9 mode.
+ uint8_t tl0_pic_idx_;
+ size_t gof_idx_;
+
+ // EGL context - owned by factory, should not be allocated/destroyed
+ // by MediaCodecVideoEncoder.
+ jobject egl_context_;
};
MediaCodecVideoEncoder::~MediaCodecVideoEncoder() {
@@ -213,11 +247,9 @@ MediaCodecVideoEncoder::~MediaCodecVideoEncoder() {
}
MediaCodecVideoEncoder::MediaCodecVideoEncoder(
- JNIEnv* jni, VideoCodecType codecType) :
+ JNIEnv* jni, VideoCodecType codecType, jobject egl_context) :
codecType_(codecType),
callback_(NULL),
- inited_(false),
- picture_id_(0),
codec_thread_(new Thread()),
j_media_codec_video_encoder_class_(
jni,
@@ -228,7 +260,11 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
GetMethodID(jni,
*j_media_codec_video_encoder_class_,
"<init>",
- "()V"))) {
+ "()V"))),
+ inited_(false),
+ use_surface_(false),
+ picture_id_(0),
+ egl_context_(egl_context) {
ScopedLocalRefFrame local_ref_frame(jni);
// It would be nice to avoid spinning up a new thread per MediaCodec, and
// instead re-use e.g. the PeerConnectionFactory's |worker_thread_|, but bug
@@ -239,19 +275,27 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
// thread.
codec_thread_->SetName("MediaCodecVideoEncoder", NULL);
RTC_CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoEncoder";
-
+ codec_thread_checker_.DetachFromThread();
jclass j_output_buffer_info_class =
FindClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
j_init_encode_method_ = GetMethodID(
jni,
*j_media_codec_video_encoder_class_,
"initEncode",
- "(Lorg/webrtc/MediaCodecVideoEncoder$VideoCodecType;IIII)"
- "[Ljava/nio/ByteBuffer;");
+ "(Lorg/webrtc/MediaCodecVideoEncoder$VideoCodecType;"
+ "IIIILorg/webrtc/EglBase14$Context;)Z");
+ j_get_input_buffers_method_ = GetMethodID(
+ jni,
+ *j_media_codec_video_encoder_class_,
+ "getInputBuffers",
+ "()[Ljava/nio/ByteBuffer;");
j_dequeue_input_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_encoder_class_, "dequeueInputBuffer", "()I");
- j_encode_method_ = GetMethodID(
- jni, *j_media_codec_video_encoder_class_, "encode", "(ZIIJ)Z");
+ j_encode_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_encoder_class_, "encodeBuffer", "(ZIIJ)Z");
+ j_encode_texture_method_ = GetMethodID(
+ jni, *j_media_codec_video_encoder_class_, "encodeTexture",
+ "(ZI[FJ)Z");
j_release_method_ =
GetMethodID(jni, *j_media_codec_video_encoder_class_, "release", "()V");
j_set_rates_method_ = GetMethodID(
@@ -275,6 +319,7 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
j_info_presentation_timestamp_us_field_ = GetFieldID(
jni, j_output_buffer_info_class, "presentationTimestampUs", "J");
CHECK_EXCEPTION(jni) << "MediaCodecVideoEncoder ctor failed";
+ srand(time(NULL));
AllowBlockingCalls();
}
@@ -295,8 +340,8 @@ int32_t MediaCodecVideoEncoder::InitEncode(
<< codecType_;
ALOGD << "InitEncode request";
- scale_ = webrtc::field_trial::FindFullName(
- "WebRTC-MediaCodecVideoEncoder-AutomaticResize") == "Enabled";
+ scale_ = (codecType_ != kVideoCodecVP9) && (webrtc::field_trial::FindFullName(
+ "WebRTC-MediaCodecVideoEncoder-AutomaticResize") == "Enabled");
ALOGD << "Encoder automatic resize " << (scale_ ? "enabled" : "disabled");
if (scale_) {
if (codecType_ == kVideoCodecVP8) {
@@ -331,7 +376,8 @@ int32_t MediaCodecVideoEncoder::InitEncode(
codec_settings->width,
codec_settings->height,
codec_settings->startBitrate,
- codec_settings->maxFramerate));
+ codec_settings->maxFramerate,
+ false /* use_surface */));
}
int32_t MediaCodecVideoEncoder::Encode(
@@ -374,6 +420,7 @@ int32_t MediaCodecVideoEncoder::SetRates(uint32_t new_bit_rate,
}
void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
@@ -381,7 +428,6 @@ void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
// functor), so expect no ID/data.
RTC_CHECK(!msg->message_id) << "Unexpected message!";
RTC_CHECK(!msg->pdata) << "Unexpected message!";
- CheckOnCodecThread();
if (!inited_) {
return;
}
@@ -393,26 +439,24 @@ void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
}
-void MediaCodecVideoEncoder::CheckOnCodecThread() {
- RTC_CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
- << "Running on wrong thread!";
-}
-
-void MediaCodecVideoEncoder::ResetCodec() {
- ALOGE << "ResetCodec";
- if (Release() != WEBRTC_VIDEO_CODEC_OK ||
- codec_thread_->Invoke<int32_t>(Bind(
- &MediaCodecVideoEncoder::InitEncodeOnCodecThread, this,
- width_, height_, 0, 0)) != WEBRTC_VIDEO_CODEC_OK) {
+bool MediaCodecVideoEncoder::ResetCodecOnCodecThread() {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ ALOGE << "ResetOnCodecThread";
+ if (ReleaseOnCodecThread() != WEBRTC_VIDEO_CODEC_OK ||
+ InitEncodeOnCodecThread(width_, height_, 0, 0, false) !=
+ WEBRTC_VIDEO_CODEC_OK) {
// TODO(fischman): wouldn't it be nice if there was a way to gracefully
// degrade to a SW encoder at this point? There isn't one AFAICT :(
// https://code.google.com/p/webrtc/issues/detail?id=2920
+ return false;
}
+ return true;
}
int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
- int width, int height, int kbps, int fps) {
- CheckOnCodecThread();
+ int width, int height, int kbps, int fps, bool use_surface) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(!use_surface || egl_context_ != nullptr) << "EGL context not set.";
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
@@ -448,52 +492,63 @@ int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
render_times_ms_.clear();
frame_rtc_times_ms_.clear();
drop_next_input_frame_ = false;
+ use_surface_ = use_surface;
picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
+ gof_.SetGofInfoVP9(webrtc::TemporalStructureMode::kTemporalStructureMode1);
+ tl0_pic_idx_ = static_cast<uint8_t>(rand());
+ gof_idx_ = 0;
+
// We enforce no extra stride/padding in the format creation step.
jobject j_video_codec_enum = JavaEnumFromIndex(
jni, "MediaCodecVideoEncoder$VideoCodecType", codecType_);
- jobjectArray input_buffers = reinterpret_cast<jobjectArray>(
- jni->CallObjectMethod(*j_media_codec_video_encoder_,
- j_init_encode_method_,
- j_video_codec_enum,
- width_,
- height_,
- kbps,
- fps));
- CHECK_EXCEPTION(jni);
- if (IsNull(jni, input_buffers)) {
+ const bool encode_status = jni->CallBooleanMethod(
+ *j_media_codec_video_encoder_, j_init_encode_method_,
+ j_video_codec_enum, width, height, kbps, fps,
+ (use_surface ? egl_context_ : nullptr));
+ if (!encode_status) {
+ ALOGE << "Failed to configure encoder.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
+ CHECK_EXCEPTION(jni);
- inited_ = true;
- switch (GetIntField(jni, *j_media_codec_video_encoder_,
- j_color_format_field_)) {
- case COLOR_FormatYUV420Planar:
- encoder_fourcc_ = libyuv::FOURCC_YU12;
- break;
- case COLOR_FormatYUV420SemiPlanar:
- case COLOR_QCOM_FormatYUV420SemiPlanar:
- case COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m:
- encoder_fourcc_ = libyuv::FOURCC_NV12;
- break;
- default:
- LOG(LS_ERROR) << "Wrong color format.";
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- size_t num_input_buffers = jni->GetArrayLength(input_buffers);
- RTC_CHECK(input_buffers_.empty())
- << "Unexpected double InitEncode without Release";
- input_buffers_.resize(num_input_buffers);
- for (size_t i = 0; i < num_input_buffers; ++i) {
- input_buffers_[i] =
- jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
- int64_t yuv_buffer_capacity =
- jni->GetDirectBufferCapacity(input_buffers_[i]);
+ if (!use_surface) {
+ jobjectArray input_buffers = reinterpret_cast<jobjectArray>(
+ jni->CallObjectMethod(*j_media_codec_video_encoder_,
+ j_get_input_buffers_method_));
CHECK_EXCEPTION(jni);
- RTC_CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
+ if (IsNull(jni, input_buffers)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ switch (GetIntField(jni, *j_media_codec_video_encoder_,
+ j_color_format_field_)) {
+ case COLOR_FormatYUV420Planar:
+ encoder_fourcc_ = libyuv::FOURCC_YU12;
+ break;
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_QCOM_FormatYUV420SemiPlanar:
+ case COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m:
+ encoder_fourcc_ = libyuv::FOURCC_NV12;
+ break;
+ default:
+ LOG(LS_ERROR) << "Wrong color format.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ size_t num_input_buffers = jni->GetArrayLength(input_buffers);
+ RTC_CHECK(input_buffers_.empty())
+ << "Unexpected double InitEncode without Release";
+ input_buffers_.resize(num_input_buffers);
+ for (size_t i = 0; i < num_input_buffers; ++i) {
+ input_buffers_[i] =
+ jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
+ int64_t yuv_buffer_capacity =
+ jni->GetDirectBufferCapacity(input_buffers_[i]);
+ CHECK_EXCEPTION(jni);
+ RTC_CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
+ }
}
- CHECK_EXCEPTION(jni);
+ inited_ = true;
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
return WEBRTC_VIDEO_CODEC_OK;
}
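The color-format switch above only has to distinguish planar I420 input (FOURCC_YU12) from semi-planar NV12 input (FOURCC_NV12); everything else about the buffer is identical. A minimal sketch of how the two layouts differ, assuming tightly packed buffers (stride == width, as enforced by the "no extra stride/padding" format creation step):

// Plane offsets for the two encoder input layouts selected above.
#include <cstddef>

struct Planes {
  size_t y_offset, second_offset, third_offset, total_size;
};

// I420 (FOURCC_YU12): Y plane, then U plane, then V plane.
Planes I420Layout(int width, int height) {
  size_t y = (size_t)width * height;
  size_t uv = y / 4;  // each chroma plane is quarter size
  return {0, y, y + uv, y + 2 * uv};
}

// NV12 (FOURCC_NV12): Y plane, then a single interleaved UV plane.
Planes NV12Layout(int width, int height) {
  size_t y = (size_t)width * height;
  return {0, y, y /* no separate third plane */, y + y / 2};
}

Both layouts total 1.5 bytes per pixel; only the chroma arrangement differs, which is why a single |yuv_size_| check covers both.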
@@ -501,40 +556,53 @@ int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
const webrtc::VideoFrame& frame,
const std::vector<webrtc::FrameType>* frame_types) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
+
frames_received_++;
if (!DeliverPendingOutputs(jni)) {
- ResetCodec();
- // Continue as if everything's fine.
+ if (!ResetCodecOnCodecThread())
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
if (drop_next_input_frame_) {
- ALOGV("Encoder drop frame - failed callback.");
+ ALOGW << "Encoder drop frame - failed callback.";
drop_next_input_frame_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
RTC_CHECK(frame_types->size() == 1) << "Unexpected stream count";
- // Check framerate before spatial resolution change.
- if (scale_)
- quality_scaler_.OnEncodeFrame(frame);
- const VideoFrame& input_frame =
- scale_ ? quality_scaler_.GetScaledFrame(frame) : frame;
+ VideoFrame input_frame = frame;
+ if (scale_) {
+ // Check framerate before spatial resolution change.
+ quality_scaler_.OnEncodeFrame(frame);
+ const webrtc::QualityScaler::Resolution scaled_resolution =
+ quality_scaler_.GetScaledResolution();
+ if (scaled_resolution.width != frame.width() ||
+ scaled_resolution.height != frame.height()) {
+ if (frame.native_handle() != nullptr) {
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> scaled_buffer(
+ static_cast<AndroidTextureBuffer*>(
+ frame.video_frame_buffer().get())->ScaleAndRotate(
+ scaled_resolution.width,
+ scaled_resolution.height,
+ webrtc::kVideoRotation_0));
+ input_frame.set_video_frame_buffer(scaled_buffer);
+ } else {
+ input_frame = quality_scaler_.GetScaledFrame(frame);
+ }
+ }
+ }
- if (input_frame.width() != width_ || input_frame.height() != height_) {
- ALOGD << "Frame resolution change from " << width_ << " x " << height_ <<
- " to " << input_frame.width() << " x " << input_frame.height();
- width_ = input_frame.width();
- height_ = input_frame.height();
- ResetCodec();
- return WEBRTC_VIDEO_CODEC_OK;
+ if (!MaybeReconfigureEncoderOnCodecThread(input_frame)) {
+ ALOGE << "Failed to reconfigure encoder.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
// Check if we accumulated too many frames in encoder input buffers
@@ -552,65 +620,138 @@ int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
}
}
- int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_encoder_,
- j_dequeue_input_buffer_method_);
- CHECK_EXCEPTION(jni);
- if (j_input_buffer_index == -1) {
- // Video codec falls behind - no input buffer available.
- ALOGV("Encoder drop frame - no input buffers available");
- frames_dropped_++;
- // Report dropped frame to quality_scaler_.
- OnDroppedFrame();
- return WEBRTC_VIDEO_CODEC_OK; // TODO(fischman): see webrtc bug 2887.
- }
- if (j_input_buffer_index == -2) {
- ResetCodec();
+ const bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
+ bool encode_status = true;
+ if (!input_frame.native_handle()) {
+ int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_encoder_,
+ j_dequeue_input_buffer_method_);
+ CHECK_EXCEPTION(jni);
+ if (j_input_buffer_index == -1) {
+ // Video codec falls behind - no input buffer available.
+ ALOGW << "Encoder drop frame - no input buffers available";
+ frames_dropped_++;
+ // Report dropped frame to quality_scaler_.
+ OnDroppedFrame();
+ return WEBRTC_VIDEO_CODEC_OK; // TODO(fischman): see webrtc bug 2887.
+ }
+ if (j_input_buffer_index == -2) {
+ ResetCodecOnCodecThread();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ encode_status = EncodeByteBufferOnCodecThread(jni, key_frame, input_frame,
+ j_input_buffer_index);
+ } else {
+ encode_status = EncodeTextureOnCodecThread(jni, key_frame, input_frame);
+ }
+
+ if (!encode_status) {
+ ALOGE << "Failed encode frame with timestamp: " << input_frame.timestamp();
+ ResetCodecOnCodecThread();
return WEBRTC_VIDEO_CODEC_ERROR;
}
+ last_input_timestamp_ms_ =
+ current_timestamp_us_ / rtc::kNumMicrosecsPerMillisec;
+ frames_in_queue_++;
+
+ // Save input image timestamps for later output
+ timestamps_.push_back(input_frame.timestamp());
+ render_times_ms_.push_back(input_frame.render_time_ms());
+ frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
+ current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
+
+ if (!DeliverPendingOutputs(jni)) {
+ ALOGE << "Failed deliver pending outputs.";
+ ResetCodecOnCodecThread();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
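The bookkeeping above advances a synthetic presentation clock by a fixed per-frame step of kNumMicrosecsPerSec / last_set_fps_. A small sketch of that arithmetic, assuming a hypothetical 30 fps target and mirroring the rtc time constants:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kNumMicrosecsPerSec = 1000000;
  const int64_t kNumMicrosecsPerMillisec = 1000;
  int last_set_fps = 30;            // hypothetical configured framerate
  int64_t current_timestamp_us = 0;

  for (int frame = 0; frame < 3; ++frame) {
    int64_t last_input_timestamp_ms =
        current_timestamp_us / kNumMicrosecsPerMillisec;
    printf("frame %d: ts = %lld ms\n",
           frame, (long long)last_input_timestamp_ms);
    current_timestamp_us += kNumMicrosecsPerSec / last_set_fps;  // +33333 us
  }
  return 0;  // prints 0 ms, 33 ms, 66 ms
}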
+
+bool MediaCodecVideoEncoder::MaybeReconfigureEncoderOnCodecThread(
+ const webrtc::VideoFrame& frame) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+
+ const bool is_texture_frame = frame.native_handle() != nullptr;
+ const bool reconfigure_due_to_format = is_texture_frame != use_surface_;
+ const bool reconfigure_due_to_size =
+ frame.width() != width_ || frame.height() != height_;
+
+ if (reconfigure_due_to_format) {
+ ALOGD << "Reconfigure encoder due to format change. "
+ << (use_surface_ ?
+ "Reconfiguring to encode from byte buffer." :
+ "Reconfiguring to encode from texture.");
+ }
+ if (reconfigure_due_to_size) {
+ ALOGD << "Reconfigure encoder due to frame resolution change from "
+ << width_ << " x " << height_ << " to " << frame.width() << " x "
+ << frame.height();
+ width_ = frame.width();
+ height_ = frame.height();
+ }
+
+ if (!reconfigure_due_to_format && !reconfigure_due_to_size)
+ return true;
+
+ ReleaseOnCodecThread();
+
+  return InitEncodeOnCodecThread(width_, height_, 0, 0, is_texture_frame) ==
+ WEBRTC_VIDEO_CODEC_OK;
+}
+
+bool MediaCodecVideoEncoder::EncodeByteBufferOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame, int input_buffer_index) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(!use_surface_);
+
ALOGV("Encoder frame in # %d. TS: %lld. Q: %d",
frames_received_ - 1, current_timestamp_us_ / 1000, frames_in_queue_);
- jobject j_input_buffer = input_buffers_[j_input_buffer_index];
+ jobject j_input_buffer = input_buffers_[input_buffer_index];
uint8_t* yuv_buffer =
reinterpret_cast<uint8_t*>(jni->GetDirectBufferAddress(j_input_buffer));
CHECK_EXCEPTION(jni);
RTC_CHECK(yuv_buffer) << "Indirect buffer??";
RTC_CHECK(!libyuv::ConvertFromI420(
- input_frame.buffer(webrtc::kYPlane), input_frame.stride(webrtc::kYPlane),
- input_frame.buffer(webrtc::kUPlane), input_frame.stride(webrtc::kUPlane),
- input_frame.buffer(webrtc::kVPlane), input_frame.stride(webrtc::kVPlane),
+ frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+ frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
yuv_buffer, width_, width_, height_, encoder_fourcc_))
<< "ConvertFromI420 failed";
- last_input_timestamp_ms_ = current_timestamp_us_ / 1000;
- frames_in_queue_++;
- // Save input image timestamps for later output
- timestamps_.push_back(input_frame.timestamp());
- render_times_ms_.push_back(input_frame.render_time_ms());
- frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
-
- bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
- j_encode_method_,
+ j_encode_buffer_method_,
key_frame,
- j_input_buffer_index,
+ input_buffer_index,
yuv_size_,
current_timestamp_us_);
CHECK_EXCEPTION(jni);
- current_timestamp_us_ += 1000000 / last_set_fps_;
+ return encode_status;
+}
- if (!encode_status || !DeliverPendingOutputs(jni)) {
- ResetCodec();
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
+bool MediaCodecVideoEncoder::EncodeTextureOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(use_surface_);
+ NativeHandleImpl* handle =
+ static_cast<NativeHandleImpl*>(frame.native_handle());
+ jfloatArray sampling_matrix = jni->NewFloatArray(16);
+ jni->SetFloatArrayRegion(sampling_matrix, 0, 16, handle->sampling_matrix);
- return WEBRTC_VIDEO_CODEC_OK;
+ bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
+ j_encode_texture_method_,
+ key_frame,
+ handle->oes_texture_id,
+ sampling_matrix,
+ current_timestamp_us_);
+ CHECK_EXCEPTION(jni);
+ return encode_status;
}
int32_t MediaCodecVideoEncoder::RegisterEncodeCompleteCallbackOnCodecThread(
webrtc::EncodedImageCallback* callback) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
callback_ = callback;
@@ -618,10 +759,10 @@ int32_t MediaCodecVideoEncoder::RegisterEncodeCompleteCallbackOnCodecThread(
}
int32_t MediaCodecVideoEncoder::ReleaseOnCodecThread() {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
if (!inited_) {
return WEBRTC_VIDEO_CODEC_OK;
}
- CheckOnCodecThread();
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ALOGD << "EncoderReleaseOnCodecThread: Frames received: " <<
frames_received_ << ". Encoded: " << frames_encoded_ <<
@@ -634,13 +775,14 @@ int32_t MediaCodecVideoEncoder::ReleaseOnCodecThread() {
CHECK_EXCEPTION(jni);
rtc::MessageQueueManager::Clear(this);
inited_ = false;
+ use_surface_ = false;
ALOGD << "EncoderReleaseOnCodecThread done.";
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t MediaCodecVideoEncoder::SetRatesOnCodecThread(uint32_t new_bit_rate,
uint32_t frame_rate) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
if (last_set_bitrate_kbps_ == new_bit_rate &&
last_set_fps_ == frame_rate) {
return WEBRTC_VIDEO_CODEC_OK;
@@ -659,7 +801,7 @@ int32_t MediaCodecVideoEncoder::SetRatesOnCodecThread(uint32_t new_bit_rate,
last_set_fps_);
CHECK_EXCEPTION(jni);
if (!ret) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
@@ -691,6 +833,7 @@ jlong MediaCodecVideoEncoder::GetOutputBufferInfoPresentationTimestampUs(
}
bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
while (true) {
jobject j_output_buffer_info = jni->CallObjectMethod(
*j_media_codec_video_encoder_, j_dequeue_output_buffer_method_);
@@ -702,7 +845,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
int output_buffer_index =
GetOutputBufferInfoIndex(jni, j_output_buffer_info);
if (output_buffer_index == -1) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
@@ -786,19 +929,42 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
info.codecSpecific.VP8.layerSync = false;
info.codecSpecific.VP8.tl0PicIdx = webrtc::kNoTl0PicIdx;
info.codecSpecific.VP8.keyIdx = webrtc::kNoKeyIdx;
- picture_id_ = (picture_id_ + 1) & 0x7FFF;
+ } else if (codecType_ == kVideoCodecVP9) {
+ if (key_frame) {
+ gof_idx_ = 0;
+ }
+ info.codecSpecific.VP9.picture_id = picture_id_;
+      info.codecSpecific.VP9.inter_pic_predicted = !key_frame;
+      info.codecSpecific.VP9.flexible_mode = false;
+      info.codecSpecific.VP9.ss_data_available = key_frame;
+ info.codecSpecific.VP9.tl0_pic_idx = tl0_pic_idx_++;
+ info.codecSpecific.VP9.temporal_idx = webrtc::kNoTemporalIdx;
+ info.codecSpecific.VP9.spatial_idx = webrtc::kNoSpatialIdx;
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.gof_idx =
+ static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ info.codecSpecific.VP9.num_spatial_layers = 1;
+ info.codecSpecific.VP9.spatial_layer_resolution_present = false;
+ if (info.codecSpecific.VP9.ss_data_available) {
+ info.codecSpecific.VP9.spatial_layer_resolution_present = true;
+ info.codecSpecific.VP9.width[0] = width_;
+ info.codecSpecific.VP9.height[0] = height_;
+ info.codecSpecific.VP9.gof.CopyGofInfoVP9(gof_);
+ }
}
+ picture_id_ = (picture_id_ + 1) & 0x7FFF;
// Generate a header describing a single fragment.
webrtc::RTPFragmentationHeader header;
memset(&header, 0, sizeof(header));
- if (codecType_ == kVideoCodecVP8) {
+ if (codecType_ == kVideoCodecVP8 || codecType_ == kVideoCodecVP9) {
header.VerifyAndAllocateFragmentationHeader(1);
header.fragmentationOffset[0] = 0;
header.fragmentationLength[0] = image->_length;
header.fragmentationPlType[0] = 0;
header.fragmentationTimeDiff[0] = 0;
- if (scale_) {
+ if (codecType_ == kVideoCodecVP8 && scale_) {
int qp;
if (webrtc::vp8::GetQp(payload, payload_size, &qp))
quality_scaler_.ReportQP(qp);
@@ -829,7 +995,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
ALOGE << "Data:" << image->_buffer[0] << " " << image->_buffer[1]
<< " " << image->_buffer[2] << " " << image->_buffer[3]
<< " " << image->_buffer[4] << " " << image->_buffer[5];
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
scPositions[scPositionsLength] = payload_size;
@@ -852,7 +1018,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
output_buffer_index);
CHECK_EXCEPTION(jni);
if (!success) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
@@ -907,7 +1073,12 @@ int MediaCodecVideoEncoder::GetTargetFramerate() {
return scale_ ? quality_scaler_.GetTargetFramerate() : -1;
}
-MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
+const char* MediaCodecVideoEncoder::ImplementationName() const {
+ return "MediaCodec";
+}
+
+MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory()
+ : egl_context_(nullptr) {
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
jclass j_encoder_class = FindClass(jni, "org/webrtc/MediaCodecVideoEncoder");
@@ -923,6 +1094,16 @@ MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT, MAX_VIDEO_FPS));
}
+ bool is_vp9_hw_supported = jni->CallStaticBooleanMethod(
+ j_encoder_class,
+ GetStaticMethodID(jni, j_encoder_class, "isVp9HwSupported", "()Z"));
+ CHECK_EXCEPTION(jni);
+ if (is_vp9_hw_supported) {
+ ALOGD << "VP9 HW Encoder supported.";
+ supported_codecs_.push_back(VideoCodec(kVideoCodecVP9, "VP9",
+ MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT, MAX_VIDEO_FPS));
+ }
+
bool is_h264_hw_supported = jni->CallStaticBooleanMethod(
j_encoder_class,
GetStaticMethodID(jni, j_encoder_class, "isH264HwSupported", "()Z"));
@@ -936,9 +1117,37 @@ MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
MediaCodecVideoEncoderFactory::~MediaCodecVideoEncoderFactory() {}
+void MediaCodecVideoEncoderFactory::SetEGLContext(
+ JNIEnv* jni, jobject render_egl_context) {
+ ALOGD << "MediaCodecVideoEncoderFactory::SetEGLContext";
+ if (egl_context_) {
+ jni->DeleteGlobalRef(egl_context_);
+ egl_context_ = NULL;
+ }
+ if (!IsNull(jni, render_egl_context)) {
+ egl_context_ = jni->NewGlobalRef(render_egl_context);
+ if (CheckException(jni)) {
+ ALOGE << "error calling NewGlobalRef for EGL Context.";
+ egl_context_ = NULL;
+ } else {
+ jclass j_egl_context_class =
+ FindClass(jni, "org/webrtc/EglBase14$Context");
+ if (!jni->IsInstanceOf(egl_context_, j_egl_context_class)) {
+ ALOGE << "Wrong EGL Context.";
+ jni->DeleteGlobalRef(egl_context_);
+ egl_context_ = NULL;
+ }
+ }
+ }
+ if (egl_context_ == NULL) {
+ ALOGW << "NULL VideoDecoder EGL context - HW surface encoding is disabled.";
+ }
+}
+
webrtc::VideoEncoder* MediaCodecVideoEncoderFactory::CreateVideoEncoder(
VideoCodecType type) {
if (supported_codecs_.empty()) {
+ ALOGW << "No HW video encoder for type " << (int)type;
return NULL;
}
for (std::vector<VideoCodec>::const_iterator it = supported_codecs_.begin();
@@ -946,9 +1155,11 @@ webrtc::VideoEncoder* MediaCodecVideoEncoderFactory::CreateVideoEncoder(
if (it->type == type) {
ALOGD << "Create HW video encoder for type " << (int)type <<
" (" << it->name << ").";
- return new MediaCodecVideoEncoder(AttachCurrentThreadIfNeeded(), type);
+ return new MediaCodecVideoEncoder(AttachCurrentThreadIfNeeded(), type,
+ egl_context_);
}
}
+ ALOGW << "Can not find HW video encoder for type " << (int)type;
return NULL;
}
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.h b/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
index ff124aa146..8ff8164c3b 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
@@ -43,6 +43,8 @@ class MediaCodecVideoEncoderFactory
MediaCodecVideoEncoderFactory();
virtual ~MediaCodecVideoEncoderFactory();
+ void SetEGLContext(JNIEnv* jni, jobject render_egl_context);
+
// WebRtcVideoEncoderFactory implementation.
webrtc::VideoEncoder* CreateVideoEncoder(webrtc::VideoCodecType type)
override;
@@ -50,6 +52,7 @@ class MediaCodecVideoEncoderFactory
void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) override;
private:
+ jobject egl_context_;
// Empty if platform support is lacking, const after ctor returns.
std::vector<VideoCodec> supported_codecs_;
};
diff --git a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
index 02b9f22015..8813c89de4 100644
--- a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
+++ b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
@@ -29,8 +29,9 @@
#include "talk/app/webrtc/java/jni/androidvideocapturer_jni.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/surfacetexturehelper_jni.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
#include "webrtc/base/bind.h"
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
namespace webrtc_jni {
@@ -47,15 +48,19 @@ int AndroidVideoCapturerJni::SetAndroidObjects(JNIEnv* jni,
return 0;
}
-AndroidVideoCapturerJni::AndroidVideoCapturerJni(JNIEnv* jni,
- jobject j_video_capturer)
- : j_capturer_global_(jni, j_video_capturer),
+AndroidVideoCapturerJni::AndroidVideoCapturerJni(
+ JNIEnv* jni,
+ jobject j_video_capturer,
+ jobject j_surface_texture_helper)
+ : j_video_capturer_(jni, j_video_capturer),
j_video_capturer_class_(
jni, FindClass(jni, "org/webrtc/VideoCapturerAndroid")),
j_observer_class_(
jni,
FindClass(jni,
"org/webrtc/VideoCapturerAndroid$NativeObserver")),
+ surface_texture_helper_(new rtc::RefCountedObject<SurfaceTextureHelper>(
+ jni, j_surface_texture_helper)),
capturer_(nullptr) {
LOG(LS_INFO) << "AndroidVideoCapturerJni ctor";
thread_checker_.DetachFromThread();
@@ -64,7 +69,7 @@ AndroidVideoCapturerJni::AndroidVideoCapturerJni(JNIEnv* jni,
AndroidVideoCapturerJni::~AndroidVideoCapturerJni() {
LOG(LS_INFO) << "AndroidVideoCapturerJni dtor";
jni()->CallVoidMethod(
- *j_capturer_global_,
+ *j_video_capturer_,
GetMethodID(jni(), *j_video_capturer_class_, "release", "()V"));
CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.release()";
}
@@ -90,7 +95,7 @@ void AndroidVideoCapturerJni::Start(int width, int height, int framerate,
jni(), *j_video_capturer_class_, "startCapture",
"(IIILandroid/content/Context;"
"Lorg/webrtc/VideoCapturerAndroid$CapturerObserver;)V");
- jni()->CallVoidMethod(*j_capturer_global_,
+ jni()->CallVoidMethod(*j_video_capturer_,
m, width, height,
framerate,
application_context_,
@@ -109,7 +114,7 @@ void AndroidVideoCapturerJni::Stop() {
}
jmethodID m = GetMethodID(jni(), *j_video_capturer_class_,
"stopCapture", "()V");
- jni()->CallVoidMethod(*j_capturer_global_, m);
+ jni()->CallVoidMethod(*j_video_capturer_, m);
CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.stopCapture";
LOG(LS_INFO) << "AndroidVideoCapturerJni stop done";
}
@@ -127,19 +132,12 @@ void AndroidVideoCapturerJni::AsyncCapturerInvoke(
invoker_->AsyncInvoke<void>(rtc::Bind(method, capturer_, args...));
}
-void AndroidVideoCapturerJni::ReturnBuffer(int64_t time_stamp) {
- jmethodID m = GetMethodID(jni(), *j_video_capturer_class_,
- "returnBuffer", "(J)V");
- jni()->CallVoidMethod(*j_capturer_global_, m, time_stamp);
- CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.returnBuffer";
-}
-
std::string AndroidVideoCapturerJni::GetSupportedFormats() {
jmethodID m =
GetMethodID(jni(), *j_video_capturer_class_,
"getSupportedFormatsAsJson", "()Ljava/lang/String;");
jstring j_json_caps =
- (jstring) jni()->CallObjectMethod(*j_capturer_global_, m);
+ (jstring) jni()->CallObjectMethod(*j_video_capturer_, m);
CHECK_EXCEPTION(jni()) << "error during supportedFormatsAsJson";
return JavaToStdString(jni(), j_json_caps);
}
@@ -158,46 +156,33 @@ void AndroidVideoCapturerJni::OnMemoryBufferFrame(void* video_frame,
int rotation,
int64_t timestamp_ns) {
const uint8_t* y_plane = static_cast<uint8_t*>(video_frame);
- // Android guarantees that the stride is a multiple of 16.
- // http://developer.android.com/reference/android/hardware/Camera.Parameters.html#setPreviewFormat%28int%29
- int y_stride;
- int uv_stride;
- webrtc::Calc16ByteAlignedStride(width, &y_stride, &uv_stride);
- const uint8_t* v_plane = y_plane + y_stride * height;
- const uint8_t* u_plane =
- v_plane + uv_stride * webrtc::AlignInt(height, 2) / 2;
-
- // Wrap the Java buffer, and call ReturnBuffer() in the wrapped
- // VideoFrameBuffer destructor.
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
- new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
- width, height, y_plane, y_stride, u_plane, uv_stride, v_plane,
- uv_stride,
- rtc::Bind(&AndroidVideoCapturerJni::ReturnBuffer, this,
- timestamp_ns)));
+ const uint8_t* vu_plane = y_plane + width * height;
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ buffer_pool_.CreateBuffer(width, height);
+ libyuv::NV21ToI420(
+ y_plane, width,
+ vu_plane, width,
+ buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
+ buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
+ buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
+ width, height);
AsyncCapturerInvoke("OnIncomingFrame",
&webrtc::AndroidVideoCapturer::OnIncomingFrame,
buffer, rotation, timestamp_ns);
}
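The pointer arithmetic above assumes a tightly packed NV21 frame: a full-resolution Y plane with stride equal to width, immediately followed by one interleaved V/U plane at half resolution. A minimal sketch of the same layout computation for a hypothetical 640x480 capture:

// NV21 layout assumed by OnMemoryBufferFrame above (stride == width).
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int width = 640, height = 480;           // hypothetical capture size
  const size_t y_size = (size_t)width * height;  // 307200 bytes
  const size_t vu_size = y_size / 2;             // 153600 bytes

  std::vector<uint8_t> frame(y_size + vu_size);  // 460800 bytes total
  const uint8_t* y_plane = frame.data();
  const uint8_t* vu_plane = y_plane + y_size;    // as in the code above

  printf("y at %p, vu at %p, length %zu\n",
         (const void*)y_plane, (const void*)vu_plane, frame.size());
  return 0;
}

libyuv::NV21ToI420 then deinterleaves the VU plane into the pooled I420 buffer's separate U and V planes.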
-void AndroidVideoCapturerJni::OnTextureFrame(
- int width,
- int height,
- int64_t timestamp_ns,
- const NativeTextureHandleImpl& handle) {
- // TODO(magjed): Fix this. See bug webrtc:4993.
- RTC_NOTREACHED()
- << "The rest of the stack for Android expects the native "
- "handle to be a NativeHandleImpl with a SurfaceTexture, not a "
- "NativeTextureHandleImpl";
+void AndroidVideoCapturerJni::OnTextureFrame(int width,
+ int height,
+ int rotation,
+ int64_t timestamp_ns,
+ const NativeHandleImpl& handle) {
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
- new rtc::RefCountedObject<AndroidTextureBuffer>(
- width, height, handle,
- rtc::Bind(&AndroidVideoCapturerJni::ReturnBuffer, this,
- timestamp_ns)));
+ surface_texture_helper_->CreateTextureFrame(width, height, handle));
+
AsyncCapturerInvoke("OnIncomingFrame",
&webrtc::AndroidVideoCapturer::OnIncomingFrame,
- buffer, 0, timestamp_ns);
+ buffer, rotation, timestamp_ns);
}
void AndroidVideoCapturerJni::OnOutputFormatRequest(int width,
@@ -216,13 +201,6 @@ JOW(void,
jint width, jint height, jint rotation, jlong timestamp) {
jboolean is_copy = true;
jbyte* bytes = jni->GetByteArrayElements(j_frame, &is_copy);
- // If this is a copy of the original frame, it means that the memory
- // is not direct memory and thus VideoCapturerAndroid does not guarantee
- // that the memory is valid when we have released |j_frame|.
- // TODO(magjed): Move ReleaseByteArrayElements() into ReturnBuffer() and
- // remove this check.
- RTC_CHECK(!is_copy)
- << "NativeObserver_nativeOnFrameCaptured: frame is a copy";
reinterpret_cast<AndroidVideoCapturerJni*>(j_capturer)
->OnMemoryBufferFrame(bytes, length, width, height, rotation, timestamp);
jni->ReleaseByteArrayElements(j_frame, bytes, JNI_ABORT);
@@ -231,11 +209,11 @@ JOW(void,
JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeOnTextureFrameCaptured)
(JNIEnv* jni, jclass, jlong j_capturer, jint j_width, jint j_height,
jint j_oes_texture_id, jfloatArray j_transform_matrix,
- jlong j_timestamp) {
+ jint j_rotation, jlong j_timestamp) {
reinterpret_cast<AndroidVideoCapturerJni*>(j_capturer)
- ->OnTextureFrame(j_width, j_height, j_timestamp,
- NativeTextureHandleImpl(jni, j_oes_texture_id,
- j_transform_matrix));
+ ->OnTextureFrame(j_width, j_height, j_rotation, j_timestamp,
+ NativeHandleImpl(jni, j_oes_texture_id,
+ j_transform_matrix));
}
JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeCapturerStarted)
@@ -254,9 +232,11 @@ JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeOnOutputFormatRequest)
}
JOW(jlong, VideoCapturerAndroid_nativeCreateVideoCapturer)
- (JNIEnv* jni, jclass, jobject j_video_capturer) {
+ (JNIEnv* jni, jclass,
+ jobject j_video_capturer, jobject j_surface_texture_helper) {
rtc::scoped_refptr<webrtc::AndroidVideoCapturerDelegate> delegate =
- new rtc::RefCountedObject<AndroidVideoCapturerJni>(jni, j_video_capturer);
+ new rtc::RefCountedObject<AndroidVideoCapturerJni>(
+ jni, j_video_capturer, j_surface_texture_helper);
rtc::scoped_ptr<cricket::VideoCapturer> capturer(
new webrtc::AndroidVideoCapturer(delegate));
// Caller takes ownership of the cricket::VideoCapturer* pointer.
diff --git a/talk/app/webrtc/java/jni/androidvideocapturer_jni.h b/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
index d1eb3a0ad0..89ecacb3a5 100644
--- a/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
+++ b/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
@@ -36,10 +36,12 @@
#include "webrtc/base/asyncinvoker.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_checker.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
namespace webrtc_jni {
-class NativeTextureHandleImpl;
+struct NativeHandleImpl;
+class SurfaceTextureHelper;
// AndroidVideoCapturerJni implements AndroidVideoCapturerDelegate.
// The purpose of the delegate is to hide the JNI specifics from the C++ only
@@ -48,7 +50,9 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
public:
  static int SetAndroidObjects(JNIEnv* jni, jobject application_context);
- AndroidVideoCapturerJni(JNIEnv* jni, jobject j_video_capturer);
+ AndroidVideoCapturerJni(JNIEnv* jni,
+ jobject j_video_capturer,
+ jobject j_surface_texture_helper);
void Start(int width, int height, int framerate,
webrtc::AndroidVideoCapturer* capturer) override;
@@ -60,15 +64,14 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
void OnCapturerStarted(bool success);
void OnMemoryBufferFrame(void* video_frame, int length, int width,
int height, int rotation, int64_t timestamp_ns);
- void OnTextureFrame(int width, int height, int64_t timestamp_ns,
- const NativeTextureHandleImpl& handle);
+ void OnTextureFrame(int width, int height, int rotation, int64_t timestamp_ns,
+ const NativeHandleImpl& handle);
void OnOutputFormatRequest(int width, int height, int fps);
protected:
~AndroidVideoCapturerJni();
private:
- void ReturnBuffer(int64_t time_stamp);
JNIEnv* jni();
// To avoid deducing Args from the 3rd parameter of AsyncCapturerInvoke.
@@ -85,10 +88,13 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
void (webrtc::AndroidVideoCapturer::*method)(Args...),
typename Identity<Args>::type... args);
- const ScopedGlobalRef<jobject> j_capturer_global_;
+ const ScopedGlobalRef<jobject> j_video_capturer_;
const ScopedGlobalRef<jclass> j_video_capturer_class_;
const ScopedGlobalRef<jclass> j_observer_class_;
+ // Used on the Java thread running the camera.
+ webrtc::I420BufferPool buffer_pool_;
+ rtc::scoped_refptr<SurfaceTextureHelper> surface_texture_helper_;
rtc::ThreadChecker thread_checker_;
// |capturer| is a guaranteed to be a valid pointer between a call to
diff --git a/talk/app/webrtc/java/jni/classreferenceholder.cc b/talk/app/webrtc/java/jni/classreferenceholder.cc
index 4c836f8252..5fe8ec707c 100644
--- a/talk/app/webrtc/java/jni/classreferenceholder.cc
+++ b/talk/app/webrtc/java/jni/classreferenceholder.cc
@@ -72,20 +72,21 @@ ClassReferenceHolder::ClassReferenceHolder(JNIEnv* jni) {
LoadClass(jni, "org/webrtc/IceCandidate");
#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
LoadClass(jni, "android/graphics/SurfaceTexture");
- LoadClass(jni, "javax/microedition/khronos/egl/EGLContext");
LoadClass(jni, "org/webrtc/CameraEnumerator");
LoadClass(jni, "org/webrtc/Camera2Enumerator");
LoadClass(jni, "org/webrtc/CameraEnumerationAndroid");
LoadClass(jni, "org/webrtc/VideoCapturerAndroid");
LoadClass(jni, "org/webrtc/VideoCapturerAndroid$NativeObserver");
LoadClass(jni, "org/webrtc/EglBase");
+ LoadClass(jni, "org/webrtc/EglBase$Context");
+ LoadClass(jni, "org/webrtc/EglBase14$Context");
LoadClass(jni, "org/webrtc/NetworkMonitor");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder$VideoCodecType");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer");
- LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedByteBuffer");
+ LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$VideoCodecType");
LoadClass(jni, "org/webrtc/SurfaceTextureHelper");
#endif
diff --git a/talk/app/webrtc/java/jni/jni_helpers.cc b/talk/app/webrtc/java/jni/jni_helpers.cc
index 755698e379..3a7ff21e77 100644
--- a/talk/app/webrtc/java/jni/jni_helpers.cc
+++ b/talk/app/webrtc/java/jni/jni_helpers.cc
@@ -1,4 +1,3 @@
-
/*
* libjingle
* Copyright 2015 Google Inc.
@@ -33,8 +32,6 @@
#include <sys/syscall.h>
#include <unistd.h>
-#include "unicode/unistr.h"
-
namespace webrtc_jni {
static JavaVM* g_jvm = nullptr;
@@ -46,8 +43,6 @@ static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
// were attached by the JVM because of a Java->native call.
static pthread_key_t g_jni_ptr;
-using icu::UnicodeString;
-
JavaVM *GetJVM() {
RTC_CHECK(g_jvm) << "JNI_OnLoad failed to run?";
return g_jvm;
@@ -232,22 +227,20 @@ bool IsNull(JNIEnv* jni, jobject obj) {
// Given a UTF-8 encoded |native| string return a new (UTF-16) jstring.
jstring JavaStringFromStdString(JNIEnv* jni, const std::string& native) {
- UnicodeString ustr(UnicodeString::fromUTF8(native));
- jstring jstr = jni->NewString(ustr.getBuffer(), ustr.length());
- CHECK_EXCEPTION(jni) << "error during NewString";
+ jstring jstr = jni->NewStringUTF(native.c_str());
+ CHECK_EXCEPTION(jni) << "error during NewStringUTF";
return jstr;
}
// Given a (UTF-16) jstring return a new UTF-8 native string.
std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
- const jchar* jchars = jni->GetStringChars(j_string, NULL);
- CHECK_EXCEPTION(jni) << "Error during GetStringChars";
- UnicodeString ustr(jchars, jni->GetStringLength(j_string));
- CHECK_EXCEPTION(jni) << "Error during GetStringLength";
- jni->ReleaseStringChars(j_string, jchars);
- CHECK_EXCEPTION(jni) << "Error during ReleaseStringChars";
- std::string ret;
- return ustr.toUTF8String(ret);
+ const char* chars = jni->GetStringUTFChars(j_string, NULL);
+ CHECK_EXCEPTION(jni) << "Error during GetStringUTFChars";
+ std::string str(chars, jni->GetStringUTFLength(j_string));
+ CHECK_EXCEPTION(jni) << "Error during GetStringUTFLength";
+ jni->ReleaseStringUTFChars(j_string, chars);
+ CHECK_EXCEPTION(jni) << "Error during ReleaseStringUTFChars";
+ return str;
}
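One caveat worth noting about this simplification: NewStringUTF/GetStringUTFChars operate on JNI's modified UTF-8, which matches standard UTF-8 for ASCII and the Basic Multilingual Plane but encodes embedded NULs and supplementary (non-BMP) characters differently. A minimal round-trip sketch under that assumption:

#include <jni.h>
#include <string>

// Round-trips a std::string through a Java string and back, mirroring the
// JavaStringFromStdString/JavaToStdString pair above. Safe for ASCII and
// BMP text; strings containing U+0000 or non-BMP code points would need a
// real UTF-8 <-> UTF-16 conversion instead of the modified-UTF-8 JNI calls.
std::string RoundTrip(JNIEnv* jni, const std::string& native) {
  jstring jstr = jni->NewStringUTF(native.c_str());
  const char* chars = jni->GetStringUTFChars(jstr, nullptr);
  std::string result(chars, jni->GetStringUTFLength(jstr));
  jni->ReleaseStringUTFChars(jstr, chars);
  jni->DeleteLocalRef(jstr);
  return result;
}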
// Return the (singleton) Java Enum object corresponding to |index|;
diff --git a/talk/media/base/fakemediaprocessor.h b/talk/app/webrtc/java/jni/jni_onload.cc
index 8de2678c95..9664ecdca6 100644
--- a/talk/media/base/fakemediaprocessor.h
+++ b/talk/app/webrtc/java/jni/jni_onload.cc
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2004 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,5 +25,31 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// TODO(solenberg): Remove this file once Chromium's libjingle.gyp/.gn are
-// updated.
+#include <jni.h>
+#undef JNIEXPORT
+#define JNIEXPORT __attribute__((visibility("default")))
+
+#include "talk/app/webrtc/java/jni/classreferenceholder.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/ssladapter.h"
+
+namespace webrtc_jni {
+
+extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+ jint ret = InitGlobalJniVariables(jvm);
+ RTC_DCHECK_GE(ret, 0);
+ if (ret < 0)
+ return -1;
+
+ RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
+ LoadGlobalClassReferenceHolder();
+
+ return ret;
+}
+
+extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM *jvm, void *reserved) {
+ FreeGlobalClassReferenceHolder();
+ RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
+}
+
+} // namespace webrtc_jni
diff --git a/talk/app/webrtc/java/jni/native_handle_impl.cc b/talk/app/webrtc/java/jni/native_handle_impl.cc
index ac3e0455df..1757184154 100644
--- a/talk/app/webrtc/java/jni/native_handle_impl.cc
+++ b/talk/app/webrtc/java/jni/native_handle_impl.cc
@@ -27,14 +27,65 @@
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/logging.h"
+
+using webrtc::NativeHandleBuffer;
+
+namespace {
+
+void RotateMatrix(float a[16], webrtc::VideoRotation rotation) {
+  // Texture coordinates are in the range 0 to 1. The transformation of the last
+  // row in each rotation matrix is needed for proper translation, e.g., to
+  // mirror x, we don't replace x by -x, but by 1-x.
+ switch (rotation) {
+ case webrtc::kVideoRotation_0:
+ break;
+ case webrtc::kVideoRotation_90: {
+ const float ROTATE_90[16] =
+ { a[4], a[5], a[6], a[7],
+ -a[0], -a[1], -a[2], -a[3],
+ a[8], a[9], a[10], a[11],
+ a[0] + a[12], a[1] + a[13], a[2] + a[14], a[3] + a[15]};
+ memcpy(a, ROTATE_90, sizeof(ROTATE_90));
+ } break;
+ case webrtc::kVideoRotation_180: {
+ const float ROTATE_180[16] =
+ { -a[0], -a[1], -a[2], -a[3],
+ -a[4], -a[5], -a[6], -a[7],
+ a[8], a[9], a[10], a[11],
+          a[0] + a[4] + a[12], a[1] + a[5] + a[13], a[2] + a[6] + a[14],
+          a[3] + a[7] + a[15]};
+ memcpy(a, ROTATE_180, sizeof(ROTATE_180));
+ }
+ break;
+ case webrtc::kVideoRotation_270: {
+ const float ROTATE_270[16] =
+ { -a[4], -a[5], -a[6], -a[7],
+ a[0], a[1], a[2], a[3],
+ a[8], a[9], a[10], a[11],
+ a[4] + a[12], a[5] + a[13], a[6] + a[14], a[7] + a[15]};
+ memcpy(a, ROTATE_270, sizeof(ROTATE_270));
+ } break;
+ }
+}
+
+}  // anonymous namespace
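To see what RotateMatrix computes, it helps to apply the 90-degree case to the identity matrix: the result maps a texture coordinate (x, y) to (1 - y, x), a rotation that keeps coordinates inside the unit square. A small self-contained check, assuming the same column-major, OpenGL-style 4x4 layout as the sampling matrices:

#include <cstdio>
#include <cstring>

// Column-major layout: a[0..3] is the first column, and points transform
// as column vectors (x, y, 0, 1).
static void Apply(const float a[16], float x, float y, float* ox, float* oy) {
  *ox = a[0] * x + a[4] * y + a[12];
  *oy = a[1] * x + a[5] * y + a[13];
}

int main() {
  // Identity transform, then the 90-degree rotation from RotateMatrix.
  float a[16] = {1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0,  0, 0, 0, 1};
  const float rotated[16] =
      { a[4], a[5], a[6], a[7],
        -a[0], -a[1], -a[2], -a[3],
        a[8], a[9], a[10], a[11],
        a[0] + a[12], a[1] + a[13], a[2] + a[14], a[3] + a[15]};
  memcpy(a, rotated, sizeof(rotated));

  float ox, oy;
  Apply(a, 0.f, 0.f, &ox, &oy);  // (0, 0) -> (1, 0)
  printf("(0,0) -> (%.0f, %.0f)\n", ox, oy);
  Apply(a, 1.f, 0.f, &ox, &oy);  // (1, 0) -> (1, 1)
  printf("(1,0) -> (%.0f, %.0f)\n", ox, oy);
  return 0;  // every (x, y) maps to (1 - y, x)
}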
namespace webrtc_jni {
-NativeTextureHandleImpl::NativeTextureHandleImpl(JNIEnv* jni,
- jint j_oes_texture_id,
- jfloatArray j_transform_matrix)
- : oes_texture_id(j_oes_texture_id) {
+// Aligning pointer to 64 bytes for improved performance, e.g. use SIMD.
+static const int kBufferAlignment = 64;
+
+NativeHandleImpl::NativeHandleImpl(JNIEnv* jni,
+ jint j_oes_texture_id,
+ jfloatArray j_transform_matrix)
+ : oes_texture_id(j_oes_texture_id) {
RTC_CHECK_EQ(16, jni->GetArrayLength(j_transform_matrix));
jfloat* transform_matrix_ptr =
jni->GetFloatArrayElements(j_transform_matrix, nullptr);
@@ -44,41 +95,15 @@ NativeTextureHandleImpl::NativeTextureHandleImpl(JNIEnv* jni,
jni->ReleaseFloatArrayElements(j_transform_matrix, transform_matrix_ptr, 0);
}
-NativeHandleImpl::NativeHandleImpl() : texture_object_(NULL), texture_id_(-1) {}
-
-void* NativeHandleImpl::GetHandle() {
- return texture_object_;
-}
-
-int NativeHandleImpl::GetTextureId() {
- return texture_id_;
-}
-
-void NativeHandleImpl::SetTextureObject(void* texture_object, int texture_id) {
- texture_object_ = reinterpret_cast<jobject>(texture_object);
- texture_id_ = texture_id;
-}
-
-JniNativeHandleBuffer::JniNativeHandleBuffer(void* native_handle,
- int width,
- int height)
- : NativeHandleBuffer(native_handle, width, height) {}
-
-rtc::scoped_refptr<webrtc::VideoFrameBuffer>
-JniNativeHandleBuffer::NativeToI420Buffer() {
- // TODO(pbos): Implement before using this in the encoder pipeline (or
- // remove the RTC_CHECK() in VideoCapture).
- RTC_NOTREACHED();
- return nullptr;
-}
-
AndroidTextureBuffer::AndroidTextureBuffer(
int width,
int height,
- const NativeTextureHandleImpl& native_handle,
+ const NativeHandleImpl& native_handle,
+ jobject surface_texture_helper,
const rtc::Callback0<void>& no_longer_used)
: webrtc::NativeHandleBuffer(&native_handle_, width, height),
native_handle_(native_handle),
+ surface_texture_helper_(surface_texture_helper),
no_longer_used_cb_(no_longer_used) {}
AndroidTextureBuffer::~AndroidTextureBuffer() {
@@ -87,9 +112,75 @@ AndroidTextureBuffer::~AndroidTextureBuffer() {
rtc::scoped_refptr<webrtc::VideoFrameBuffer>
AndroidTextureBuffer::NativeToI420Buffer() {
- RTC_NOTREACHED()
- << "AndroidTextureBuffer::NativeToI420Buffer not implemented.";
- return nullptr;
+ int uv_width = (width()+7) / 8;
+ int stride = 8 * uv_width;
+ int uv_height = (height()+1)/2;
+ size_t size = stride * (height() + uv_height);
+ // The data is owned by the frame, and the normal case is that the
+ // data is deleted by the frame's destructor callback.
+ //
+ // TODO(nisse): Use an I420BufferPool. We then need to extend that
+ // class, and I420Buffer, to support our memory layout.
+ rtc::scoped_ptr<uint8_t, webrtc::AlignedFreeDeleter> yuv_data(
+ static_cast<uint8_t*>(webrtc::AlignedMalloc(size, kBufferAlignment)));
+ // See SurfaceTextureHelper.java for the required layout.
+ uint8_t* y_data = yuv_data.get();
+ uint8_t* u_data = y_data + height() * stride;
+ uint8_t* v_data = u_data + stride/2;
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> copy =
+ new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
+ width(), height(),
+ y_data, stride,
+ u_data, stride,
+ v_data, stride,
+ rtc::Bind(&webrtc::AlignedFree, yuv_data.release()));
+
+ JNIEnv* jni = AttachCurrentThreadIfNeeded();
+ ScopedLocalRefFrame local_ref_frame(jni);
+
+ jmethodID transform_mid = GetMethodID(
+ jni,
+ GetObjectClass(jni, surface_texture_helper_),
+ "textureToYUV",
+ "(Ljava/nio/ByteBuffer;IIII[F)V");
+
+ jobject byte_buffer = jni->NewDirectByteBuffer(y_data, size);
+
+ // TODO(nisse): Keep java transform matrix around.
+ jfloatArray sampling_matrix = jni->NewFloatArray(16);
+ jni->SetFloatArrayRegion(sampling_matrix, 0, 16,
+ native_handle_.sampling_matrix);
+
+ jni->CallVoidMethod(surface_texture_helper_,
+ transform_mid,
+ byte_buffer, width(), height(), stride,
+ native_handle_.oes_texture_id, sampling_matrix);
+ CHECK_EXCEPTION(jni) << "textureToYUV throwed an exception";
+
+ return copy;
+}
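The allocation above packs Y, U and V into one buffer that shares a single stride, with U and V side by side in the same rows (the layout textureToYUV writes). A worked example of the offsets, assuming a hypothetical 640x480 texture:

#include <cstdio>

int main() {
  const int width = 640, height = 480;  // hypothetical frame size

  const int uv_width = (width + 7) / 8;
  const int stride = 8 * uv_width;      // width rounded up to a multiple of 8
  const int uv_height = (height + 1) / 2;
  const int size = stride * (height + uv_height);

  const int y_offset = 0;
  const int u_offset = height * stride;        // chroma rows follow Y
  const int v_offset = u_offset + stride / 2;  // V is half a stride after U

  // Prints: stride=640 size=460800 y=0 u=307200 v=307520
  printf("stride=%d size=%d y=%d u=%d v=%d\n",
         stride, size, y_offset, u_offset, v_offset);
  // All three planes use the same stride, so each chroma row holds
  // stride/2 bytes of U followed by stride/2 bytes of V.
  return 0;
}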
+
+rtc::scoped_refptr<AndroidTextureBuffer>
+AndroidTextureBuffer::ScaleAndRotate(int dst_width,
+                                     int dst_height,
+                                     webrtc::VideoRotation rotation) {
+  if (width() == dst_width && height() == dst_height &&
+      rotation == webrtc::kVideoRotation_0) {
+    return this;
+  }
+  int rotated_width = (rotation % 180 == 0) ? dst_width : dst_height;
+  int rotated_height = (rotation % 180 == 0) ? dst_height : dst_width;
+
+  // Here we use Bind to add a reference count to |this| until the newly
+  // created AndroidTextureBuffer is destroyed.
+ rtc::scoped_refptr<AndroidTextureBuffer> buffer(
+ new rtc::RefCountedObject<AndroidTextureBuffer>(
+ rotated_width, rotated_height, native_handle_,
+ surface_texture_helper_, rtc::KeepRefUntilDone(this)));
+
+ RotateMatrix(buffer->native_handle_.sampling_matrix, rotation);
+ return buffer;
}
} // namespace webrtc_jni
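ScaleAndRotate keeps the source buffer alive by binding a reference to |this| into the derived buffer's release callback, so the texture behind both buffers cannot be released early. A generic sketch of that keep-alive pattern, written with standard-library types rather than the rtc helpers:

#include <cstdio>
#include <functional>
#include <memory>

// A destination object that runs a callback on destruction, like the
// |no_longer_used| callback on AndroidTextureBuffer.
struct DerivedBuffer {
  explicit DerivedBuffer(std::function<void()> on_destroy)
      : on_destroy_(std::move(on_destroy)) {}
  ~DerivedBuffer() { on_destroy_(); }
  std::function<void()> on_destroy_;
};

struct SourceBuffer {
  ~SourceBuffer() { printf("source released\n"); }
};

int main() {
  auto source = std::make_shared<SourceBuffer>();
  // Capturing |source| by value in the callback is the keep-alive trick:
  // the source cannot be destroyed before the derived buffer is.
  DerivedBuffer* derived = new DerivedBuffer([source] {});
  source.reset();             // our own reference is gone...
  printf("derived still valid\n");
  delete derived;             // ...but the source lives until here
  return 0;
}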
diff --git a/talk/app/webrtc/java/jni/native_handle_impl.h b/talk/app/webrtc/java/jni/native_handle_impl.h
index dd04bc20b1..1d0f601d0d 100644
--- a/talk/app/webrtc/java/jni/native_handle_impl.h
+++ b/talk/app/webrtc/java/jni/native_handle_impl.h
@@ -31,56 +31,44 @@
#include <jni.h>
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
+#include "webrtc/common_video/rotation.h"
namespace webrtc_jni {
// Wrapper for texture object.
-struct NativeTextureHandleImpl {
- NativeTextureHandleImpl(JNIEnv* jni,
- jint j_oes_texture_id,
- jfloatArray j_transform_matrix);
+struct NativeHandleImpl {
+ NativeHandleImpl(JNIEnv* jni,
+ jint j_oes_texture_id,
+ jfloatArray j_transform_matrix);
const int oes_texture_id;
float sampling_matrix[16];
};
-// Native handle for SurfaceTexture + texture id.
-class NativeHandleImpl {
- public:
- NativeHandleImpl();
-
- void* GetHandle();
- int GetTextureId();
- void SetTextureObject(void* texture_object, int texture_id);
-
- private:
- jobject texture_object_;
- int32_t texture_id_;
-};
-
-class JniNativeHandleBuffer : public webrtc::NativeHandleBuffer {
- public:
- JniNativeHandleBuffer(void* native_handle, int width, int height);
-
- // TODO(pbos): Override destructor to release native handle, at the moment the
- // native handle is not released based on refcount.
-
- private:
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> NativeToI420Buffer() override;
-};
-
class AndroidTextureBuffer : public webrtc::NativeHandleBuffer {
public:
AndroidTextureBuffer(int width,
int height,
- const NativeTextureHandleImpl& native_handle,
+ const NativeHandleImpl& native_handle,
+ jobject surface_texture_helper,
const rtc::Callback0<void>& no_longer_used);
~AndroidTextureBuffer();
rtc::scoped_refptr<VideoFrameBuffer> NativeToI420Buffer() override;
+ rtc::scoped_refptr<AndroidTextureBuffer> ScaleAndRotate(
+ int dst_width,
+ int dst_height,
+ webrtc::VideoRotation rotation);
+
private:
- NativeTextureHandleImpl native_handle_;
+ NativeHandleImpl native_handle_;
+ // Raw object pointer, relying on the caller, i.e.,
+ // AndroidVideoCapturerJni or the C++ SurfaceTextureHelper, to keep
+ // a global reference. TODO(nisse): Make this a reference to the C++
+ // SurfaceTextureHelper instead, but that requires some refactoring
+ // of AndroidVideoCapturerJni.
+ jobject surface_texture_helper_;
rtc::Callback0<void> no_longer_used_cb_;
};
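As declared above, ScaleAndRotate() swaps the destination dimensions for 90- and 270-degree rotations before creating the new buffer. The swap, restated as a pair of illustrative Java helpers:

    // For 0 or 180 degrees the destination size is used as-is;
    // for 90 or 270 degrees width and height trade places.
    static int rotatedWidth(int dstWidth, int dstHeight, int rotationDegrees) {
      return (rotationDegrees % 180 == 0) ? dstWidth : dstHeight;
    }

    static int rotatedHeight(int dstWidth, int dstHeight, int rotationDegrees) {
      return (rotationDegrees % 180 == 0) ? dstHeight : dstWidth;
    }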
diff --git a/talk/app/webrtc/java/jni/peerconnection_jni.cc b/talk/app/webrtc/java/jni/peerconnection_jni.cc
index e75cd553b6..5ea63f74ae 100644
--- a/talk/app/webrtc/java/jni/peerconnection_jni.cc
+++ b/talk/app/webrtc/java/jni/peerconnection_jni.cc
@@ -57,6 +57,7 @@
#define JNIEXPORT __attribute__((visibility("default")))
#include <limits>
+#include <utility>
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/jni_helpers.h"
@@ -74,10 +75,11 @@
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/event_tracer.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/logsinks.h"
-#include "webrtc/base/networkmonitor.h"
#include "webrtc/base/messagequeue.h"
+#include "webrtc/base/networkmonitor.h"
#include "webrtc/base/ssladapter.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/system_wrappers/include/field_trial_default.h"
@@ -141,22 +143,6 @@ static bool factory_static_initialized = false;
static bool video_hw_acceleration_enabled = true;
#endif
-extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
- jint ret = InitGlobalJniVariables(jvm);
- if (ret < 0)
- return -1;
-
- RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
- LoadGlobalClassReferenceHolder();
-
- return ret;
-}
-
-extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM *jvm, void *reserved) {
- FreeGlobalClassReferenceHolder();
- RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
-}
-
// Return the (singleton) Java Enum object corresponding to |index|;
// |state_class_fragment| is something like "MediaSource$State".
static jobject JavaEnumFromIndex(
@@ -545,7 +531,7 @@ class SdpObserverWrapper : public T {
protected:
// Common implementation for failure of Set & Create types, distinguished by
// |op| being "Set" or "Create".
- void OnFailure(const std::string& op, const std::string& error) {
+ void DoOnFailure(const std::string& op, const std::string& error) {
jmethodID m = GetMethodID(jni(), *j_observer_class_, "on" + op + "Failure",
"(Ljava/lang/String;)V");
jstring j_error_string = JavaStringFromStdString(jni(), error);
@@ -572,7 +558,7 @@ class CreateSdpObserverWrapper
void OnFailure(const std::string& error) override {
ScopedLocalRefFrame local_ref_frame(jni());
- SdpObserverWrapper::OnFailure(std::string("Create"), error);
+ SdpObserverWrapper::DoOnFailure(std::string("Create"), error);
}
};
@@ -585,7 +571,7 @@ class SetSdpObserverWrapper
void OnFailure(const std::string& error) override {
ScopedLocalRefFrame local_ref_frame(jni());
- SdpObserverWrapper::OnFailure(std::string("Set"), error);
+ SdpObserverWrapper::DoOnFailure(std::string("Set"), error);
}
};
@@ -773,7 +759,7 @@ class JavaVideoRendererWrapper : public VideoRendererInterface {
jni, *j_frame_class_, "<init>", "(III[I[Ljava/nio/ByteBuffer;J)V")),
j_texture_frame_ctor_id_(GetMethodID(
jni, *j_frame_class_, "<init>",
- "(IIILjava/lang/Object;IJ)V")),
+ "(IIII[FJ)V")),
j_byte_buffer_class_(jni, FindClass(jni, "java/nio/ByteBuffer")) {
CHECK_EXCEPTION(jni);
}
@@ -829,13 +815,13 @@ class JavaVideoRendererWrapper : public VideoRendererInterface {
jobject CricketToJavaTextureFrame(const cricket::VideoFrame* frame) {
NativeHandleImpl* handle =
reinterpret_cast<NativeHandleImpl*>(frame->GetNativeHandle());
- jobject texture_object = reinterpret_cast<jobject>(handle->GetHandle());
- int texture_id = handle->GetTextureId();
+ jfloatArray sampling_matrix = jni()->NewFloatArray(16);
+ jni()->SetFloatArrayRegion(sampling_matrix, 0, 16, handle->sampling_matrix);
return jni()->NewObject(
*j_frame_class_, j_texture_frame_ctor_id_,
frame->GetWidth(), frame->GetHeight(),
static_cast<int>(frame->GetVideoRotation()),
- texture_object, texture_id, javaShallowCopy(frame));
+ handle->oes_texture_id, sampling_matrix, javaShallowCopy(frame));
}
JNIEnv* jni() {
@@ -1054,6 +1040,32 @@ JOW(void, PeerConnectionFactory_initializeFieldTrials)(
webrtc::field_trial::InitFieldTrialsFromString(field_trials_init_string);
}
+JOW(void, PeerConnectionFactory_initializeInternalTracer)(JNIEnv* jni, jclass) {
+ rtc::tracing::SetupInternalTracer();
+}
+
+JOW(jboolean, PeerConnectionFactory_startInternalTracingCapture)(
+ JNIEnv* jni, jclass, jstring j_event_tracing_filename) {
+ if (!j_event_tracing_filename)
+ return false;
+
+ const char* init_string =
+ jni->GetStringUTFChars(j_event_tracing_filename, NULL);
+ LOG(LS_INFO) << "Starting internal tracing to: " << init_string;
+ bool ret = rtc::tracing::StartInternalCapture(init_string);
+ jni->ReleaseStringUTFChars(j_event_tracing_filename, init_string);
+ return ret;
+}
+
+JOW(void, PeerConnectionFactory_stopInternalTracingCapture)(
+ JNIEnv* jni, jclass) {
+ rtc::tracing::StopInternalCapture();
+}
+
+JOW(void, PeerConnectionFactory_shutdownInternalTracer)(JNIEnv* jni, jclass) {
+ rtc::tracing::ShutdownInternalTracer();
+}
+
// Helper struct for working around the fact that CreatePeerConnectionFactory()
// comes in two flavors: either entirely automagical (constructing its own
// threads and deleting them on teardown, but no external codec factory support)
@@ -1251,6 +1263,46 @@ JOW(jlong, PeerConnectionFactory_nativeCreateAudioTrack)(
return (jlong)track.release();
}
+JOW(jboolean, PeerConnectionFactory_nativeStartAecDump)(
+ JNIEnv* jni, jclass, jlong native_factory, jint file) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ return factory->StartAecDump(file);
+#else
+ return false;
+#endif
+}
+
+JOW(void, PeerConnectionFactory_nativeStopAecDump)(
+ JNIEnv* jni, jclass, jlong native_factory) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ factory->StopAecDump();
+#endif
+}
+
+JOW(jboolean, PeerConnectionFactory_nativeStartRtcEventLog)(
+ JNIEnv* jni, jclass, jlong native_factory, jint file) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ return factory->StartRtcEventLog(file);
+#else
+ return false;
+#endif
+}
+
+JOW(void, PeerConnectionFactory_nativeStopRtcEventLog)(
+ JNIEnv* jni, jclass, jlong native_factory) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ factory->StopRtcEventLog();
+#endif
+}
+
JOW(void, PeerConnectionFactory_nativeSetOptions)(
JNIEnv* jni, jclass, jlong native_factory, jobject options) {
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
@@ -1292,21 +1344,35 @@ JOW(void, PeerConnectionFactory_nativeSetOptions)(
}
JOW(void, PeerConnectionFactory_nativeSetVideoHwAccelerationOptions)(
- JNIEnv* jni, jclass, jlong native_factory, jobject render_egl_context) {
+ JNIEnv* jni, jclass, jlong native_factory, jobject local_egl_context,
+ jobject remote_egl_context) {
#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
OwnedFactoryAndThreads* owned_factory =
reinterpret_cast<OwnedFactoryAndThreads*>(native_factory);
+
+ jclass j_eglbase14_context_class =
+ FindClass(jni, "org/webrtc/EglBase14$Context");
+
+ MediaCodecVideoEncoderFactory* encoder_factory =
+ static_cast<MediaCodecVideoEncoderFactory*>
+ (owned_factory->encoder_factory());
+ if (encoder_factory &&
+ jni->IsInstanceOf(local_egl_context, j_eglbase14_context_class)) {
+ LOG(LS_INFO) << "Set EGL context for HW encoding.";
+ encoder_factory->SetEGLContext(jni, local_egl_context);
+ }
+
MediaCodecVideoDecoderFactory* decoder_factory =
static_cast<MediaCodecVideoDecoderFactory*>
(owned_factory->decoder_factory());
- if (decoder_factory) {
- LOG(LS_INFO) << "Set EGL context for HW acceleration.";
- decoder_factory->SetEGLContext(jni, render_egl_context);
+ if (decoder_factory &&
+ jni->IsInstanceOf(remote_egl_context, j_eglbase14_context_class)) {
+ LOG(LS_INFO) << "Set EGL context for HW decoding.";
+ decoder_factory->SetEGLContext(jni, remote_egl_context);
}
#endif
}
-
static std::string
GetJavaEnumName(JNIEnv* jni, const std::string& className, jobject j_enum) {
jclass enumClass = FindClass(jni, className.c_str());
@@ -1503,6 +1569,9 @@ static void JavaRTCConfigurationToJsepRTCConfiguration(
jfieldID j_ice_connection_receiving_timeout_id =
GetFieldID(jni, j_rtc_config_class, "iceConnectionReceivingTimeout", "I");
+ jfieldID j_ice_backup_candidate_pair_ping_interval_id = GetFieldID(
+ jni, j_rtc_config_class, "iceBackupCandidatePairPingInterval", "I");
+
jfieldID j_continual_gathering_policy_id =
GetFieldID(jni, j_rtc_config_class, "continualGatheringPolicy",
"Lorg/webrtc/PeerConnection$ContinualGatheringPolicy;");
@@ -1524,6 +1593,8 @@ static void JavaRTCConfigurationToJsepRTCConfiguration(
jni, j_rtc_config, j_audio_jitter_buffer_fast_accelerate_id);
rtc_config->ice_connection_receiving_timeout =
GetIntField(jni, j_rtc_config, j_ice_connection_receiving_timeout_id);
+ rtc_config->ice_backup_candidate_pair_ping_interval = GetIntField(
+ jni, j_rtc_config, j_ice_backup_candidate_pair_ping_interval_id);
rtc_config->continual_gathering_policy =
JavaContinualGatheringPolicyToNativeType(
jni, j_continual_gathering_policy);
@@ -1550,7 +1621,7 @@ JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
rtc::SSLIdentity::Generate(webrtc::kIdentityName, rtc::KT_ECDSA));
if (ssl_identity.get()) {
rtc_config.certificates.push_back(
- rtc::RTCCertificate::Create(ssl_identity.Pass()));
+ rtc::RTCCertificate::Create(std::move(ssl_identity)));
LOG(LS_INFO) << "ECDSA certificate created.";
} else {
// Failing to create certificate should not abort peer connection
@@ -1704,6 +1775,29 @@ JOW(void, PeerConnection_nativeRemoveLocalStream)(
reinterpret_cast<MediaStreamInterface*>(native_stream));
}
+JOW(jobject, PeerConnection_nativeCreateSender)(
+ JNIEnv* jni, jobject j_pc, jstring j_kind, jstring j_stream_id) {
+ jclass j_rtp_sender_class = FindClass(jni, "org/webrtc/RtpSender");
+ jmethodID j_rtp_sender_ctor =
+ GetMethodID(jni, j_rtp_sender_class, "<init>", "(J)V");
+
+ std::string kind = JavaToStdString(jni, j_kind);
+ std::string stream_id = JavaToStdString(jni, j_stream_id);
+ rtc::scoped_refptr<RtpSenderInterface> sender =
+ ExtractNativePC(jni, j_pc)->CreateSender(kind, stream_id);
+ if (!sender.get()) {
+ return nullptr;
+ }
+ jlong nativeSenderPtr = jlongFromPointer(sender.get());
+ jobject j_sender =
+ jni->NewObject(j_rtp_sender_class, j_rtp_sender_ctor, nativeSenderPtr);
+ CHECK_EXCEPTION(jni) << "error during NewObject";
+ // Sender is now owned by the Java object, and will be freed from
+ // RtpSender.dispose(), called by PeerConnection.dispose() or getSenders().
+ sender->AddRef();
+ return j_sender;
+}
+
JOW(jobject, PeerConnection_nativeGetSenders)(JNIEnv* jni, jobject j_pc) {
jclass j_array_list_class = FindClass(jni, "java/util/ArrayList");
jmethodID j_array_list_ctor =
@@ -1723,7 +1817,8 @@ JOW(jobject, PeerConnection_nativeGetSenders)(JNIEnv* jni, jobject j_pc) {
jobject j_sender =
jni->NewObject(j_rtp_sender_class, j_rtp_sender_ctor, nativeSenderPtr);
CHECK_EXCEPTION(jni) << "error during NewObject";
- // Sender is now owned by Java object, and will be freed from there.
+ // Sender is now owned by the Java object, and will be freed from
+ // RtpSender.dispose(), called by PeerConnection.dispose() or getSenders().
sender->AddRef();
jni->CallBooleanMethod(j_senders, j_array_list_add, j_sender);
CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
@@ -1802,6 +1897,7 @@ JOW(jobject, VideoCapturer_nativeCreateVideoCapturer)(
// Since we can't create platform specific java implementations in Java, we
// defer the creation to C land.
#if defined(ANDROID)
+ // TODO(nisse): This case is intended to be deleted.
jclass j_video_capturer_class(
FindClass(jni, "org/webrtc/VideoCapturerAndroid"));
const int camera_id = jni->CallStaticIntMethod(
@@ -1816,8 +1912,13 @@ JOW(jobject, VideoCapturer_nativeCreateVideoCapturer)(
j_video_capturer_class,
GetMethodID(jni, j_video_capturer_class, "<init>", "(I)V"), camera_id);
CHECK_EXCEPTION(jni) << "error during creation of VideoCapturerAndroid";
+ jfieldID helper_fid = GetFieldID(jni, j_video_capturer_class, "surfaceHelper",
+ "Lorg/webrtc/SurfaceTextureHelper;");
+
rtc::scoped_refptr<webrtc::AndroidVideoCapturerDelegate> delegate =
- new rtc::RefCountedObject<AndroidVideoCapturerJni>(jni, j_video_capturer);
+ new rtc::RefCountedObject<AndroidVideoCapturerJni>(
+ jni, j_video_capturer,
+ GetObjectField(jni, j_video_capturer, helper_fid));
rtc::scoped_ptr<cricket::VideoCapturer> capturer(
new webrtc::AndroidVideoCapturer(delegate));
@@ -2003,11 +2104,11 @@ JOW(jbyteArray, CallSessionFileRotatingLogSink_nativeGetLogData)(
return result;
}
-JOW(void, RtpSender_nativeSetTrack)(JNIEnv* jni,
+JOW(jboolean, RtpSender_nativeSetTrack)(JNIEnv* jni,
jclass,
jlong j_rtp_sender_pointer,
jlong j_track_pointer) {
- reinterpret_cast<RtpSenderInterface*>(j_rtp_sender_pointer)
+ return reinterpret_cast<RtpSenderInterface*>(j_rtp_sender_pointer)
->SetTrack(reinterpret_cast<MediaStreamTrackInterface*>(j_track_pointer));
}
diff --git a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
index 05f1b23768..3e32b9a6fe 100644
--- a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
+++ b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
@@ -35,25 +35,14 @@
namespace webrtc_jni {
-SurfaceTextureHelper::SurfaceTextureHelper(JNIEnv* jni,
- jobject egl_shared_context)
- : j_surface_texture_helper_class_(
- jni,
- FindClass(jni, "org/webrtc/SurfaceTextureHelper")),
- j_surface_texture_helper_(
- jni,
- jni->CallStaticObjectMethod(
- *j_surface_texture_helper_class_,
- GetStaticMethodID(jni,
- *j_surface_texture_helper_class_,
- "create",
- "(Ljavax/microedition/khronos/egl/EGLContext;)"
- "Lorg/webrtc/SurfaceTextureHelper;"),
- egl_shared_context)),
- j_return_texture_method_(GetMethodID(jni,
- *j_surface_texture_helper_class_,
- "returnTextureFrame",
- "()V")) {
+SurfaceTextureHelper::SurfaceTextureHelper(
+ JNIEnv* jni, jobject surface_texture_helper)
+ : j_surface_texture_helper_(jni, surface_texture_helper),
+ j_return_texture_method_(
+ GetMethodID(jni,
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ "returnTextureFrame",
+ "()V")) {
CHECK_EXCEPTION(jni) << "error during initialization of SurfaceTextureHelper";
}
@@ -70,9 +59,9 @@ void SurfaceTextureHelper::ReturnTextureFrame() const {
rtc::scoped_refptr<webrtc::VideoFrameBuffer>
SurfaceTextureHelper::CreateTextureFrame(int width, int height,
- const NativeTextureHandleImpl& native_handle) {
+ const NativeHandleImpl& native_handle) {
return new rtc::RefCountedObject<AndroidTextureBuffer>(
- width, height, native_handle,
+ width, height, native_handle, *j_surface_texture_helper_,
rtc::Bind(&SurfaceTextureHelper::ReturnTextureFrame, this));
}
diff --git a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
index dc9d2b853d..8dde2b54ed 100644
--- a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
+++ b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
@@ -35,7 +35,7 @@
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
namespace webrtc_jni {
@@ -49,24 +49,19 @@ namespace webrtc_jni {
// destroyed while a VideoFrameBuffer is in use.
// This class is the C++ counterpart of the java class SurfaceTextureHelper.
// Usage:
-// 1. Create an instance of this class.
-// 2. Call GetJavaSurfaceTextureHelper to get the Java SurfaceTextureHelper.
+// 1. Create a Java instance of SurfaceTextureHelper.
+// 2. Create an instance of this class.
// 3. Register a listener to the Java SurfaceListener and start producing
// new buffers.
-// 3. Call CreateTextureFrame to wrap the Java texture in a VideoFrameBuffer.
+// 4. Call CreateTextureFrame to wrap the Java texture in a VideoFrameBuffer.
class SurfaceTextureHelper : public rtc::RefCountInterface {
public:
- SurfaceTextureHelper(JNIEnv* jni, jobject shared_egl_context);
-
- // Returns the Java SurfaceTextureHelper.
- jobject GetJavaSurfaceTextureHelper() const {
- return *j_surface_texture_helper_;
- }
+ SurfaceTextureHelper(JNIEnv* jni, jobject surface_texture_helper);
rtc::scoped_refptr<webrtc::VideoFrameBuffer> CreateTextureFrame(
int width,
int height,
- const NativeTextureHandleImpl& native_handle);
+ const NativeHandleImpl& native_handle);
protected:
~SurfaceTextureHelper();
@@ -75,7 +70,6 @@ class SurfaceTextureHelper : public rtc::RefCountInterface {
// May be called on arbitrary thread.
void ReturnTextureFrame() const;
- const ScopedGlobalRef<jclass> j_surface_texture_helper_class_;
const ScopedGlobalRef<jobject> j_surface_texture_helper_;
const jmethodID j_return_texture_method_;
};
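Restating the revised usage steps from the header comment as a Java-side sketch. The create() call is assumed from the JNI signature removed above and may differ after this change; setListener() and returnTextureFrame() match the Java API used elsewhere in this diff:

    // Step 1: create the Java helper (signature is an assumption).
    final SurfaceTextureHelper helper = SurfaceTextureHelper.create(sharedEglContext);
    // Step 3: register a listener and start producing buffers.
    helper.setListener(new SurfaceTextureHelper.OnTextureFrameAvailableListener() {
      @Override
      public void onTextureFrameAvailable(
          int oesTextureId, float[] transformMatrix, long timestampNs) {
        // Step 4 happens on the C++ side: CreateTextureFrame() wraps the texture.
        // When the consumer is done with the frame, hand the texture back:
        helper.returnTextureFrame();
      }
    });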
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
index 42af9c7fd0..19002f70e1 100644
--- a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
@@ -33,23 +33,23 @@ import android.media.MediaCodecInfo;
import android.media.MediaCodecInfo.CodecCapabilities;
import android.media.MediaCodecList;
import android.media.MediaFormat;
-import android.opengl.GLES11Ext;
-import android.opengl.GLES20;
import android.os.Build;
+import android.os.SystemClock;
import android.view.Surface;
import org.webrtc.Logging;
import java.nio.ByteBuffer;
import java.util.Arrays;
+import java.util.LinkedList;
import java.util.List;
-
-import javax.microedition.khronos.egl.EGLContext;
+import java.util.concurrent.CountDownLatch;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
// Java-side of peerconnection_jni.cc:MediaCodecVideoDecoder.
// This class is an implementation detail of the Java PeerConnection API.
-// MediaCodec is thread-hostile so this class must be operated on a single
-// thread.
+@SuppressWarnings("deprecation")
public class MediaCodecVideoDecoder {
// This class is constructed, operated, and destroyed by its C++ incarnation,
// so the class and its methods have non-public visibility. The API this
@@ -66,18 +66,26 @@ public class MediaCodecVideoDecoder {
}
private static final int DEQUEUE_INPUT_TIMEOUT = 500000; // 500 ms timeout.
+ private static final int MEDIA_CODEC_RELEASE_TIMEOUT_MS = 5000; // Timeout for codec release.
// Active running decoder instance. Set in initDecode() (called from native code)
// and reset to null in release() call.
private static MediaCodecVideoDecoder runningInstance = null;
+ private static MediaCodecVideoDecoderErrorCallback errorCallback = null;
+ private static int codecErrors = 0;
+
private Thread mediaCodecThread;
private MediaCodec mediaCodec;
private ByteBuffer[] inputBuffers;
private ByteBuffer[] outputBuffers;
private static final String VP8_MIME_TYPE = "video/x-vnd.on2.vp8";
+ private static final String VP9_MIME_TYPE = "video/x-vnd.on2.vp9";
private static final String H264_MIME_TYPE = "video/avc";
// List of supported HW VP8 decoders.
private static final String[] supportedVp8HwCodecPrefixes =
{"OMX.qcom.", "OMX.Nvidia.", "OMX.Exynos.", "OMX.Intel." };
+ // List of supported HW VP9 decoders.
+ private static final String[] supportedVp9HwCodecPrefixes =
+ {"OMX.qcom.", "OMX.Exynos." };
// List of supported HW H.264 decoders.
private static final String[] supportedH264HwCodecPrefixes =
{"OMX.qcom.", "OMX.Intel." };
@@ -96,13 +104,29 @@ public class MediaCodecVideoDecoder {
private int height;
private int stride;
private int sliceHeight;
+ private boolean hasDecodedFirstFrame;
+ private final Queue<TimeStamps> decodeStartTimeMs = new LinkedList<TimeStamps>();
private boolean useSurface;
- private int textureID = 0;
- private SurfaceTexture surfaceTexture = null;
+
+ // The below variables are only used when decoding to a Surface.
+ private TextureListener textureListener;
+ // Max number of output buffers queued before starting to drop decoded frames.
+ private static final int MAX_QUEUED_OUTPUTBUFFERS = 3;
+ private int droppedFrames;
private Surface surface = null;
- private EglBase eglBase;
+ private final Queue<DecodedOutputBuffer>
+ dequeuedSurfaceOutputBuffers = new LinkedList<DecodedOutputBuffer>();
+
+ // MediaCodec error handler - invoked when a critical error happens which may prevent
+ // further use of the MediaCodec API. Currently this means that one of the MediaCodec
+ // instances is hanging and can no longer be used in the next call.
+ public static interface MediaCodecVideoDecoderErrorCallback {
+ void onMediaCodecVideoDecoderCriticalError(int codecErrors);
+ }
- private MediaCodecVideoDecoder() {
+ public static void setErrorCallback(MediaCodecVideoDecoderErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ MediaCodecVideoDecoder.errorCallback = errorCallback;
}
// Helper struct for findVp8Decoder() below.
@@ -120,6 +144,7 @@ public class MediaCodecVideoDecoder {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.KITKAT) {
return null; // MediaCodec.setParameters is missing.
}
+ Logging.d(TAG, "Trying to find HW decoder for mime " + mime);
for (int i = 0; i < MediaCodecList.getCodecCount(); ++i) {
MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
if (info.isEncoder()) {
@@ -135,7 +160,7 @@ public class MediaCodecVideoDecoder {
if (name == null) {
continue; // No HW support in this codec; try the next one.
}
- Logging.v(TAG, "Found candidate decoder " + name);
+ Logging.d(TAG, "Found candidate decoder " + name);
// Check if this is supported decoder.
boolean supportedCodec = false;
@@ -166,6 +191,7 @@ public class MediaCodecVideoDecoder {
}
}
}
+ Logging.d(TAG, "No HW decoder found for mime " + mime);
return null; // No HW decoder.
}
@@ -173,6 +199,10 @@ public class MediaCodecVideoDecoder {
return findDecoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes) != null;
}
+ public static boolean isVp9HwSupported() {
+ return findDecoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes) != null;
+ }
+
public static boolean isH264HwSupported() {
return findDecoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes) != null;
}
@@ -197,17 +227,21 @@ public class MediaCodecVideoDecoder {
}
}
- // Pass null in |sharedContext| to configure the codec for ByteBuffer output.
- private boolean initDecode(VideoCodecType type, int width, int height, EGLContext sharedContext) {
+ // Pass null in |surfaceTextureHelper| to configure the codec for ByteBuffer output.
+ private boolean initDecode(
+ VideoCodecType type, int width, int height, SurfaceTextureHelper surfaceTextureHelper) {
if (mediaCodecThread != null) {
throw new RuntimeException("Forgot to release()?");
}
- useSurface = (sharedContext != null);
+ useSurface = (surfaceTextureHelper != null);
String mime = null;
String[] supportedCodecPrefixes = null;
if (type == VideoCodecType.VIDEO_CODEC_VP8) {
mime = VP8_MIME_TYPE;
supportedCodecPrefixes = supportedVp8HwCodecPrefixes;
+ } else if (type == VideoCodecType.VIDEO_CODEC_VP9) {
+ mime = VP9_MIME_TYPE;
+ supportedCodecPrefixes = supportedVp9HwCodecPrefixes;
} else if (type == VideoCodecType.VIDEO_CODEC_H264) {
mime = H264_MIME_TYPE;
supportedCodecPrefixes = supportedH264HwCodecPrefixes;
@@ -221,9 +255,6 @@ public class MediaCodecVideoDecoder {
Logging.d(TAG, "Java initDecode: " + type + " : "+ width + " x " + height +
". Color: 0x" + Integer.toHexString(properties.colorFormat) +
". Use Surface: " + useSurface);
- if (sharedContext != null) {
- Logging.d(TAG, "Decoder shared EGL Context: " + sharedContext);
- }
runningInstance = this; // Decoder is now running and can be queried for stack traces.
mediaCodecThread = Thread.currentThread();
try {
@@ -233,16 +264,8 @@ public class MediaCodecVideoDecoder {
sliceHeight = height;
if (useSurface) {
- // Create shared EGL context.
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PIXEL_BUFFER);
- eglBase.createDummyPbufferSurface();
- eglBase.makeCurrent();
-
- // Create output surface
- textureID = GlUtil.generateTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
- Logging.d(TAG, "Video decoder TextureID = " + textureID);
- surfaceTexture = new SurfaceTexture(textureID);
- surface = new Surface(surfaceTexture);
+ textureListener = new TextureListener(surfaceTextureHelper);
+ surface = new Surface(surfaceTextureHelper.getSurfaceTexture());
}
MediaFormat format = MediaFormat.createVideoFormat(mime, width, height);
@@ -261,6 +284,10 @@ public class MediaCodecVideoDecoder {
colorFormat = properties.colorFormat;
outputBuffers = mediaCodec.getOutputBuffers();
inputBuffers = mediaCodec.getInputBuffers();
+ decodeStartTimeMs.clear();
+ hasDecodedFirstFrame = false;
+ dequeuedSurfaceOutputBuffers.clear();
+ droppedFrames = 0;
Logging.d(TAG, "Input buffers: " + inputBuffers.length +
". Output buffers: " + outputBuffers.length);
return true;
@@ -271,25 +298,45 @@ public class MediaCodecVideoDecoder {
}
private void release() {
- Logging.d(TAG, "Java releaseDecoder");
+ Logging.d(TAG, "Java releaseDecoder. Total number of dropped frames: " + droppedFrames);
checkOnMediaCodecThread();
- try {
- mediaCodec.stop();
- mediaCodec.release();
- } catch (IllegalStateException e) {
- Logging.e(TAG, "release failed", e);
+
+ // Run MediaCodec stop() and release() on a separate thread since
+ // MediaCodec.stop() may sometimes hang.
+ final CountDownLatch releaseDone = new CountDownLatch(1);
+
+ Runnable runMediaCodecRelease = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Logging.d(TAG, "Java releaseDecoder on release thread");
+ mediaCodec.stop();
+ mediaCodec.release();
+ Logging.d(TAG, "Java releaseDecoder on release thread done");
+ } catch (Exception e) {
+ Logging.e(TAG, "Media decoder release failed", e);
+ }
+ releaseDone.countDown();
+ }
+ };
+ new Thread(runMediaCodecRelease).start();
+
+ if (!ThreadUtils.awaitUninterruptibly(releaseDone, MEDIA_CODEC_RELEASE_TIMEOUT_MS)) {
+ Logging.e(TAG, "Media decoder release timeout");
+ codecErrors++;
+ if (errorCallback != null) {
+ Logging.e(TAG, "Invoke codec error callback. Errors: " + codecErrors);
+ errorCallback.onMediaCodecVideoDecoderCriticalError(codecErrors);
+ }
}
+
mediaCodec = null;
mediaCodecThread = null;
runningInstance = null;
if (useSurface) {
surface.release();
surface = null;
- Logging.d(TAG, "Delete video decoder TextureID " + textureID);
- GLES20.glDeleteTextures(1, new int[] {textureID}, 0);
- textureID = 0;
- eglBase.release();
- eglBase = null;
+ textureListener.release();
}
Logging.d(TAG, "Java releaseDecoder done");
}
@@ -306,13 +353,15 @@ public class MediaCodecVideoDecoder {
}
}
- private boolean queueInputBuffer(
- int inputBufferIndex, int size, long timestampUs) {
+ private boolean queueInputBuffer(int inputBufferIndex, int size, long presentationTimeStampUs,
+ long timeStampMs, long ntpTimeStamp) {
checkOnMediaCodecThread();
try {
inputBuffers[inputBufferIndex].position(0);
inputBuffers[inputBufferIndex].limit(size);
- mediaCodec.queueInputBuffer(inputBufferIndex, 0, size, timestampUs, 0);
+ decodeStartTimeMs.add(new TimeStamps(SystemClock.elapsedRealtime(), timeStampMs,
+ ntpTimeStamp));
+ mediaCodec.queueInputBuffer(inputBufferIndex, 0, size, presentationTimeStampUs, 0);
return true;
}
catch (IllegalStateException e) {
@@ -321,56 +370,183 @@ public class MediaCodecVideoDecoder {
}
}
- // Helper structs for dequeueOutputBuffer() below.
- private static class DecodedByteBuffer {
- public DecodedByteBuffer(int index, int offset, int size, long presentationTimestampUs) {
+ private static class TimeStamps {
+ public TimeStamps(long decodeStartTimeMs, long timeStampMs, long ntpTimeStampMs) {
+ this.decodeStartTimeMs = decodeStartTimeMs;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ }
+ private final long decodeStartTimeMs; // Time when this frame was queued for decoding.
+ private final long timeStampMs; // Not consumed in Java; carried through to the C++ side.
+ private final long ntpTimeStampMs; // Not consumed in Java; carried through to the C++ side.
+ }
+
+ // Helper struct for dequeueOutputBuffer() below.
+ private static class DecodedOutputBuffer {
+ public DecodedOutputBuffer(int index, int offset, int size, long timeStampMs,
+ long ntpTimeStampMs, long decodeTime, long endDecodeTime) {
this.index = index;
this.offset = offset;
this.size = size;
- this.presentationTimestampUs = presentationTimestampUs;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ this.decodeTimeMs = decodeTime;
+ this.endDecodeTimeMs = endDecodeTime;
}
private final int index;
private final int offset;
private final int size;
- private final long presentationTimestampUs;
+ private final long timeStampMs;
+ private final long ntpTimeStampMs;
+ // Number of ms it took to decode this frame.
+ private final long decodeTimeMs;
+ // System time when this frame finished decoding.
+ private final long endDecodeTimeMs;
}
+ // Helper struct for dequeueTextureBuffer() below.
private static class DecodedTextureBuffer {
private final int textureID;
- private final long presentationTimestampUs;
+ private final float[] transformMatrix;
+ private final long timeStampMs;
+ private final long ntpTimeStampMs;
+ private final long decodeTimeMs;
+ // Interval from when the frame finished decoding until this buffer has been created.
+ // Since there is only one texture, this interval depends on the time from when
+ // a frame is decoded and provided to C++ until that frame is returned to the MediaCodec
+ // so that the texture can be updated with the next decoded frame.
+ private final long frameDelayMs;
- public DecodedTextureBuffer(int textureID, long presentationTimestampUs) {
+ // A DecodedTextureBuffer with zero |textureID| has special meaning and represents a frame
+ // that was dropped.
+ public DecodedTextureBuffer(int textureID, float[] transformMatrix, long timeStampMs,
+ long ntpTimeStampMs, long decodeTimeMs, long frameDelay) {
this.textureID = textureID;
- this.presentationTimestampUs = presentationTimestampUs;
+ this.transformMatrix = transformMatrix;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ this.decodeTimeMs = decodeTimeMs;
+ this.frameDelayMs = frameDelay;
}
}
- // Returns null if no decoded buffer is available, and otherwise either a DecodedByteBuffer or
- // DecodedTexturebuffer depending on |useSurface| configuration.
+ // Poll based texture listener.
+ private static class TextureListener
+ implements SurfaceTextureHelper.OnTextureFrameAvailableListener {
+ private final SurfaceTextureHelper surfaceTextureHelper;
+ // |newFrameLock| is used to synchronize arrival of new frames with wait()/notifyAll().
+ private final Object newFrameLock = new Object();
+ // |bufferToRender| is non-null while waiting for the transition from
+ // addBufferToRender() to onTextureFrameAvailable().
+ private DecodedOutputBuffer bufferToRender;
+ private DecodedTextureBuffer renderedBuffer;
+
+ public TextureListener(SurfaceTextureHelper surfaceTextureHelper) {
+ this.surfaceTextureHelper = surfaceTextureHelper;
+ surfaceTextureHelper.setListener(this);
+ }
+
+ public void addBufferToRender(DecodedOutputBuffer buffer) {
+ if (bufferToRender != null) {
+ Logging.e(TAG,
+ "Unexpected addBufferToRender() called while waiting for a texture.");
+ throw new IllegalStateException("Waiting for a texture.");
+ }
+ bufferToRender = buffer;
+ }
+
+ public boolean isWaitingForTexture() {
+ synchronized (newFrameLock) {
+ return bufferToRender != null;
+ }
+ }
+
+ // Callback from |surfaceTextureHelper|. May be called on an arbitrary thread.
+ @Override
+ public void onTextureFrameAvailable(
+ int oesTextureId, float[] transformMatrix, long timestampNs) {
+ synchronized (newFrameLock) {
+ if (renderedBuffer != null) {
+ Logging.e(TAG,
+ "Unexpected onTextureFrameAvailable() called while already holding a texture.");
+ throw new IllegalStateException("Already holding a texture.");
+ }
+ // |timestampNs| is always zero on some Android versions.
+ renderedBuffer = new DecodedTextureBuffer(oesTextureId, transformMatrix,
+ bufferToRender.timeStampMs, bufferToRender.ntpTimeStampMs, bufferToRender.decodeTimeMs,
+ SystemClock.elapsedRealtime() - bufferToRender.endDecodeTimeMs);
+ bufferToRender = null;
+ newFrameLock.notifyAll();
+ }
+ }
+
+ // Dequeues and returns a DecodedTextureBuffer if available, or null otherwise.
+ public DecodedTextureBuffer dequeueTextureBuffer(int timeoutMs) {
+ synchronized (newFrameLock) {
+ if (renderedBuffer == null && timeoutMs > 0 && isWaitingForTexture()) {
+ try {
+ newFrameLock.wait(timeoutMs);
+ } catch (InterruptedException e) {
+ // Restore the interrupted status by reinterrupting the thread.
+ Thread.currentThread().interrupt();
+ }
+ }
+ DecodedTextureBuffer returnedBuffer = renderedBuffer;
+ renderedBuffer = null;
+ return returnedBuffer;
+ }
+ }
+
+ public void release() {
+ // SurfaceTextureHelper.disconnect() will block until any onTextureFrameAvailable() in
+ // progress is done. Therefore, the call to disconnect() must be outside any synchronized
+ // statement that is also used in the onTextureFrameAvailable() above to avoid deadlocks.
+ surfaceTextureHelper.disconnect();
+ synchronized (newFrameLock) {
+ if (renderedBuffer != null) {
+ surfaceTextureHelper.returnTextureFrame();
+ renderedBuffer = null;
+ }
+ }
+ }
+ }
+
+ // Returns null if no decoded buffer is available, and otherwise a DecodedOutputBuffer.
// Throws IllegalStateException if call is made on the wrong thread, if color format changes to an
// unsupported format, or if |mediaCodec| is not in the Executing state. Throws CodecException
// upon codec error.
- private Object dequeueOutputBuffer(int dequeueTimeoutUs)
- throws IllegalStateException, MediaCodec.CodecException {
+ private DecodedOutputBuffer dequeueOutputBuffer(int dequeueTimeoutMs) {
checkOnMediaCodecThread();
+ if (decodeStartTimeMs.isEmpty()) {
+ return null;
+ }
// Drain the decoder until receiving a decoded buffer or hitting
// MediaCodec.INFO_TRY_AGAIN_LATER.
final MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
while (true) {
- final int result = mediaCodec.dequeueOutputBuffer(info, dequeueTimeoutUs);
+ final int result = mediaCodec.dequeueOutputBuffer(
+ info, TimeUnit.MILLISECONDS.toMicros(dequeueTimeoutMs));
switch (result) {
- case MediaCodec.INFO_TRY_AGAIN_LATER:
- return null;
case MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED:
outputBuffers = mediaCodec.getOutputBuffers();
Logging.d(TAG, "Decoder output buffers changed: " + outputBuffers.length);
+ if (hasDecodedFirstFrame) {
+ throw new RuntimeException("Unexpected output buffer change event.");
+ }
break;
case MediaCodec.INFO_OUTPUT_FORMAT_CHANGED:
MediaFormat format = mediaCodec.getOutputFormat();
Logging.d(TAG, "Decoder format changed: " + format.toString());
+ int newWidth = format.getInteger(MediaFormat.KEY_WIDTH);
+ int newHeight = format.getInteger(MediaFormat.KEY_HEIGHT);
+ if (hasDecodedFirstFrame && (newWidth != width || newHeight != height)) {
+ throw new RuntimeException("Unexpected size change. Configured " + width + "*" +
+ height + ". New " + newWidth + "*" + newHeight);
+ }
width = format.getInteger(MediaFormat.KEY_WIDTH);
height = format.getInteger(MediaFormat.KEY_HEIGHT);
+
if (!useSurface && format.containsKey(MediaFormat.KEY_COLOR_FORMAT)) {
colorFormat = format.getInteger(MediaFormat.KEY_COLOR_FORMAT);
Logging.d(TAG, "Color: 0x" + Integer.toHexString(colorFormat));
@@ -388,18 +564,76 @@ public class MediaCodecVideoDecoder {
stride = Math.max(width, stride);
sliceHeight = Math.max(height, sliceHeight);
break;
+ case MediaCodec.INFO_TRY_AGAIN_LATER:
+ return null;
default:
- // Output buffer decoded.
- if (useSurface) {
- mediaCodec.releaseOutputBuffer(result, true /* render */);
- // TODO(magjed): Wait for SurfaceTexture.onFrameAvailable() before returning a texture
- // frame.
- return new DecodedTextureBuffer(textureID, info.presentationTimeUs);
- } else {
- return new DecodedByteBuffer(result, info.offset, info.size, info.presentationTimeUs);
- }
+ hasDecodedFirstFrame = true;
+ TimeStamps timeStamps = decodeStartTimeMs.remove();
+ return new DecodedOutputBuffer(result, info.offset, info.size, timeStamps.timeStampMs,
+ timeStamps.ntpTimeStampMs,
+ SystemClock.elapsedRealtime() - timeStamps.decodeStartTimeMs,
+ SystemClock.elapsedRealtime());
+ }
+ }
+ }
+
+ // Returns null if no decoded buffer is available, and otherwise a DecodedTextureBuffer.
+ // Throws IllegalStateException if call is made on the wrong thread, if color format changes to an
+ // unsupported format, or if |mediaCodec| is not in the Executing state. Throws CodecException
+ // upon codec error. If |dequeueTimeoutMs| > 0, the oldest decoded frame will be dropped if
+ // a frame can't be returned.
+ private DecodedTextureBuffer dequeueTextureBuffer(int dequeueTimeoutMs) {
+ checkOnMediaCodecThread();
+ if (!useSurface) {
+ throw new IllegalStateException("dequeueTexture() called for byte buffer decoding.");
+ }
+ DecodedOutputBuffer outputBuffer = dequeueOutputBuffer(dequeueTimeoutMs);
+ if (outputBuffer != null) {
+ dequeuedSurfaceOutputBuffers.add(outputBuffer);
+ }
+
+ maybeRenderDecodedTextureBuffer();
+ // Check if a texture is ready now by waiting up to |dequeueTimeoutMs|.
+ DecodedTextureBuffer renderedBuffer = textureListener.dequeueTextureBuffer(dequeueTimeoutMs);
+ if (renderedBuffer != null) {
+ maybeRenderDecodedTextureBuffer();
+ return renderedBuffer;
+ }
+
+ if ((dequeuedSurfaceOutputBuffers.size()
+ >= Math.min(MAX_QUEUED_OUTPUTBUFFERS, outputBuffers.length)
+ || (dequeueTimeoutMs > 0 && !dequeuedSurfaceOutputBuffers.isEmpty()))) {
+ ++droppedFrames;
+ // Drop the oldest frame still in dequeuedSurfaceOutputBuffers.
+ // The oldest frame is owned by |textureListener| and can't be dropped since
+ // mediaCodec.releaseOutputBuffer has already been called.
+ final DecodedOutputBuffer droppedFrame = dequeuedSurfaceOutputBuffers.remove();
+ if (dequeueTimeoutMs > 0) {
+ // TODO(perkj): Re-add the below log when VideoRendererGui has been removed or fixed to
+ // return the one and only texture even if it does not render.
+ // Logging.w(TAG, "Draining decoder. Dropping frame with TS: "
+ // + droppedFrame.timeStampMs + ". Total number of dropped frames: " + droppedFrames);
+ } else {
+ Logging.w(TAG, "Too many output buffers. Dropping frame with TS: "
+ + droppedFrame.timeStampMs + ". Total number of dropped frames: " + droppedFrames);
}
+
+ mediaCodec.releaseOutputBuffer(droppedFrame.index, false /* render */);
+ return new DecodedTextureBuffer(0, null, droppedFrame.timeStampMs,
+ droppedFrame.ntpTimeStampMs, droppedFrame.decodeTimeMs,
+ SystemClock.elapsedRealtime() - droppedFrame.endDecodeTimeMs);
+ }
+ return null;
+ }
+
+ private void maybeRenderDecodedTextureBuffer() {
+ if (dequeuedSurfaceOutputBuffers.isEmpty() || textureListener.isWaitingForTexture()) {
+ return;
}
+ // Get the first frame in the queue and render to the decoder output surface.
+ final DecodedOutputBuffer buffer = dequeuedSurfaceOutputBuffers.remove();
+ textureListener.addBufferToRender(buffer);
+ mediaCodec.releaseOutputBuffer(buffer.index, true /* render */);
}
// Release a dequeued output byte buffer back to the codec for re-use. Should only be called for
@@ -407,11 +641,11 @@ public class MediaCodecVideoDecoder {
// Throws IllegalStateException if the call is made on the wrong thread, if codec is configured
// for surface decoding, or if |mediaCodec| is not in the Executing state. Throws
// MediaCodec.CodecException upon codec error.
- private void returnDecodedByteBuffer(int index)
+ private void returnDecodedOutputBuffer(int index)
throws IllegalStateException, MediaCodec.CodecException {
checkOnMediaCodecThread();
if (useSurface) {
- throw new IllegalStateException("returnDecodedByteBuffer() called for surface decoding.");
+ throw new IllegalStateException("returnDecodedOutputBuffer() called for surface decoding.");
}
mediaCodec.releaseOutputBuffer(index, false /* render */);
}
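The TextureListener introduced above is poll based: onTextureFrameAvailable() fills a single slot and notifies, while dequeueTextureBuffer() waits on the lock with a bounded timeout and preserves the interrupt flag instead of propagating InterruptedException. The same handoff pattern in isolation, as a generic sketch:

    // Single-slot producer/consumer handoff with a bounded wait.
    final class OneSlot<T> {
      private final Object lock = new Object();
      private T item;

      void put(T value) {
        synchronized (lock) {
          item = value;
          lock.notifyAll();  // Wake a consumer blocked in poll().
        }
      }

      T poll(int timeoutMs) {
        synchronized (lock) {
          if (item == null && timeoutMs > 0) {
            try {
              lock.wait(timeoutMs);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();  // Preserve interrupt status.
            }
          }
          T result = item;
          item = null;
          return result;
        }
      }
    }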
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
index f3f03c1d20..5c8f9dc77e 100644
--- a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
@@ -27,24 +27,29 @@
package org.webrtc;
+import android.annotation.TargetApi;
import android.media.MediaCodec;
import android.media.MediaCodecInfo.CodecCapabilities;
import android.media.MediaCodecInfo;
import android.media.MediaCodecList;
import android.media.MediaFormat;
+import android.opengl.GLES20;
import android.os.Build;
import android.os.Bundle;
+import android.view.Surface;
import org.webrtc.Logging;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
// Java-side of peerconnection_jni.cc:MediaCodecVideoEncoder.
// This class is an implementation detail of the Java PeerConnection API.
-// MediaCodec is thread-hostile so this class must be operated on a single
-// thread.
+@TargetApi(19)
+@SuppressWarnings("deprecation")
public class MediaCodecVideoEncoder {
// This class is constructed, operated, and destroyed by its C++ incarnation,
// so the class and its methods have non-public visibility. The API this
@@ -60,18 +65,31 @@ public class MediaCodecVideoEncoder {
VIDEO_CODEC_H264
}
+ private static final int MEDIA_CODEC_RELEASE_TIMEOUT_MS = 5000; // Timeout for codec release.
private static final int DEQUEUE_TIMEOUT = 0; // Non-blocking, no wait.
- // Active running encoder instance. Set in initDecode() (called from native code)
+ // Active running encoder instance. Set in initEncode() (called from native code)
// and reset to null in release() call.
private static MediaCodecVideoEncoder runningInstance = null;
+ private static MediaCodecVideoEncoderErrorCallback errorCallback = null;
+ private static int codecErrors = 0;
+
private Thread mediaCodecThread;
private MediaCodec mediaCodec;
private ByteBuffer[] outputBuffers;
+ private EglBase14 eglBase;
+ private int width;
+ private int height;
+ private Surface inputSurface;
+ private GlRectDrawer drawer;
private static final String VP8_MIME_TYPE = "video/x-vnd.on2.vp8";
+ private static final String VP9_MIME_TYPE = "video/x-vnd.on2.vp9";
private static final String H264_MIME_TYPE = "video/avc";
// List of supported HW VP8 codecs.
private static final String[] supportedVp8HwCodecPrefixes =
{"OMX.qcom.", "OMX.Intel." };
+ // List of supported HW VP9 encoders.
+ private static final String[] supportedVp9HwCodecPrefixes =
+ {"OMX.qcom."};
// List of supported HW H.264 codecs.
private static final String[] supportedH264HwCodecPrefixes =
{"OMX.qcom." };
@@ -99,13 +117,25 @@ public class MediaCodecVideoEncoder {
CodecCapabilities.COLOR_QCOM_FormatYUV420SemiPlanar,
COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m
};
- private int colorFormat;
- // Video encoder type.
+ private static final int[] supportedSurfaceColorList = {
+ CodecCapabilities.COLOR_FormatSurface
+ };
private VideoCodecType type;
+ private int colorFormat; // Used by native code.
+
// SPS and PPS NALs (Config frame) for H.264.
private ByteBuffer configData = null;
- private MediaCodecVideoEncoder() {
+ // MediaCodec error handler - invoked when a critical error happens which may prevent
+ // further use of the MediaCodec API. Currently this means that one of the MediaCodec
+ // instances is hanging and can no longer be used in the next call.
+ public static interface MediaCodecVideoEncoderErrorCallback {
+ void onMediaCodecVideoEncoderCriticalError(int codecErrors);
+ }
+
+ public static void setErrorCallback(MediaCodecVideoEncoderErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ MediaCodecVideoEncoder.errorCallback = errorCallback;
}
// Helper struct for findHwEncoder() below.
@@ -119,7 +149,7 @@ public class MediaCodecVideoEncoder {
}
private static EncoderProperties findHwEncoder(
- String mime, String[] supportedHwCodecPrefixes) {
+ String mime, String[] supportedHwCodecPrefixes, int[] colorList) {
// MediaCodec.setParameters is missing for JB and below, so bitrate
// can not be adjusted dynamically.
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.KITKAT) {
@@ -130,8 +160,7 @@ public class MediaCodecVideoEncoder {
if (mime.equals(H264_MIME_TYPE)) {
List<String> exceptionModels = Arrays.asList(H264_HW_EXCEPTION_MODELS);
if (exceptionModels.contains(Build.MODEL)) {
- Logging.w(TAG, "Model: " + Build.MODEL +
- " has black listed H.264 encoder.");
+ Logging.w(TAG, "Model: " + Build.MODEL + " has black listed H.264 encoder.");
return null;
}
}
@@ -170,8 +199,7 @@ public class MediaCodecVideoEncoder {
Logging.v(TAG, " Color: 0x" + Integer.toHexString(colorFormat));
}
- // Check if codec supports either yuv420 or nv12.
- for (int supportedColorFormat : supportedColorList) {
+ for (int supportedColorFormat : colorList) {
for (int codecColorFormat : capabilities.colorFormats) {
if (codecColorFormat == supportedColorFormat) {
// Found supported HW encoder.
@@ -182,15 +210,34 @@ public class MediaCodecVideoEncoder {
}
}
}
- return null; // No HW VP8 encoder.
+ return null; // No HW encoder.
}
public static boolean isVp8HwSupported() {
- return findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes) != null;
+ return findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes, supportedColorList) != null;
+ }
+
+ public static boolean isVp9HwSupported() {
+ return findHwEncoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes, supportedColorList) != null;
}
public static boolean isH264HwSupported() {
- return findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes) != null;
+ return findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes, supportedColorList) != null;
+ }
+
+ public static boolean isVp8HwSupportedUsingTextures() {
+ return findHwEncoder(
+ VP8_MIME_TYPE, supportedVp8HwCodecPrefixes, supportedSurfaceColorList) != null;
+ }
+
+ public static boolean isVp9HwSupportedUsingTextures() {
+ return findHwEncoder(
+ VP9_MIME_TYPE, supportedVp9HwCodecPrefixes, supportedSurfaceColorList) != null;
+ }
+
+ public static boolean isH264HwSupportedUsingTextures() {
+ return findHwEncoder(
+ H264_MIME_TYPE, supportedH264HwCodecPrefixes, supportedSurfaceColorList) != null;
}
private void checkOnMediaCodecThread() {
@@ -223,32 +270,43 @@ public class MediaCodecVideoEncoder {
}
}
- // Return the array of input buffers, or null on failure.
- private ByteBuffer[] initEncode(
- VideoCodecType type, int width, int height, int kbps, int fps) {
+ boolean initEncode(VideoCodecType type, int width, int height, int kbps, int fps,
+ EglBase14.Context sharedContext) {
+ final boolean useSurface = sharedContext != null;
Logging.d(TAG, "Java initEncode: " + type + " : " + width + " x " + height +
- ". @ " + kbps + " kbps. Fps: " + fps +
- ". Color: 0x" + Integer.toHexString(colorFormat));
+ ". @ " + kbps + " kbps. Fps: " + fps + ". Encode from texture : " + useSurface);
+
+ this.width = width;
+ this.height = height;
if (mediaCodecThread != null) {
throw new RuntimeException("Forgot to release()?");
}
- this.type = type;
EncoderProperties properties = null;
String mime = null;
int keyFrameIntervalSec = 0;
if (type == VideoCodecType.VIDEO_CODEC_VP8) {
mime = VP8_MIME_TYPE;
- properties = findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes);
+ properties = findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
+ keyFrameIntervalSec = 100;
+ } else if (type == VideoCodecType.VIDEO_CODEC_VP9) {
+ mime = VP9_MIME_TYPE;
+ properties = findHwEncoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
keyFrameIntervalSec = 100;
} else if (type == VideoCodecType.VIDEO_CODEC_H264) {
mime = H264_MIME_TYPE;
- properties = findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes);
+ properties = findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
keyFrameIntervalSec = 20;
}
if (properties == null) {
throw new RuntimeException("Can not find HW encoder for " + type);
}
runningInstance = this; // Encoder is now running and can be queried for stack traces.
+ colorFormat = properties.colorFormat;
+ Logging.d(TAG, "Color format: " + colorFormat);
+
mediaCodecThread = Thread.currentThread();
try {
MediaFormat format = MediaFormat.createVideoFormat(mime, width, height);
@@ -259,26 +317,39 @@ public class MediaCodecVideoEncoder {
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, keyFrameIntervalSec);
Logging.d(TAG, " Format: " + format);
mediaCodec = createByCodecName(properties.codecName);
+ this.type = type;
if (mediaCodec == null) {
Logging.e(TAG, "Can not create media encoder");
- return null;
+ return false;
}
mediaCodec.configure(
format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
+
+ if (useSurface) {
+ eglBase = new EglBase14(sharedContext, EglBase.CONFIG_RECORDABLE);
+ // Create an input surface and keep a reference since we must release the surface when done.
+ inputSurface = mediaCodec.createInputSurface();
+ eglBase.createSurface(inputSurface);
+ drawer = new GlRectDrawer();
+ }
mediaCodec.start();
- colorFormat = properties.colorFormat;
outputBuffers = mediaCodec.getOutputBuffers();
- ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
- Logging.d(TAG, "Input buffers: " + inputBuffers.length +
- ". Output buffers: " + outputBuffers.length);
- return inputBuffers;
+ Logging.d(TAG, "Output buffers: " + outputBuffers.length);
+
} catch (IllegalStateException e) {
Logging.e(TAG, "initEncode failed", e);
- return null;
+ return false;
}
+ return true;
+ }
+
+ ByteBuffer[] getInputBuffers() {
+ ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
+ Logging.d(TAG, "Input buffers: " + inputBuffers.length);
+ return inputBuffers;
}
- private boolean encode(
+ boolean encodeBuffer(
boolean isKeyframe, int inputBuffer, int size,
long presentationTimestampUs) {
checkOnMediaCodecThread();
@@ -298,22 +369,82 @@ public class MediaCodecVideoEncoder {
return true;
}
catch (IllegalStateException e) {
- Logging.e(TAG, "encode failed", e);
+ Logging.e(TAG, "encodeBuffer failed", e);
return false;
}
}
- private void release() {
- Logging.d(TAG, "Java releaseEncoder");
+ boolean encodeTexture(boolean isKeyframe, int oesTextureId, float[] transformationMatrix,
+ long presentationTimestampUs) {
checkOnMediaCodecThread();
try {
- mediaCodec.stop();
- mediaCodec.release();
- } catch (IllegalStateException e) {
- Logging.e(TAG, "release failed", e);
+ if (isKeyframe) {
+ Logging.d(TAG, "Sync frame request");
+ Bundle b = new Bundle();
+ b.putInt(MediaCodec.PARAMETER_KEY_REQUEST_SYNC_FRAME, 0);
+ mediaCodec.setParameters(b);
+ }
+ eglBase.makeCurrent();
+ // TODO(perkj): glClear() shouldn't be necessary since every pixel is covered anyway,
+ // but it's a workaround for bug webrtc:5147.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ drawer.drawOes(oesTextureId, transformationMatrix, 0, 0, width, height);
+ eglBase.swapBuffers(TimeUnit.MICROSECONDS.toNanos(presentationTimestampUs));
+ return true;
+ }
+ catch (RuntimeException e) {
+ Logging.e(TAG, "encodeTexture failed", e);
+ return false;
}
+ }
+
+ void release() {
+ Logging.d(TAG, "Java releaseEncoder");
+ checkOnMediaCodecThread();
+
+ // Run MediaCodec stop() and release() on a separate thread since
+ // MediaCodec.stop() may sometimes hang.
+ final CountDownLatch releaseDone = new CountDownLatch(1);
+
+ Runnable runMediaCodecRelease = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Logging.d(TAG, "Java releaseEncoder on release thread");
+ mediaCodec.stop();
+ mediaCodec.release();
+ Logging.d(TAG, "Java releaseEncoder on release thread done");
+ } catch (Exception e) {
+ Logging.e(TAG, "Media encoder release failed", e);
+ }
+ releaseDone.countDown();
+ }
+ };
+ new Thread(runMediaCodecRelease).start();
+
+ if (!ThreadUtils.awaitUninterruptibly(releaseDone, MEDIA_CODEC_RELEASE_TIMEOUT_MS)) {
+ Logging.e(TAG, "Media encoder release timeout");
+ codecErrors++;
+ if (errorCallback != null) {
+ Logging.e(TAG, "Invoke codec error callback. Errors: " + codecErrors);
+ errorCallback.onMediaCodecVideoEncoderCriticalError(codecErrors);
+ }
+ }
+
mediaCodec = null;
mediaCodecThread = null;
+ if (drawer != null) {
+ drawer.release();
+ drawer = null;
+ }
+ if (eglBase != null) {
+ eglBase.release();
+ eglBase = null;
+ }
+ if (inputSurface != null) {
+ inputSurface.release();
+ inputSurface = null;
+ }
runningInstance = null;
Logging.d(TAG, "Java releaseEncoder done");
}
@@ -336,7 +467,7 @@ public class MediaCodecVideoEncoder {
// Dequeue an input buffer and return its index, -1 if no input buffer is
// available, or -2 if the codec is no longer operative.
- private int dequeueInputBuffer() {
+ int dequeueInputBuffer() {
checkOnMediaCodecThread();
try {
return mediaCodec.dequeueInputBuffer(DEQUEUE_TIMEOUT);
@@ -347,7 +478,7 @@ public class MediaCodecVideoEncoder {
}
// Helper struct for dequeueOutputBuffer() below.
- private static class OutputBufferInfo {
+ static class OutputBufferInfo {
public OutputBufferInfo(
int index, ByteBuffer buffer,
boolean isKeyFrame, long presentationTimestampUs) {
@@ -357,15 +488,15 @@ public class MediaCodecVideoEncoder {
this.presentationTimestampUs = presentationTimestampUs;
}
- private final int index;
- private final ByteBuffer buffer;
- private final boolean isKeyFrame;
- private final long presentationTimestampUs;
+ public final int index;
+ public final ByteBuffer buffer;
+ public final boolean isKeyFrame;
+ public final long presentationTimestampUs;
}
// Dequeue and return an output buffer, or null if no output is ready. Return
// a fake OutputBufferInfo with index -1 if the codec is no longer operable.
- private OutputBufferInfo dequeueOutputBuffer() {
+ OutputBufferInfo dequeueOutputBuffer() {
checkOnMediaCodecThread();
try {
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
@@ -434,7 +565,7 @@ public class MediaCodecVideoEncoder {
// Release a dequeued output buffer back to the codec for re-use. Return
// false if the codec is no longer operable.
- private boolean releaseOutputBuffer(int index) {
+ boolean releaseOutputBuffer(int index) {
checkOnMediaCodecThread();
try {
mediaCodec.releaseOutputBuffer(index, false);
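The texture-based encode path added above is normally driven from the C++ incarnation, but the call order can be sketched from the Java side. Everything below is illustrative: the frame values are placeholders and package-private visibility is glossed over:

    MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
    if (encoder.initEncode(MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8,
        640, 480, 1000 /* kbps */, 30 /* fps */, eglContext /* EglBase14.Context */)) {
      // Per captured frame: render the OES texture into the codec's input surface.
      encoder.encodeTexture(false /* isKeyframe */, oesTextureId, transformMatrix,
          presentationTimestampUs);
      MediaCodecVideoEncoder.OutputBufferInfo out = encoder.dequeueOutputBuffer();
      if (out != null && out.index >= 0) {
        // Consume out.buffer, then hand the buffer back to the codec.
        encoder.releaseOutputBuffer(out.index);
      }
      encoder.release();
    }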
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
index 50023001d7..36cd07595c 100644
--- a/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
@@ -28,7 +28,6 @@
package org.webrtc;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
@@ -151,6 +150,7 @@ public class PeerConnection {
public int audioJitterBufferMaxPackets;
public boolean audioJitterBufferFastAccelerate;
public int iceConnectionReceivingTimeout;
+ public int iceBackupCandidatePairPingInterval;
public KeyType keyType;
public ContinualGatheringPolicy continualGatheringPolicy;
@@ -163,6 +163,7 @@ public class PeerConnection {
audioJitterBufferMaxPackets = 50;
audioJitterBufferFastAccelerate = false;
iceConnectionReceivingTimeout = -1;
+ iceBackupCandidatePairPingInterval = -1;
keyType = KeyType.ECDSA;
continualGatheringPolicy = ContinualGatheringPolicy.GATHER_ONCE;
}
@@ -223,6 +224,14 @@ public class PeerConnection {
localStreams.remove(stream);
}
+ public RtpSender createSender(String kind, String stream_id) {
+ RtpSender new_sender = nativeCreateSender(kind, stream_id);
+ if (new_sender != null) {
+ senders.add(new_sender);
+ }
+ return new_sender;
+ }
+
// Note that calling getSenders will dispose of the senders previously
// returned (and the same goes for getReceivers).
public List<RtpSender> getSenders() {
@@ -288,6 +297,8 @@ public class PeerConnection {
private native boolean nativeGetStats(
StatsObserver observer, long nativeTrack);
+ private native RtpSender nativeCreateSender(String kind, String stream_id);
+
private native List<RtpSender> nativeGetSenders();
private native List<RtpReceiver> nativeGetReceivers();
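The new createSender() entry point lets a sender exist before any track is attached; a track can be supplied later through RtpSender.setTrack(), which further below gains an ownership flag. A hypothetical usage sketch, assuming an existing PeerConnection and AudioTrack:

    // "audio" is the media kind; "stream1" is an illustrative stream id.
    RtpSender sender = peerConnection.createSender("audio", "stream1");
    if (sender != null) {
      // 'true' hands ownership of the track to the sender, which will
      // dispose of it when no longer needed.
      sender.setTrack(audioTrack, true);
    }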
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
index 83999ece98..d759c69271 100644
--- a/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
@@ -73,6 +73,15 @@ public class PeerConnectionFactory {
// Field trial initialization. Must be called before PeerConnectionFactory
// is created.
public static native void initializeFieldTrials(String fieldTrialsInitString);
+ // Internal tracing initialization. Must be called before PeerConnectionFactory is created to
+ // prevent racing with tracing code.
+ public static native void initializeInternalTracer();
+ // Internal tracing shutdown, called to prevent resource leaks. Must be called after
+ // PeerConnectionFactory is gone to prevent races with code performing tracing.
+ public static native void shutdownInternalTracer();
+ // Start/stop capturing of internal tracing.
+ public static native boolean startInternalTracingCapture(String tracing_filename);
+ public static native void stopInternalTracingCapture();
public PeerConnectionFactory() {
nativeFactory = nativeCreatePeerConnectionFactory();
@@ -131,12 +140,52 @@ public class PeerConnectionFactory {
nativeFactory, id, source.nativeSource));
}
+ // Starts recording an AEC dump. Ownership of the file is transferred to the
+ // native code. If an AEC dump is already in progress, it will be stopped and
+ // a new one will start using the provided file.
+ public boolean startAecDump(int file_descriptor) {
+ return nativeStartAecDump(nativeFactory, file_descriptor);
+ }
+
+ // Stops recording an AEC dump. If no AEC dump is currently being recorded,
+ // this call will have no effect.
+ public void stopAecDump() {
+ nativeStopAecDump(nativeFactory);
+ }
+
+ // Starts recording an RTC event log. Ownership of the file is transferred to
+ // the native code. If an RTC event log is already being recorded, it will be
+ // stopped and a new one will start using the provided file.
+ public boolean startRtcEventLog(int file_descriptor) {
+ return nativeStartRtcEventLog(nativeFactory, file_descriptor);
+ }
+
+ // Stops recording an RTC event log. If no RTC event log is currently being
+ // recorded, this call will have no effect.
+ public void stopRtcEventLog() {
+ nativeStopRtcEventLog(nativeFactory);
+ }
+
public void setOptions(Options options) {
nativeSetOptions(nativeFactory, options);
}
+ @Deprecated
public void setVideoHwAccelerationOptions(Object renderEGLContext) {
- nativeSetVideoHwAccelerationOptions(nativeFactory, renderEGLContext);
+ nativeSetVideoHwAccelerationOptions(nativeFactory, renderEGLContext, renderEGLContext);
+ }
+
+ /** Set the EGL context used by HW video encoding and decoding.
+ *
+ * @param localEGLContext An instance of javax.microedition.khronos.egl.EGLContext.
+ * Must be the same as used by VideoCapturerAndroid and any local
+ * video renderer.
+ * @param remoteEGLContext An instance of javax.microedition.khronos.egl.EGLContext.
+ * Must be the same as used by any remote video renderer.
+ */
+ public void setVideoHwAccelerationOptions(Object localEGLContext, Object remoteEGLContext) {
+ nativeSetVideoHwAccelerationOptions(nativeFactory, localEGLContext, remoteEGLContext);
}
public void dispose() {
@@ -201,10 +250,18 @@ public class PeerConnectionFactory {
private static native long nativeCreateAudioTrack(
long nativeFactory, String id, long nativeSource);
+ private static native boolean nativeStartAecDump(long nativeFactory, int file_descriptor);
+
+ private static native void nativeStopAecDump(long nativeFactory);
+
+ private static native boolean nativeStartRtcEventLog(long nativeFactory, int file_descriptor);
+
+ private static native void nativeStopRtcEventLog(long nativeFactory);
+
public native void nativeSetOptions(long nativeFactory, Options options);
private static native void nativeSetVideoHwAccelerationOptions(
- long nativeFactory, Object renderEGLContext);
+ long nativeFactory, Object localEGLContext, Object remoteEGLContext);
private static native void nativeThreadsCallbacks(long nativeFactory);
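Taken together, the comments on the new natives above imply a bracketed call order: initialize the tracer before any factory exists, shut it down after the factory is gone. A sketch under those assumptions; the trace path, EGL context variables, and the aecDumpFd descriptor are illustrative, not from this patch:

    PeerConnectionFactory.initializeInternalTracer();
    PeerConnectionFactory.startInternalTracingCapture("/sdcard/webrtc_trace.log");

    PeerConnectionFactory factory = new PeerConnectionFactory();
    // Separate EGL contexts for local encoding and remote decoding.
    factory.setVideoHwAccelerationOptions(localEglContext, remoteEglContext);
    factory.startAecDump(aecDumpFd);  // fd ownership passes to native code.
    // ... run the call ...
    factory.stopAecDump();
    factory.dispose();

    PeerConnectionFactory.stopInternalTracingCapture();
    PeerConnectionFactory.shutdownInternalTracer();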
diff --git a/talk/app/webrtc/java/src/org/webrtc/RtpSender.java b/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
index 37357c0657..9ac2e7034f 100644
--- a/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
+++ b/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
@@ -32,6 +32,7 @@ public class RtpSender {
final long nativeRtpSender;
private MediaStreamTrack cachedTrack;
+ private boolean ownsTrack = true;
public RtpSender(long nativeRtpSender) {
this.nativeRtpSender = nativeRtpSender;
@@ -40,14 +41,22 @@ public class RtpSender {
cachedTrack = (track == 0) ? null : new MediaStreamTrack(track);
}
- // NOTE: This should not be called with a track that's already used by
- // another RtpSender, because then it would be double-disposed.
- public void setTrack(MediaStreamTrack track) {
- if (cachedTrack != null) {
+ // If |takeOwnership| is true, the RtpSender takes ownership of the track
+ // from the caller, and will auto-dispose of it when no longer needed.
+ // |takeOwnership| should only be used if the caller owns the track; it is
+ // not appropriate when the track is owned by, for example, another RtpSender
+ // or a MediaStream.
+ public boolean setTrack(MediaStreamTrack track, boolean takeOwnership) {
+ if (!nativeSetTrack(nativeRtpSender,
+ (track == null) ? 0 : track.nativeTrack)) {
+ return false;
+ }
+ if (cachedTrack != null && ownsTrack) {
cachedTrack.dispose();
}
cachedTrack = track;
- nativeSetTrack(nativeRtpSender, (track == null) ? 0 : track.nativeTrack);
+ ownsTrack = takeOwnership;
+ return true;
}
public MediaStreamTrack track() {
@@ -59,14 +68,14 @@ public class RtpSender {
}
public void dispose() {
- if (cachedTrack != null) {
+ if (cachedTrack != null && ownsTrack) {
cachedTrack.dispose();
}
free(nativeRtpSender);
}
- private static native void nativeSetTrack(long nativeRtpSender,
- long nativeTrack);
+ private static native boolean nativeSetTrack(long nativeRtpSender,
+ long nativeTrack);
// This should increment the reference count of the track.
// Will be released in dispose() or setTrack().
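The |takeOwnership| flag exists because a track may be shared: a track owned by a MediaStream must not also be disposed by the sender. A sketch of both cases, assuming a factory-created source, stream, and two senders (all names illustrative):

    AudioTrack shared = factory.createAudioTrack("audio1", audioSource);
    mediaStream.addTrack(shared);       // the stream owns this track
    sender.setTrack(shared, false);     // so the sender must not dispose it

    AudioTrack exclusive = factory.createAudioTrack("audio2", audioSource);
    sender2.setTrack(exclusive, true);  // sender takes ownership and disposes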
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
index 3c255dd123..2e307fc54b 100644
--- a/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
@@ -46,7 +46,11 @@ public class VideoRenderer {
public final int[] yuvStrides;
public ByteBuffer[] yuvPlanes;
public final boolean yuvFrame;
- public Object textureObject;
+ // Matrix that transforms standard coordinates to their proper sampling locations in
+ // the texture. This transform compensates for any properties of the video source that
+ // cause it to appear different from a normalized texture. This matrix does not take
+ // |rotationDegree| into account.
+ public final float[] samplingMatrix;
public int textureId;
// Frame pointer in C++.
private long nativeFramePointer;
@@ -70,19 +74,27 @@ public class VideoRenderer {
if (rotationDegree % 90 != 0) {
throw new IllegalArgumentException("Rotation degree not multiple of 90: " + rotationDegree);
}
+ // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
+ // top-left corner of the image, but in glTexImage2D() the first element corresponds to the
+ // bottom-left corner. This discrepancy is corrected by setting a vertical flip as the
+ // sampling matrix.
+ samplingMatrix = new float[] {
+ 1, 0, 0, 0,
+ 0, -1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 1, 0, 1};
}
/**
* Construct a texture frame of the given dimensions with data in a SurfaceTexture.
*/
- I420Frame(
- int width, int height, int rotationDegree,
- Object textureObject, int textureId, long nativeFramePointer) {
+ I420Frame(int width, int height, int rotationDegree, int textureId, float[] samplingMatrix,
+ long nativeFramePointer) {
this.width = width;
this.height = height;
this.yuvStrides = null;
this.yuvPlanes = null;
- this.textureObject = textureObject;
+ this.samplingMatrix = samplingMatrix;
this.textureId = textureId;
this.yuvFrame = false;
this.rotationDegree = rotationDegree;
@@ -125,7 +137,6 @@ public class VideoRenderer {
*/
public static void renderFrameDone(I420Frame frame) {
frame.yuvPlanes = null;
- frame.textureObject = null;
frame.textureId = 0;
if (frame.nativeFramePointer != 0) {
releaseNativeFrame(frame.nativeFramePointer);
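Since samplingMatrix deliberately excludes |rotationDegree|, a renderer combines the two itself before sampling the texture. A sketch using android.opengl.Matrix, ignoring the translation needed to rotate about the frame center, which a real renderer would add:

    float[] rotation = new float[16];
    android.opengl.Matrix.setRotateM(rotation, 0, frame.rotationDegree, 0, 0, 1);
    float[] texMatrix = new float[16];
    // Apply the rotation first, then the source's sampling transform.
    android.opengl.Matrix.multiplyMM(texMatrix, 0, frame.samplingMatrix, 0, rotation, 0);
    // texMatrix is now the texture-coordinate transform for drawing.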
diff --git a/talk/app/webrtc/jsepsessiondescription.cc b/talk/app/webrtc/jsepsessiondescription.cc
index 24bd9d4195..226432db69 100644
--- a/talk/app/webrtc/jsepsessiondescription.cc
+++ b/talk/app/webrtc/jsepsessiondescription.cc
@@ -29,6 +29,7 @@
#include "talk/app/webrtc/webrtcsdp.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/stringencode.h"
using rtc::scoped_ptr;
@@ -44,7 +45,7 @@ static const char* kSupportedTypes[] = {
static bool IsTypeSupported(const std::string& type) {
bool type_supported = false;
- for (size_t i = 0; i < ARRAY_SIZE(kSupportedTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kSupportedTypes); ++i) {
if (kSupportedTypes[i] == type) {
type_supported = true;
break;
diff --git a/talk/app/webrtc/localaudiosource.cc b/talk/app/webrtc/localaudiosource.cc
index 63c6f13a3d..591877aa8b 100644
--- a/talk/app/webrtc/localaudiosource.cc
+++ b/talk/app/webrtc/localaudiosource.cc
@@ -49,7 +49,7 @@ void FromConstraints(const MediaConstraintsInterface::Constraints& constraints,
// a different algorithm will be required.
struct {
const char* name;
- cricket::Settable<bool>& value;
+ rtc::Optional<bool>& value;
} key_to_value[] = {
{MediaConstraintsInterface::kGoogEchoCancellation,
options->echo_cancellation},
@@ -78,7 +78,7 @@ void FromConstraints(const MediaConstraintsInterface::Constraints& constraints,
for (auto& entry : key_to_value) {
if (constraint.key.compare(entry.name) == 0)
- entry.value.Set(value);
+ entry.value = rtc::Optional<bool>(value);
}
}
}
diff --git a/talk/app/webrtc/localaudiosource.h b/talk/app/webrtc/localaudiosource.h
index 557745b8b8..5158eb1215 100644
--- a/talk/app/webrtc/localaudiosource.h
+++ b/talk/app/webrtc/localaudiosource.h
@@ -48,16 +48,17 @@ class LocalAudioSource : public Notifier<AudioSourceInterface> {
const PeerConnectionFactoryInterface::Options& options,
const MediaConstraintsInterface* constraints);
- virtual SourceState state() const { return source_state_; }
+ SourceState state() const override { return source_state_; }
+ bool remote() const override { return false; }
+
virtual const cricket::AudioOptions& options() const { return options_; }
- protected:
- LocalAudioSource()
- : source_state_(kInitializing) {
- }
+ void AddSink(AudioTrackSinkInterface* sink) override {}
+ void RemoveSink(AudioTrackSinkInterface* sink) override {}
- ~LocalAudioSource() {
- }
+ protected:
+ LocalAudioSource() : source_state_(kInitializing) {}
+ ~LocalAudioSource() override {}
private:
void Initialize(const PeerConnectionFactoryInterface::Options& options,
diff --git a/talk/app/webrtc/localaudiosource_unittest.cc b/talk/app/webrtc/localaudiosource_unittest.cc
index 8e05c18287..75d0c35462 100644
--- a/talk/app/webrtc/localaudiosource_unittest.cc
+++ b/talk/app/webrtc/localaudiosource_unittest.cc
@@ -58,23 +58,14 @@ TEST(LocalAudioSourceTest, SetValidOptions) {
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
- EXPECT_FALSE(value);
- EXPECT_TRUE(source->options().extended_filter_aec.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().delay_agnostic_aec.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().auto_gain_control.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().experimental_agc.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().noise_suppression.Get(&value));
- EXPECT_FALSE(value);
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().aec_dump.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().echo_cancellation);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().extended_filter_aec);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().delay_agnostic_aec);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().auto_gain_control);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().experimental_agc);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().noise_suppression);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().highpass_filter);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().aec_dump);
}
TEST(LocalAudioSourceTest, OptionNotSet) {
@@ -82,8 +73,7 @@ TEST(LocalAudioSourceTest, OptionNotSet) {
rtc::scoped_refptr<LocalAudioSource> source =
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_FALSE(source->options().highpass_filter.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source->options().highpass_filter);
}
TEST(LocalAudioSourceTest, MandatoryOverridesOptional) {
@@ -97,9 +87,7 @@ TEST(LocalAudioSourceTest, MandatoryOverridesOptional) {
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().echo_cancellation);
}
TEST(LocalAudioSourceTest, InvalidOptional) {
@@ -112,9 +100,7 @@ TEST(LocalAudioSourceTest, InvalidOptional) {
&constraints);
EXPECT_EQ(MediaSourceInterface::kLive, source->state());
- bool value;
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().highpass_filter);
}
TEST(LocalAudioSourceTest, InvalidMandatory) {
@@ -127,7 +113,5 @@ TEST(LocalAudioSourceTest, InvalidMandatory) {
&constraints);
EXPECT_EQ(MediaSourceInterface::kLive, source->state());
- bool value;
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().highpass_filter);
}
diff --git a/talk/app/webrtc/mediacontroller.cc b/talk/app/webrtc/mediacontroller.cc
index f7d85116b1..24f5877483 100644
--- a/talk/app/webrtc/mediacontroller.cc
+++ b/talk/app/webrtc/mediacontroller.cc
@@ -47,11 +47,10 @@ class MediaController : public webrtc::MediaControllerInterface,
RTC_DCHECK(nullptr != worker_thread);
worker_thread_->Invoke<void>(
rtc::Bind(&MediaController::Construct_w, this,
- channel_manager_->media_engine()->GetVoE()));
+ channel_manager_->media_engine()));
}
~MediaController() override {
- worker_thread_->Invoke<void>(
- rtc::Bind(&MediaController::Destruct_w, this));
+ worker_thread_->Invoke<void>(rtc::Bind(&MediaController::Destruct_w, this));
}
webrtc::Call* call_w() override {
@@ -64,10 +63,11 @@ class MediaController : public webrtc::MediaControllerInterface,
}
private:
- void Construct_w(webrtc::VoiceEngine* voice_engine) {
+ void Construct_w(cricket::MediaEngineInterface* media_engine) {
RTC_DCHECK(worker_thread_->IsCurrent());
+ RTC_DCHECK(media_engine);
webrtc::Call::Config config;
- config.voice_engine = voice_engine;
+ config.audio_state = media_engine->GetAudioState();
config.bitrate_config.min_bitrate_bps = kMinBandwidthBps;
config.bitrate_config.start_bitrate_bps = kStartBandwidthBps;
config.bitrate_config.max_bitrate_bps = kMaxBandwidthBps;
@@ -84,7 +84,7 @@ class MediaController : public webrtc::MediaControllerInterface,
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MediaController);
};
-} // namespace {
+}  // namespace
namespace webrtc {
@@ -93,4 +93,4 @@ MediaControllerInterface* MediaControllerInterface::Create(
cricket::ChannelManager* channel_manager) {
return new MediaController(worker_thread, channel_manager);
}
-} // namespace webrtc
+}  // namespace webrtc
diff --git a/talk/app/webrtc/mediastream_unittest.cc b/talk/app/webrtc/mediastream_unittest.cc
index 2cf930c4c0..f19b9456a6 100644
--- a/talk/app/webrtc/mediastream_unittest.cc
+++ b/talk/app/webrtc/mediastream_unittest.cc
@@ -48,9 +48,23 @@ namespace webrtc {
// Helper class to test Observer.
class MockObserver : public ObserverInterface {
public:
- MockObserver() {}
+ explicit MockObserver(NotifierInterface* notifier) : notifier_(notifier) {
+ notifier_->RegisterObserver(this);
+ }
+
+ ~MockObserver() { Unregister(); }
+
+ void Unregister() {
+ if (notifier_) {
+ notifier_->UnregisterObserver(this);
+ notifier_ = nullptr;
+ }
+ }
MOCK_METHOD0(OnChanged, void());
+
+ private:
+ NotifierInterface* notifier_;
};
class MediaStreamTest: public testing::Test {
@@ -75,8 +89,7 @@ class MediaStreamTest: public testing::Test {
}
void ChangeTrack(MediaStreamTrackInterface* track) {
- MockObserver observer;
- track->RegisterObserver(&observer);
+ MockObserver observer(track);
EXPECT_CALL(observer, OnChanged())
.Times(Exactly(1));
@@ -127,8 +140,7 @@ TEST_F(MediaStreamTest, GetTrackInfo) {
}
TEST_F(MediaStreamTest, RemoveTrack) {
- MockObserver observer;
- stream_->RegisterObserver(&observer);
+ MockObserver observer(stream_);
EXPECT_CALL(observer, OnChanged())
.Times(Exactly(2));
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
index 5911e85e8e..9b137d9f76 100644
--- a/talk/app/webrtc/mediastreaminterface.h
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -71,8 +71,6 @@ class NotifierInterface {
// Base class for sources. A MediaStreamTrack has an underlying source that
// provides media. A source can be shared with multiple tracks.
-// TODO(perkj): Implement sources for local and remote audio tracks and
-// remote video tracks.
class MediaSourceInterface : public rtc::RefCountInterface,
public NotifierInterface {
public:
@@ -85,6 +83,8 @@ class MediaSourceInterface : public rtc::RefCountInterface,
virtual SourceState state() const = 0;
+ virtual bool remote() const = 0;
+
protected:
virtual ~MediaSourceInterface() {}
};
@@ -100,6 +100,9 @@ class MediaStreamTrackInterface : public rtc::RefCountInterface,
kFailed = 3, // Track negotiation failed.
};
+ static const char kAudioKind[];
+ static const char kVideoKind[];
+
virtual std::string kind() const = 0;
virtual std::string id() const = 0;
virtual bool enabled() const = 0;
@@ -115,13 +118,6 @@ class MediaStreamTrackInterface : public rtc::RefCountInterface,
// Interface for rendering VideoFrames from a VideoTrack
class VideoRendererInterface {
public:
- // TODO(guoweis): Remove this function. Obsolete. The implementation of
- // VideoRendererInterface should be able to handle different frame size as
- // well as pending rotation. If it can't apply the frame rotation by itself,
- // it should call |frame|.GetCopyWithRotationApplied() to get a frame that has
- // the rotation applied.
- virtual void SetSize(int width, int height) {}
-
// |frame| may have pending rotation. For clients which can't apply rotation,
// |frame|->GetCopyWithRotationApplied() will return a frame that has the
// rotation applied.
@@ -149,6 +145,19 @@ class VideoTrackInterface : public MediaStreamTrackInterface {
virtual ~VideoTrackInterface() {}
};
+// Interface for receiving audio data from an AudioTrack.
+class AudioTrackSinkInterface {
+ public:
+ virtual void OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) = 0;
+
+ protected:
+ virtual ~AudioTrackSinkInterface() {}
+};
+
// AudioSourceInterface is a reference counted source used for AudioTracks.
// The same source can be used in multiple AudioTracks.
class AudioSourceInterface : public MediaSourceInterface {
@@ -164,23 +173,17 @@ class AudioSourceInterface : public MediaSourceInterface {
// TODO(xians): Make all the interface methods pure virtual after Chrome has
// its implementations.
// Sets the volume of the source. |volume| is in the range of [0, 10].
+ // TODO(tommi): This method should be on the track and ideally volume should
+ // be applied in the track in a way that does not affect clones of the track.
virtual void SetVolume(double volume) {}
// Registers/unregisters observer to the audio source.
virtual void RegisterAudioObserver(AudioObserver* observer) {}
virtual void UnregisterAudioObserver(AudioObserver* observer) {}
-};
-// Interface for receiving audio data from a AudioTrack.
-class AudioTrackSinkInterface {
- public:
- virtual void OnData(const void* audio_data,
- int bits_per_sample,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames) = 0;
- protected:
- virtual ~AudioTrackSinkInterface() {}
+ // TODO(tommi): Make pure virtual.
+ virtual void AddSink(AudioTrackSinkInterface* sink) {}
+ virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
};
// Interface of the audio processor used by the audio track to collect
diff --git a/talk/app/webrtc/mediastreamobserver.cc b/talk/app/webrtc/mediastreamobserver.cc
new file mode 100644
index 0000000000..2650b9a6f7
--- /dev/null
+++ b/talk/app/webrtc/mediastreamobserver.cc
@@ -0,0 +1,101 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastreamobserver.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+MediaStreamObserver::MediaStreamObserver(MediaStreamInterface* stream)
+ : stream_(stream),
+ cached_audio_tracks_(stream->GetAudioTracks()),
+ cached_video_tracks_(stream->GetVideoTracks()) {
+ stream_->RegisterObserver(this);
+}
+
+MediaStreamObserver::~MediaStreamObserver() {
+ stream_->UnregisterObserver(this);
+}
+
+void MediaStreamObserver::OnChanged() {
+ AudioTrackVector new_audio_tracks = stream_->GetAudioTracks();
+ VideoTrackVector new_video_tracks = stream_->GetVideoTracks();
+
+ // Find removed audio tracks.
+ for (const auto& cached_track : cached_audio_tracks_) {
+ auto it = std::find_if(
+ new_audio_tracks.begin(), new_audio_tracks.end(),
+ [cached_track](const AudioTrackVector::value_type& new_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == new_audio_tracks.end()) {
+ SignalAudioTrackRemoved(cached_track.get(), stream_);
+ }
+ }
+
+ // Find added audio tracks.
+ for (const auto& new_track : new_audio_tracks) {
+ auto it = std::find_if(
+ cached_audio_tracks_.begin(), cached_audio_tracks_.end(),
+ [new_track](const AudioTrackVector::value_type& cached_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == cached_audio_tracks_.end()) {
+ SignalAudioTrackAdded(new_track.get(), stream_);
+ }
+ }
+
+ // Find removed video tracks.
+ for (const auto& cached_track : cached_video_tracks_) {
+ auto it = std::find_if(
+ new_video_tracks.begin(), new_video_tracks.end(),
+ [cached_track](const VideoTrackVector::value_type& new_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == new_video_tracks.end()) {
+ SignalVideoTrackRemoved(cached_track.get(), stream_);
+ }
+ }
+
+ // Find added video tracks.
+ for (const auto& new_track : new_video_tracks) {
+ auto it = std::find_if(
+ cached_video_tracks_.begin(), cached_video_tracks_.end(),
+ [new_track](const VideoTrackVector::value_type& cached_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == cached_video_tracks_.end()) {
+ SignalVideoTrackAdded(new_track.get(), stream_);
+ }
+ }
+
+ cached_audio_tracks_ = new_audio_tracks;
+ cached_video_tracks_ = new_video_tracks;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamobserver.h b/talk/app/webrtc/mediastreamobserver.h
new file mode 100644
index 0000000000..1dd6c4c118
--- /dev/null
+++ b/talk/app/webrtc/mediastreamobserver.h
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/sigslot.h"
+
+namespace webrtc {
+
+// Helper class which will listen for changes to a stream and emit the
+// corresponding signals.
+class MediaStreamObserver : public ObserverInterface {
+ public:
+ explicit MediaStreamObserver(MediaStreamInterface* stream);
+ ~MediaStreamObserver();
+
+ const MediaStreamInterface* stream() const { return stream_; }
+
+ void OnChanged() override;
+
+ sigslot::signal2<AudioTrackInterface*, MediaStreamInterface*>
+ SignalAudioTrackAdded;
+ sigslot::signal2<AudioTrackInterface*, MediaStreamInterface*>
+ SignalAudioTrackRemoved;
+ sigslot::signal2<VideoTrackInterface*, MediaStreamInterface*>
+ SignalVideoTrackAdded;
+ sigslot::signal2<VideoTrackInterface*, MediaStreamInterface*>
+ SignalVideoTrackRemoved;
+
+ private:
+ rtc::scoped_refptr<MediaStreamInterface> stream_;
+ AudioTrackVector cached_audio_tracks_;
+ VideoTrackVector cached_video_tracks_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
diff --git a/talk/app/webrtc/mediastreamprovider.h b/talk/app/webrtc/mediastreamprovider.h
index 1c62daf9f1..585d51bcc8 100644
--- a/talk/app/webrtc/mediastreamprovider.h
+++ b/talk/app/webrtc/mediastreamprovider.h
@@ -29,6 +29,7 @@
#define TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
#include "webrtc/base/basictypes.h"
+#include "webrtc/base/scoped_ptr.h"
namespace cricket {
@@ -42,6 +43,8 @@ struct VideoOptions;
namespace webrtc {
+class AudioSinkInterface;
+
// TODO(deadbeef): Change the key from an ssrc to a "sender_id" or
// "receiver_id" string, which will be the MSID in the short term and MID in
// the long term.
@@ -50,8 +53,8 @@ namespace webrtc {
// RtpSenders/Receivers to get to the BaseChannels. These interfaces should be
// refactored away eventually, as the classes converge.
-// This interface is called by AudioTrackHandler classes in mediastreamhandler.h
-// to change the settings of an audio track connected to certain PeerConnection.
+// This interface is called by AudioRtpSender/Receivers to change the settings
+// of an audio track connected to a certain PeerConnection.
class AudioProviderInterface {
public:
// Enable/disable the audio playout of a remote audio track with |ssrc|.
@@ -67,13 +70,19 @@ class AudioProviderInterface {
// |volume| is in the range of [0, 10].
virtual void SetAudioPlayoutVolume(uint32_t ssrc, double volume) = 0;
+ // Allows for setting a direct audio sink for an incoming audio source.
+ // Only one audio sink is supported per ssrc and ownership of the sink is
+ // passed to the provider.
+ virtual void SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) = 0;
+
protected:
virtual ~AudioProviderInterface() {}
};
-// This interface is called by VideoTrackHandler classes in mediastreamhandler.h
-// to change the settings of a video track connected to a certain
-// PeerConnection.
+// This interface is called by VideoRtpSender/Receivers to change the settings
+// of a video track connected to a certain PeerConnection.
class VideoProviderInterface {
public:
virtual bool SetCaptureDevice(uint32_t ssrc,
diff --git a/talk/app/webrtc/objc/README b/talk/app/webrtc/objc/README
index 692fbbc564..c323e73ed1 100644
--- a/talk/app/webrtc/objc/README
+++ b/talk/app/webrtc/objc/README
@@ -12,69 +12,59 @@ Prerequisites:
up for building for iOS-device, iOS-simulator, and Mac (resp) are:
function wrbase() {
cd /path/to/webrtc/trunk
- export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 libjingle_objc=1"
+ export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0"
export GYP_GENERATORS="ninja"
}
function wrios() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=ios target_arch=armv7"
+ export GYP_DEFINES="$GYP_DEFINES OS=ios"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_ios"
export GYP_CROSSCOMPILE=1
}
+function wrios32() {
+ wrios
+ export GYP_DEFINES="$GYP_DEFINES target_arch=arm"
+}
+
+function wrios64() {
+ wrios
+ export GYP_DEFINES="$GYP_DEFINES target_arch=arm64"
+}
+
function wrsim() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=ios target_arch=ia32"
+ export GYP_DEFINES="$GYP_DEFINES OS=ios target_subarch=arm32 target_arch=ia32"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_sim"
export GYP_CROSSCOMPILE=1
}
function wrmac() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=mac target_arch=x64"
+ export GYP_DEFINES="$GYP_DEFINES OS=mac target_subarch=arm64 target_arch=x64"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_mac"
}
-- Finally, run "gclient runhooks" to generate ninja files.
+- Finally, run "webrtc/build/gyp_webrtc" to generate ninja files.
Example of building & using the unittest & app:
- To build & run the unittest (must target mac):
- wrmac && gclient runhooks && \
+ wrmac && ./webrtc/build/gyp_webrtc && \
ninja -C out_mac/Debug libjingle_peerconnection_objc_test && \
./out_mac/Debug/libjingle_peerconnection_objc_test.app/Contents/MacOS/libjingle_peerconnection_objc_test
- To build & launch the sample app on OSX:
- wrmac && gclient runhooks && ninja -C out_mac/Debug AppRTCDemo && \
+ wrmac && ./webrtc/build/gyp_webrtc && ninja -C out_mac/Debug AppRTCDemo && \
./out_mac/Debug/AppRTCDemo.app/Contents/MacOS/AppRTCDemo
- To build & launch the sample app on the iOS simulator:
- wrsim && gclient runhooks && ninja -C out_sim/Debug iossim AppRTCDemo && \
+ wrsim && ./webrtc/build/gyp_webrtc && ninja -C out_sim/Debug iossim AppRTCDemo && \
./out_sim/Debug/iossim out_sim/Debug/AppRTCDemo.app
-- To build & sign the sample app for an iOS device:
- wrios && gclient runhooks && ninja -C out_ios/Debug-iphoneos AppRTCDemo
-
-- To install the sample app on an iOS device:
- ideviceinstaller -i out_ios/Debug-iphoneos/AppRTCDemo.app
- (if installing ideviceinstaller from brew, use --HEAD to get support
- for .app directories)
-- Alternatively, use iPhone Configuration Utility:
- - Open "iPhone Configuration Utility" (http://support.apple.com/kb/DL1465)
- - Click the "Add" icon (command-o)
- - Open the app under out_ios/Debug-iphoneos/AppRTCDemo (should be added to the Applications tab)
- - Click the device's name in the left-hand panel and select the Applications tab
- - Click Install on the AppRTCDemo line.
- (If you have any problems deploying for the first time, check
- the Info.plist file to ensure that the Bundle Identifier matches
- your phone provisioning profile, or use a development wildcard
- provisioning profile.)
-- Alternately, use ios-deploy:
- ios-deploy -d -b out_ios/Debug-iphoneos/AppRTCDemo.app
+- To build & sign the sample app for an iOS device (32 bit):
+ wrios32 && ./webrtc/build/gyp_webrtc && ninja -C out_ios/Debug-iphoneos AppRTCDemo
-- Once installed:
- - Tap AppRTCDemo on the iOS device's home screen (might have to scroll to find it).
- - In desktop chrome, navigate to http://apprtc.appspot.com and note
- the r=<NNN> room number in the resulting URL; enter that number
- into the text field on the phone.
+- To build & sign the sample app for an iOS device (64 bit):
+ wrios64 && ./webrtc/build/gyp_webrtc && ninja -C out_ios/Debug-iphoneos AppRTCDemo
diff --git a/talk/app/webrtc/objc/RTCFileLogger.mm b/talk/app/webrtc/objc/RTCFileLogger.mm
index c4e469655d..44ada3e22e 100644
--- a/talk/app/webrtc/objc/RTCFileLogger.mm
+++ b/talk/app/webrtc/objc/RTCFileLogger.mm
@@ -35,15 +35,17 @@
NSString *const kDefaultLogDirName = @"webrtc_logs";
NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
+const char *kRTCFileLoggerRotatingLogPrefix = "rotating_log";
@implementation RTCFileLogger {
BOOL _hasStarted;
NSString *_dirPath;
NSUInteger _maxFileSize;
- rtc::scoped_ptr<rtc::CallSessionFileRotatingLogSink> _logSink;
+ rtc::scoped_ptr<rtc::FileRotatingLogSink> _logSink;
}
@synthesize severity = _severity;
+@synthesize rotationType = _rotationType;
- (instancetype)init {
NSArray *paths = NSSearchPathForDirectoriesInDomains(
@@ -57,6 +59,14 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
- (instancetype)initWithDirPath:(NSString *)dirPath
maxFileSize:(NSUInteger)maxFileSize {
+ return [self initWithDirPath:dirPath
+ maxFileSize:maxFileSize
+ rotationType:kRTCFileLoggerTypeCall];
+}
+
+- (instancetype)initWithDirPath:(NSString *)dirPath
+ maxFileSize:(NSUInteger)maxFileSize
+ rotationType:(RTCFileLoggerRotationType)rotationType {
NSParameterAssert(dirPath.length);
NSParameterAssert(maxFileSize);
if (self = [super init]) {
@@ -91,8 +101,20 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
if (_hasStarted) {
return;
}
- _logSink.reset(new rtc::CallSessionFileRotatingLogSink(_dirPath.UTF8String,
- _maxFileSize));
+ switch (_rotationType) {
+ case kRTCFileLoggerTypeApp:
+ _logSink.reset(
+ new rtc::FileRotatingLogSink(_dirPath.UTF8String,
+ kRTCFileLoggerRotatingLogPrefix,
+ _maxFileSize,
+ _maxFileSize / 10));
+ break;
+ case kRTCFileLoggerTypeCall:
+ _logSink.reset(
+ new rtc::CallSessionFileRotatingLogSink(_dirPath.UTF8String,
+ _maxFileSize));
+ break;
+ }
if (!_logSink->Init()) {
LOG(LS_ERROR) << "Failed to open log files at path: "
<< _dirPath.UTF8String;
@@ -120,8 +142,17 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
return nil;
}
NSMutableData* logData = [NSMutableData data];
- rtc::scoped_ptr<rtc::CallSessionFileRotatingStream> stream(
- new rtc::CallSessionFileRotatingStream(_dirPath.UTF8String));
+ rtc::scoped_ptr<rtc::FileRotatingStream> stream;
+ switch (_rotationType) {
+ case kRTCFileLoggerTypeApp:
+ stream.reset(
+ new rtc::FileRotatingStream(_dirPath.UTF8String,
+ kRTCFileLoggerRotatingLogPrefix));
+ break;
+ case kRTCFileLoggerTypeCall:
+ stream.reset(new rtc::CallSessionFileRotatingStream(_dirPath.UTF8String));
+ break;
+ }
if (!stream->Open()) {
return logData;
}
diff --git a/talk/app/webrtc/objc/RTCPeerConnection.mm b/talk/app/webrtc/objc/RTCPeerConnection.mm
index 44d39cb090..f814f06ad8 100644
--- a/talk/app/webrtc/objc/RTCPeerConnection.mm
+++ b/talk/app/webrtc/objc/RTCPeerConnection.mm
@@ -271,11 +271,13 @@ class RTCStatsObserver : public StatsObserver {
- (instancetype)initWithFactory:(webrtc::PeerConnectionFactoryInterface*)factory
iceServers:(const webrtc::PeerConnectionInterface::IceServers&)iceServers
constraints:(const webrtc::MediaConstraintsInterface*)constraints {
- NSParameterAssert(factory != NULL);
+ NSParameterAssert(factory != nullptr);
if (self = [super init]) {
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
+ config.servers = iceServers;
_observer.reset(new webrtc::RTCPeerConnectionObserver(self));
_peerConnection = factory->CreatePeerConnection(
- iceServers, constraints, NULL, NULL, _observer.get());
+ config, constraints, nullptr, nullptr, _observer.get());
_localStreams = [[NSMutableArray alloc] init];
}
return self;
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm b/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
index 58d12ace4c..ff45bd2bac 100644
--- a/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
+++ b/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
@@ -39,6 +39,7 @@
@synthesize tcpCandidatePolicy = _tcpCandidatePolicy;
@synthesize audioJitterBufferMaxPackets = _audioJitterBufferMaxPackets;
@synthesize iceConnectionReceivingTimeout = _iceConnectionReceivingTimeout;
+@synthesize iceBackupCandidatePairPingInterval = _iceBackupCandidatePairPingInterval;
- (instancetype)init {
if (self = [super init]) {
@@ -51,6 +52,7 @@
[RTCEnumConverter tcpCandidatePolicyForNativeEnum:config.tcp_candidate_policy];
_audioJitterBufferMaxPackets = config.audio_jitter_buffer_max_packets;
_iceConnectionReceivingTimeout = config.ice_connection_receiving_timeout;
+ _iceBackupCandidatePairPingInterval = config.ice_backup_candidate_pair_ping_interval;
}
return self;
}
@@ -60,7 +62,8 @@
rtcpMuxPolicy:(RTCRtcpMuxPolicy)rtcpMuxPolicy
tcpCandidatePolicy:(RTCTcpCandidatePolicy)tcpCandidatePolicy
audioJitterBufferMaxPackets:(int)audioJitterBufferMaxPackets
- iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout {
+ iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout
+ iceBackupCandidatePairPingInterval:(int)iceBackupCandidatePairPingInterval {
if (self = [super init]) {
_iceTransportsType = iceTransportsType;
_bundlePolicy = bundlePolicy;
@@ -68,6 +71,7 @@
_tcpCandidatePolicy = tcpCandidatePolicy;
_audioJitterBufferMaxPackets = audioJitterBufferMaxPackets;
_iceConnectionReceivingTimeout = iceConnectionReceivingTimeout;
+ _iceBackupCandidatePairPingInterval = iceBackupCandidatePairPingInterval;
}
return self;
}
@@ -85,8 +89,8 @@
nativeConfig.tcp_candidate_policy =
[RTCEnumConverter nativeEnumForTcpCandidatePolicy:_tcpCandidatePolicy];
nativeConfig.audio_jitter_buffer_max_packets = _audioJitterBufferMaxPackets;
- nativeConfig.ice_connection_receiving_timeout =
- _iceConnectionReceivingTimeout;
+ nativeConfig.ice_connection_receiving_timeout = _iceConnectionReceivingTimeout;
+ nativeConfig.ice_backup_candidate_pair_ping_interval = _iceBackupCandidatePairPingInterval;
return nativeConfig;
}
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.h b/talk/app/webrtc/objc/avfoundationvideocapturer.h
index ded80f6647..32de09aadd 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.h
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.h
@@ -71,7 +71,6 @@ class AVFoundationVideoCapturer : public cricket::VideoCapturer {
RTCAVFoundationVideoCapturerInternal* _capturer;
rtc::Thread* _startThread; // Set in Start(), unset in Stop().
- uint64_t _startTime;
}; // AVFoundationVideoCapturer
} // namespace webrtc
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.mm b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
index e1b0f88fb6..0f9dc6825e 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.mm
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
@@ -33,6 +33,8 @@
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
+#import "webrtc/base/objc/RTCDispatcher.h"
+
// TODO(tkchin): support other formats.
static NSString* const kDefaultPreset = AVCaptureSessionPreset640x480;
static cricket::VideoFormat const kDefaultFormat =
@@ -41,11 +43,6 @@ static cricket::VideoFormat const kDefaultFormat =
cricket::VideoFormat::FpsToInterval(30),
cricket::FOURCC_NV12);
-// This queue is used to start and stop the capturer without blocking the
-// calling thread. -[AVCaptureSession startRunning] blocks until the camera is
-// running.
-static dispatch_queue_t kBackgroundQueue = nil;
-
// This class is used to capture frames using AVFoundation APIs on iOS. It is
// meant to be owned by an instance of AVFoundationVideoCapturer. The reason is
// that other webrtc objects own cricket::VideoCapturer, which is not
@@ -80,15 +77,6 @@ static dispatch_queue_t kBackgroundQueue = nil;
@synthesize useBackCamera = _useBackCamera;
@synthesize isRunning = _isRunning;
-+ (void)initialize {
- static dispatch_once_t onceToken;
- dispatch_once(&onceToken, ^{
- kBackgroundQueue = dispatch_queue_create(
- "com.google.webrtc.RTCAVFoundationCapturerBackground",
- DISPATCH_QUEUE_SERIAL);
- });
-}
-
- (instancetype)initWithCapturer:(webrtc::AVFoundationVideoCapturer*)capturer {
NSParameterAssert(capturer);
if (self = [super init]) {
@@ -132,9 +120,10 @@ static dispatch_queue_t kBackgroundQueue = nil;
_orientationHasChanged = NO;
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
AVCaptureSession* session = _captureSession;
- dispatch_async(kBackgroundQueue, ^{
+ [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
[session startRunning];
- });
+ }];
_isRunning = YES;
}
@@ -144,9 +133,10 @@ static dispatch_queue_t kBackgroundQueue = nil;
}
[_videoOutput setSampleBufferDelegate:nil queue:nullptr];
AVCaptureSession* session = _captureSession;
- dispatch_async(kBackgroundQueue, ^{
+ [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
[session stopRunning];
- });
+ }];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
_isRunning = NO;
}
diff --git a/talk/app/webrtc/objc/public/RTCFileLogger.h b/talk/app/webrtc/objc/public/RTCFileLogger.h
index 3900cb6fbe..70b3825307 100644
--- a/talk/app/webrtc/objc/public/RTCFileLogger.h
+++ b/talk/app/webrtc/objc/public/RTCFileLogger.h
@@ -39,21 +39,38 @@ typedef NS_ENUM(NSUInteger, RTCFileLoggerSeverity) {
kRTCFileLoggerSeverityError
};
+typedef NS_ENUM(NSUInteger, RTCFileLoggerRotationType) {
+ kRTCFileLoggerTypeCall,
+ kRTCFileLoggerTypeApp,
+};
+
// This class intercepts WebRTC logs and saves them to a file. The file size
// will not exceed the given maximum bytesize. When the maximum bytesize is
-// reached logs from the beginning and the end are preserved while the middle
-// section is overwritten instead.
+// reached, logs are rotated according to the rotationType specified.
+// For kRTCFileLoggerTypeCall, logs from the beginning and the end
+// are preserved while the middle section is overwritten instead.
+// For kRTCFileLoggerTypeApp, the oldest log is overwritten.
// This class is not threadsafe.
@interface RTCFileLogger : NSObject
// The severity level to capture. The default is kRTCFileLoggerSeverityInfo.
@property(nonatomic, assign) RTCFileLoggerSeverity severity;
-// Default constructor provides default settings for dir path and file size.
+// The rotation type for this file logger. The default is
+// kRTCFileLoggerTypeCall.
+@property(nonatomic, readonly) RTCFileLoggerRotationType rotationType;
+
+// Default constructor provides default settings for dir path, file size and
+// rotation type.
- (instancetype)init;
+// Create file logger with default rotation type.
+- (instancetype)initWithDirPath:(NSString *)dirPath
+ maxFileSize:(NSUInteger)maxFileSize;
+
- (instancetype)initWithDirPath:(NSString *)dirPath
maxFileSize:(NSUInteger)maxFileSize
+ rotationType:(RTCFileLoggerRotationType)rotationType
NS_DESIGNATED_INITIALIZER;
// Starts writing WebRTC logs to disk if not already started. Overwrites any
diff --git a/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h b/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
index b0cc72b5b7..44b971c85e 100644
--- a/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
+++ b/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
@@ -64,12 +64,14 @@ typedef NS_ENUM(NSInteger, RTCTcpCandidatePolicy) {
@property(nonatomic, assign) RTCTcpCandidatePolicy tcpCandidatePolicy;
@property(nonatomic, assign) int audioJitterBufferMaxPackets;
@property(nonatomic, assign) int iceConnectionReceivingTimeout;
+@property(nonatomic, assign) int iceBackupCandidatePairPingInterval;
- (instancetype)initWithIceTransportsType:(RTCIceTransportsType)iceTransportsType
bundlePolicy:(RTCBundlePolicy)bundlePolicy
rtcpMuxPolicy:(RTCRtcpMuxPolicy)rtcpMuxPolicy
tcpCandidatePolicy:(RTCTcpCandidatePolicy)tcpCandidatePolicy
audioJitterBufferMaxPackets:(int)audioJitterBufferMaxPackets
- iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout;
+ iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout
+ iceBackupCandidatePairPingInterval:(int)iceBackupCandidatePairPingInterval;
@end
diff --git a/talk/app/webrtc/peerconnection.cc b/talk/app/webrtc/peerconnection.cc
index 0d519b280b..ccca18af67 100644
--- a/talk/app/webrtc/peerconnection.cc
+++ b/talk/app/webrtc/peerconnection.cc
@@ -27,8 +27,10 @@
#include "talk/app/webrtc/peerconnection.h"
-#include <vector>
+#include <algorithm>
#include <cctype> // for isdigit
+#include <utility>
+#include <vector>
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/dtmfsender.h"
@@ -36,6 +38,7 @@
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediaconstraintsinterface.h"
#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/mediastreamobserver.h"
#include "talk/app/webrtc/mediastreamproxy.h"
#include "talk/app/webrtc/mediastreamtrackproxy.h"
#include "talk/app/webrtc/remoteaudiosource.h"
@@ -46,11 +49,13 @@
#include "talk/app/webrtc/videosource.h"
#include "talk/app/webrtc/videotrack.h"
#include "talk/media/sctp/sctpdataengine.h"
-#include "webrtc/p2p/client/basicportallocator.h"
#include "talk/session/media/channelmanager.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/base/stringutils.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/p2p/client/basicportallocator.h"
#include "webrtc/system_wrappers/include/field_trial.h"
namespace {
@@ -59,13 +64,8 @@ using webrtc::DataChannel;
using webrtc::MediaConstraintsInterface;
using webrtc::MediaStreamInterface;
using webrtc::PeerConnectionInterface;
+using webrtc::RtpSenderInterface;
using webrtc::StreamCollection;
-using webrtc::StunConfigurations;
-using webrtc::TurnConfigurations;
-typedef webrtc::PortAllocatorFactoryInterface::StunConfiguration
- StunConfiguration;
-typedef webrtc::PortAllocatorFactoryInterface::TurnConfiguration
- TurnConfiguration;
static const char kDefaultStreamLabel[] = "default";
static const char kDefaultAudioTrackLabel[] = "defaulta0";
@@ -80,8 +80,6 @@ static const size_t kTurnTransportTokensNum = 2;
static const int kDefaultStunPort = 3478;
static const int kDefaultStunTlsPort = 5349;
static const char kTransport[] = "transport";
-static const char kUdpTransportType[] = "udp";
-static const char kTcpTransportType[] = "tcp";
// NOTE: Must be in the same order as the ServiceType enum.
static const char* kValidIceServiceTypes[] = {"stun", "stuns", "turn", "turns"};
@@ -95,7 +93,7 @@ enum ServiceType {
TURNS, // Indicates a TURN server used with a TLS session.
INVALID, // Unknown.
};
-static_assert(INVALID == ARRAY_SIZE(kValidIceServiceTypes),
+static_assert(INVALID == arraysize(kValidIceServiceTypes),
"kValidIceServiceTypes must have as many strings as ServiceType "
"has values.");
@@ -104,6 +102,7 @@ enum {
MSG_SET_SESSIONDESCRIPTION_FAILED,
MSG_CREATE_SESSIONDESCRIPTION_FAILED,
MSG_GETSTATS,
+ MSG_FREE_DATACHANNELS,
};
struct SetSessionDescriptionMsg : public rtc::MessageData {
@@ -156,7 +155,7 @@ bool GetServiceTypeAndHostnameFromUri(const std::string& in_str,
return false;
}
*service_type = INVALID;
- for (size_t i = 0; i < ARRAY_SIZE(kValidIceServiceTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kValidIceServiceTypes); ++i) {
if (in_str.compare(0, colonpos, kValidIceServiceTypes[i]) == 0) {
*service_type = static_cast<ServiceType>(i);
break;
@@ -216,12 +215,12 @@ bool ParseHostnameAndPortFromString(const std::string& in_str,
return !host->empty();
}
-// Adds a StunConfiguration or TurnConfiguration to the appropriate list,
+// Adds a STUN or TURN server to the appropriate list,
// by parsing |url| and using the username/password in |server|.
bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
const std::string& url,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config) {
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers) {
// draft-nandakumar-rtcweb-stun-uri-01
// stunURI = scheme ":" stun-host [ ":" stun-port ]
// scheme = "stun" / "stuns"
@@ -236,10 +235,10 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
// transport-ext = 1*unreserved
// turn-host = IP-literal / IPv4address / reg-name
// turn-port = *DIGIT
- RTC_DCHECK(stun_config != nullptr);
- RTC_DCHECK(turn_config != nullptr);
+ RTC_DCHECK(stun_servers != nullptr);
+ RTC_DCHECK(turn_servers != nullptr);
std::vector<std::string> tokens;
- std::string turn_transport_type = kUdpTransportType;
+ cricket::ProtocolType turn_transport_type = cricket::PROTO_UDP;
RTC_DCHECK(!url.empty());
rtc::tokenize(url, '?', &tokens);
std::string uri_without_transport = tokens[0];
@@ -250,11 +249,12 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
if (tokens[0] == kTransport) {
// As per the above grammar, the transport param will consist of lower case
// letters.
- if (tokens[1] != kUdpTransportType && tokens[1] != kTcpTransportType) {
+ if (!cricket::StringToProto(tokens[1].c_str(), &turn_transport_type) ||
+ (turn_transport_type != cricket::PROTO_UDP &&
+ turn_transport_type != cricket::PROTO_TCP)) {
LOG(LS_WARNING) << "Transport param should always be udp or tcp.";
return false;
}
- turn_transport_type = tokens[1];
}
}
@@ -293,7 +293,7 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
int port = kDefaultStunPort;
if (service_type == TURNS) {
port = kDefaultStunTlsPort;
- turn_transport_type = kTcpTransportType;
+ turn_transport_type = cricket::PROTO_TCP;
}
std::string address;
@@ -310,16 +310,14 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
switch (service_type) {
case STUN:
case STUNS:
- stun_config->push_back(StunConfiguration(address, port));
+ stun_servers->insert(rtc::SocketAddress(address, port));
break;
case TURN:
case TURNS: {
bool secure = (service_type == TURNS);
- turn_config->push_back(TurnConfiguration(address, port,
- username,
- server.password,
- turn_transport_type,
- secure));
+ turn_servers->push_back(
+ cricket::RelayServerConfig(address, port, username, server.password,
+ turn_transport_type, secure));
break;
}
case INVALID:
@@ -365,25 +363,15 @@ bool IsValidOfferToReceiveMedia(int value) {
}
// Add the stream and RTP data channel info to |session_options|.
-void SetStreams(cricket::MediaSessionOptions* session_options,
- rtc::scoped_refptr<StreamCollection> streams,
- const std::map<std::string, rtc::scoped_refptr<DataChannel>>&
- rtp_data_channels) {
+void AddSendStreams(
+ cricket::MediaSessionOptions* session_options,
+ const std::vector<rtc::scoped_refptr<RtpSenderInterface>>& senders,
+ const std::map<std::string, rtc::scoped_refptr<DataChannel>>&
+ rtp_data_channels) {
session_options->streams.clear();
- if (streams != nullptr) {
- for (size_t i = 0; i < streams->count(); ++i) {
- MediaStreamInterface* stream = streams->at(i);
- // For each audio track in the stream, add it to the MediaSessionOptions.
- for (const auto& track : stream->GetAudioTracks()) {
- session_options->AddSendStream(cricket::MEDIA_TYPE_AUDIO, track->id(),
- stream->label());
- }
- // For each video track in the stream, add it to the MediaSessionOptions.
- for (const auto& track : stream->GetVideoTracks()) {
- session_options->AddSendStream(cricket::MEDIA_TYPE_VIDEO, track->id(),
- stream->label());
- }
- }
+ for (const auto& sender : senders) {
+ session_options->AddSendStream(sender->media_type(), sender->id(),
+ sender->stream_id());
}
// Check for data channels.
@@ -421,10 +409,12 @@ class RemoteMediaStreamFactory {
MediaStream::Create(stream_label));
}
- AudioTrackInterface* AddAudioTrack(webrtc::MediaStreamInterface* stream,
+ AudioTrackInterface* AddAudioTrack(uint32_t ssrc,
+ AudioProviderInterface* provider,
+ webrtc::MediaStreamInterface* stream,
const std::string& track_id) {
return AddTrack<AudioTrackInterface, AudioTrack, AudioTrackProxy>(
- stream, track_id, RemoteAudioSource::Create().get());
+ stream, track_id, RemoteAudioSource::Create(ssrc, provider));
}
VideoTrackInterface* AddVideoTrack(webrtc::MediaStreamInterface* stream,
@@ -432,7 +422,7 @@ class RemoteMediaStreamFactory {
return AddTrack<VideoTrackInterface, VideoTrack, VideoTrackProxy>(
stream, track_id,
VideoSource::Create(channel_manager_, new RemoteVideoCapturer(),
- nullptr)
+ nullptr, true)
.get());
}
@@ -440,7 +430,7 @@ class RemoteMediaStreamFactory {
template <typename TI, typename T, typename TP, typename S>
TI* AddTrack(MediaStreamInterface* stream,
const std::string& track_id,
- S* source) {
+ const S& source) {
rtc::scoped_refptr<TI> track(
TP::Create(signaling_thread_, T::Create(track_id, source)));
track->set_state(webrtc::MediaStreamTrackInterface::kLive);
@@ -471,7 +461,11 @@ bool ConvertRtcOptionsForOffer(
}
session_options->vad_enabled = rtc_options.voice_activity_detection;
- session_options->transport_options.ice_restart = rtc_options.ice_restart;
+ session_options->audio_transport_options.ice_restart =
+ rtc_options.ice_restart;
+ session_options->video_transport_options.ice_restart =
+ rtc_options.ice_restart;
+ session_options->data_transport_options.ice_restart = rtc_options.ice_restart;
session_options->bundle_enabled = rtc_options.use_rtp_mux;
return true;
@@ -517,10 +511,14 @@ bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
if (FindConstraint(constraints, MediaConstraintsInterface::kIceRestart,
&value, &mandatory_constraints_satisfied)) {
- session_options->transport_options.ice_restart = value;
+ session_options->audio_transport_options.ice_restart = value;
+ session_options->video_transport_options.ice_restart = value;
+ session_options->data_transport_options.ice_restart = value;
} else {
// kIceRestart defaults to false according to spec.
- session_options->transport_options.ice_restart = false;
+ session_options->audio_transport_options.ice_restart = false;
+ session_options->video_transport_options.ice_restart = false;
+ session_options->data_transport_options.ice_restart = false;
}
if (!constraints) {
@@ -530,8 +528,8 @@ bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
}
bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config) {
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers) {
for (const webrtc::PeerConnectionInterface::IceServer& server : servers) {
if (!server.urls.empty()) {
for (const std::string& url : server.urls) {
@@ -539,13 +537,13 @@ bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
LOG(LS_ERROR) << "Empty uri.";
return false;
}
- if (!ParseIceServerUrl(server, url, stun_config, turn_config)) {
+ if (!ParseIceServerUrl(server, url, stun_servers, turn_servers)) {
return false;
}
}
} else if (!server.uri.empty()) {
// Fall back to the old .uri if the new .urls isn't present.

- if (!ParseIceServerUrl(server, server.uri, stun_config, turn_config)) {
+ if (!ParseIceServerUrl(server, server.uri, stun_servers, turn_servers)) {
return false;
}
} else {
@@ -553,6 +551,13 @@ bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
return false;
}
}
+ // Candidates must have unique priorities, so that connectivity checks
+ // are performed in a well-defined order.
+ int priority = static_cast<int>(turn_servers->size() - 1);
+ for (cricket::RelayServerConfig& turn_server : *turn_servers) {
+ // First in the list gets highest priority.
+ turn_server.priority = priority--;
+ }
return true;
}
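Worked example of the priority assignment, assuming three TURN servers were parsed:

    // turn_servers->size() == 3, so priority starts at 2:
    //   (*turn_servers)[0].priority == 2  // first listed, checked first
    //   (*turn_servers)[1].priority == 1
    //   (*turn_servers)[2].priority == 0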
@@ -568,6 +573,7 @@ PeerConnection::PeerConnection(PeerConnectionFactory* factory)
remote_streams_(StreamCollection::Create()) {}
PeerConnection::~PeerConnection() {
+ TRACE_EVENT0("webrtc", "PeerConnection::~PeerConnection");
RTC_DCHECK(signaling_thread()->IsCurrent());
// Need to detach RTP senders/receivers from WebRtcSession,
// since it's about to be destroyed.
@@ -582,22 +588,24 @@ PeerConnection::~PeerConnection() {
bool PeerConnection::Initialize(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer) {
+ TRACE_EVENT0("webrtc", "PeerConnection::Initialize");
RTC_DCHECK(observer != nullptr);
if (!observer) {
return false;
}
observer_ = observer;
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_config;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_config;
- if (!ParseIceServers(configuration.servers, &stun_config, &turn_config)) {
+ port_allocator_ = std::move(allocator);
+
+ cricket::ServerAddresses stun_servers;
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ if (!ParseIceServers(configuration.servers, &stun_servers, &turn_servers)) {
return false;
}
- port_allocator_.reset(
- allocator_factory->CreatePortAllocator(stun_config, turn_config));
+ port_allocator_->SetIceServers(stun_servers, turn_servers);
// To handle both internally and externally created port allocators, we will
// enable BUNDLE here.
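A hypothetical embedder sketch of the new ownership model (BasicPortAllocator and network_manager below are assumptions, not part of this patch): the application constructs the allocator itself and transfers ownership, instead of handing in a factory:

    rtc::scoped_ptr<cricket::PortAllocator> allocator(
        new cricket::BasicPortAllocator(&network_manager));
    auto pc = factory->CreatePeerConnection(
        config, &constraints, std::move(allocator),
        std::move(dtls_identity_store), &observer);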
@@ -637,7 +645,7 @@ bool PeerConnection::Initialize(
// Initialize the WebRtcSession. It creates transport channels etc.
if (!session_->Initialize(factory_->options(), constraints,
- dtls_identity_store.Pass(), configuration)) {
+ std::move(dtls_identity_store), configuration)) {
return false;
}
@@ -668,9 +676,8 @@ PeerConnection::remote_streams() {
return remote_streams_;
}
-// TODO(deadbeef): Create RtpSenders immediately here, even if local
-// description hasn't yet been set.
bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
+ TRACE_EVENT0("webrtc", "PeerConnection::AddStream");
if (IsClosed()) {
return false;
}
@@ -679,25 +686,22 @@ bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
}
local_streams_->AddStream(local_stream);
+ MediaStreamObserver* observer = new MediaStreamObserver(local_stream);
+ observer->SignalAudioTrackAdded.connect(this,
+ &PeerConnection::OnAudioTrackAdded);
+ observer->SignalAudioTrackRemoved.connect(
+ this, &PeerConnection::OnAudioTrackRemoved);
+ observer->SignalVideoTrackAdded.connect(this,
+ &PeerConnection::OnVideoTrackAdded);
+ observer->SignalVideoTrackRemoved.connect(
+ this, &PeerConnection::OnVideoTrackRemoved);
+ stream_observers_.push_back(rtc::scoped_ptr<MediaStreamObserver>(observer));
- // Find tracks that have already been configured in SDP. This can occur if a
- // local session description that contains the MSID of these tracks is set
- // before AddLocalStream is called. It can also occur if the local session
- // description is not changed and RemoveLocalStream is called and later
- // AddLocalStream is called again with the same stream.
for (const auto& track : local_stream->GetAudioTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_audio_tracks_, local_stream->label(), track->id());
- if (track_info) {
- CreateAudioSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnAudioTrackAdded(track.get(), local_stream);
}
for (const auto& track : local_stream->GetVideoTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_video_tracks_, local_stream->label(), track->id());
- if (track_info) {
- CreateVideoSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnVideoTrackAdded(track.get(), local_stream);
}
stats_->AddStream(local_stream);
@@ -705,25 +709,24 @@ bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
return true;
}
-// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around
-// indefinitely.
void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) {
+ TRACE_EVENT0("webrtc", "PeerConnection::RemoveStream");
for (const auto& track : local_stream->GetAudioTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_audio_tracks_, local_stream->label(), track->id());
- if (track_info) {
- DestroyAudioSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnAudioTrackRemoved(track.get(), local_stream);
}
for (const auto& track : local_stream->GetVideoTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_video_tracks_, local_stream->label(), track->id());
- if (track_info) {
- DestroyVideoSender(local_stream, track.get());
- }
+ OnVideoTrackRemoved(track.get(), local_stream);
}
local_streams_->RemoveStream(local_stream);
+ stream_observers_.erase(
+ std::remove_if(
+ stream_observers_.begin(), stream_observers_.end(),
+ [local_stream](const rtc::scoped_ptr<MediaStreamObserver>& observer) {
+ return observer->stream()->label().compare(local_stream->label()) ==
+ 0;
+ }),
+ stream_observers_.end());
if (IsClosed()) {
return;
@@ -733,6 +736,7 @@ void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) {
rtc::scoped_refptr<DtmfSenderInterface> PeerConnection::CreateDtmfSender(
AudioTrackInterface* track) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateDtmfSender");
if (!track) {
LOG(LS_ERROR) << "CreateDtmfSender - track is NULL.";
return NULL;
@@ -751,6 +755,26 @@ rtc::scoped_refptr<DtmfSenderInterface> PeerConnection::CreateDtmfSender(
return DtmfSenderProxy::Create(signaling_thread(), sender.get());
}
+rtc::scoped_refptr<RtpSenderInterface> PeerConnection::CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateSender");
+ RtpSenderInterface* new_sender;
+ if (kind == MediaStreamTrackInterface::kAudioKind) {
+ new_sender = new AudioRtpSender(session_.get(), stats_.get());
+ } else if (kind == MediaStreamTrackInterface::kVideoKind) {
+ new_sender = new VideoRtpSender(session_.get());
+ } else {
+ LOG(LS_ERROR) << "CreateSender called with invalid kind: " << kind;
+ return rtc::scoped_refptr<RtpSenderInterface>();
+ }
+ if (!stream_id.empty()) {
+ new_sender->set_stream_id(stream_id);
+ }
+ senders_.push_back(new_sender);
+ return RtpSenderProxy::Create(signaling_thread(), new_sender);
+}
+
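Usage sketch for the new API (audio_track and its creation are assumed, and SetTrack is taken to be the existing RtpSenderInterface setter): a sender can now exist before any track or stream is attached.

    rtc::scoped_refptr<webrtc::RtpSenderInterface> sender =
        pc->CreateSender(webrtc::MediaStreamTrackInterface::kAudioKind,
                         "stream_1");
    if (sender) {
      sender->SetTrack(audio_track.get());
    }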
std::vector<rtc::scoped_refptr<RtpSenderInterface>> PeerConnection::GetSenders()
const {
std::vector<rtc::scoped_refptr<RtpSenderInterface>> senders;
@@ -773,6 +797,7 @@ PeerConnection::GetReceivers() const {
bool PeerConnection::GetStats(StatsObserver* observer,
MediaStreamTrackInterface* track,
StatsOutputLevel level) {
+ TRACE_EVENT0("webrtc", "PeerConnection::GetStats");
RTC_DCHECK(signaling_thread()->IsCurrent());
if (!VERIFY(observer != NULL)) {
LOG(LS_ERROR) << "GetStats - observer is NULL.";
@@ -807,6 +832,7 @@ rtc::scoped_refptr<DataChannelInterface>
PeerConnection::CreateDataChannel(
const std::string& label,
const DataChannelInit* config) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateDataChannel");
bool first_datachannel = !HasDataChannels();
rtc::scoped_ptr<InternalDataChannelInit> internal_config;
@@ -830,6 +856,7 @@ PeerConnection::CreateDataChannel(
void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
const MediaConstraintsInterface* constraints) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateOffer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateOffer - observer is NULL.";
return;
@@ -881,6 +908,7 @@ void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
const RTCOfferAnswerOptions& options) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateOffer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateOffer - observer is NULL.";
return;
@@ -900,6 +928,7 @@ void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
void PeerConnection::CreateAnswer(
CreateSessionDescriptionObserver* observer,
const MediaConstraintsInterface* constraints) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateAnswer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateAnswer - observer is NULL.";
return;
@@ -919,6 +948,7 @@ void PeerConnection::CreateAnswer(
void PeerConnection::SetLocalDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetLocalDescription");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "SetLocalDescription - observer is NULL.";
return;
@@ -940,7 +970,7 @@ void PeerConnection::SetLocalDescription(
// SCTP sids.
rtc::SSLRole role;
if (session_->data_channel_type() == cricket::DCT_SCTP &&
- session_->GetSslRole(&role)) {
+ session_->GetSslRole(session_->data_channel(), &role)) {
AllocateSctpSids(role);
}
@@ -949,19 +979,27 @@ void PeerConnection::SetLocalDescription(
const cricket::ContentInfo* audio_content =
GetFirstAudioContent(desc->description());
if (audio_content) {
- const cricket::AudioContentDescription* audio_desc =
- static_cast<const cricket::AudioContentDescription*>(
- audio_content->description);
- UpdateLocalTracks(audio_desc->streams(), audio_desc->type());
+ if (audio_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_AUDIO);
+ } else {
+ const cricket::AudioContentDescription* audio_desc =
+ static_cast<const cricket::AudioContentDescription*>(
+ audio_content->description);
+ UpdateLocalTracks(audio_desc->streams(), audio_desc->type());
+ }
}
const cricket::ContentInfo* video_content =
GetFirstVideoContent(desc->description());
if (video_content) {
- const cricket::VideoContentDescription* video_desc =
- static_cast<const cricket::VideoContentDescription*>(
- video_content->description);
- UpdateLocalTracks(video_desc->streams(), video_desc->type());
+ if (video_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_VIDEO);
+ } else {
+ const cricket::VideoContentDescription* video_desc =
+ static_cast<const cricket::VideoContentDescription*>(
+ video_content->description);
+ UpdateLocalTracks(video_desc->streams(), video_desc->type());
+ }
}
const cricket::ContentInfo* data_content =
@@ -988,6 +1026,7 @@ void PeerConnection::SetLocalDescription(
void PeerConnection::SetRemoteDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetRemoteDescription");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "SetRemoteDescription - observer is NULL.";
return;
@@ -1009,11 +1048,27 @@ void PeerConnection::SetRemoteDescription(
// SCTP sids.
rtc::SSLRole role;
if (session_->data_channel_type() == cricket::DCT_SCTP &&
- session_->GetSslRole(&role)) {
+ session_->GetSslRole(session_->data_channel(), &role)) {
AllocateSctpSids(role);
}
const cricket::SessionDescription* remote_desc = desc->description();
+ const cricket::ContentInfo* audio_content = GetFirstAudioContent(remote_desc);
+ const cricket::ContentInfo* video_content = GetFirstVideoContent(remote_desc);
+ const cricket::AudioContentDescription* audio_desc =
+ GetFirstAudioContentDescription(remote_desc);
+ const cricket::VideoContentDescription* video_desc =
+ GetFirstVideoContentDescription(remote_desc);
+ const cricket::DataContentDescription* data_desc =
+ GetFirstDataContentDescription(remote_desc);
+
+ // Check if the descriptions include streams, in case the peer supports MSID
+ // but doesn't indicate so with "a=msid-semantic".
+ if (remote_desc->msid_supported() ||
+ (audio_desc && !audio_desc->streams().empty()) ||
+ (video_desc && !video_desc->streams().empty())) {
+ remote_peer_supports_msid_ = true;
+ }
// We wait to signal new streams until we finish processing the description,
// since only at that point will new streams have all their tracks.
@@ -1021,39 +1076,39 @@ void PeerConnection::SetRemoteDescription(
// Find all audio rtp streams and create corresponding remote AudioTracks
// and MediaStreams.
- const cricket::ContentInfo* audio_content = GetFirstAudioContent(remote_desc);
if (audio_content) {
- const cricket::AudioContentDescription* desc =
- static_cast<const cricket::AudioContentDescription*>(
- audio_content->description);
- UpdateRemoteStreamsList(GetActiveStreams(desc), desc->type(), new_streams);
- remote_info_.default_audio_track_needed =
- !remote_desc->msid_supported() && desc->streams().empty() &&
- MediaContentDirectionHasSend(desc->direction());
+ if (audio_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_AUDIO);
+ } else {
+ bool default_audio_track_needed =
+ !remote_peer_supports_msid_ &&
+ MediaContentDirectionHasSend(audio_desc->direction());
+ UpdateRemoteStreamsList(GetActiveStreams(audio_desc),
+ default_audio_track_needed, audio_desc->type(),
+ new_streams);
+ }
}
// Find all video rtp streams and create corresponding remote VideoTracks
// and MediaStreams.
- const cricket::ContentInfo* video_content = GetFirstVideoContent(remote_desc);
if (video_content) {
- const cricket::VideoContentDescription* desc =
- static_cast<const cricket::VideoContentDescription*>(
- video_content->description);
- UpdateRemoteStreamsList(GetActiveStreams(desc), desc->type(), new_streams);
- remote_info_.default_video_track_needed =
- !remote_desc->msid_supported() && desc->streams().empty() &&
- MediaContentDirectionHasSend(desc->direction());
+ if (video_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_VIDEO);
+ } else {
+ bool default_video_track_needed =
+ !remote_peer_supports_msid_ &&
+ MediaContentDirectionHasSend(video_desc->direction());
+ UpdateRemoteStreamsList(GetActiveStreams(video_desc),
+ default_video_track_needed, video_desc->type(),
+ new_streams);
+ }
}
// Update the DataChannels with the information from the remote peer.
- const cricket::ContentInfo* data_content = GetFirstDataContent(remote_desc);
- if (data_content) {
- const cricket::DataContentDescription* desc =
- static_cast<const cricket::DataContentDescription*>(
- data_content->description);
- if (rtc::starts_with(desc->protocol().data(),
+ if (data_desc) {
+ if (rtc::starts_with(data_desc->protocol().data(),
cricket::kMediaProtocolRtpPrefix)) {
- UpdateRemoteRtpDataChannels(GetActiveStreams(desc));
+ UpdateRemoteRtpDataChannels(GetActiveStreams(data_desc));
}
}
@@ -1064,58 +1119,21 @@ void PeerConnection::SetRemoteDescription(
observer_->OnAddStream(new_stream);
}
- // Find removed MediaStreams.
- if (remote_info_.IsDefaultMediaStreamNeeded() &&
- remote_streams_->find(kDefaultStreamLabel) != nullptr) {
- // The default media stream already exists. No need to do anything.
- } else {
- UpdateEndedRemoteMediaStreams();
- remote_info_.msid_supported |= remote_streams_->count() > 0;
- }
- MaybeCreateDefaultStream();
+ UpdateEndedRemoteMediaStreams();
SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer);
signaling_thread()->Post(this, MSG_SET_SESSIONDESCRIPTION_SUCCESS, msg);
}
bool PeerConnection::SetConfiguration(const RTCConfiguration& config) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration");
if (port_allocator_) {
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stuns;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turns;
- if (!ParseIceServers(config.servers, &stuns, &turns)) {
+ cricket::ServerAddresses stun_servers;
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ if (!ParseIceServers(config.servers, &stun_servers, &turn_servers)) {
return false;
}
-
- std::vector<rtc::SocketAddress> stun_hosts;
- typedef std::vector<StunConfiguration>::const_iterator StunIt;
- for (StunIt stun_it = stuns.begin(); stun_it != stuns.end(); ++stun_it) {
- stun_hosts.push_back(stun_it->server);
- }
-
- rtc::SocketAddress stun_addr;
- if (!stun_hosts.empty()) {
- stun_addr = stun_hosts.front();
- LOG(LS_INFO) << "SetConfiguration: StunServer Address: "
- << stun_addr.ToString();
- }
-
- for (size_t i = 0; i < turns.size(); ++i) {
- cricket::RelayCredentials credentials(turns[i].username,
- turns[i].password);
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
- cricket::ProtocolType protocol;
- if (cricket::StringToProto(turns[i].transport_type.c_str(), &protocol)) {
- relay_server.ports.push_back(cricket::ProtocolAddress(
- turns[i].server, protocol, turns[i].secure));
- relay_server.credentials = credentials;
- LOG(LS_INFO) << "SetConfiguration: TurnServer Address: "
- << turns[i].server.ToString();
- } else {
- LOG(LS_WARNING) << "Ignoring TURN server " << turns[i].server << ". "
- << "Reason= Incorrect " << turns[i].transport_type
- << " transport parameter.";
- }
- }
+ port_allocator_->SetIceServers(stun_servers, turn_servers);
}
session_->SetIceConfig(session_->ParseIceConfig(config));
return session_->SetIceTransports(config.type);
@@ -1123,10 +1141,12 @@ bool PeerConnection::SetConfiguration(const RTCConfiguration& config) {
bool PeerConnection::AddIceCandidate(
const IceCandidateInterface* ice_candidate) {
+ TRACE_EVENT0("webrtc", "PeerConnection::AddIceCandidate");
return session_->ProcessIceMessage(ice_candidate);
}
void PeerConnection::RegisterUMAObserver(UMAObserver* observer) {
+ TRACE_EVENT0("webrtc", "PeerConnection::RegisterUmaObserver");
uma_observer_ = observer;
if (session_) {
@@ -1156,6 +1176,7 @@ const SessionDescriptionInterface* PeerConnection::remote_description() const {
}
void PeerConnection::Close() {
+ TRACE_EVENT0("webrtc", "PeerConnection::Close");
// Update stats here so that we have the most recent stats for tracks and
// streams before the channels are closed.
stats_->UpdateStats(kStatsOutputLevelStandard);
@@ -1223,6 +1244,10 @@ void PeerConnection::OnMessage(rtc::Message* msg) {
delete param;
break;
}
+ case MSG_FREE_DATACHANNELS: {
+ sctp_data_channels_to_free_.clear();
+ break;
+ }
default:
RTC_DCHECK(false && "Not implemented");
break;
@@ -1267,49 +1292,6 @@ void PeerConnection::DestroyVideoReceiver(MediaStreamInterface* stream,
}
}
-void PeerConnection::CreateAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc) {
- senders_.push_back(new AudioRtpSender(audio_track, ssrc, session_.get()));
- stats_->AddLocalAudioTrack(audio_track, ssrc);
-}
-
-void PeerConnection::CreateVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track,
- uint32_t ssrc) {
- senders_.push_back(new VideoRtpSender(video_track, ssrc, session_.get()));
-}
-
-// TODO(deadbeef): Keep RtpSenders around even if track goes away in local
-// description.
-void PeerConnection::DestroyAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc) {
- auto it = FindSenderForTrack(audio_track);
- if (it == senders_.end()) {
- LOG(LS_WARNING) << "RtpSender for track with id " << audio_track->id()
- << " doesn't exist.";
- return;
- } else {
- (*it)->Stop();
- senders_.erase(it);
- }
- stats_->RemoveLocalAudioTrack(audio_track, ssrc);
-}
-
-void PeerConnection::DestroyVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track) {
- auto it = FindSenderForTrack(video_track);
- if (it == senders_.end()) {
- LOG(LS_WARNING) << "RtpSender for track with id " << video_track->id()
- << " doesn't exist.";
- return;
- } else {
- (*it)->Stop();
- senders_.erase(it);
- }
-}
-
void PeerConnection::OnIceConnectionChange(
PeerConnectionInterface::IceConnectionState new_state) {
RTC_DCHECK(signaling_thread()->IsCurrent());
@@ -1362,6 +1344,80 @@ void PeerConnection::ChangeSignalingState(
observer_->OnStateChange(PeerConnectionObserver::kSignalingState);
}
+void PeerConnection::OnAudioTrackAdded(AudioTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender != senders_.end()) {
+ // We already have a sender for this track, so just change the stream_id
+ // so that it's correct in the next call to CreateOffer.
+ (*sender)->set_stream_id(stream->label());
+ return;
+ }
+
+ // Normal case; we've never seen this track before.
+ AudioRtpSender* new_sender =
+ new AudioRtpSender(track, stream->label(), session_.get(), stats_.get());
+ senders_.push_back(new_sender);
+ // If the sender has already been configured in SDP, we call SetSsrc,
+ // which will connect the sender to the underlying transport. This can
+ // occur if a local session description that contains the ID of the sender
+ // is set before AddStream is called. It can also occur if the local
+ // session description is not changed and RemoveStream is called, and
+ // later AddStream is called again with the same stream.
+ const TrackInfo* track_info =
+ FindTrackInfo(local_audio_tracks_, stream->label(), track->id());
+ if (track_info) {
+ new_sender->SetSsrc(track_info->ssrc);
+ }
+}
+
+// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around
+// indefinitely, when we have unified plan SDP.
+void PeerConnection::OnAudioTrackRemoved(AudioTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender == senders_.end()) {
+ LOG(LS_WARNING) << "RtpSender for track with id " << track->id()
+ << " doesn't exist.";
+ return;
+ }
+ (*sender)->Stop();
+ senders_.erase(sender);
+}
+
+void PeerConnection::OnVideoTrackAdded(VideoTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender != senders_.end()) {
+ // We already have a sender for this track, so just change the stream_id
+ // so that it's correct in the next call to CreateOffer.
+ (*sender)->set_stream_id(stream->label());
+ return;
+ }
+
+ // Normal case; we've never seen this track before.
+ VideoRtpSender* new_sender =
+ new VideoRtpSender(track, stream->label(), session_.get());
+ senders_.push_back(new_sender);
+ const TrackInfo* track_info =
+ FindTrackInfo(local_video_tracks_, stream->label(), track->id());
+ if (track_info) {
+ new_sender->SetSsrc(track_info->ssrc);
+ }
+}
+
+void PeerConnection::OnVideoTrackRemoved(VideoTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender == senders_.end()) {
+ LOG(LS_WARNING) << "RtpSender for track with id " << track->id()
+ << " doesn't exist.";
+ return;
+ }
+ (*sender)->Stop();
+ senders_.erase(sender);
+}
+
void PeerConnection::PostSetSessionDescriptionFailure(
SetSessionDescriptionObserver* observer,
const std::string& error) {
@@ -1385,7 +1441,7 @@ bool PeerConnection::GetOptionsForOffer(
return false;
}
- SetStreams(session_options, local_streams_, rtp_data_channels_);
+ AddSendStreams(session_options, senders_, rtp_data_channels_);
// Offer to receive audio/video if the constraint is not set and there are
// send streams, or we're currently receiving.
if (rtc_options.offer_to_receive_audio == RTCOfferAnswerOptions::kUndefined) {
@@ -1418,7 +1474,7 @@ bool PeerConnection::GetOptionsForAnswer(
return false;
}
- SetStreams(session_options, local_streams_, rtp_data_channels_);
+ AddSendStreams(session_options, senders_, rtp_data_channels_);
session_options->bundle_enabled =
session_options->bundle_enabled &&
(session_options->has_audio() || session_options->has_video() ||
@@ -1433,25 +1489,34 @@ bool PeerConnection::GetOptionsForAnswer(
return true;
}
+void PeerConnection::RemoveTracks(cricket::MediaType media_type) {
+ UpdateLocalTracks(std::vector<cricket::StreamParams>(), media_type);
+ UpdateRemoteStreamsList(std::vector<cricket::StreamParams>(), false,
+ media_type, nullptr);
+}
+
void PeerConnection::UpdateRemoteStreamsList(
const cricket::StreamParamsVec& streams,
+ bool default_track_needed,
cricket::MediaType media_type,
StreamCollection* new_streams) {
TrackInfos* current_tracks = GetRemoteTracks(media_type);
// Find removed tracks. I.e., tracks where the track id or ssrc don't match
- // the
- // new StreamParam.
+ // the new StreamParam.
auto track_it = current_tracks->begin();
while (track_it != current_tracks->end()) {
const TrackInfo& info = *track_it;
const cricket::StreamParams* params =
cricket::GetStreamBySsrc(streams, info.ssrc);
- if (!params || params->id != info.track_id) {
+ bool track_exists = params && params->id == info.track_id;
+ // If this is a default track and we still need it, don't remove it.
+ if ((info.stream_label == kDefaultStreamLabel && default_track_needed) ||
+ track_exists) {
+ ++track_it;
+ } else {
OnRemoteTrackRemoved(info.stream_label, info.track_id, media_type);
track_it = current_tracks->erase(track_it);
- } else {
- ++track_it;
}
}
@@ -1479,6 +1544,29 @@ void PeerConnection::UpdateRemoteStreamsList(
OnRemoteTrackSeen(stream_label, track_id, ssrc, media_type);
}
}
+
+ // Add default track if necessary.
+ if (default_track_needed) {
+ rtc::scoped_refptr<MediaStreamInterface> default_stream =
+ remote_streams_->find(kDefaultStreamLabel);
+ if (!default_stream) {
+ // Create the new default MediaStream.
+ default_stream =
+ remote_stream_factory_->CreateMediaStream(kDefaultStreamLabel);
+ remote_streams_->AddStream(default_stream);
+ new_streams->AddStream(default_stream);
+ }
+ std::string default_track_id = (media_type == cricket::MEDIA_TYPE_AUDIO)
+ ? kDefaultAudioTrackLabel
+ : kDefaultVideoTrackLabel;
+ const TrackInfo* default_track_info =
+ FindTrackInfo(*current_tracks, kDefaultStreamLabel, default_track_id);
+ if (!default_track_info) {
+ current_tracks->push_back(
+ TrackInfo(kDefaultStreamLabel, default_track_id, 0));
+ OnRemoteTrackSeen(kDefaultStreamLabel, default_track_id, 0, media_type);
+ }
+ }
}
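To make the default-track branch concrete, a hedged walk-through (the labels are the constants used in this file): a remote description that sends audio but carries no MSID information yields

    // default_track_needed == true for audio:
    //   - a stream labeled kDefaultStreamLabel is created once and surfaced
    //     via |new_streams|,
    //   - TrackInfo(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0) is
    //     recorded, and OnRemoteTrackSeen(...) fires exactly once;
    // a later description that does carry MSID flips default_track_needed to
    // false, and the removal loop above tears the default track down again.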
void PeerConnection::OnRemoteTrackSeen(const std::string& stream_label,
@@ -1488,8 +1576,8 @@ void PeerConnection::OnRemoteTrackSeen(const std::string& stream_label,
MediaStreamInterface* stream = remote_streams_->find(stream_label);
if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track =
- remote_stream_factory_->AddAudioTrack(stream, track_id);
+ AudioTrackInterface* audio_track = remote_stream_factory_->AddAudioTrack(
+ ssrc, session_.get(), stream, track_id);
CreateAudioReceiver(stream, audio_track, ssrc);
} else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
VideoTrackInterface* video_track =
@@ -1541,41 +1629,6 @@ void PeerConnection::UpdateEndedRemoteMediaStreams() {
}
}
-void PeerConnection::MaybeCreateDefaultStream() {
- if (!remote_info_.IsDefaultMediaStreamNeeded()) {
- return;
- }
-
- bool default_created = false;
-
- rtc::scoped_refptr<MediaStreamInterface> default_remote_stream =
- remote_streams_->find(kDefaultStreamLabel);
- if (default_remote_stream == nullptr) {
- default_created = true;
- default_remote_stream =
- remote_stream_factory_->CreateMediaStream(kDefaultStreamLabel);
- remote_streams_->AddStream(default_remote_stream);
- }
- if (remote_info_.default_audio_track_needed &&
- default_remote_stream->GetAudioTracks().size() == 0) {
- remote_audio_tracks_.push_back(
- TrackInfo(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0));
- OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0,
- cricket::MEDIA_TYPE_AUDIO);
- }
- if (remote_info_.default_video_track_needed &&
- default_remote_stream->GetVideoTracks().size() == 0) {
- remote_video_tracks_.push_back(
- TrackInfo(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0));
- OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0,
- cricket::MEDIA_TYPE_VIDEO);
- }
- if (default_created) {
- stats_->AddStream(default_remote_stream);
- observer_->OnAddStream(default_remote_stream);
- }
-}
-
void PeerConnection::EndRemoteTracks(cricket::MediaType media_type) {
TrackInfos* current_tracks = GetRemoteTracks(media_type);
for (TrackInfos::iterator track_it = current_tracks->begin();
@@ -1643,62 +1696,44 @@ void PeerConnection::OnLocalTrackSeen(const std::string& stream_label,
const std::string& track_id,
uint32_t ssrc,
cricket::MediaType media_type) {
- MediaStreamInterface* stream = local_streams_->find(stream_label);
- if (!stream) {
- LOG(LS_WARNING) << "An unknown local MediaStream with label "
- << stream_label << " has been configured.";
+ RtpSenderInterface* sender = FindSenderById(track_id);
+ if (!sender) {
+ LOG(LS_WARNING) << "An unknown RtpSender with id " << track_id
+ << " has been configured in the local description.";
return;
}
- if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
- if (!audio_track) {
- LOG(LS_WARNING) << "An unknown local AudioTrack with id , " << track_id
- << " has been configured.";
- return;
- }
- CreateAudioSender(stream, audio_track, ssrc);
- } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
- VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
- if (!video_track) {
- LOG(LS_WARNING) << "An unknown local VideoTrack with id , " << track_id
- << " has been configured.";
- return;
- }
- CreateVideoSender(stream, video_track, ssrc);
- } else {
- RTC_DCHECK(false && "Invalid media type");
+ if (sender->media_type() != media_type) {
+ LOG(LS_WARNING) << "An RtpSender has been configured in the local"
+ << " description with an unexpected media type.";
+ return;
}
+
+ sender->set_stream_id(stream_label);
+ sender->SetSsrc(ssrc);
}
void PeerConnection::OnLocalTrackRemoved(const std::string& stream_label,
const std::string& track_id,
uint32_t ssrc,
cricket::MediaType media_type) {
- MediaStreamInterface* stream = local_streams_->find(stream_label);
- if (!stream) {
- // This is the normal case. I.e., RemoveLocalStream has been called and the
+ RtpSenderInterface* sender = FindSenderById(track_id);
+ if (!sender) {
+ // This is the normal case. I.e., RemoveStream has been called and the
// SessionDescriptions have been renegotiated.
return;
}
- // A track has been removed from the SessionDescription but the MediaStream
- // is still associated with PeerConnection. This only occurs if the SDP
- // doesn't match with the calls to AddLocalStream and RemoveLocalStream.
- if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
- if (!audio_track) {
- return;
- }
- DestroyAudioSender(stream, audio_track, ssrc);
- } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
- VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
- if (!video_track) {
- return;
- }
- DestroyVideoSender(stream, video_track);
- } else {
- RTC_DCHECK(false && "Invalid media type.");
+
+ // A sender has been removed from the SessionDescription but it's still
+ // associated with the PeerConnection. This only occurs if the SDP doesn't
+ // match the calls to CreateSender, AddStream, and RemoveStream.
+ if (sender->media_type() != media_type) {
+ LOG(LS_WARNING) << "An RtpSender has been configured in the local"
+ << " description with an unexpected media type.";
+ return;
}
+
+ sender->SetSsrc(0);
}
void PeerConnection::UpdateLocalRtpDataChannels(
@@ -1806,7 +1841,7 @@ rtc::scoped_refptr<DataChannel> PeerConnection::InternalCreateDataChannel(
if (session_->data_channel_type() == cricket::DCT_SCTP) {
if (new_config.id < 0) {
rtc::SSLRole role;
- if (session_->GetSslRole(&role) &&
+ if ((session_->GetSslRole(session_->data_channel(), &role)) &&
!sid_allocator_.AllocateSid(role, &new_config.id)) {
LOG(LS_ERROR) << "No id can be allocated for the SCTP data channel.";
return nullptr;
@@ -1860,13 +1895,18 @@ void PeerConnection::AllocateSctpSids(rtc::SSLRole role) {
}
void PeerConnection::OnSctpDataChannelClosed(DataChannel* channel) {
+ RTC_DCHECK(signaling_thread()->IsCurrent());
for (auto it = sctp_data_channels_.begin(); it != sctp_data_channels_.end();
++it) {
if (it->get() == channel) {
if (channel->id() >= 0) {
sid_allocator_.ReleaseSid(channel->id());
}
+ // Since this method is triggered by a signal from the DataChannel,
+ // we can't free it directly here; we need to free it asynchronously.
+ sctp_data_channels_to_free_.push_back(*it);
sctp_data_channels_.erase(it);
+ signaling_thread()->Post(this, MSG_FREE_DATACHANNELS, nullptr);
return;
}
}
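The asynchronous teardown above is the usual "never delete from inside a signal handler" pattern, in miniature (channel_ref is a stand-in name):

    // On the signal stack: keep the channel alive by re-homing the reference.
    sctp_data_channels_to_free_.push_back(channel_ref);
    signaling_thread()->Post(this, MSG_FREE_DATACHANNELS, nullptr);
    // Later, on the same thread but outside the signal stack, OnMessage
    // handles MSG_FREE_DATACHANNELS and sctp_data_channels_to_free_.clear()
    // drops the final reference safely.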
@@ -1916,6 +1956,15 @@ void PeerConnection::OnDataChannelOpenMessage(
DataChannelProxy::Create(signaling_thread(), channel));
}
+RtpSenderInterface* PeerConnection::FindSenderById(const std::string& id) {
+ auto it =
+ std::find_if(senders_.begin(), senders_.end(),
+ [id](const rtc::scoped_refptr<RtpSenderInterface>& sender) {
+ return sender->id() == id;
+ });
+ return it != senders_.end() ? it->get() : nullptr;
+}
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>>::iterator
PeerConnection::FindSenderForTrack(MediaStreamTrackInterface* track) {
return std::find_if(
diff --git a/talk/app/webrtc/peerconnection.h b/talk/app/webrtc/peerconnection.h
index 2d388ae9f9..6e2b967fb4 100644
--- a/talk/app/webrtc/peerconnection.h
+++ b/talk/app/webrtc/peerconnection.h
@@ -42,13 +42,9 @@
namespace webrtc {
+class MediaStreamObserver;
class RemoteMediaStreamFactory;
-typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
- StunConfigurations;
-typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
- TurnConfigurations;
-
// Populates |session_options| from |rtc_options|, and returns true if options
// are valid.
bool ConvertRtcOptionsForOffer(
@@ -60,11 +56,11 @@ bool ConvertRtcOptionsForOffer(
bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
cricket::MediaSessionOptions* session_options);
-// Parses the URLs for each server in |servers| to build |stun_config| and
-// |turn_config|.
+// Parses the URLs for each server in |servers| to build |stun_servers| and
+// |turn_servers|.
bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config);
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers);
// PeerConnection implements the PeerConnectionInterface interface.
// It uses WebRtcSession to implement the PeerConnection functionality.
@@ -78,9 +74,10 @@ class PeerConnection : public PeerConnectionInterface,
bool Initialize(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer);
+
rtc::scoped_refptr<StreamCollectionInterface> local_streams() override;
rtc::scoped_refptr<StreamCollectionInterface> remote_streams() override;
bool AddStream(MediaStreamInterface* local_stream) override;
@@ -91,6 +88,10 @@ class PeerConnection : public PeerConnectionInterface,
rtc::scoped_refptr<DtmfSenderInterface> CreateDtmfSender(
AudioTrackInterface* track) override;
+ rtc::scoped_refptr<RtpSenderInterface> CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) override;
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>> GetSenders()
const override;
std::vector<rtc::scoped_refptr<RtpReceiverInterface>> GetReceivers()
@@ -148,32 +149,16 @@ class PeerConnection : public PeerConnectionInterface,
const std::string track_id,
uint32_t ssrc)
: stream_label(stream_label), track_id(track_id), ssrc(ssrc) {}
+ bool operator==(const TrackInfo& other) {
+ return this->stream_label == other.stream_label &&
+ this->track_id == other.track_id && this->ssrc == other.ssrc;
+ }
std::string stream_label;
std::string track_id;
uint32_t ssrc;
};
typedef std::vector<TrackInfo> TrackInfos;
- struct RemotePeerInfo {
- RemotePeerInfo()
- : msid_supported(false),
- default_audio_track_needed(false),
- default_video_track_needed(false) {}
- // True if it has been discovered that the remote peer support MSID.
- bool msid_supported;
- // The remote peer indicates in the session description that audio will be
- // sent but no MSID is given.
- bool default_audio_track_needed;
- // The remote peer indicates in the session description that video will be
- // sent but no MSID is given.
- bool default_video_track_needed;
-
- bool IsDefaultMediaStreamNeeded() {
- return !msid_supported &&
- (default_audio_track_needed || default_video_track_needed);
- }
- };
-
// Implements MessageHandler.
void OnMessage(rtc::Message* msg) override;
@@ -187,12 +172,6 @@ class PeerConnection : public PeerConnectionInterface,
AudioTrackInterface* audio_track);
void DestroyVideoReceiver(MediaStreamInterface* stream,
VideoTrackInterface* video_track);
- void CreateAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc);
- void CreateVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track,
- uint32_t ssrc);
void DestroyAudioSender(MediaStreamInterface* stream,
AudioTrackInterface* audio_track,
uint32_t ssrc);
@@ -210,6 +189,16 @@ class PeerConnection : public PeerConnectionInterface,
void OnSessionStateChange(WebRtcSession* session, WebRtcSession::State state);
void ChangeSignalingState(SignalingState signaling_state);
+ // Signals from MediaStreamObserver.
+ void OnAudioTrackAdded(AudioTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnAudioTrackRemoved(AudioTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnVideoTrackAdded(VideoTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnVideoTrackRemoved(VideoTrackInterface* track,
+ MediaStreamInterface* stream);
+
rtc::Thread* signaling_thread() const {
return factory_->signaling_thread();
}
@@ -236,12 +225,19 @@ class PeerConnection : public PeerConnectionInterface,
const MediaConstraintsInterface* constraints,
cricket::MediaSessionOptions* session_options);
- // Makes sure a MediaStream Track is created for each StreamParam in
- // |streams|. |media_type| is the type of the |streams| and can be either
- // audio or video.
+ // Remove all local and remote tracks of type |media_type|.
+ // Called when a media type is rejected (m-line set to port 0).
+ void RemoveTracks(cricket::MediaType media_type);
+
+ // Makes sure a MediaStreamTrack is created for each StreamParam in |streams|,
+ // and existing MediaStreamTracks are removed if there is no corresponding
+ // StreamParam. If |default_track_needed| is true, a default MediaStreamTrack
+ // is created if it doesn't exist; if false, it's removed if it exists.
+ // |media_type| is the type of the |streams| and can be either audio or video.
// If a new MediaStream is created it is added to |new_streams|.
void UpdateRemoteStreamsList(
const std::vector<cricket::StreamParams>& streams,
+ bool default_track_needed,
cricket::MediaType media_type,
StreamCollection* new_streams);
@@ -265,8 +261,6 @@ class PeerConnection : public PeerConnectionInterface,
// exist.
void UpdateEndedRemoteMediaStreams();
- void MaybeCreateDefaultStream();
-
// Set the MediaStreamTrackInterface::TrackState to |kEnded| on all remote
// tracks of type |media_type|.
void EndRemoteTracks(cricket::MediaType media_type);
@@ -328,6 +322,8 @@ class PeerConnection : public PeerConnectionInterface,
void OnDataChannelOpenMessage(const std::string& label,
const InternalDataChannelInit& config);
+ RtpSenderInterface* FindSenderById(const std::string& id);
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>>::iterator
FindSenderForTrack(MediaStreamTrackInterface* track);
std::vector<rtc::scoped_refptr<RtpReceiverInterface>>::iterator
@@ -366,6 +362,8 @@ class PeerConnection : public PeerConnectionInterface,
// Streams created as a result of SetRemoteDescription.
rtc::scoped_refptr<StreamCollection> remote_streams_;
+ std::vector<rtc::scoped_ptr<MediaStreamObserver>> stream_observers_;
+
// These lists store track info seen in local/remote descriptions.
TrackInfos remote_audio_tracks_;
TrackInfos remote_video_tracks_;
@@ -376,8 +374,9 @@ class PeerConnection : public PeerConnectionInterface,
// label -> DataChannel
std::map<std::string, rtc::scoped_refptr<DataChannel>> rtp_data_channels_;
std::vector<rtc::scoped_refptr<DataChannel>> sctp_data_channels_;
+ std::vector<rtc::scoped_refptr<DataChannel>> sctp_data_channels_to_free_;
- RemotePeerInfo remote_info_;
+ bool remote_peer_supports_msid_ = false;
rtc::scoped_ptr<RemoteMediaStreamFactory> remote_stream_factory_;
std::vector<rtc::scoped_refptr<RtpSenderInterface>> senders_;
diff --git a/talk/app/webrtc/peerconnection_unittest.cc b/talk/app/webrtc/peerconnection_unittest.cc
index 3cf66d64d8..8d0793e25f 100644
--- a/talk/app/webrtc/peerconnection_unittest.cc
+++ b/talk/app/webrtc/peerconnection_unittest.cc
@@ -30,11 +30,11 @@
#include <algorithm>
#include <list>
#include <map>
+#include <utility>
#include <vector>
#include "talk/app/webrtc/dtmfsender.h"
#include "talk/app/webrtc/fakemetricsobserver.h"
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/peerconnection.h"
@@ -58,6 +58,7 @@
#include "webrtc/base/virtualsocketserver.h"
#include "webrtc/p2p/base/constants.h"
#include "webrtc/p2p/base/sessiondescription.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
#define MAYBE_SKIP_TEST(feature) \
if (!(feature())) { \
@@ -78,11 +79,13 @@ using webrtc::DtmfSenderInterface;
using webrtc::DtmfSenderObserverInterface;
using webrtc::FakeConstraints;
using webrtc::MediaConstraintsInterface;
+using webrtc::MediaStreamInterface;
using webrtc::MediaStreamTrackInterface;
using webrtc::MockCreateSessionDescriptionObserver;
using webrtc::MockDataChannelObserver;
using webrtc::MockSetSessionDescriptionObserver;
using webrtc::MockStatsObserver;
+using webrtc::ObserverInterface;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionFactory;
using webrtc::SessionDescriptionInterface;
@@ -96,6 +99,7 @@ static const int kMaxWaitMs = 10000;
#if !defined(THREAD_SANITIZER)
static const int kMaxWaitForStatsMs = 3000;
#endif
+static const int kMaxWaitForActivationMs = 5000;
static const int kMaxWaitForFramesMs = 10000;
static const int kEndAudioFrameCount = 3;
static const int kEndVideoFrameCount = 3;
@@ -111,7 +115,7 @@ static const char kDataChannelLabel[] = "data_channel";
#if !defined(THREAD_SANITIZER)
// SRTP cipher name negotiated by the tests. This must be updated if the
// default changes.
-static const char kDefaultSrtpCipher[] = "AES_CM_128_HMAC_SHA1_32";
+static const int kDefaultSrtpCryptoSuite = rtc::SRTP_AES128_CM_SHA1_32;
#endif
static void RemoveLinesFromSdp(const std::string& line_start,
@@ -139,26 +143,35 @@ class SignalingMessageReceiver {
};
class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
- public SignalingMessageReceiver {
+ public SignalingMessageReceiver,
+ public ObserverInterface {
public:
- static PeerConnectionTestClient* CreateClient(
+ static PeerConnectionTestClient* CreateClientWithDtlsIdentityStore(
const std::string& id,
const MediaConstraintsInterface* constraints,
- const PeerConnectionFactory::Options* options) {
+ const PeerConnectionFactory::Options* options,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
PeerConnectionTestClient* client(new PeerConnectionTestClient(id));
- if (!client->Init(constraints, options)) {
+ if (!client->Init(constraints, options, std::move(dtls_identity_store))) {
delete client;
return nullptr;
}
return client;
}
+ static PeerConnectionTestClient* CreateClient(
+ const std::string& id,
+ const MediaConstraintsInterface* constraints,
+ const PeerConnectionFactory::Options* options) {
+ rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
+ rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
+ : nullptr);
+
+ return CreateClientWithDtlsIdentityStore(id, constraints, options,
+ std::move(dtls_identity_store));
+ }
+
~PeerConnectionTestClient() {
- while (!fake_video_renderers_.empty()) {
- RenderMap::iterator it = fake_video_renderers_.begin();
- delete it->second;
- fake_video_renderers_.erase(it);
- }
}
void Negotiate() { Negotiate(true, true); }
@@ -206,16 +219,17 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
webrtc::PeerConnectionInterface::SignalingState new_state) override {
EXPECT_EQ(pc()->signaling_state(), new_state);
}
- void OnAddStream(webrtc::MediaStreamInterface* media_stream) override {
+ void OnAddStream(MediaStreamInterface* media_stream) override {
+ media_stream->RegisterObserver(this);
for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) {
const std::string id = media_stream->GetVideoTracks()[i]->id();
ASSERT_TRUE(fake_video_renderers_.find(id) ==
fake_video_renderers_.end());
- fake_video_renderers_[id] =
- new webrtc::FakeVideoTrackRenderer(media_stream->GetVideoTracks()[i]);
+ fake_video_renderers_[id].reset(new webrtc::FakeVideoTrackRenderer(
+ media_stream->GetVideoTracks()[i]));
}
}
- void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) override {}
+ void OnRemoveStream(MediaStreamInterface* media_stream) override {}
void OnRenegotiationNeeded() override {}
void OnIceConnectionChange(
webrtc::PeerConnectionInterface::IceConnectionState new_state) override {
@@ -238,6 +252,40 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
candidate->sdp_mid(), candidate->sdp_mline_index(), ice_sdp);
}
+ // MediaStreamInterface callback
+ void OnChanged() override {
+ // Track added or removed from MediaStream, so update our renderers.
+ rtc::scoped_refptr<StreamCollectionInterface> remote_streams =
+ pc()->remote_streams();
+ // Remove renderers for tracks that were removed.
+ for (auto it = fake_video_renderers_.begin();
+ it != fake_video_renderers_.end();) {
+ if (remote_streams->FindVideoTrack(it->first) == nullptr) {
+ auto to_remove = it++;
+ removed_fake_video_renderers_.push_back(std::move(to_remove->second));
+ fake_video_renderers_.erase(to_remove);
+ } else {
+ ++it;
+ }
+ }
+ // Create renderers for new video tracks.
+ for (size_t stream_index = 0; stream_index < remote_streams->count();
+ ++stream_index) {
+ MediaStreamInterface* remote_stream = remote_streams->at(stream_index);
+ for (size_t track_index = 0;
+ track_index < remote_stream->GetVideoTracks().size();
+ ++track_index) {
+ const std::string id =
+ remote_stream->GetVideoTracks()[track_index]->id();
+ if (fake_video_renderers_.find(id) != fake_video_renderers_.end()) {
+ continue;
+ }
+ fake_video_renderers_[id].reset(new webrtc::FakeVideoTrackRenderer(
+ remote_stream->GetVideoTracks()[track_index]));
+ }
+ }
+ }
+
void SetVideoConstraints(const webrtc::FakeConstraints& video_constraint) {
video_constraints_ = video_constraint;
}
@@ -246,22 +294,11 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
std::string stream_label =
kStreamLabelBase +
rtc::ToString<int>(static_cast<int>(pc()->local_streams()->count()));
- rtc::scoped_refptr<webrtc::MediaStreamInterface> stream =
+ rtc::scoped_refptr<MediaStreamInterface> stream =
peer_connection_factory_->CreateLocalMediaStream(stream_label);
if (audio && can_receive_audio()) {
- FakeConstraints constraints;
- // Disable highpass filter so that we can get all the test audio frames.
- constraints.AddMandatory(
- MediaConstraintsInterface::kHighpassFilter, false);
- rtc::scoped_refptr<webrtc::AudioSourceInterface> source =
- peer_connection_factory_->CreateAudioSource(&constraints);
- // TODO(perkj): Test audio source when it is implemented. Currently audio
- // always use the default input.
- std::string label = stream_label + kAudioTrackLabelBase;
- rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- peer_connection_factory_->CreateAudioTrack(label, source));
- stream->AddTrack(audio_track);
+ stream->AddTrack(CreateLocalAudioTrack(stream_label));
}
if (video && can_receive_video()) {
stream->AddTrack(CreateLocalVideoTrack(stream_label));
@@ -276,6 +313,12 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return pc()->signaling_state() == webrtc::PeerConnectionInterface::kStable;
}
+ // Automatically add a stream when receiving an offer, if we don't have one.
+ // Defaults to true.
+ void set_auto_add_stream(bool auto_add_stream) {
+ auto_add_stream_ = auto_add_stream;
+ }
+
void set_signaling_message_receiver(
SignalingMessageReceiver* signaling_message_receiver) {
signaling_message_receiver_ = signaling_message_receiver;
@@ -357,6 +400,35 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
data_observer_.reset(new MockDataChannelObserver(data_channel_));
}
+ rtc::scoped_refptr<webrtc::AudioTrackInterface> CreateLocalAudioTrack(
+ const std::string& stream_label) {
+ FakeConstraints constraints;
+ // Disable highpass filter so that we can get all the test audio frames.
+ constraints.AddMandatory(MediaConstraintsInterface::kHighpassFilter, false);
+ rtc::scoped_refptr<webrtc::AudioSourceInterface> source =
+ peer_connection_factory_->CreateAudioSource(&constraints);
+ // TODO(perkj): Test audio source when it is implemented. Currently audio
+ // always uses the default input.
+ std::string label = stream_label + kAudioTrackLabelBase;
+ return peer_connection_factory_->CreateAudioTrack(label, source);
+ }
+
+ rtc::scoped_refptr<webrtc::VideoTrackInterface> CreateLocalVideoTrack(
+ const std::string& stream_label) {
+ // Set max frame rate to 10fps to reduce the risk of flaky tests.
+ FakeConstraints source_constraints = video_constraints_;
+ source_constraints.SetMandatoryMaxFrameRate(10);
+
+ cricket::FakeVideoCapturer* fake_capturer =
+ new webrtc::FakePeriodicVideoCapturer();
+ video_capturers_.push_back(fake_capturer);
+ rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
+ peer_connection_factory_->CreateVideoSource(fake_capturer,
+ &source_constraints);
+ std::string label = stream_label + kVideoTrackLabelBase;
+ return peer_connection_factory_->CreateVideoTrack(label, source);
+ }
+
DataChannelInterface* data_channel() { return data_channel_; }
const MockDataChannelObserver* data_observer() const {
return data_observer_.get();
@@ -376,6 +448,10 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= fake_audio_capture_module_->frames_received();
}
+ int audio_frames_received() const {
+ return fake_audio_capture_module_->frames_received();
+ }
+
bool VideoFramesReceivedCheck(int number_of_frames) {
if (video_decoder_factory_enabled_) {
const std::vector<FakeWebRtcVideoDecoder*>& decoders
@@ -384,9 +460,8 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= 0;
}
- for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator
- it = decoders.begin(); it != decoders.end(); ++it) {
- if (number_of_frames > (*it)->GetNumFramesReceived()) {
+ for (FakeWebRtcVideoDecoder* decoder : decoders) {
+ if (number_of_frames > decoder->GetNumFramesReceived()) {
return false;
}
}
@@ -396,9 +471,8 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= 0;
}
- for (RenderMap::const_iterator it = fake_video_renderers_.begin();
- it != fake_video_renderers_.end(); ++it) {
- if (number_of_frames > it->second->num_rendered_frames()) {
+ for (const auto& pair : fake_video_renderers_) {
+ if (number_of_frames > pair.second->num_rendered_frames()) {
return false;
}
}
@@ -406,6 +480,25 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
}
}
+ int video_frames_received() const {
+ int total = 0;
+ if (video_decoder_factory_enabled_) {
+ const std::vector<FakeWebRtcVideoDecoder*>& decoders =
+ fake_video_decoder_factory_->decoders();
+ for (const FakeWebRtcVideoDecoder* decoder : decoders) {
+ total += decoder->GetNumFramesReceived();
+ }
+ } else {
+ for (const auto& pair : fake_video_renderers_) {
+ total += pair.second->num_rendered_frames();
+ }
+ for (const auto& renderer : removed_fake_video_renderers_) {
+ total += renderer->num_rendered_frames();
+ }
+ }
+ return total;
+ }
+
// Verify the CreateDtmfSender interface
void VerifyDtmf() {
rtc::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver());
@@ -641,14 +734,14 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
explicit PeerConnectionTestClient(const std::string& id) : id_(id) {}
- bool Init(const MediaConstraintsInterface* constraints,
- const PeerConnectionFactory::Options* options) {
+ bool Init(
+ const MediaConstraintsInterface* constraints,
+ const PeerConnectionFactory::Options* options,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
EXPECT_TRUE(!peer_connection_);
EXPECT_TRUE(!peer_connection_factory_);
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
- if (!allocator_factory_) {
- return false;
- }
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
fake_audio_capture_module_ = FakeAudioCaptureModule::Create();
if (fake_audio_capture_module_ == nullptr) {
@@ -666,46 +759,29 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
if (options) {
peer_connection_factory_->SetOptions(*options);
}
- peer_connection_ = CreatePeerConnection(allocator_factory_.get(),
- constraints);
+ peer_connection_ = CreatePeerConnection(
+ std::move(port_allocator), constraints, std::move(dtls_identity_store));
return peer_connection_.get() != nullptr;
}
- rtc::scoped_refptr<webrtc::VideoTrackInterface>
- CreateLocalVideoTrack(const std::string stream_label) {
- // Set max frame rate to 10fps to reduce the risk of the tests to be flaky.
- FakeConstraints source_constraints = video_constraints_;
- source_constraints.SetMandatoryMaxFrameRate(10);
-
- cricket::FakeVideoCapturer* fake_capturer =
- new webrtc::FakePeriodicVideoCapturer();
- video_capturers_.push_back(fake_capturer);
- rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
- peer_connection_factory_->CreateVideoSource(
- fake_capturer, &source_constraints);
- std::string label = stream_label + kVideoTrackLabelBase;
- return peer_connection_factory_->CreateVideoTrack(label, source);
- }
-
rtc::scoped_refptr<webrtc::PeerConnectionInterface> CreatePeerConnection(
- webrtc::PortAllocatorFactoryInterface* factory,
- const MediaConstraintsInterface* constraints) {
- // CreatePeerConnection with IceServers.
- webrtc::PeerConnectionInterface::IceServers ice_servers;
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
+ // CreatePeerConnection with RTCConfiguration.
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
webrtc::PeerConnectionInterface::IceServer ice_server;
ice_server.uri = "stun:stun.l.google.com:19302";
- ice_servers.push_back(ice_server);
+ config.servers.push_back(ice_server);
- rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store(
- rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
- : nullptr);
return peer_connection_factory_->CreatePeerConnection(
- ice_servers, constraints, factory, dtls_identity_store.Pass(), this);
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), this);
}
void HandleIncomingOffer(const std::string& msg) {
LOG(INFO) << id_ << "HandleIncomingOffer ";
- if (NumberOfLocalMediaStreams() == 0) {
+ if (NumberOfLocalMediaStreams() == 0 && auto_add_stream_) {
// If we are not sending any streams ourselves it is time to add some.
AddMediaStream(true, true);
}
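Hypothetical test usage of the new knob, for suites that want an answer with no local media:

    receiving_client_->set_auto_add_stream(false);  // don't auto-attach on offer
    // The client now answers the incoming offer without adding a stream.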
@@ -807,20 +883,24 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
std::string id_;
- rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface> allocator_factory_;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
peer_connection_factory_;
+ bool auto_add_stream_ = true;
+
typedef std::pair<std::string, std::string> IceUfragPwdPair;
std::map<int, IceUfragPwdPair> ice_ufrag_pwd_;
bool expect_ice_restart_ = false;
- // Needed to keep track of number of frames send.
+ // Needed to keep track of number of frames sent.
rtc::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
// Needed to keep track of number of frames received.
- typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap;
- RenderMap fake_video_renderers_;
+ std::map<std::string, rtc::scoped_ptr<webrtc::FakeVideoTrackRenderer>>
+ fake_video_renderers_;
+ // Needed to ensure frames aren't received for removed tracks.
+ std::vector<rtc::scoped_ptr<webrtc::FakeVideoTrackRenderer>>
+ removed_fake_video_renderers_;
// Needed to keep track of number of frames received when an external
// decoder is used.
FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_ = nullptr;
@@ -846,11 +926,9 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
rtc::scoped_ptr<MockDataChannelObserver> data_observer_;
};
-// TODO(deadbeef): Rename this to P2PTestConductor once the Linux memcheck and
-// Windows DrMemory Full bots' blacklists are updated.
-class JsepPeerConnectionP2PTestClient : public testing::Test {
+class P2PTestConductor : public testing::Test {
public:
- JsepPeerConnectionP2PTestClient()
+ P2PTestConductor()
: pss_(new rtc::PhysicalSocketServer),
ss_(new rtc::VirtualSocketServer(pss_.get())),
ss_scope_(ss_.get()) {}
@@ -882,13 +960,26 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
}
void TestUpdateOfferWithRejectedContent() {
+ // Renegotiate, rejecting the video m-line.
initiating_client_->Negotiate(true, false);
- EXPECT_TRUE_WAIT(
- FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
- kMaxWaitForFramesMs);
- // There shouldn't be any more video frame after the new offer is
- // negotiated.
- EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
+ ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
+
+ int pc1_audio_received = initiating_client_->audio_frames_received();
+ int pc1_video_received = initiating_client_->video_frames_received();
+ int pc2_audio_received = receiving_client_->audio_frames_received();
+ int pc2_video_received = receiving_client_->video_frames_received();
+
+ // Wait for some additional audio frames to be received.
+ EXPECT_TRUE_WAIT(initiating_client_->AudioFramesReceivedCheck(
+ pc1_audio_received + kEndAudioFrameCount) &&
+ receiving_client_->AudioFramesReceivedCheck(
+ pc2_audio_received + kEndAudioFrameCount),
+ kMaxWaitForFramesMs);
+
+ // During this time, we shouldn't have received any additional video frames
+ // for the rejected video tracks.
+ EXPECT_EQ(pc1_video_received, initiating_client_->video_frames_received());
+ EXPECT_EQ(pc2_video_received, receiving_client_->video_frames_received());
}
void VerifyRenderedSize(int width, int height) {
@@ -905,7 +996,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
receiving_client_->VerifyLocalIceUfragAndPassword();
}
- ~JsepPeerConnectionP2PTestClient() {
+ ~P2PTestConductor() {
if (initiating_client_) {
initiating_client_->set_signaling_message_receiver(nullptr);
}
@@ -922,6 +1013,11 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
nullptr);
}
+ void SetSignalingReceivers() {
+ initiating_client_->set_signaling_message_receiver(receiving_client_.get());
+ receiving_client_->set_signaling_message_receiver(initiating_client_.get());
+ }
+
bool CreateTestClients(MediaConstraintsInterface* init_constraints,
PeerConnectionFactory::Options* init_options,
MediaConstraintsInterface* recv_constraints,
@@ -933,8 +1029,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
if (!initiating_client_ || !receiving_client_) {
return false;
}
- initiating_client_->set_signaling_message_receiver(receiving_client_.get());
- receiving_client_->set_signaling_message_receiver(initiating_client_.get());
+ SetSignalingReceivers();
return true;
}
@@ -957,13 +1052,11 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
initiating_client_->AddMediaStream(true, true);
}
initiating_client_->Negotiate();
- const int kMaxWaitForActivationMs = 5000;
// Assert true is used here since the following checks are guaranteed to
// fail and would eat up 5 seconds.
ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
VerifySessionDescriptions();
-
int audio_frame_count = kEndAudioFrameCount;
// TODO(ronghuawu): Add test to cover the case of sendonly and recvonly.
if (!initiating_client_->can_receive_audio() ||
@@ -1013,6 +1106,32 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
kMaxWaitForFramesMs);
}
+ void SetupAndVerifyDtlsCall() {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ FakeConstraints setup_constraints;
+ setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+ ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
+ }
+
+ PeerConnectionTestClient* CreateDtlsClientWithAlternateKey() {
+ FakeConstraints setup_constraints;
+ setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+
+ rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
+ rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
+ : nullptr);
+ dtls_identity_store->use_alternate_key();
+
+ // Make sure the new client is using a different certificate.
+ return PeerConnectionTestClient::CreateClientWithDtlsIdentityStore(
+ "New Peer: ", &setup_constraints, nullptr,
+ std::move(dtls_identity_store));
+ }
+
void SendRtpData(webrtc::DataChannelInterface* dc, const std::string& data) {
// Messages may get lost on the unreliable DataChannel, so we send multiple
// times to avoid test flakiness.
@@ -1026,10 +1145,29 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
PeerConnectionTestClient* initializing_client() {
return initiating_client_.get();
}
+
+ // Set the |initiating_client_| to the |client| passed in and return the
+ // original |initiating_client_|.
+ PeerConnectionTestClient* set_initializing_client(
+ PeerConnectionTestClient* client) {
+ PeerConnectionTestClient* old = initiating_client_.release();
+ initiating_client_.reset(client);
+ return old;
+ }
+
PeerConnectionTestClient* receiving_client() {
return receiving_client_.get();
}
+ // Set the |receiving_client_| to the |client| passed in and return the
+ // original |receiving_client_|.
+ PeerConnectionTestClient* set_receiving_client(
+ PeerConnectionTestClient* client) {
+ PeerConnectionTestClient* old = receiving_client_.release();
+ receiving_client_.reset(client);
+ return old;
+ }
+
private:
rtc::scoped_ptr<rtc::PhysicalSocketServer> pss_;
rtc::scoped_ptr<rtc::VirtualSocketServer> ss_;
@@ -1045,7 +1183,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
// This test sets up a Jsep call between two parties and tests DTMF.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestDtmf) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
VerifyDtmf();
@@ -1053,7 +1191,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) {
// This test sets up a Jsep call between two parties and tests that we can get a
// video aspect ratio of 16:9.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
+TEST_F(P2PTestConductor, LocalP2PTest16To9) {
ASSERT_TRUE(CreateTestClients());
FakeConstraints constraint;
double requested_ratio = 640.0/360;
@@ -1078,7 +1216,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
// received video has a resolution of 1280*720.
// TODO(mallinath): Enable when
// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTest1280By720) {
ASSERT_TRUE(CreateTestClients());
FakeConstraints constraint;
constraint.SetMandatoryMinWidth(1280);
@@ -1090,34 +1228,84 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. As a result, DTLS is negotiated and used for transport.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) {
+TEST_F(P2PTestConductor, LocalP2PTestDtls) {
+ SetupAndVerifyDtlsCall();
+}
+
+// This test sets up an audio call initially and then upgrades to audio/video,
+// using DTLS.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsRenegotiate) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+ receiving_client()->SetReceiveAudioVideo(true, false);
+ LocalP2PTest();
+ receiving_client()->SetReceiveAudioVideo(true, true);
+ receiving_client()->Negotiate();
+}
+
+// This test sets up a call transfer to a new caller with a different DTLS
+// fingerprint.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsTransferCallee) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ SetupAndVerifyDtlsCall();
+
+ // Keep the original peer around; it will still send packets to the
+ // receiving client. These SRTP packets will be dropped.
+ rtc::scoped_ptr<PeerConnectionTestClient> original_peer(
+ set_initializing_client(CreateDtlsClientWithAlternateKey()));
+ original_peer->pc()->Close();
+
+ SetSignalingReceivers();
+ receiving_client()->SetExpectIceRestart(true);
LocalP2PTest();
VerifyRenderedSize(640, 480);
}
-// This test sets up a audio call initially and then upgrades to audio/video,
-// using DTLS.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtlsRenegotiate) {
+// This test sets up a non-bundle call and applies bundle during ICE restart.
+// When bundle is in effect after the restart, the channel can successfully
+// reset its DTLS-SRTP context.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsBundleInIceRestart) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
- receiving_client()->SetReceiveAudioVideo(true, false);
+ receiving_client()->RemoveBundleFromReceivedSdp(true);
LocalP2PTest();
- receiving_client()->SetReceiveAudioVideo(true, true);
- receiving_client()->Negotiate();
+ VerifyRenderedSize(640, 480);
+
+ initializing_client()->IceRestart();
+ receiving_client()->SetExpectIceRestart(true);
+ receiving_client()->RemoveBundleFromReceivedSdp(false);
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
+}
+
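+// For context, the BUNDLE group toggled above appears in the SDP as a
+// session-level attribute, e.g. (illustrative):
+//
+//   a=group:BUNDLE audio video
+//
+// Removing it forces a separate transport (and DTLS-SRTP context) per m-line;
+// restoring it during the ICE restart exercises the context-reset path.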
+// This test sets up a call transfer to a new callee with a different DTLS
+// fingerprint.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsTransferCaller) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ SetupAndVerifyDtlsCall();
+
+ // Keep the original peer around; it will still send packets to the
+ // receiving client. These SRTP packets will be dropped.
+ rtc::scoped_ptr<PeerConnectionTestClient> original_peer(
+ set_receiving_client(CreateDtlsClientWithAlternateKey()));
+ original_peer->pc()->Close();
+
+ SetSignalingReceivers();
+ initializing_client()->IceRestart();
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
}
// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. The offerer doesn't support SDES. As a result, DTLS is
// negotiated and used for transport.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
+TEST_F(P2PTestConductor, LocalP2PTestOfferDtlsButNotSdes) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
@@ -1130,7 +1318,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
// This test sets up a Jsep call between two parties, and the callee only
// accepts to receive video.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerVideo) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(false, true);
LocalP2PTest();
@@ -1138,7 +1326,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
// This test sets up a Jsep call between two parties, and the callee only
// accepts to receive audio.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerAudio) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(true, false);
LocalP2PTest();
@@ -1146,7 +1334,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
// This test sets up a Jsep call between two parties, and the callee rejects
// both audio and video.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerNone) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(false, false);
LocalP2PTest();
@@ -1156,9 +1344,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
// runs for a while (10 frames), the caller sends an update offer with video
// being rejected. Once the re-negotiation is done, the video flow should stop
// and the audio flow should continue.
-// Disabled due to b/14955157.
-TEST_F(JsepPeerConnectionP2PTestClient,
- DISABLED_UpdateOfferWithRejectedContent) {
+TEST_F(P2PTestConductor, UpdateOfferWithRejectedContent) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
TestUpdateOfferWithRejectedContent();
@@ -1166,8 +1352,7 @@ TEST_F(JsepPeerConnectionP2PTestClient,
// This test sets up a Jsep call between two parties. The MSID is removed from
// the SDP strings sent by the caller.
-// Disabled due to b/14955157.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestWithoutMsid) {
+TEST_F(P2PTestConductor, LocalP2PTestWithoutMsid) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->RemoveMsidFromReceivedSdp(true);
// TODO(perkj): Currently there is a bug that cause audio to stop playing if
@@ -1182,7 +1367,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestWithoutMsid) {
// sends two streams.
// TODO(perkj): Disabled due to
// https://code.google.com/p/webrtc/issues/detail?id=1454
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestTwoStreams) {
ASSERT_TRUE(CreateTestClients());
// Set optional video constraint to max 320 pixels to decrease CPU usage.
FakeConstraints constraint;
@@ -1196,7 +1381,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
}
// Test that we can receive the audio output level from a remote audio track.
-TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
+TEST_F(P2PTestConductor, GetAudioOutputLevelStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1215,7 +1400,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
}
// Test that an audio input level is reported.
-TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
+TEST_F(P2PTestConductor, GetAudioInputLevelStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1226,7 +1411,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
}
// Test that we can get incoming byte counts from both audio and video tracks.
-TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
+TEST_F(P2PTestConductor, GetBytesReceivedStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1248,7 +1433,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
}
// Test that we can get outgoing byte counts from both audio and video tracks.
-TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
+TEST_F(P2PTestConductor, GetBytesSentStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1270,7 +1455,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
}
// Test that DTLS 1.0 is used if both sides only support DTLS 1.0.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
+TEST_F(P2PTestConductor, GetDtls12None) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
PeerConnectionFactory::Options recv_options;
@@ -1282,7 +1467,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1292,16 +1477,23 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
+#if defined(MEMORY_SANITIZER)
+// Fails under MemorySanitizer:
+// See https://code.google.com/p/webrtc/issues/detail?id=5381.
+#define MAYBE_GetDtls12Both DISABLED_GetDtls12Both
+#else
+#define MAYBE_GetDtls12Both GetDtls12Both
+#endif
// Test that DTLS 1.2 is used if both ends support it.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
+TEST_F(P2PTestConductor, MAYBE_GetDtls12Both) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
PeerConnectionFactory::Options recv_options;
@@ -1313,7 +1505,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_12, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1323,17 +1515,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_12, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
// Test that DTLS 1.0 is used if the initiator supports DTLS 1.2 and the
// receiver supports 1.0.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
+TEST_F(P2PTestConductor, GetDtls12Init) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
PeerConnectionFactory::Options recv_options;
@@ -1345,7 +1537,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1355,17 +1547,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
// Test that DTLS 1.0 is used if the initiator supports DTLS 1.0 and the
// receiver supports 1.2.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
+TEST_F(P2PTestConductor, GetDtls12Recv) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
PeerConnectionFactory::Options recv_options;
@@ -1377,7 +1569,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1387,16 +1579,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
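// Taken together, the four tests above pin down the version selection: the
// negotiated DTLS version is the minimum of the two sides' ssl_max_version
// caps, i.e. (illustrative summary):
//
//   init max   recv max   negotiated
//   DTLS 1.0   DTLS 1.0   DTLS 1.0
//   DTLS 1.2   DTLS 1.2   DTLS 1.2
//   DTLS 1.2   DTLS 1.0   DTLS 1.0
//   DTLS 1.0   DTLS 1.2   DTLS 1.0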
-// This test sets up a call between two parties with audio, video and data.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
+// This test sets up a call between two parties with audio, video and an RTP
+// data channel.
+TEST_F(P2PTestConductor, LocalP2PTestRtpDataChannel) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1426,6 +1619,34 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
EXPECT_FALSE(receiving_client()->data_observer()->IsOpen());
}
+// This test sets up a call between two parties with audio, video and an SCTP
+// data channel.
+TEST_F(P2PTestConductor, LocalP2PTestSctpDataChannel) {
+ ASSERT_TRUE(CreateTestClients());
+ initializing_client()->CreateDataChannel();
+ LocalP2PTest();
+ ASSERT_TRUE(initializing_client()->data_channel() != nullptr);
+ EXPECT_TRUE_WAIT(receiving_client()->data_channel() != nullptr, kMaxWaitMs);
+ EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
+ kMaxWaitMs);
+ EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(), kMaxWaitMs);
+
+ std::string data = "hello world";
+
+ initializing_client()->data_channel()->Send(DataBuffer(data));
+ EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(),
+ kMaxWaitMs);
+
+ receiving_client()->data_channel()->Send(DataBuffer(data));
+ EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(),
+ kMaxWaitMs);
+
+ receiving_client()->data_channel()->Close();
+ EXPECT_TRUE_WAIT(!initializing_client()->data_observer()->IsOpen(),
+ kMaxWaitMs);
+ EXPECT_TRUE_WAIT(!receiving_client()->data_observer()->IsOpen(), kMaxWaitMs);
+}
+
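+// The data_observer() checks above go through a MockDataChannelObserver. For
+// reference, a minimal observer of that shape could look like the sketch
+// below (ExampleDataChannelObserver is a hypothetical name, and only the
+// OnStateChange()/OnMessage() callbacks of DataChannelObserver are assumed):
+class ExampleDataChannelObserver : public webrtc::DataChannelObserver {
+ public:
+  explicit ExampleDataChannelObserver(webrtc::DataChannelInterface* channel)
+      : channel_(channel) {
+    channel_->RegisterObserver(this);
+  }
+  ~ExampleDataChannelObserver() { channel_->UnregisterObserver(); }
+  // Track open/closed state transitions reported by the channel.
+  void OnStateChange() override {
+    open_ = channel_->state() == webrtc::DataChannelInterface::kOpen;
+  }
+  // Remember the payload of the most recent message.
+  void OnMessage(const webrtc::DataBuffer& buffer) override {
+    last_message_.assign(buffer.data.data<char>(), buffer.data.size());
+  }
+  bool IsOpen() const { return open_; }
+  const std::string& last_message() const { return last_message_; }
+
+ private:
+  webrtc::DataChannelInterface* channel_;
+  bool open_ = false;
+  std::string last_message_;
+};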
// This test sets up a call between two parties and creates a data channel.
// The test verifies that received data is buffered unless an observer has
// been registered.
@@ -1433,7 +1654,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
// transport has detected that a channel is writable and thus data can be
// received before the data channel state changes to open. That is hard to test
// but the same buffering is used in that case.
-TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
+TEST_F(P2PTestConductor, RegisterDataChannelObserver) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1463,7 +1684,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
// This test sets up a call between two parties with audio and video, but only
// the initiating client supports data.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
+TEST_F(P2PTestConductor, LocalP2PTestReceiverDoesntSupportData) {
FakeConstraints setup_constraints_1;
setup_constraints_1.SetAllowRtpDataChannels();
// Must disable DTLS to make negotiation succeed.
@@ -1482,7 +1703,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
// This test sets up a call between two parties with audio and video. Once
// audio and video are set up and flowing, a data channel is negotiated.
-TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
+TEST_F(P2PTestConductor, AddDataChannelAfterRenegotiation) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1501,7 +1722,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
// This test sets up a Jsep call with an SCTP DataChannel and verifies the
// negotiation is completed without error.
#ifdef HAVE_SCTP
-TEST_F(JsepPeerConnectionP2PTestClient, CreateOfferWithSctpDataChannel) {
+TEST_F(P2PTestConductor, CreateOfferWithSctpDataChannel) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints constraints;
constraints.SetMandatory(
@@ -1515,7 +1736,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, CreateOfferWithSctpDataChannel) {
// This test sets up a call between two parties with audio and video.
// During the call, the initializing side restarts ICE and the test verifies
// that new ICE candidates are generated and audio and video can still flow.
-TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
+TEST_F(P2PTestConductor, IceRestart) {
ASSERT_TRUE(CreateTestClients());
// Negotiate and wait for ice completion and make sure audio and video plays.
@@ -1562,17 +1783,69 @@ TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
EXPECT_NE(receiver_candidate, receiver_candidate_restart);
}
+// This test sets up a call between two parties with audio and video.
+// It then renegotiates setting the video m-line to "port 0", then later
+// renegotiates again, enabling video.
+TEST_F(P2PTestConductor, LocalP2PTestVideoDisableEnable) {
+ ASSERT_TRUE(CreateTestClients());
+
+ // Do initial negotiation. Will result in video and audio sendonly m-lines.
+ receiving_client()->set_auto_add_stream(false);
+ initializing_client()->AddMediaStream(true, true);
+ initializing_client()->Negotiate();
+
+ // Negotiate again, disabling the video m-line (receiving client will
+ // set port to 0 due to mandatory "OfferToReceiveVideo: false" constraint).
+ receiving_client()->SetReceiveVideo(false);
+ initializing_client()->Negotiate();
+
+ // Enable video and do negotiation again, making sure video is received
+ // end-to-end.
+ receiving_client()->SetReceiveVideo(true);
+ receiving_client()->AddMediaStream(true, true);
+ LocalP2PTest();
+}
+
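+// For context, a rejected (disabled) m-line carries a zero port in the SDP,
+// per RFC 3264, e.g. (illustrative):
+//
+//   m=video 0 UDP/TLS/RTP/SAVPF 100
+//
+// The final renegotiation above restores a nonzero port, after which video
+// flows end-to-end again.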
// This test sets up a Jsep call between two parties with external
// VideoDecoderFactory.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
-TEST_F(JsepPeerConnectionP2PTestClient,
- DISABLED_LocalP2PTestWithVideoDecoderFactory) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestWithVideoDecoderFactory) {
ASSERT_TRUE(CreateTestClients());
EnableVideoDecoderFactory();
LocalP2PTest();
}
+// This tests that if we negotiate after calling CreateSender but before we
+// have a track, then set a track later, frames from the newly-set track are
+// received end-to-end.
+TEST_F(P2PTestConductor, EarlyWarmupTest) {
+ ASSERT_TRUE(CreateTestClients());
+ auto audio_sender =
+ initializing_client()->pc()->CreateSender("audio", "stream_id");
+ auto video_sender =
+ initializing_client()->pc()->CreateSender("video", "stream_id");
+ initializing_client()->Negotiate();
+ // Wait for ICE connection to complete, without any tracks.
+ // Note that the receiving client WILL (in HandleIncomingOffer) create
+ // tracks, so it's only the initiator here that's doing early warmup.
+ ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
+ VerifySessionDescriptions();
+ EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionCompleted,
+ initializing_client()->ice_connection_state(),
+ kMaxWaitForFramesMs);
+ EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionConnected,
+ receiving_client()->ice_connection_state(),
+ kMaxWaitForFramesMs);
+ // Now set the tracks, and expect frames to immediately start flowing.
+ EXPECT_TRUE(
+ audio_sender->SetTrack(initializing_client()->CreateLocalAudioTrack("")));
+ EXPECT_TRUE(
+ video_sender->SetTrack(initializing_client()->CreateLocalVideoTrack("")));
+ EXPECT_TRUE_WAIT(FramesNotPending(kEndAudioFrameCount, kEndVideoFrameCount),
+ kMaxWaitForFramesMs);
+}
+
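+// In application terms, the warm-up pattern exercised above is (names are
+// illustrative):
+//
+//   auto sender = pc->CreateSender("video", "stream_id");  // no track yet
+//   // ... negotiate, wait for ICE to complete ...
+//   sender->SetTrack(video_track);  // frames start flowing immediately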
class IceServerParsingTest : public testing::Test {
public:
// Convenience for parsing a single URL.
@@ -1589,38 +1862,37 @@ class IceServerParsingTest : public testing::Test {
server.username = username;
server.password = password;
servers.push_back(server);
- return webrtc::ParseIceServers(servers, &stun_configurations_,
- &turn_configurations_);
+ return webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_);
}
protected:
- webrtc::StunConfigurations stun_configurations_;
- webrtc::TurnConfigurations turn_configurations_;
+ cricket::ServerAddresses stun_servers_;
+ std::vector<cricket::RelayServerConfig> turn_servers_;
};
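// For reference, the URL forms the parser accepts (exercised below) are
// roughly:
//
//   stun:hostname[:port]
//   stuns:hostname[:port]
//   turn:[username@]hostname[:port][?transport={udp,tcp}]
//   turns:[username@]hostname[:port][?transport={udp,tcp}]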
// Make sure all STUN/TURN prefixes are parsed correctly.
TEST_F(IceServerParsingTest, ParseStunPrefixes) {
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(0U, turn_configurations_.size());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(0U, turn_servers_.size());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stuns:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(0U, turn_configurations_.size());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(0U, turn_servers_.size());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("turn:hostname"));
- EXPECT_EQ(0U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_FALSE(turn_configurations_[0].secure);
- turn_configurations_.clear();
+ EXPECT_EQ(0U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_FALSE(turn_servers_[0].ports[0].secure);
+ turn_servers_.clear();
EXPECT_TRUE(ParseUrl("turns:hostname"));
- EXPECT_EQ(0U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_TRUE(turn_configurations_[0].secure);
- turn_configurations_.clear();
+ EXPECT_EQ(0U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_TRUE(turn_servers_[0].ports[0].secure);
+ turn_servers_.clear();
// invalid prefixes
EXPECT_FALSE(ParseUrl("stunn:hostname"));
@@ -1632,67 +1904,69 @@ TEST_F(IceServerParsingTest, ParseStunPrefixes) {
TEST_F(IceServerParsingTest, VerifyDefaults) {
// TURNS defaults
EXPECT_TRUE(ParseUrl("turns:hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ(5349, turn_configurations_[0].server.port());
- EXPECT_EQ("tcp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(5349, turn_servers_[0].ports[0].address.port());
+ EXPECT_EQ(cricket::PROTO_TCP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
// TURN defaults
EXPECT_TRUE(ParseUrl("turn:hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ(3478, turn_configurations_[0].server.port());
- EXPECT_EQ("udp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(3478, turn_servers_[0].ports[0].address.port());
+ EXPECT_EQ(cricket::PROTO_UDP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
// STUN defaults
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
}
// Check that the 6 combinations of IPv4/IPv6/hostname and with/without port
// can be parsed correctly.
TEST_F(IceServerParsingTest, ParseHostnameAndPort) {
EXPECT_TRUE(ParseUrl("stun:1.2.3.4:1234"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1.2.3.4", stun_configurations_[0].server.hostname());
- EXPECT_EQ(1234, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1.2.3.4", stun_servers_.begin()->hostname());
+ EXPECT_EQ(1234, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:[1:2:3:4:5:6:7:8]:4321"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1:2:3:4:5:6:7:8", stun_configurations_[0].server.hostname());
- EXPECT_EQ(4321, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1:2:3:4:5:6:7:8", stun_servers_.begin()->hostname());
+ EXPECT_EQ(4321, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:hostname:9999"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("hostname", stun_configurations_[0].server.hostname());
- EXPECT_EQ(9999, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("hostname", stun_servers_.begin()->hostname());
+ EXPECT_EQ(9999, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:1.2.3.4"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1.2.3.4", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1.2.3.4", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:[1:2:3:4:5:6:7:8]"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1:2:3:4:5:6:7:8", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1:2:3:4:5:6:7:8", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("hostname", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("hostname", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
// Try some invalid hostname:port strings.
EXPECT_FALSE(ParseUrl("stun:hostname:99a99"));
EXPECT_FALSE(ParseUrl("stun:hostname:-1"));
+ EXPECT_FALSE(ParseUrl("stun:hostname:port:more"));
+ EXPECT_FALSE(ParseUrl("stun:hostname:port more"));
EXPECT_FALSE(ParseUrl("stun:hostname:"));
EXPECT_FALSE(ParseUrl("stun:[1:2:3:4:5:6:7:8]junk:1000"));
EXPECT_FALSE(ParseUrl("stun::5555"));
@@ -1702,14 +1976,14 @@ TEST_F(IceServerParsingTest, ParseHostnameAndPort) {
// Test parsing the "?transport=xxx" part of the URL.
TEST_F(IceServerParsingTest, ParseTransport) {
EXPECT_TRUE(ParseUrl("turn:hostname:1234?transport=tcp"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("tcp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(cricket::PROTO_TCP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
EXPECT_TRUE(ParseUrl("turn:hostname?transport=udp"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("udp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(cricket::PROTO_UDP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
EXPECT_FALSE(ParseUrl("turn:hostname?transport=invalid"));
}
@@ -1717,9 +1991,9 @@ TEST_F(IceServerParsingTest, ParseTransport) {
// Test parsing ICE username contained in URL.
TEST_F(IceServerParsingTest, ParseUsername) {
EXPECT_TRUE(ParseUrl("turn:user@hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("user", turn_configurations_[0].username);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ("user", turn_servers_[0].credentials.username);
+ turn_servers_.clear();
EXPECT_FALSE(ParseUrl("turn:@hostname"));
EXPECT_FALSE(ParseUrl("turn:username@"));
@@ -1728,12 +2002,12 @@ TEST_F(IceServerParsingTest, ParseUsername) {
}
// Test that username and password from IceServer is copied into the resulting
-// TurnConfiguration.
+// RelayServerConfig.
TEST_F(IceServerParsingTest, CopyUsernameAndPasswordFromIceServer) {
EXPECT_TRUE(ParseUrl("turn:hostname", "username", "password"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("username", turn_configurations_[0].username);
- EXPECT_EQ("password", turn_configurations_[0].password);
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ("username", turn_servers_[0].credentials.username);
+ EXPECT_EQ("password", turn_servers_[0].credentials.password);
}
// Ensure that if a server has multiple URLs, each one is parsed.
@@ -1743,10 +2017,22 @@ TEST_F(IceServerParsingTest, ParseMultipleUrls) {
server.urls.push_back("stun:hostname");
server.urls.push_back("turn:hostname");
servers.push_back(server);
- EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_configurations_,
- &turn_configurations_));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
+ EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_));
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+}
+
+// Ensure that TURN servers are given unique priorities,
+// so that their resulting candidates have unique priorities.
+TEST_F(IceServerParsingTest, TurnServerPrioritiesUnique) {
+ PeerConnectionInterface::IceServers servers;
+ PeerConnectionInterface::IceServer server;
+ server.urls.push_back("turn:hostname");
+ server.urls.push_back("turn:hostname2");
+ servers.push_back(server);
+ EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_));
+ EXPECT_EQ(2U, turn_servers_.size());
+ EXPECT_NE(turn_servers_[0].priority, turn_servers_[1].priority);
}
#endif // if !defined(THREAD_SANITIZER)
diff --git a/talk/app/webrtc/peerconnectionendtoend_unittest.cc b/talk/app/webrtc/peerconnectionendtoend_unittest.cc
index eacedd4eea..1a180317ac 100644
--- a/talk/app/webrtc/peerconnectionendtoend_unittest.cc
+++ b/talk/app/webrtc/peerconnectionendtoend_unittest.cc
@@ -27,6 +27,9 @@
#include "talk/app/webrtc/test/peerconnectiontestwrapper.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "webrtc/base/gunit.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/ssladapter.h"
@@ -50,56 +53,6 @@ namespace {
const size_t kMaxWait = 10000;
-void RemoveLinesFromSdp(const std::string& line_start,
- std::string* sdp) {
- const char kSdpLineEnd[] = "\r\n";
- size_t ssrc_pos = 0;
- while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
- std::string::npos) {
- size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
- sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
- }
-}
-
-// Add |newlines| to the |message| after |line|.
-void InjectAfter(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- const std::string tmp = line + newlines;
- rtc::replace_substrs(line.c_str(), line.length(),
- tmp.c_str(), tmp.length(), message);
-}
-
-void Replace(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- rtc::replace_substrs(line.c_str(), line.length(),
- newlines.c_str(), newlines.length(), message);
-}
-
-void UseExternalSdes(std::string* sdp) {
- // Remove current crypto specification.
- RemoveLinesFromSdp("a=crypto", sdp);
- RemoveLinesFromSdp("a=fingerprint", sdp);
- // Add external crypto.
- const char kAudioSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR\r\n";
- const char kVideoSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj\r\n";
- const char kDataSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj\r\n";
- InjectAfter("a=mid:audio\r\n", kAudioSdes, sdp);
- InjectAfter("a=mid:video\r\n", kVideoSdes, sdp);
- InjectAfter("a=mid:data\r\n", kDataSdes, sdp);
-}
-
-void RemoveBundle(std::string* sdp) {
- RemoveLinesFromSdp("a=group:BUNDLE", sdp);
-}
-
} // namespace
class PeerConnectionEndToEndTest
@@ -114,6 +67,9 @@ class PeerConnectionEndToEndTest
"caller")),
callee_(new rtc::RefCountedObject<PeerConnectionTestWrapper>(
"callee")) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
}
void CreatePcs() {
@@ -217,15 +173,20 @@ class PeerConnectionEndToEndTest
DataChannelList callee_signaled_data_channels_;
};
+// Disabled for TSan v2, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=4719 for details.
+// Disabled for Mac, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5231 for details.
+#if !defined(THREAD_SANITIZER) && !defined(WEBRTC_MAC)
TEST_F(PeerConnectionEndToEndTest, Call) {
CreatePcs();
GetAndAddUserMedia();
Negotiate();
WaitForCallEstablished();
}
+#endif // if !defined(THREAD_SANITIZER) && !defined(WEBRTC_MAC)
-// Disabled per b/14899892
-TEST_F(PeerConnectionEndToEndTest, DISABLED_CallWithLegacySdp) {
+TEST_F(PeerConnectionEndToEndTest, CallWithLegacySdp) {
FakeConstraints pc_constraints;
pc_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
false);
@@ -396,3 +357,30 @@ TEST_F(PeerConnectionEndToEndTest,
CloseDataChannels(caller_dc, callee_signaled_data_channels_, 1);
}
+
+// This tests that if a data channel is closed remotely while not referenced
+// by the application (meaning only the PeerConnection contributes to its
+// reference count), no memory access violation will occur.
+// See: https://code.google.com/p/chromium/issues/detail?id=565048
+TEST_F(PeerConnectionEndToEndTest, CloseDataChannelRemotelyWhileNotReferenced) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+
+ CreatePcs();
+
+ webrtc::DataChannelInit init;
+ rtc::scoped_refptr<DataChannelInterface> caller_dc(
+ caller_->CreateDataChannel("data", init));
+
+ Negotiate();
+ WaitForConnection();
+
+ WaitForDataChannelsToOpen(caller_dc, callee_signaled_data_channels_, 0);
+ // This removes the reference to the remote data channel that we hold.
+ callee_signaled_data_channels_.clear();
+ caller_dc->Close();
+ EXPECT_EQ_WAIT(DataChannelInterface::kClosed, caller_dc->state(), kMaxWait);
+
+ // Wait for a bit longer so the remote data channel will receive the
+ // close message and be destroyed.
+ rtc::Thread::Current()->ProcessMessages(100);
+}
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
index b46b4b68d3..c58f88cb41 100644
--- a/talk/app/webrtc/peerconnectionfactory.cc
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/peerconnectionfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastream.h"
@@ -35,7 +37,6 @@
#include "talk/app/webrtc/peerconnection.h"
#include "talk/app/webrtc/peerconnectionfactoryproxy.h"
#include "talk/app/webrtc/peerconnectionproxy.h"
-#include "talk/app/webrtc/portallocatorfactory.h"
#include "talk/app/webrtc/videosource.h"
#include "talk/app/webrtc/videosourceproxy.h"
#include "talk/app/webrtc/videotrack.h"
@@ -44,6 +45,8 @@
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "webrtc/base/bind.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/p2p/base/basicpacketsocketfactory.h"
+#include "webrtc/p2p/client/basicportallocator.h"
namespace webrtc {
@@ -153,11 +156,13 @@ PeerConnectionFactory::PeerConnectionFactory(
PeerConnectionFactory::~PeerConnectionFactory() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_.reset(nullptr);
- default_allocator_factory_ = nullptr;
// Make sure |worker_thread_| and |signaling_thread_| outlive
- // |dtls_identity_store_|.
+ // |dtls_identity_store_|, |default_socket_factory_| and
+ // |default_network_manager_|.
dtls_identity_store_ = nullptr;
+ default_socket_factory_ = nullptr;
+ default_network_manager_ = nullptr;
if (owns_ptrs_) {
if (wraps_current_thread_)
@@ -170,9 +175,16 @@ bool PeerConnectionFactory::Initialize() {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::InitRandom(rtc::Time());
- default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
- if (!default_allocator_factory_)
+ default_network_manager_.reset(new rtc::BasicNetworkManager());
+ if (!default_network_manager_) {
return false;
+ }
+
+ default_socket_factory_.reset(
+ new rtc::BasicPacketSocketFactory(worker_thread_));
+ if (!default_socket_factory_) {
+ return false;
+ }
// TODO: Need to make sure only one VoE is created inside
// WebRtcMediaEngine.
@@ -208,8 +220,8 @@ PeerConnectionFactory::CreateVideoSource(
cricket::VideoCapturer* capturer,
const MediaConstraintsInterface* constraints) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- rtc::scoped_refptr<VideoSource> source(
- VideoSource::Create(channel_manager_.get(), capturer, constraints));
+ rtc::scoped_refptr<VideoSource> source(VideoSource::Create(
+ channel_manager_.get(), capturer, constraints, false));
return VideoSourceProxy::Create(signaling_thread_, source);
}
@@ -237,11 +249,10 @@ rtc::scoped_refptr<PeerConnectionInterface>
PeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- RTC_DCHECK(allocator_factory || default_allocator_factory_);
if (!dtls_identity_store.get()) {
// Because |pc|->Initialize takes ownership of the store we need a new
@@ -251,19 +262,17 @@ PeerConnectionFactory::CreatePeerConnection(
new DtlsIdentityStoreWrapper(dtls_identity_store_));
}
- PortAllocatorFactoryInterface* chosen_allocator_factory =
- allocator_factory ? allocator_factory : default_allocator_factory_.get();
- chosen_allocator_factory->SetNetworkIgnoreMask(options_.network_ignore_mask);
+ if (!allocator) {
+ allocator.reset(new cricket::BasicPortAllocator(
+ default_network_manager_.get(), default_socket_factory_.get()));
+ }
+ allocator->SetNetworkIgnoreMask(options_.network_ignore_mask);
rtc::scoped_refptr<PeerConnection> pc(
new rtc::RefCountedObject<PeerConnection>(this));
- if (!pc->Initialize(
- configuration,
- constraints,
- chosen_allocator_factory,
- dtls_identity_store.Pass(),
- observer)) {
- return NULL;
+ if (!pc->Initialize(configuration, constraints, std::move(allocator),
+ std::move(dtls_identity_store), observer)) {
+ return nullptr;
}
return PeerConnectionProxy::Create(signaling_thread(), pc);
}
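// With this change a caller may pass a null allocator; e.g. (sketch, with
// |factory|, |store| and |observer| as placeholders):
//
//   factory->CreatePeerConnection(config, nullptr, nullptr, std::move(store),
//                                 &observer);
//
// falls back to a BasicPortAllocator built on the factory's default network
// manager and socket factory.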
@@ -289,8 +298,7 @@ rtc::scoped_refptr<AudioTrackInterface>
PeerConnectionFactory::CreateAudioTrack(const std::string& id,
AudioSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- rtc::scoped_refptr<AudioTrackInterface> track(
- AudioTrack::Create(id, source));
+ rtc::scoped_refptr<AudioTrackInterface> track(AudioTrack::Create(id, source));
return AudioTrackProxy::Create(signaling_thread_, track);
}
diff --git a/talk/app/webrtc/peerconnectionfactory.h b/talk/app/webrtc/peerconnectionfactory.h
index af4117a9d3..8b274e118c 100644
--- a/talk/app/webrtc/peerconnectionfactory.h
+++ b/talk/app/webrtc/peerconnectionfactory.h
@@ -39,6 +39,11 @@
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread.h"
+namespace rtc {
+class BasicNetworkManager;
+class BasicPacketSocketFactory;
+}
+
namespace webrtc {
typedef rtc::RefCountedObject<DtlsIdentityStoreImpl>
@@ -50,14 +55,12 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
options_ = options;
}
- // webrtc::PeerConnectionFactoryInterface override;
- rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::RTCConfiguration& configuration,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) override;
+ rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+ const PeerConnectionInterface::RTCConfiguration& configuration,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
+ rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
+ PeerConnectionObserver* observer) override;
bool Initialize();
@@ -107,7 +110,6 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
rtc::Thread* signaling_thread_;
rtc::Thread* worker_thread_;
Options options_;
- rtc::scoped_refptr<PortAllocatorFactoryInterface> default_allocator_factory_;
// External Audio device used for audio playback.
rtc::scoped_refptr<AudioDeviceModule> default_adm_;
rtc::scoped_ptr<cricket::ChannelManager> channel_manager_;
@@ -119,6 +121,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
// injected any. In that case, video engine will use the internal SW decoder.
rtc::scoped_ptr<cricket::WebRtcVideoDecoderFactory>
video_decoder_factory_;
+ rtc::scoped_ptr<rtc::BasicNetworkManager> default_network_manager_;
+ rtc::scoped_ptr<rtc::BasicPacketSocketFactory> default_socket_factory_;
rtc::scoped_refptr<RefCountedDtlsIdentityStore> dtls_identity_store_;
};
diff --git a/talk/app/webrtc/peerconnectionfactory_unittest.cc b/talk/app/webrtc/peerconnectionfactory_unittest.cc
index f1d5353abd..9fb013b54f 100644
--- a/talk/app/webrtc/peerconnectionfactory_unittest.cc
+++ b/talk/app/webrtc/peerconnectionfactory_unittest.cc
@@ -26,10 +26,13 @@
*/
#include <string>
+#include <utility>
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/peerconnectionfactory.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
#include "talk/app/webrtc/videosourceinterface.h"
@@ -39,6 +42,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
using webrtc::DataChannelInterface;
using webrtc::DtlsIdentityStoreInterface;
@@ -47,17 +51,11 @@ using webrtc::MediaStreamInterface;
using webrtc::PeerConnectionFactoryInterface;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionObserver;
-using webrtc::PortAllocatorFactoryInterface;
using webrtc::VideoSourceInterface;
using webrtc::VideoTrackInterface;
namespace {
-typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
- StunConfigurations;
-typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
- TurnConfigurations;
-
static const char kStunIceServer[] = "stun:stun.l.google.com:19302";
static const char kTurnIceServer[] = "turn:test%40hello.com@test.com:1234";
static const char kTurnIceServerWithTransport[] =
@@ -103,6 +101,9 @@ class NullPeerConnectionObserver : public PeerConnectionObserver {
class PeerConnectionFactoryTest : public testing::Test {
void SetUp() {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
factory_ = webrtc::CreatePeerConnectionFactory(rtc::Thread::Current(),
rtc::Thread::Current(),
NULL,
@@ -110,57 +111,58 @@ class PeerConnectionFactoryTest : public testing::Test {
NULL);
ASSERT_TRUE(factory_.get() != NULL);
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
+ port_allocator_.reset(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
+ raw_port_allocator_ = port_allocator_.get();
}
protected:
- void VerifyStunConfigurations(StunConfigurations stun_config) {
- webrtc::FakePortAllocatorFactory* allocator =
- static_cast<webrtc::FakePortAllocatorFactory*>(
- allocator_factory_.get());
- ASSERT_TRUE(allocator != NULL);
- EXPECT_EQ(stun_config.size(), allocator->stun_configs().size());
- for (size_t i = 0; i < stun_config.size(); ++i) {
- EXPECT_EQ(stun_config[i].server.ToString(),
- allocator->stun_configs()[i].server.ToString());
- }
+ void VerifyStunServers(cricket::ServerAddresses stun_servers) {
+ EXPECT_EQ(stun_servers, raw_port_allocator_->stun_servers());
}
- void VerifyTurnConfigurations(TurnConfigurations turn_config) {
- webrtc::FakePortAllocatorFactory* allocator =
- static_cast<webrtc::FakePortAllocatorFactory*>(
- allocator_factory_.get());
- ASSERT_TRUE(allocator != NULL);
- EXPECT_EQ(turn_config.size(), allocator->turn_configs().size());
- for (size_t i = 0; i < turn_config.size(); ++i) {
- EXPECT_EQ(turn_config[i].server.ToString(),
- allocator->turn_configs()[i].server.ToString());
- EXPECT_EQ(turn_config[i].username, allocator->turn_configs()[i].username);
- EXPECT_EQ(turn_config[i].password, allocator->turn_configs()[i].password);
- EXPECT_EQ(turn_config[i].transport_type,
- allocator->turn_configs()[i].transport_type);
+ void VerifyTurnServers(std::vector<cricket::RelayServerConfig> turn_servers) {
+ EXPECT_EQ(turn_servers.size(), raw_port_allocator_->turn_servers().size());
+ for (size_t i = 0; i < turn_servers.size(); ++i) {
+ ASSERT_EQ(1u, turn_servers[i].ports.size());
+ EXPECT_EQ(1u, raw_port_allocator_->turn_servers()[i].ports.size());
+ EXPECT_EQ(
+ turn_servers[i].ports[0].address.ToString(),
+ raw_port_allocator_->turn_servers()[i].ports[0].address.ToString());
+ EXPECT_EQ(turn_servers[i].ports[0].proto,
+ raw_port_allocator_->turn_servers()[i].ports[0].proto);
+ EXPECT_EQ(turn_servers[i].credentials.username,
+ raw_port_allocator_->turn_servers()[i].credentials.username);
+ EXPECT_EQ(turn_servers[i].credentials.password,
+ raw_port_allocator_->turn_servers()[i].credentials.password);
}
}
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory_;
NullPeerConnectionObserver observer_;
- rtc::scoped_refptr<PortAllocatorFactoryInterface> allocator_factory_;
+ rtc::scoped_ptr<cricket::FakePortAllocator> port_allocator_;
+ // Since the PC owns the port allocator after it's been initialized,
+ // this should only be used when known to be safe.
+ cricket::FakePortAllocator* raw_port_allocator_;
};
// Verify creation of PeerConnection using internal ADM, video factory and
// internal libjingle threads.
TEST(PeerConnectionFactoryTestInternal, CreatePCUsingInternalModules) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
+
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
webrtc::CreatePeerConnectionFactory());
NullPeerConnectionObserver observer;
- webrtc::PeerConnectionInterface::IceServers servers;
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory->CreatePeerConnection(
- servers, nullptr, nullptr, dtls_identity_store.Pass(), &observer));
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory->CreatePeerConnection(
+ config, nullptr, nullptr, std::move(dtls_identity_store), &observer));
EXPECT_TRUE(pc.get() != nullptr);
}
@@ -180,25 +182,22 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServers) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("stun.l.google.com", 19302);
+ stun_servers.insert(stun1);
+ VerifyStunServers(stun_servers);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("test.com", 1234, "test@hello.com",
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ cricket::RelayServerConfig turn2("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn2);
+ VerifyTurnServers(turn_servers);
}
// This test verifies creation of PeerConnection with valid STUN and TURN
@@ -213,63 +212,22 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServersUrls) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
-}
-
-// This test verifies creation of PeerConnection with valid STUN and TURN
-// configuration. Also verifies the URL's parsed correctly as expected.
-// This version doesn't use RTCConfiguration.
-// TODO(mallinath) - Remove this method after clients start using RTCConfig.
-TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServersOldSignature) {
- webrtc::PeerConnectionInterface::IceServers ice_servers;
- webrtc::PeerConnectionInterface::IceServer ice_server;
- ice_server.uri = kStunIceServer;
- ice_servers.push_back(ice_server);
- ice_server.uri = kTurnIceServer;
- ice_server.password = kTurnPassword;
- ice_servers.push_back(ice_server);
- ice_server.uri = kTurnIceServerWithTransport;
- ice_server.password = kTurnPassword;
- ice_servers.push_back(ice_server);
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
- new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(ice_servers, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("stun.l.google.com", 19302);
+ stun_servers.insert(stun1);
+ VerifyStunServers(stun_servers);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("test.com", 1234, "test@hello.com",
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ cricket::RelayServerConfig turn2("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn2);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingNoUsernameInUri) {
@@ -283,17 +241,15 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingNoUsernameInUri) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
- "test.com", 1234, kTurnUsername, kTurnPassword, "udp", false);
- turn_configs.push_back(turn);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn("test.com", 1234, kTurnUsername,
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn);
+ VerifyTurnServers(turn_servers);
}
// This test verifies the PeerConnection is created properly with a TURN url which
@@ -306,17 +262,15 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingTurnUrlWithTransportParam) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingSecureTurnUrl) {
@@ -333,25 +287,23 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingSecureTurnUrl) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "hello.com", kDefaultStunTlsPort, "test", kTurnPassword, "tcp", true);
- turn_configs.push_back(turn1);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("hello.com", kDefaultStunTlsPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn1);
+ // TURNS without a transport param should default to tcp.
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", 443, "test_no_transport", kTurnPassword, "tcp", true);
- turn_configs.push_back(turn2);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn3(
- "hello.com", kDefaultStunTlsPort, "test_no_transport",
- kTurnPassword, "tcp", true);
- turn_configs.push_back(turn3);
- VerifyTurnConfigurations(turn_configs);
+ cricket::RelayServerConfig turn2("hello.com", 443, "test_no_transport",
+ kTurnPassword, cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn2);
+ cricket::RelayServerConfig turn3("hello.com", kDefaultStunTlsPort,
+ "test_no_transport", kTurnPassword,
+ cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn3);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingIPLiteralAddress) {
@@ -370,32 +322,26 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIPLiteralAddress) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "1.2.3.4", 1234);
- stun_configs.push_back(stun1);
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun2(
- "1.2.3.4", 3478);
- stun_configs.push_back(stun2); // Default port
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun3(
- "2401:fa00:4::", 1234);
- stun_configs.push_back(stun3);
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun4(
- "2401:fa00:4::", 3478);
- stun_configs.push_back(stun4); // Default port
- VerifyStunConfigurations(stun_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("1.2.3.4", 1234);
+ stun_servers.insert(stun1);
+ rtc::SocketAddress stun2("1.2.3.4", 3478);
+ stun_servers.insert(stun2); // Default port
+ rtc::SocketAddress stun3("2401:fa00:4::", 1234);
+ stun_servers.insert(stun3);
+ rtc::SocketAddress stun4("2401:fa00:4::", 3478);
+ stun_servers.insert(stun4); // Default port
+ VerifyStunServers(stun_servers);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "2401:fa00:4::", 1234, "test", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- VerifyTurnConfigurations(turn_configs);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("2401:fa00:4::", 1234, "test", kTurnPassword,
+ cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ VerifyTurnServers(turn_servers);
}
// This test verifies the captured stream is rendered locally using a
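
Note on the pattern above: every test migrates to the same calling convention — instead of handing CreatePeerConnection a PortAllocatorFactoryInterface, the caller now constructs a cricket::PortAllocator itself and transfers ownership with std::move. A minimal sketch of that convention, mirroring the fixture above (factory, config, and observer are assumed to be set up as in the tests; the raw pointer is only safe to use while the PeerConnection is alive):

    // Sketch: the new ownership-passing convention used by the fixture.
    rtc::scoped_ptr<cricket::FakePortAllocator> port_allocator(
        new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
    // The PeerConnection takes ownership of the allocator; keep a raw
    // pointer only for inspection while the PC is known to be alive.
    cricket::FakePortAllocator* raw_port_allocator = port_allocator.get();
    rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store(
        new FakeDtlsIdentityStore());
    rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc(
        factory->CreatePeerConnection(config, nullptr,
                                      std::move(port_allocator),
                                      std::move(dtls_identity_store),
                                      &observer));

raw_port_allocator can then be queried for stun_servers()/turn_servers(), exactly as VerifyStunServers/VerifyTurnServers do above.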
diff --git a/talk/app/webrtc/peerconnectionfactoryproxy.h b/talk/app/webrtc/peerconnectionfactoryproxy.h
index 5e924df3a1..714ce6b7eb 100644
--- a/talk/app/webrtc/peerconnectionfactoryproxy.h
+++ b/talk/app/webrtc/peerconnectionfactoryproxy.h
@@ -29,6 +29,7 @@
#define TALK_APP_WEBRTC_PEERCONNECTIONFACTORYPROXY_H_
#include <string>
+#include <utility>
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/proxy.h"
@@ -38,17 +39,17 @@ namespace webrtc {
BEGIN_PROXY_MAP(PeerConnectionFactory)
PROXY_METHOD1(void, SetOptions, const Options&)
- // Can't use PROXY_METHOD5 because scoped_ptr must be Pass()ed.
+ // Can't use PROXY_METHOD5 because scoped_ptr must be moved.
// TODO(tommi,hbos): Use of templates to support scoped_ptr?
rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& a1,
const MediaConstraintsInterface* a2,
- PortAllocatorFactoryInterface* a3,
+ rtc::scoped_ptr<cricket::PortAllocator> a3,
rtc::scoped_ptr<DtlsIdentityStoreInterface> a4,
PeerConnectionObserver* a5) override {
return owner_thread_->Invoke<rtc::scoped_refptr<PeerConnectionInterface>>(
rtc::Bind(&PeerConnectionFactoryProxy::CreatePeerConnection_ot, this,
- a1, a2, a3, a4.release(), a5));
+ a1, a2, a3.release(), a4.release(), a5));
}
PROXY_METHOD1(rtc::scoped_refptr<MediaStreamInterface>,
CreateLocalMediaStream, const std::string&)
@@ -70,11 +71,13 @@ BEGIN_PROXY_MAP(PeerConnectionFactory)
rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection_ot(
const PeerConnectionInterface::RTCConfiguration& a1,
const MediaConstraintsInterface* a2,
- PortAllocatorFactoryInterface* a3,
+ cricket::PortAllocator* a3,
DtlsIdentityStoreInterface* a4,
PeerConnectionObserver* a5) {
+ rtc::scoped_ptr<cricket::PortAllocator> ptr_a3(a3);
rtc::scoped_ptr<DtlsIdentityStoreInterface> ptr_a4(a4);
- return c_->CreatePeerConnection(a1, a2, a3, ptr_a4.Pass(), a5);
+ return c_->CreatePeerConnection(a1, a2, std::move(ptr_a3),
+ std::move(ptr_a4), a5);
}
END_PROXY()
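
Because rtc::Bind copies its arguments, the proxy cannot move a scoped_ptr through it directly; it releases the raw pointer on the calling thread and re-adopts it into a scoped_ptr on the owner thread, as the a3/a4 parameters above show. The same idiom in isolation, as a sketch — Widget and InvokeOnOwnerThread are hypothetical stand-ins, not WebRTC API:

    // Sketch of the release/re-adopt idiom used by the proxy above.
    // |Widget| and |InvokeOnOwnerThread| are hypothetical stand-ins.
    void Caller(rtc::scoped_ptr<Widget> w) {
      Widget* raw = w.release();  // Give up ownership before binding...
      InvokeOnOwnerThread(raw);
    }

    void OnOwnerThread(Widget* raw) {
      rtc::scoped_ptr<Widget> w(raw);  // ...and take it back on arrival.
      // |w| is destroyed on the owner thread unless moved on from here.
    }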
diff --git a/talk/app/webrtc/peerconnectioninterface.h b/talk/app/webrtc/peerconnectioninterface.h
index 77caa9d78b..b9afbad204 100644
--- a/talk/app/webrtc/peerconnectioninterface.h
+++ b/talk/app/webrtc/peerconnectioninterface.h
@@ -69,6 +69,7 @@
#define TALK_APP_WEBRTC_PEERCONNECTIONINTERFACE_H_
#include <string>
+#include <utility>
#include <vector>
#include "talk/app/webrtc/datachannelinterface.h"
@@ -86,6 +87,7 @@
#include "webrtc/base/rtccertificate.h"
#include "webrtc/base/sslstreamadapter.h"
#include "webrtc/base/socketaddress.h"
+#include "webrtc/p2p/base/portallocator.h"
namespace rtc {
class SSLIdentity;
@@ -93,7 +95,6 @@ class Thread;
}
namespace cricket {
-class PortAllocator;
class WebRtcVideoDecoderFactory;
class WebRtcVideoEncoderFactory;
}
@@ -248,28 +249,27 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
// TODO(pthatcher): Rename this ice_servers, but update Chromium
// at the same time.
IceServers servers;
- // A localhost candidate is signaled whenever a candidate with the any
- // address is allocated.
- bool enable_localhost_ice_candidate;
BundlePolicy bundle_policy;
RtcpMuxPolicy rtcp_mux_policy;
TcpCandidatePolicy tcp_candidate_policy;
int audio_jitter_buffer_max_packets;
bool audio_jitter_buffer_fast_accelerate;
- int ice_connection_receiving_timeout;
+ int ice_connection_receiving_timeout; // ms
+ int ice_backup_candidate_pair_ping_interval; // ms
ContinualGatheringPolicy continual_gathering_policy;
std::vector<rtc::scoped_refptr<rtc::RTCCertificate>> certificates;
-
+ bool disable_prerenderer_smoothing;
RTCConfiguration()
: type(kAll),
- enable_localhost_ice_candidate(false),
bundle_policy(kBundlePolicyBalanced),
rtcp_mux_policy(kRtcpMuxPolicyNegotiate),
tcp_candidate_policy(kTcpCandidatePolicyEnabled),
audio_jitter_buffer_max_packets(kAudioJitterBufferMaxPackets),
audio_jitter_buffer_fast_accelerate(false),
ice_connection_receiving_timeout(kUndefined),
- continual_gathering_policy(GATHER_ONCE) {}
+ ice_backup_candidate_pair_ping_interval(kUndefined),
+ continual_gathering_policy(GATHER_ONCE),
+ disable_prerenderer_smoothing(false) {}
};
struct RTCOfferAnswerOptions {
@@ -337,6 +337,15 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
AudioTrackInterface* track) = 0;
// TODO(deadbeef): Make these pure virtual once all subclasses implement them.
+ // |kind| must be "audio" or "video".
+ // |stream_id| is used to populate the msid attribute; if empty, one will
+ // be generated automatically.
+ virtual rtc::scoped_refptr<RtpSenderInterface> CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) {
+ return rtc::scoped_refptr<RtpSenderInterface>();
+ }
+
virtual std::vector<rtc::scoped_refptr<RtpSenderInterface>> GetSenders()
const {
return std::vector<rtc::scoped_refptr<RtpSenderInterface>>();
@@ -480,51 +489,6 @@ class PeerConnectionObserver {
~PeerConnectionObserver() {}
};
-// Factory class used for creating cricket::PortAllocator that is used
-// for ICE negotiation.
-class PortAllocatorFactoryInterface : public rtc::RefCountInterface {
- public:
- struct StunConfiguration {
- StunConfiguration(const std::string& address, int port)
- : server(address, port) {}
- // STUN server address and port.
- rtc::SocketAddress server;
- };
-
- struct TurnConfiguration {
- TurnConfiguration(const std::string& address,
- int port,
- const std::string& username,
- const std::string& password,
- const std::string& transport_type,
- bool secure)
- : server(address, port),
- username(username),
- password(password),
- transport_type(transport_type),
- secure(secure) {}
- rtc::SocketAddress server;
- std::string username;
- std::string password;
- std::string transport_type;
- bool secure;
- };
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun_servers,
- const std::vector<TurnConfiguration>& turn_configurations) = 0;
-
- // TODO(phoglund): Make pure virtual when Chrome's factory implements this.
- // After this method is called, the port allocator should consider loopback
- // network interfaces as well.
- virtual void SetNetworkIgnoreMask(int network_ignore_mask) {
- }
-
- protected:
- PortAllocatorFactoryInterface() {}
- ~PortAllocatorFactoryInterface() {}
-};
-
// PeerConnectionFactoryInterface is the factory interface used for creating
// PeerConnection, MediaStream and media tracks.
// PeerConnectionFactoryInterface will create required libjingle threads,
@@ -532,19 +496,18 @@ class PortAllocatorFactoryInterface : public rtc::RefCountInterface {
// If an application decides to provide its own threads and network
// implementation of these classes it should use the alternate
// CreatePeerConnectionFactory method which accepts threads as input and use the
-// CreatePeerConnection version that takes a PortAllocatorFactoryInterface as
+// CreatePeerConnection version that takes a PortAllocator as an
// argument.
class PeerConnectionFactoryInterface : public rtc::RefCountInterface {
public:
class Options {
public:
- Options() :
- disable_encryption(false),
- disable_sctp_data_channels(false),
- disable_network_monitor(false),
- network_ignore_mask(rtc::kDefaultNetworkIgnoreMask),
- ssl_max_version(rtc::SSL_PROTOCOL_DTLS_10) {
- }
+ Options()
+ : disable_encryption(false),
+ disable_sctp_data_channels(false),
+ disable_network_monitor(false),
+ network_ignore_mask(rtc::kDefaultNetworkIgnoreMask),
+ ssl_max_version(rtc::SSL_PROTOCOL_DTLS_12) {}
bool disable_encryption;
bool disable_sctp_data_channels;
bool disable_network_monitor;
@@ -562,31 +525,12 @@ class PeerConnectionFactoryInterface : public rtc::RefCountInterface {
virtual void SetOptions(const Options& options) = 0;
- virtual rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::RTCConfiguration& configuration,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) = 0;
-
- // TODO(hbos): Remove below version after clients are updated to above method.
- // In latest W3C WebRTC draft, PC constructor will take RTCConfiguration,
- // and not IceServers. RTCConfiguration is made up of ice servers and
- // ice transport type.
- // http://dev.w3.org/2011/webrtc/editor/webrtc.html
- inline rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::IceServers& servers,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) {
- PeerConnectionInterface::RTCConfiguration rtc_config;
- rtc_config.servers = servers;
- return CreatePeerConnection(rtc_config, constraints, allocator_factory,
- dtls_identity_store.Pass(), observer);
- }
+ virtual rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+ const PeerConnectionInterface::RTCConfiguration& configuration,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
+ rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
+ PeerConnectionObserver* observer) = 0;
virtual rtc::scoped_refptr<MediaStreamInterface>
CreateLocalMediaStream(const std::string& label) = 0;
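
With the legacy IceServers overload removed, callers build an RTCConfiguration and can, via the new CreateSender hook, create senders before any track exists. A short usage sketch, assuming |pc| was created from such a configuration (SetTrack is the existing RtpSenderInterface method):

    // Sketch: pre-creating a sender so |stream_id| lands in the msid
    // attribute of the first offer, before any track is attached.
    rtc::scoped_refptr<webrtc::RtpSenderInterface> sender =
        pc->CreateSender("video", "stream_label");
    // Later, once a capturer/track exists:
    // sender->SetTrack(video_track.get());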
diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc
index 63163fd651..c3789b7dd8 100644
--- a/talk/app/webrtc/peerconnectioninterface_unittest.cc
+++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc
@@ -26,9 +26,9 @@
*/
#include <string>
+#include <utility>
#include "talk/app/webrtc/audiotrack.h"
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/mediastreaminterface.h"
@@ -37,6 +37,9 @@
#include "talk/app/webrtc/rtpreceiverinterface.h"
#include "talk/app/webrtc/rtpsenderinterface.h"
#include "talk/app/webrtc/streamcollection.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/test/fakeconstraints.h"
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
@@ -52,6 +55,7 @@
#include "webrtc/base/sslstreamadapter.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
static const char kStreamLabel1[] = "local_stream_1";
static const char kStreamLabel2[] = "local_stream_2";
@@ -258,7 +262,6 @@ using webrtc::AudioTrackInterface;
using webrtc::DataBuffer;
using webrtc::DataChannelInterface;
using webrtc::FakeConstraints;
-using webrtc::FakePortAllocatorFactory;
using webrtc::IceCandidateInterface;
using webrtc::MediaConstraintsInterface;
using webrtc::MediaStream;
@@ -270,7 +273,6 @@ using webrtc::MockSetSessionDescriptionObserver;
using webrtc::MockStatsObserver;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionObserver;
-using webrtc::PortAllocatorFactoryInterface;
using webrtc::RtpReceiverInterface;
using webrtc::RtpSenderInterface;
using webrtc::SdpParseError;
@@ -515,6 +517,12 @@ class MockPeerConnectionObserver : public PeerConnectionObserver {
class PeerConnectionInterfaceTest : public testing::Test {
protected:
+ PeerConnectionInterfaceTest() {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
+ }
+
virtual void SetUp() {
pc_factory_ = webrtc::CreatePeerConnectionFactory(
rtc::Thread::Current(), rtc::Thread::Current(), NULL, NULL,
@@ -533,15 +541,17 @@ class PeerConnectionInterfaceTest : public testing::Test {
void CreatePeerConnection(const std::string& uri,
const std::string& password,
webrtc::MediaConstraintsInterface* constraints) {
+ PeerConnectionInterface::RTCConfiguration config;
PeerConnectionInterface::IceServer server;
- PeerConnectionInterface::IceServers servers;
if (!uri.empty()) {
server.uri = uri;
server.password = password;
- servers.push_back(server);
+ config.servers.push_back(server);
}
- port_allocator_factory_ = FakePortAllocatorFactory::Create();
+ rtc::scoped_ptr<cricket::FakePortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
+ port_allocator_ = port_allocator.get();
// DTLS does not work in a loopback call, so is disabled for most of the
// tests in this file. We only create a FakeIdentityService if the test
@@ -562,52 +572,47 @@ class PeerConnectionInterfaceTest : public testing::Test {
nullptr) && dtls) {
dtls_identity_store.reset(new FakeDtlsIdentityStore());
}
- pc_ = pc_factory_->CreatePeerConnection(servers, constraints,
- port_allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_);
+ pc_ = pc_factory_->CreatePeerConnection(
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), &observer_);
ASSERT_TRUE(pc_.get() != NULL);
observer_.SetPeerConnectionInterface(pc_.get());
EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
}
void CreatePeerConnectionExpectFail(const std::string& uri) {
+ PeerConnectionInterface::RTCConfiguration config;
PeerConnectionInterface::IceServer server;
- PeerConnectionInterface::IceServers servers;
server.uri = uri;
- servers.push_back(server);
+ config.servers.push_back(server);
- scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store;
- port_allocator_factory_ = FakePortAllocatorFactory::Create();
scoped_refptr<PeerConnectionInterface> pc;
- pc = pc_factory_->CreatePeerConnection(
- servers, nullptr, port_allocator_factory_.get(),
- dtls_identity_store.Pass(), &observer_);
- ASSERT_EQ(nullptr, pc);
+ pc = pc_factory_->CreatePeerConnection(config, nullptr, nullptr, nullptr,
+ &observer_);
+ EXPECT_EQ(nullptr, pc);
}
void CreatePeerConnectionWithDifferentConfigurations() {
CreatePeerConnection(kStunAddressOnly, "", NULL);
- EXPECT_EQ(1u, port_allocator_factory_->stun_configs().size());
- EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
- EXPECT_EQ("address",
- port_allocator_factory_->stun_configs()[0].server.hostname());
+ EXPECT_EQ(1u, port_allocator_->stun_servers().size());
+ EXPECT_EQ(0u, port_allocator_->turn_servers().size());
+ EXPECT_EQ("address", port_allocator_->stun_servers().begin()->hostname());
EXPECT_EQ(kDefaultStunPort,
- port_allocator_factory_->stun_configs()[0].server.port());
+ port_allocator_->stun_servers().begin()->port());
CreatePeerConnectionExpectFail(kStunInvalidPort);
CreatePeerConnectionExpectFail(kStunAddressPortAndMore1);
CreatePeerConnectionExpectFail(kStunAddressPortAndMore2);
CreatePeerConnection(kTurnIceServerUri, kTurnPassword, NULL);
- EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
- EXPECT_EQ(1u, port_allocator_factory_->turn_configs().size());
+ EXPECT_EQ(0u, port_allocator_->stun_servers().size());
+ EXPECT_EQ(1u, port_allocator_->turn_servers().size());
EXPECT_EQ(kTurnUsername,
- port_allocator_factory_->turn_configs()[0].username);
+ port_allocator_->turn_servers()[0].credentials.username);
EXPECT_EQ(kTurnPassword,
- port_allocator_factory_->turn_configs()[0].password);
+ port_allocator_->turn_servers()[0].credentials.password);
EXPECT_EQ(kTurnHostname,
- port_allocator_factory_->turn_configs()[0].server.hostname());
+ port_allocator_->turn_servers()[0].ports[0].address.hostname());
}
void ReleasePeerConnection() {
@@ -926,7 +931,7 @@ class PeerConnectionInterfaceTest : public testing::Test {
ASSERT_TRUE(stream->AddTrack(video_track));
}
- scoped_refptr<FakePortAllocatorFactory> port_allocator_factory_;
+ cricket::FakePortAllocator* port_allocator_ = nullptr;
scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory_;
scoped_refptr<PeerConnectionInterface> pc_;
MockPeerConnectionObserver observer_;
@@ -1156,6 +1161,64 @@ TEST_F(PeerConnectionInterfaceTest, SsrcInOfferAnswer) {
EXPECT_NE(audio_ssrc, video_ssrc);
}
+// Test that it's possible to call AddTrack on a MediaStream after adding
+// the stream to a PeerConnection.
+// TODO(deadbeef): Remove this test once this behavior is no longer supported.
+TEST_F(PeerConnectionInterfaceTest, AddTrackAfterAddStream) {
+ CreatePeerConnection();
+ // Create audio stream and add to PeerConnection.
+ AddVoiceStream(kStreamLabel1);
+ MediaStreamInterface* stream = pc_->local_streams()->at(0);
+
+ // Add video track to the audio-only stream.
+ scoped_refptr<VideoTrackInterface> video_track(
+ pc_factory_->CreateVideoTrack("video_label", nullptr));
+ stream->AddTrack(video_track.get());
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ EXPECT_TRUE(video_desc != nullptr);
+}
+
+// Test that it's possible to call RemoveTrack on a MediaStream after adding
+// the stream to a PeerConnection.
+// TODO(deadbeef): Remove this test once this behavior is no longer supported.
+TEST_F(PeerConnectionInterfaceTest, RemoveTrackAfterAddStream) {
+ CreatePeerConnection();
+ // Create audio/video stream and add to PeerConnection.
+ AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+ MediaStreamInterface* stream = pc_->local_streams()->at(0);
+
+ // Remove the video track.
+ stream->RemoveTrack(stream->GetVideoTracks()[0]);
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ EXPECT_TRUE(video_desc == nullptr);
+}
+
+// Test creating a sender with a stream ID, and ensure the ID is populated
+// in the offer.
+TEST_F(PeerConnectionInterfaceTest, CreateSenderWithStream) {
+ CreatePeerConnection();
+ pc_->CreateSender("video", kStreamLabel1);
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ ASSERT_TRUE(video_desc != nullptr);
+ ASSERT_EQ(1u, video_desc->streams().size());
+ EXPECT_EQ(kStreamLabel1, video_desc->streams()[0].sync_label);
+}
+
// Test that we can specify a certain track that we want statistics about.
TEST_F(PeerConnectionInterfaceTest, GetStatsForSpecificTrack) {
InitiateCall();
@@ -1660,6 +1723,22 @@ TEST_F(PeerConnectionInterfaceTest, CreateSubsequentInactiveOffer) {
ASSERT_EQ(cricket::MD_INACTIVE, audio_desc->direction());
}
+// Test that we can use SetConfiguration to change the ICE servers of the
+// PortAllocator.
+TEST_F(PeerConnectionInterfaceTest, SetConfigurationChangesIceServers) {
+ CreatePeerConnection();
+
+ PeerConnectionInterface::RTCConfiguration config;
+ PeerConnectionInterface::IceServer server;
+ server.uri = "stun:test_hostname";
+ config.servers.push_back(server);
+ EXPECT_TRUE(pc_->SetConfiguration(config));
+
+ EXPECT_EQ(1u, port_allocator_->stun_servers().size());
+ EXPECT_EQ("test_hostname",
+ port_allocator_->stun_servers().begin()->hostname());
+}
+
// Test that PeerConnection::Close changes the states to closed and all remote
// tracks change state to ended.
TEST_F(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) {
@@ -1977,6 +2056,28 @@ TEST_F(PeerConnectionInterfaceTest, SdpWithMsidDontCreatesDefaultStream) {
EXPECT_EQ(0u, observer_.remote_streams()->count());
}
+// This tests that when setting a new description, the old default tracks are
+// not destroyed and recreated.
+// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5250
+TEST_F(PeerConnectionInterfaceTest, DefaultTracksNotDestroyedAndRecreated) {
+ FakeConstraints constraints;
+ constraints.AddMandatory(webrtc::MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+ CreatePeerConnection(&constraints);
+ CreateAndSetRemoteOffer(kSdpStringWithoutStreamsAudioOnly);
+
+ ASSERT_EQ(1u, observer_.remote_streams()->count());
+ MediaStreamInterface* remote_stream = observer_.remote_streams()->at(0);
+ ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+
+ // Set the track to "disabled", then set a new description and ensure the
+ // track is still disabled, which ensures it hasn't been recreated.
+ remote_stream->GetAudioTracks()[0]->set_enabled(false);
+ CreateAndSetRemoteOffer(kSdpStringWithoutStreamsAudioOnly);
+ ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+ EXPECT_FALSE(remote_stream->GetAudioTracks()[0]->enabled());
+}
+
// This tests that a default MediaStream is not created if a remote session
// description is updated to not have any MediaStreams.
TEST_F(PeerConnectionInterfaceTest, VerifyDefaultStreamIsNotCreated) {
@@ -2020,8 +2121,10 @@ TEST_F(PeerConnectionInterfaceTest, LocalDescriptionChanged) {
EXPECT_TRUE(ContainsSender(senders, kVideoTracks[1]));
// Remove an audio and video track.
+ pc_->RemoveStream(reference_collection_->at(0));
rtc::scoped_ptr<SessionDescriptionInterface> desc_2;
CreateSessionDescriptionAndReference(1, 1, desc_2.accept());
+ pc_->AddStream(reference_collection_->at(0));
EXPECT_TRUE(DoSetLocalDescription(desc_2.release()));
senders = pc_->GetSenders();
EXPECT_EQ(2u, senders.size());
@@ -2220,7 +2323,9 @@ TEST(CreateSessionOptionsTest, GetDefaultMediaSessionOptionsForOffer) {
EXPECT_FALSE(options.has_video());
EXPECT_TRUE(options.bundle_enabled);
EXPECT_TRUE(options.vad_enabled);
- EXPECT_FALSE(options.transport_options.ice_restart);
+ EXPECT_FALSE(options.audio_transport_options.ice_restart);
+ EXPECT_FALSE(options.video_transport_options.ice_restart);
+ EXPECT_FALSE(options.data_transport_options.ice_restart);
}
// Test that a correct MediaSessionOptions is created for an offer if
@@ -2255,18 +2360,22 @@ TEST(CreateSessionOptionsTest,
// Test that a correct MediaSessionOptions is created to restart ice if
// IceRestart is set. It also tests that subsequent MediaSessionOptions don't
-// have |transport_options.ice_restart| set.
+// have |audio_transport_options.ice_restart| etc. set.
TEST(CreateSessionOptionsTest, GetMediaSessionOptionsForOfferWithIceRestart) {
RTCOfferAnswerOptions rtc_options;
rtc_options.ice_restart = true;
cricket::MediaSessionOptions options;
EXPECT_TRUE(ConvertRtcOptionsForOffer(rtc_options, &options));
- EXPECT_TRUE(options.transport_options.ice_restart);
+ EXPECT_TRUE(options.audio_transport_options.ice_restart);
+ EXPECT_TRUE(options.video_transport_options.ice_restart);
+ EXPECT_TRUE(options.data_transport_options.ice_restart);
rtc_options = RTCOfferAnswerOptions();
EXPECT_TRUE(ConvertRtcOptionsForOffer(rtc_options, &options));
- EXPECT_FALSE(options.transport_options.ice_restart);
+ EXPECT_FALSE(options.audio_transport_options.ice_restart);
+ EXPECT_FALSE(options.video_transport_options.ice_restart);
+ EXPECT_FALSE(options.data_transport_options.ice_restart);
}
// Test that the MediaConstraints in an answer don't affect if audio and video
diff --git a/talk/app/webrtc/peerconnectionproxy.h b/talk/app/webrtc/peerconnectionproxy.h
index d207fbbdd8..3c983d73c9 100644
--- a/talk/app/webrtc/peerconnectionproxy.h
+++ b/talk/app/webrtc/peerconnectionproxy.h
@@ -43,6 +43,10 @@ BEGIN_PROXY_MAP(PeerConnection)
PROXY_METHOD1(void, RemoveStream, MediaStreamInterface*)
PROXY_METHOD1(rtc::scoped_refptr<DtmfSenderInterface>,
CreateDtmfSender, AudioTrackInterface*)
+ PROXY_METHOD2(rtc::scoped_refptr<RtpSenderInterface>,
+ CreateSender,
+ const std::string&,
+ const std::string&)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpSenderInterface>>,
GetSenders)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpReceiverInterface>>,
diff --git a/talk/app/webrtc/portallocatorfactory.cc b/talk/app/webrtc/portallocatorfactory.cc
index bd6caccc80..64d714cd50 100644
--- a/talk/app/webrtc/portallocatorfactory.cc
+++ b/talk/app/webrtc/portallocatorfactory.cc
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2004--2011 Google Inc.
+ * Copyright 2011 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -24,69 +24,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+// TODO(deadbeef): Remove this file once chromium build files no longer
+// reference it.
#include "talk/app/webrtc/portallocatorfactory.h"
-
-#include "webrtc/p2p/base/basicpacketsocketfactory.h"
-#include "webrtc/p2p/client/basicportallocator.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/network.h"
-#include "webrtc/base/thread.h"
-
-namespace webrtc {
-
-using rtc::scoped_ptr;
-
-rtc::scoped_refptr<PortAllocatorFactoryInterface>
-PortAllocatorFactory::Create(
- rtc::Thread* worker_thread) {
- rtc::RefCountedObject<PortAllocatorFactory>* allocator =
- new rtc::RefCountedObject<PortAllocatorFactory>(worker_thread);
- return allocator;
-}
-
-PortAllocatorFactory::PortAllocatorFactory(rtc::Thread* worker_thread)
- : network_manager_(new rtc::BasicNetworkManager()),
- socket_factory_(new rtc::BasicPacketSocketFactory(worker_thread)) {
-}
-
-PortAllocatorFactory::~PortAllocatorFactory() {}
-
-void PortAllocatorFactory::SetNetworkIgnoreMask(int network_ignore_mask) {
- network_manager_->set_network_ignore_mask(network_ignore_mask);
-}
-
-cricket::PortAllocator* PortAllocatorFactory::CreatePortAllocator(
- const std::vector<StunConfiguration>& stun,
- const std::vector<TurnConfiguration>& turn) {
- cricket::ServerAddresses stun_hosts;
- typedef std::vector<StunConfiguration>::const_iterator StunIt;
- for (StunIt stun_it = stun.begin(); stun_it != stun.end(); ++stun_it) {
- stun_hosts.insert(stun_it->server);
- }
-
- scoped_ptr<cricket::BasicPortAllocator> allocator(
- new cricket::BasicPortAllocator(
- network_manager_.get(), socket_factory_.get(), stun_hosts));
-
- for (size_t i = 0; i < turn.size(); ++i) {
- cricket::RelayCredentials credentials(turn[i].username, turn[i].password);
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
- cricket::ProtocolType protocol;
- if (cricket::StringToProto(turn[i].transport_type.c_str(), &protocol)) {
- relay_server.ports.push_back(cricket::ProtocolAddress(
- turn[i].server, protocol, turn[i].secure));
- relay_server.credentials = credentials;
- // First in the list gets highest priority.
- relay_server.priority = static_cast<int>(turn.size() - i - 1);
- allocator->AddRelay(relay_server);
- } else {
- LOG(LS_WARNING) << "Ignoring TURN server " << turn[i].server << ". "
- << "Reason= Incorrect " << turn[i].transport_type
- << " transport parameter.";
- }
- }
- return allocator.release();
-}
-
-} // namespace webrtc
diff --git a/talk/app/webrtc/portallocatorfactory.h b/talk/app/webrtc/portallocatorfactory.h
index 83376d0b84..bb6cf4741f 100644
--- a/talk/app/webrtc/portallocatorfactory.h
+++ b/talk/app/webrtc/portallocatorfactory.h
@@ -24,49 +24,10 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
-// This file defines the default implementation of
-// PortAllocatorFactoryInterface.
-// This implementation creates instances of cricket::HTTPPortAllocator and uses
-// the BasicNetworkManager and BasicPacketSocketFactory.
+// TODO(deadbeef): Remove this file once chromium build files no longer
+// reference it.
#ifndef TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
#define TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
-#include "talk/app/webrtc/peerconnectioninterface.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace cricket {
-class PortAllocator;
-}
-
-namespace rtc {
-class BasicNetworkManager;
-class BasicPacketSocketFactory;
-}
-
-namespace webrtc {
-
-class PortAllocatorFactory : public PortAllocatorFactoryInterface {
- public:
- static rtc::scoped_refptr<PortAllocatorFactoryInterface> Create(
- rtc::Thread* worker_thread);
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun,
- const std::vector<TurnConfiguration>& turn);
-
- virtual void SetNetworkIgnoreMask(int network_ignore_mask);
-
- protected:
- explicit PortAllocatorFactory(rtc::Thread* worker_thread);
- ~PortAllocatorFactory();
-
- private:
- rtc::scoped_ptr<rtc::BasicNetworkManager> network_manager_;
- rtc::scoped_ptr<rtc::BasicPacketSocketFactory> socket_factory_;
-};
-
-} // namespace webrtc
-
#endif // TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/remoteaudiosource.cc b/talk/app/webrtc/remoteaudiosource.cc
index 41f3d8798a..e904dd9192 100644
--- a/talk/app/webrtc/remoteaudiosource.cc
+++ b/talk/app/webrtc/remoteaudiosource.cc
@@ -29,44 +29,148 @@
#include <algorithm>
#include <functional>
+#include <utility>
+#include "talk/app/webrtc/mediastreamprovider.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
+#include "webrtc/base/thread.h"
namespace webrtc {
-rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create() {
- return new rtc::RefCountedObject<RemoteAudioSource>();
+class RemoteAudioSource::MessageHandler : public rtc::MessageHandler {
+ public:
+ explicit MessageHandler(RemoteAudioSource* source) : source_(source) {}
+
+ private:
+ ~MessageHandler() override {}
+
+ void OnMessage(rtc::Message* msg) override {
+ source_->OnMessage(msg);
+ delete this;
+ }
+
+ const rtc::scoped_refptr<RemoteAudioSource> source_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MessageHandler);
+};
+
+class RemoteAudioSource::Sink : public AudioSinkInterface {
+ public:
+ explicit Sink(RemoteAudioSource* source) : source_(source) {}
+ ~Sink() override { source_->OnAudioProviderGone(); }
+
+ private:
+ void OnData(const AudioSinkInterface::Data& audio) override {
+ if (source_)
+ source_->OnData(audio);
+ }
+
+ const rtc::scoped_refptr<RemoteAudioSource> source_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Sink);
+};
+
+rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create(
+ uint32_t ssrc,
+ AudioProviderInterface* provider) {
+ rtc::scoped_refptr<RemoteAudioSource> ret(
+ new rtc::RefCountedObject<RemoteAudioSource>());
+ ret->Initialize(ssrc, provider);
+ return ret;
}
-RemoteAudioSource::RemoteAudioSource() {
+RemoteAudioSource::RemoteAudioSource()
+ : main_thread_(rtc::Thread::Current()),
+ state_(MediaSourceInterface::kLive) {
+ RTC_DCHECK(main_thread_);
}
RemoteAudioSource::~RemoteAudioSource() {
- ASSERT(audio_observers_.empty());
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(audio_observers_.empty());
+ RTC_DCHECK(sinks_.empty());
+}
+
+void RemoteAudioSource::Initialize(uint32_t ssrc,
+ AudioProviderInterface* provider) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ // To make sure we always get notified when the provider goes out of scope,
+ // we register for callbacks here and not on demand in AddSink.
+ if (provider) { // May be null in tests.
+ provider->SetRawAudioSink(
+ ssrc, rtc::scoped_ptr<AudioSinkInterface>(new Sink(this)));
+ }
}
MediaSourceInterface::SourceState RemoteAudioSource::state() const {
- return MediaSourceInterface::kLive;
+ RTC_DCHECK(main_thread_->IsCurrent());
+ return state_;
+}
+
+bool RemoteAudioSource::remote() const {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ return true;
}
void RemoteAudioSource::SetVolume(double volume) {
- ASSERT(volume >= 0 && volume <= 10);
- for (AudioObserverList::iterator it = audio_observers_.begin();
- it != audio_observers_.end(); ++it) {
- (*it)->OnSetVolume(volume);
- }
+ RTC_DCHECK(volume >= 0 && volume <= 10);
+ for (auto* observer : audio_observers_)
+ observer->OnSetVolume(volume);
}
void RemoteAudioSource::RegisterAudioObserver(AudioObserver* observer) {
- ASSERT(observer != NULL);
- ASSERT(std::find(audio_observers_.begin(), audio_observers_.end(),
- observer) == audio_observers_.end());
+ RTC_DCHECK(observer != NULL);
+ RTC_DCHECK(std::find(audio_observers_.begin(), audio_observers_.end(),
+ observer) == audio_observers_.end());
audio_observers_.push_back(observer);
}
void RemoteAudioSource::UnregisterAudioObserver(AudioObserver* observer) {
- ASSERT(observer != NULL);
+ RTC_DCHECK(observer != NULL);
audio_observers_.remove(observer);
}
+void RemoteAudioSource::AddSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(sink);
+
+ if (state_ != MediaSourceInterface::kLive) {
+ LOG(LS_ERROR) << "Can't register sink as the source isn't live.";
+ return;
+ }
+
+ rtc::CritScope lock(&sink_lock_);
+ RTC_DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
+ sinks_.push_back(sink);
+}
+
+void RemoteAudioSource::RemoveSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(sink);
+
+ rtc::CritScope lock(&sink_lock_);
+ sinks_.remove(sink);
+}
+
+void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) {
+ // Called on the externally-owned audio callback thread, from within webrtc.
+ rtc::CritScope lock(&sink_lock_);
+ for (auto* sink : sinks_) {
+ sink->OnData(audio.data, 16, audio.sample_rate, audio.channels,
+ audio.samples_per_channel);
+ }
+}
+
+void RemoteAudioSource::OnAudioProviderGone() {
+ // Called when the data provider is deleted. This may happen on
+ // libjingle's worker thread or on a different worker thread.
+ main_thread_->Post(new MessageHandler(this));
+}
+
+void RemoteAudioSource::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ sinks_.clear();
+ state_ = MediaSourceInterface::kEnded;
+ FireOnChanged();
+}
+
} // namespace webrtc
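
OnAudioProviderGone may fire on whichever thread destroys the provider, so the state change is marshaled to the main thread through a one-shot rtc::MessageHandler that pins the source with a scoped_refptr and deletes itself after dispatch. The idiom reduced to its essentials, as a sketch:

    // Sketch of the one-shot marshaling idiom used by RemoteAudioSource.
    class OneShot : public rtc::MessageHandler {
     public:
      explicit OneShot(rtc::scoped_refptr<RemoteAudioSource> source)
          : source_(source) {}

     private:
      void OnMessage(rtc::Message* msg) override {
        source_->OnMessage(msg);  // Runs on the thread it was posted to.
        delete this;              // One-shot: frees itself after dispatch.
      }
      const rtc::scoped_refptr<RemoteAudioSource> source_;
    };

    // From any thread:  main_thread_->Post(new OneShot(this));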
diff --git a/talk/app/webrtc/remoteaudiosource.h b/talk/app/webrtc/remoteaudiosource.h
index e49aca5684..d648ba4604 100644
--- a/talk/app/webrtc/remoteaudiosource.h
+++ b/talk/app/webrtc/remoteaudiosource.h
@@ -29,36 +29,66 @@
#define TALK_APP_WEBRTC_REMOTEAUDIOSOURCE_H_
#include <list>
+#include <string>
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/notifier.h"
+#include "talk/media/base/audiorenderer.h"
+#include "webrtc/audio/audio_sink.h"
+#include "webrtc/base/criticalsection.h"
+
+namespace rtc {
+struct Message;
+class Thread;
+} // namespace rtc
namespace webrtc {
-using webrtc::AudioSourceInterface;
+class AudioProviderInterface;
// This class implements the audio source used by the remote audio track.
class RemoteAudioSource : public Notifier<AudioSourceInterface> {
public:
// Creates an instance of RemoteAudioSource.
- static rtc::scoped_refptr<RemoteAudioSource> Create();
+ static rtc::scoped_refptr<RemoteAudioSource> Create(
+ uint32_t ssrc,
+ AudioProviderInterface* provider);
+
+ // MediaSourceInterface implementation.
+ MediaSourceInterface::SourceState state() const override;
+ bool remote() const override;
+
+ void AddSink(AudioTrackSinkInterface* sink) override;
+ void RemoveSink(AudioTrackSinkInterface* sink) override;
protected:
RemoteAudioSource();
- virtual ~RemoteAudioSource();
+ ~RemoteAudioSource() override;
+
+ // Post-construction initialization, where we can do things like save a
+ // reference to ourselves (we need to be fully constructed first).
+ void Initialize(uint32_t ssrc, AudioProviderInterface* provider);
private:
typedef std::list<AudioObserver*> AudioObserverList;
- // MediaSourceInterface implementation.
- MediaSourceInterface::SourceState state() const override;
-
// AudioSourceInterface implementation.
void SetVolume(double volume) override;
void RegisterAudioObserver(AudioObserver* observer) override;
void UnregisterAudioObserver(AudioObserver* observer) override;
+ class Sink;
+ void OnData(const AudioSinkInterface::Data& audio);
+ void OnAudioProviderGone();
+
+ class MessageHandler;
+ void OnMessage(rtc::Message* msg);
+
AudioObserverList audio_observers_;
+ rtc::CriticalSection sink_lock_;
+ std::list<AudioTrackSinkInterface*> sinks_;
+ rtc::Thread* const main_thread_;
+ SourceState state_;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamsignaling.h b/talk/app/webrtc/remoteaudiotrack.cc
index e8c5c110d0..5f0b23e59e 100644
--- a/talk/app/webrtc/mediastreamsignaling.h
+++ b/talk/app/webrtc/remoteaudiotrack.cc
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2012 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,4 +25,4 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// TODO(deadbeef): Remove this file once Chrome build files don't reference it.
+// TODO(tommi): Delete this file when removed from build files in Chromium.
diff --git a/talk/media/base/voiceprocessor.h b/talk/app/webrtc/remoteaudiotrack.h
index 8de2678c95..5f0b23e59e 100755..100644
--- a/talk/media/base/voiceprocessor.h
+++ b/talk/app/webrtc/remoteaudiotrack.h
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2004 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,5 +25,4 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// TODO(solenberg): Remove this file once Chromium's libjingle.gyp/.gn are
-// updated.
+// TODO(tommi): Delete this file when removed from build files in Chromium.
diff --git a/talk/app/webrtc/rtpreceiver.cc b/talk/app/webrtc/rtpreceiver.cc
index b88554f0ac..9540f36f2f 100644
--- a/talk/app/webrtc/rtpreceiver.cc
+++ b/talk/app/webrtc/rtpreceiver.cc
@@ -39,6 +39,7 @@ AudioRtpReceiver::AudioRtpReceiver(AudioTrackInterface* track,
ssrc_(ssrc),
provider_(provider),
cached_track_enabled_(track->enabled()) {
+ RTC_DCHECK(track_->GetSource()->remote());
track_->RegisterObserver(this);
track_->GetSource()->RegisterAudioObserver(this);
Reconfigure();
@@ -85,6 +86,7 @@ VideoRtpReceiver::VideoRtpReceiver(VideoTrackInterface* track,
uint32_t ssrc,
VideoProviderInterface* provider)
: id_(track->id()), track_(track), ssrc_(ssrc), provider_(provider) {
+ RTC_DCHECK(track_->GetSource()->remote());
provider_->SetVideoPlayout(ssrc_, true, track_->GetSource()->FrameInput());
}
diff --git a/talk/app/webrtc/rtpreceiver.h b/talk/app/webrtc/rtpreceiver.h
index a93ccbcbfe..db021baf68 100644
--- a/talk/app/webrtc/rtpreceiver.h
+++ b/talk/app/webrtc/rtpreceiver.h
@@ -68,10 +68,10 @@ class AudioRtpReceiver : public ObserverInterface,
private:
void Reconfigure();
- std::string id_;
- rtc::scoped_refptr<AudioTrackInterface> track_;
- uint32_t ssrc_;
- AudioProviderInterface* provider_;
+ const std::string id_;
+ const rtc::scoped_refptr<AudioTrackInterface> track_;
+ const uint32_t ssrc_;
+ AudioProviderInterface* provider_; // Set to null in Stop().
bool cached_track_enabled_;
};
diff --git a/talk/app/webrtc/rtpsender.cc b/talk/app/webrtc/rtpsender.cc
index 3a78f4598a..91e484b733 100644
--- a/talk/app/webrtc/rtpsender.cc
+++ b/talk/app/webrtc/rtpsender.cc
@@ -29,6 +29,7 @@
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/videosourceinterface.h"
+#include "webrtc/base/helpers.h"
namespace webrtc {
@@ -43,7 +44,7 @@ LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {
void LocalAudioSinkAdapter::OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) {
rtc::CritScope lock(&lock_);
if (sink_) {
@@ -59,34 +60,49 @@ void LocalAudioSinkAdapter::SetSink(cricket::AudioRenderer::Sink* sink) {
}
AudioRtpSender::AudioRtpSender(AudioTrackInterface* track,
- uint32_t ssrc,
- AudioProviderInterface* provider)
+ const std::string& stream_id,
+ AudioProviderInterface* provider,
+ StatsCollector* stats)
: id_(track->id()),
- track_(track),
- ssrc_(ssrc),
+ stream_id_(stream_id),
provider_(provider),
+ stats_(stats),
+ track_(track),
cached_track_enabled_(track->enabled()),
sink_adapter_(new LocalAudioSinkAdapter()) {
+ RTC_DCHECK(provider != nullptr);
track_->RegisterObserver(this);
track_->AddSink(sink_adapter_.get());
- Reconfigure();
}
+AudioRtpSender::AudioRtpSender(AudioProviderInterface* provider,
+ StatsCollector* stats)
+ : id_(rtc::CreateRandomUuid()),
+ stream_id_(rtc::CreateRandomUuid()),
+ provider_(provider),
+ stats_(stats),
+ sink_adapter_(new LocalAudioSinkAdapter()) {}
+
AudioRtpSender::~AudioRtpSender() {
- track_->RemoveSink(sink_adapter_.get());
- track_->UnregisterObserver(this);
Stop();
}
void AudioRtpSender::OnChanged() {
+ RTC_DCHECK(!stopped_);
if (cached_track_enabled_ != track_->enabled()) {
cached_track_enabled_ = track_->enabled();
- Reconfigure();
+ if (can_send_track()) {
+ SetAudioSend();
+ }
}
}
bool AudioRtpSender::SetTrack(MediaStreamTrackInterface* track) {
- if (track->kind() != "audio") {
+ if (stopped_) {
+ LOG(LS_ERROR) << "SetTrack can't be called on a stopped RtpSender.";
+ return false;
+ }
+ if (track && track->kind() != MediaStreamTrackInterface::kAudioKind) {
LOG(LS_ERROR) << "SetTrack called on audio RtpSender with " << track->kind()
<< " track.";
return false;
@@ -94,36 +110,84 @@ bool AudioRtpSender::SetTrack(MediaStreamTrackInterface* track) {
AudioTrackInterface* audio_track = static_cast<AudioTrackInterface*>(track);
// Detach from old track.
- track_->RemoveSink(sink_adapter_.get());
- track_->UnregisterObserver(this);
+ if (track_) {
+ track_->RemoveSink(sink_adapter_.get());
+ track_->UnregisterObserver(this);
+ }
+
+ if (can_send_track() && stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
// Attach to new track.
+ bool prev_can_send_track = can_send_track();
track_ = audio_track;
- cached_track_enabled_ = track_->enabled();
- track_->RegisterObserver(this);
- track_->AddSink(sink_adapter_.get());
- Reconfigure();
+ if (track_) {
+ cached_track_enabled_ = track_->enabled();
+ track_->RegisterObserver(this);
+ track_->AddSink(sink_adapter_.get());
+ }
+
+ // Update audio provider.
+ if (can_send_track()) {
+ SetAudioSend();
+ if (stats_) {
+ stats_->AddLocalAudioTrack(track_.get(), ssrc_);
+ }
+ } else if (prev_can_send_track) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ }
return true;
}
-void AudioRtpSender::Stop() {
- // TODO(deadbeef): Need to do more here to fully stop sending packets.
- if (!provider_) {
+void AudioRtpSender::SetSsrc(uint32_t ssrc) {
+ if (stopped_ || ssrc == ssrc_) {
return;
}
- cricket::AudioOptions options;
- provider_->SetAudioSend(ssrc_, false, options, nullptr);
- provider_ = nullptr;
+ // If we are already sending with a particular SSRC, stop sending.
+ if (can_send_track()) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ if (stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
+ ssrc_ = ssrc;
+ if (can_send_track()) {
+ SetAudioSend();
+ if (stats_) {
+ stats_->AddLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
}
-void AudioRtpSender::Reconfigure() {
- if (!provider_) {
+void AudioRtpSender::Stop() {
+ // TODO(deadbeef): Need to do more here to fully stop sending packets.
+ if (stopped_) {
return;
}
+ if (track_) {
+ track_->RemoveSink(sink_adapter_.get());
+ track_->UnregisterObserver(this);
+ }
+ if (can_send_track()) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ if (stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
+ stopped_ = true;
+}
+
+void AudioRtpSender::SetAudioSend() {
+ RTC_DCHECK(!stopped_ && can_send_track());
cricket::AudioOptions options;
- if (track_->enabled() && track_->GetSource()) {
+ if (track_->enabled() && track_->GetSource() &&
+ !track_->GetSource()->remote()) {
// TODO(xians): Remove this static_cast since we should be able to connect
- // a remote audio track to peer connection.
+ // a remote audio track to a peer connection.
options = static_cast<LocalAudioSource*>(track_->GetSource())->options();
}
@@ -136,35 +200,42 @@ void AudioRtpSender::Reconfigure() {
}
VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,
- uint32_t ssrc,
+ const std::string& stream_id,
VideoProviderInterface* provider)
: id_(track->id()),
- track_(track),
- ssrc_(ssrc),
+ stream_id_(stream_id),
provider_(provider),
+ track_(track),
cached_track_enabled_(track->enabled()) {
+ RTC_DCHECK(provider != nullptr);
track_->RegisterObserver(this);
- VideoSourceInterface* source = track_->GetSource();
- if (source) {
- provider_->SetCaptureDevice(ssrc_, source->GetVideoCapturer());
- }
- Reconfigure();
}
+VideoRtpSender::VideoRtpSender(VideoProviderInterface* provider)
+ : id_(rtc::CreateRandomUuid()),
+ stream_id_(rtc::CreateRandomUuid()),
+ provider_(provider) {}
+
VideoRtpSender::~VideoRtpSender() {
- track_->UnregisterObserver(this);
Stop();
}
void VideoRtpSender::OnChanged() {
+ RTC_DCHECK(!stopped_);
if (cached_track_enabled_ != track_->enabled()) {
cached_track_enabled_ = track_->enabled();
- Reconfigure();
+ if (can_send_track()) {
+ SetVideoSend();
+ }
}
}
bool VideoRtpSender::SetTrack(MediaStreamTrackInterface* track) {
- if (track->kind() != "video") {
+ if (stopped_) {
+ LOG(LS_ERROR) << "SetTrack can't be called on a stopped RtpSender.";
+ return false;
+ }
+ if (track && track->kind() != MediaStreamTrackInterface::kVideoKind) {
LOG(LS_ERROR) << "SetTrack called on video RtpSender with " << track->kind()
<< " track.";
return false;
@@ -172,30 +243,72 @@ bool VideoRtpSender::SetTrack(MediaStreamTrackInterface* track) {
VideoTrackInterface* video_track = static_cast<VideoTrackInterface*>(track);
// Detach from old track.
- track_->UnregisterObserver(this);
+ if (track_) {
+ track_->UnregisterObserver(this);
+ }
// Attach to new track.
+ bool prev_can_send_track = can_send_track();
track_ = video_track;
- cached_track_enabled_ = track_->enabled();
- track_->RegisterObserver(this);
- Reconfigure();
+ if (track_) {
+ cached_track_enabled_ = track_->enabled();
+ track_->RegisterObserver(this);
+ }
+
+ // Update video provider.
+ if (can_send_track()) {
+ VideoSourceInterface* source = track_->GetSource();
+    // TODO(deadbeef): If SetTrack is called with a disabled track, and the
+    // previous track was enabled, this could cause a frame from the new track
+    // to slip out. Really, what we need is for SetCaptureDevice and
+    // SetVideoSend to be combined into one atomic operation, all the way down
+    // to WebRtcVideoSendStream.
+ provider_->SetCaptureDevice(ssrc_,
+ source ? source->GetVideoCapturer() : nullptr);
+ SetVideoSend();
+ } else if (prev_can_send_track) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
return true;
}
-void VideoRtpSender::Stop() {
- // TODO(deadbeef): Need to do more here to fully stop sending packets.
- if (!provider_) {
+void VideoRtpSender::SetSsrc(uint32_t ssrc) {
+ if (stopped_ || ssrc == ssrc_) {
return;
}
- provider_->SetCaptureDevice(ssrc_, nullptr);
- provider_->SetVideoSend(ssrc_, false, nullptr);
- provider_ = nullptr;
+ // If we are already sending with a particular SSRC, stop sending.
+ if (can_send_track()) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
+ ssrc_ = ssrc;
+ if (can_send_track()) {
+ VideoSourceInterface* source = track_->GetSource();
+ provider_->SetCaptureDevice(ssrc_,
+ source ? source->GetVideoCapturer() : nullptr);
+ SetVideoSend();
+ }
}
-void VideoRtpSender::Reconfigure() {
- if (!provider_) {
+void VideoRtpSender::Stop() {
+ // TODO(deadbeef): Need to do more here to fully stop sending packets.
+ if (stopped_) {
return;
}
+ if (track_) {
+ track_->UnregisterObserver(this);
+ }
+ if (can_send_track()) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
+ stopped_ = true;
+}
+
+void VideoRtpSender::SetVideoSend() {
+ RTC_DCHECK(!stopped_ && can_send_track());
const cricket::VideoOptions* options = nullptr;
VideoSourceInterface* source = track_->GetSource();
if (track_->enabled() && source) {
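The refactoring above replaces the old Reconfigure() with an explicit gate: a
sender only talks to its provider once it has both a track and a nonzero SSRC,
in whichever order the two arrive, and detaches again when either goes away.
A condensed, self-contained sketch of that state machine (toy Provider and
Track types standing in for the real interfaces, not this patch's code):

    // can_send sketch -- illustrative only; Provider and Track are toy
    // stand-ins for AudioProviderInterface and AudioTrackInterface.
    #include <cstdint>
    #include <iostream>

    struct Provider {
      void SetSend(uint32_t ssrc, bool enable) {
        std::cout << (enable ? "start" : "stop") << " sending on " << ssrc
                  << std::endl;
      }
    };
    struct Track {};

    class SketchSender {
     public:
      explicit SketchSender(Provider* provider) : provider_(provider) {}

      void SetTrack(Track* track) {
        bool prev_can_send = can_send();
        track_ = track;
        if (can_send()) {
          provider_->SetSend(ssrc_, true);
        } else if (prev_can_send) {
          provider_->SetSend(ssrc_, false);
        }
      }

      void SetSsrc(uint32_t ssrc) {
        if (ssrc == ssrc_) {
          return;
        }
        if (can_send()) {
          provider_->SetSend(ssrc_, false);  // Detach from the old SSRC.
        }
        ssrc_ = ssrc;
        if (can_send()) {
          provider_->SetSend(ssrc_, true);
        }
      }

     private:
      bool can_send() const { return track_ != nullptr && ssrc_ != 0; }

      Provider* provider_;
      Track* track_ = nullptr;
      uint32_t ssrc_ = 0;
    };

    int main() {
      Provider provider;
      Track track;
      SketchSender sender(&provider);
      sender.SetSsrc(42);       // No output: no track yet.
      sender.SetTrack(&track);  // "start sending on 42"
      sender.SetSsrc(0);        // "stop sending on 42"
    }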
diff --git a/talk/app/webrtc/rtpsender.h b/talk/app/webrtc/rtpsender.h
index 3741909323..dd846b556c 100644
--- a/talk/app/webrtc/rtpsender.h
+++ b/talk/app/webrtc/rtpsender.h
@@ -36,6 +36,7 @@
#include "talk/app/webrtc/mediastreamprovider.h"
#include "talk/app/webrtc/rtpsenderinterface.h"
+#include "talk/app/webrtc/statscollector.h"
#include "talk/media/base/audiorenderer.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/criticalsection.h"
@@ -56,7 +57,7 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) override;
// cricket::AudioRenderer implementation.
@@ -70,9 +71,15 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
class AudioRtpSender : public ObserverInterface,
public rtc::RefCountedObject<RtpSenderInterface> {
public:
+ // StatsCollector provided so that Add/RemoveLocalAudioTrack can be called
+ // at the appropriate times.
AudioRtpSender(AudioTrackInterface* track,
- uint32_t ssrc,
- AudioProviderInterface* provider);
+ const std::string& stream_id,
+ AudioProviderInterface* provider,
+ StatsCollector* stats);
+
+ // Randomly generates id and stream_id.
+ AudioRtpSender(AudioProviderInterface* provider, StatsCollector* stats);
virtual ~AudioRtpSender();
@@ -85,18 +92,37 @@ class AudioRtpSender : public ObserverInterface,
return track_.get();
}
+ void SetSsrc(uint32_t ssrc) override;
+
+ uint32_t ssrc() const override { return ssrc_; }
+
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_AUDIO;
+ }
+
std::string id() const override { return id_; }
+ void set_stream_id(const std::string& stream_id) override {
+ stream_id_ = stream_id;
+ }
+ std::string stream_id() const override { return stream_id_; }
+
void Stop() override;
private:
- void Reconfigure();
+ bool can_send_track() const { return track_ && ssrc_; }
+  // Helper function that constructs the options for, and calls,
+  // AudioProviderInterface::SetAudioSend.
+ void SetAudioSend();
std::string id_;
- rtc::scoped_refptr<AudioTrackInterface> track_;
- uint32_t ssrc_;
+ std::string stream_id_;
AudioProviderInterface* provider_;
- bool cached_track_enabled_;
+ StatsCollector* stats_;
+ rtc::scoped_refptr<AudioTrackInterface> track_;
+ uint32_t ssrc_ = 0;
+ bool cached_track_enabled_ = false;
+ bool stopped_ = false;
// Used to pass the data callback from the |track_| to the other end of
// cricket::AudioRenderer.
@@ -107,9 +133,12 @@ class VideoRtpSender : public ObserverInterface,
public rtc::RefCountedObject<RtpSenderInterface> {
public:
VideoRtpSender(VideoTrackInterface* track,
- uint32_t ssrc,
+ const std::string& stream_id,
VideoProviderInterface* provider);
+ // Randomly generates id and stream_id.
+ explicit VideoRtpSender(VideoProviderInterface* provider);
+
virtual ~VideoRtpSender();
// ObserverInterface implementation
@@ -121,18 +150,36 @@ class VideoRtpSender : public ObserverInterface,
return track_.get();
}
+ void SetSsrc(uint32_t ssrc) override;
+
+ uint32_t ssrc() const override { return ssrc_; }
+
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_VIDEO;
+ }
+
std::string id() const override { return id_; }
+ void set_stream_id(const std::string& stream_id) override {
+ stream_id_ = stream_id;
+ }
+ std::string stream_id() const override { return stream_id_; }
+
void Stop() override;
private:
- void Reconfigure();
+ bool can_send_track() const { return track_ && ssrc_; }
+  // Helper function that constructs the options for, and calls,
+  // VideoProviderInterface::SetVideoSend.
+ void SetVideoSend();
std::string id_;
- rtc::scoped_refptr<VideoTrackInterface> track_;
- uint32_t ssrc_;
+ std::string stream_id_;
VideoProviderInterface* provider_;
- bool cached_track_enabled_;
+ rtc::scoped_refptr<VideoTrackInterface> track_;
+ uint32_t ssrc_ = 0;
+ bool cached_track_enabled_ = false;
+ bool stopped_ = false;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/rtpsenderinterface.h b/talk/app/webrtc/rtpsenderinterface.h
index fca98f21db..f54e8ca090 100644
--- a/talk/app/webrtc/rtpsenderinterface.h
+++ b/talk/app/webrtc/rtpsenderinterface.h
@@ -35,6 +35,7 @@
#include "talk/app/webrtc/proxy.h"
#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/session/media/mediasession.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
@@ -47,10 +48,24 @@ class RtpSenderInterface : public rtc::RefCountInterface {
virtual bool SetTrack(MediaStreamTrackInterface* track) = 0;
virtual rtc::scoped_refptr<MediaStreamTrackInterface> track() const = 0;
+ // Used to set the SSRC of the sender, once a local description has been set.
+  // If |ssrc| is 0, this indicates that the sender should disconnect from the
+ // underlying transport (this occurs if the sender isn't seen in a local
+ // description).
+ virtual void SetSsrc(uint32_t ssrc) = 0;
+ virtual uint32_t ssrc() const = 0;
+
+ // Audio or video sender?
+ virtual cricket::MediaType media_type() const = 0;
+
// Not to be confused with "mid", this is a field we can temporarily use
// to uniquely identify a sender until we implement Unified Plan SDP.
virtual std::string id() const = 0;
+ // TODO(deadbeef): Support one sender having multiple stream ids.
+ virtual void set_stream_id(const std::string& stream_id) = 0;
+ virtual std::string stream_id() const = 0;
+
virtual void Stop() = 0;
protected:
@@ -61,7 +76,12 @@ class RtpSenderInterface : public rtc::RefCountInterface {
BEGIN_PROXY_MAP(RtpSender)
PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*)
PROXY_CONSTMETHOD0(rtc::scoped_refptr<MediaStreamTrackInterface>, track)
+PROXY_METHOD1(void, SetSsrc, uint32_t)
+PROXY_CONSTMETHOD0(uint32_t, ssrc)
+PROXY_CONSTMETHOD0(cricket::MediaType, media_type)
PROXY_CONSTMETHOD0(std::string, id)
+PROXY_METHOD1(void, set_stream_id, const std::string&)
+PROXY_CONSTMETHOD0(std::string, stream_id)
PROXY_METHOD0(void, Stop)
END_PROXY()
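Each PROXY_* entry above generates a forwarding stub that marshals the call
onto the signaling thread before it reaches the wrapped sender. Roughly (a
simplified, hypothetical expansion; the real macros live in
talk/app/webrtc/proxy.h and use a MethodCall helper),
PROXY_METHOD1(void, SetSsrc, uint32_t) expands to something like:

    // Hypothetical expansion, for illustration only. C is the proxied
    // interface (here RtpSenderInterface), c_ the wrapped implementation.
    void SetSsrc(uint32_t a1) override {
      MethodCall1<C, void, uint32_t> call(c_.get(), &C::SetSsrc, a1);
      return call.Marshal(owner_thread_);
    }

This keeps every new method (SetSsrc, ssrc, media_type, set_stream_id,
stream_id) safe to call from the application thread.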
diff --git a/talk/app/webrtc/rtpsenderreceiver_unittest.cc b/talk/app/webrtc/rtpsenderreceiver_unittest.cc
index c9d7e008c3..a590e1d01f 100644
--- a/talk/app/webrtc/rtpsenderreceiver_unittest.cc
+++ b/talk/app/webrtc/rtpsenderreceiver_unittest.cc
@@ -26,6 +26,7 @@
*/
#include <string>
+#include <utility>
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/mediastream.h"
@@ -48,14 +49,17 @@ static const char kStreamLabel1[] = "local_stream_1";
static const char kVideoTrackId[] = "video_1";
static const char kAudioTrackId[] = "audio_1";
static const uint32_t kVideoSsrc = 98;
+static const uint32_t kVideoSsrc2 = 100;
static const uint32_t kAudioSsrc = 99;
+static const uint32_t kAudioSsrc2 = 101;
namespace webrtc {
// Helper class to test RtpSender/RtpReceiver.
class MockAudioProvider : public AudioProviderInterface {
public:
- virtual ~MockAudioProvider() {}
+ ~MockAudioProvider() override {}
+
MOCK_METHOD2(SetAudioPlayout,
void(uint32_t ssrc,
bool enable));
@@ -65,6 +69,14 @@ class MockAudioProvider : public AudioProviderInterface {
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer));
MOCK_METHOD2(SetAudioPlayoutVolume, void(uint32_t ssrc, double volume));
+
+ void SetRawAudioSink(uint32_t,
+ rtc::scoped_ptr<AudioSinkInterface> sink) override {
+ sink_ = std::move(sink);
+ }
+
+ private:
+ rtc::scoped_ptr<AudioSinkInterface> sink_;
};
// Helper class to test RtpSender/RtpReceiver.
@@ -85,8 +97,8 @@ class MockVideoProvider : public VideoProviderInterface {
class FakeVideoSource : public Notifier<VideoSourceInterface> {
public:
- static rtc::scoped_refptr<FakeVideoSource> Create() {
- return new rtc::RefCountedObject<FakeVideoSource>();
+ static rtc::scoped_refptr<FakeVideoSource> Create(bool remote) {
+ return new rtc::RefCountedObject<FakeVideoSource>(remote);
}
virtual cricket::VideoCapturer* GetVideoCapturer() { return &fake_capturer_; }
virtual void Stop() {}
@@ -94,16 +106,18 @@ class FakeVideoSource : public Notifier<VideoSourceInterface> {
virtual void AddSink(cricket::VideoRenderer* output) {}
virtual void RemoveSink(cricket::VideoRenderer* output) {}
virtual SourceState state() const { return state_; }
+ virtual bool remote() const { return remote_; }
virtual const cricket::VideoOptions* options() const { return &options_; }
virtual cricket::VideoRenderer* FrameInput() { return NULL; }
protected:
- FakeVideoSource() : state_(kLive) {}
+ explicit FakeVideoSource(bool remote) : state_(kLive), remote_(remote) {}
~FakeVideoSource() {}
private:
cricket::FakeVideoCapturer fake_capturer_;
SourceState state_;
+ bool remote_;
cricket::VideoOptions options_;
};
@@ -111,7 +125,11 @@ class RtpSenderReceiverTest : public testing::Test {
public:
virtual void SetUp() {
stream_ = MediaStream::Create(kStreamLabel1);
- rtc::scoped_refptr<VideoSourceInterface> source(FakeVideoSource::Create());
+ }
+
+ void AddVideoTrack(bool remote) {
+ rtc::scoped_refptr<VideoSourceInterface> source(
+ FakeVideoSource::Create(remote));
video_track_ = VideoTrack::Create(kVideoTrackId, source);
EXPECT_TRUE(stream_->AddTrack(video_track_));
}
@@ -120,17 +138,21 @@ class RtpSenderReceiverTest : public testing::Test {
audio_track_ = AudioTrack::Create(kAudioTrackId, NULL);
EXPECT_TRUE(stream_->AddTrack(audio_track_));
EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
- audio_rtp_sender_ = new AudioRtpSender(stream_->GetAudioTracks()[0],
- kAudioSsrc, &audio_provider_);
+ audio_rtp_sender_ =
+ new AudioRtpSender(stream_->GetAudioTracks()[0], stream_->label(),
+ &audio_provider_, nullptr);
+ audio_rtp_sender_->SetSsrc(kAudioSsrc);
}
void CreateVideoRtpSender() {
+ AddVideoTrack(false);
EXPECT_CALL(video_provider_,
SetCaptureDevice(
kVideoSsrc, video_track_->GetSource()->GetVideoCapturer()));
EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
video_rtp_sender_ = new VideoRtpSender(stream_->GetVideoTracks()[0],
- kVideoSsrc, &video_provider_);
+ stream_->label(), &video_provider_);
+ video_rtp_sender_->SetSsrc(kVideoSsrc);
}
void DestroyAudioRtpSender() {
@@ -146,8 +168,8 @@ class RtpSenderReceiverTest : public testing::Test {
}
void CreateAudioRtpReceiver() {
- audio_track_ =
- AudioTrack::Create(kAudioTrackId, RemoteAudioSource::Create().get());
+ audio_track_ = AudioTrack::Create(
+ kAudioTrackId, RemoteAudioSource::Create(kAudioSsrc, NULL));
EXPECT_TRUE(stream_->AddTrack(audio_track_));
EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
audio_rtp_receiver_ = new AudioRtpReceiver(stream_->GetAudioTracks()[0],
@@ -155,6 +177,7 @@ class RtpSenderReceiverTest : public testing::Test {
}
void CreateVideoRtpReceiver() {
+ AddVideoTrack(true);
EXPECT_CALL(video_provider_,
SetVideoPlayout(kVideoSsrc, true,
video_track_->GetSource()->FrameInput()));
@@ -280,4 +303,212 @@ TEST_F(RtpSenderReceiverTest, RemoteAudioTrackSetVolume) {
DestroyAudioRtpReceiver();
}
+// Test that provider methods aren't called without both a track and an SSRC.
+TEST_F(RtpSenderReceiverTest, AudioSenderWithoutTrackAndSsrc) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_TRUE(sender->SetTrack(track));
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+ sender->SetSsrc(kAudioSsrc);
+ sender->SetSsrc(0);
+ // Just let it get destroyed and make sure it doesn't call any methods on the
+ // provider interface.
+}
+
+// Test that provider methods aren't called without both a track and an SSRC.
+TEST_F(RtpSenderReceiverTest, VideoSenderWithoutTrackAndSsrc) {
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ EXPECT_TRUE(sender->SetTrack(video_track_));
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+ sender->SetSsrc(kVideoSsrc);
+ sender->SetSsrc(0);
+ // Just let it get destroyed and make sure it doesn't call any methods on the
+ // provider interface.
+}
+
+// Test that an audio sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set first.
+TEST_F(RtpSenderReceiverTest, AudioSenderEarlyWarmupSsrcThenTrack) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ sender->SetTrack(track);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+}
+
+// Test that an audio sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set last.
+TEST_F(RtpSenderReceiverTest, AudioSenderEarlyWarmupTrackThenSsrc) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ sender->SetTrack(track);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ sender->SetSsrc(kAudioSsrc);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+}
+
+// Test that a video sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set first.
+TEST_F(RtpSenderReceiverTest, VideoSenderEarlyWarmupSsrcThenTrack) {
+ AddVideoTrack(false);
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ sender->SetTrack(video_track_);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+}
+
+// Test that a video sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set last.
+TEST_F(RtpSenderReceiverTest, VideoSenderEarlyWarmupTrackThenSsrc) {
+ AddVideoTrack(false);
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ sender->SetTrack(video_track_);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ sender->SetSsrc(kVideoSsrc);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+}
+
+// Test that the sender is disconnected from the provider when its SSRC is
+// set to 0.
+TEST_F(RtpSenderReceiverTest, AudioSenderSsrcSetToZero) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ sender->SetSsrc(0);
+
+ // Make sure it's SetSsrc that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(_, _, _, _)).Times(0);
+}
+
+// Test that the sender is disconnected from the provider when its SSRC is
+// set to 0.
+TEST_F(RtpSenderReceiverTest, VideoSenderSsrcSetToZero) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ sender->SetSsrc(0);
+
+ // Make sure it's SetSsrc that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(_, _)).Times(0);
+ EXPECT_CALL(video_provider_, SetVideoSend(_, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, AudioSenderTrackSetToNull) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+
+ // Make sure it's SetTrack that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(_, _, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, VideoSenderTrackSetToNull) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+
+ // Make sure it's SetTrack that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(_, _)).Times(0);
+ EXPECT_CALL(video_provider_, SetVideoSend(_, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, AudioSenderSsrcChanged) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc2, true, _, _)).Times(1);
+ sender->SetSsrc(kAudioSsrc2);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc2, false, _, _)).Times(1);
+}
+
+TEST_F(RtpSenderReceiverTest, VideoSenderSsrcChanged) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc2,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc2, true, _));
+ sender->SetSsrc(kVideoSsrc2);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc2, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc2, false, _)).Times(1);
+}
+
} // namespace webrtc
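A note on the gmock pattern used throughout these new tests: expectations set
after an action (the "Calls expected from destructor" blocks, or the
EXPECT_CALL(...).Times(0) guards after SetSsrc(0) or SetTrack(nullptr)) only
govern calls made from that point on, which is how the tests distinguish
destructor behavior from the action under test. gmock's documentation
discourages interleaving EXPECT_CALL with calls on the mock, so this relies
on gmock's current matching behavior. A minimal standalone illustration (toy
mock, not part of this patch):

    #include <cstdint>

    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"

    using ::testing::_;

    class MockProvider {
     public:
      MOCK_METHOD2(SetSend, void(uint32_t ssrc, bool enable));
    };

    TEST(GmockOrderingExample, LaterExpectationsGovernLaterCalls) {
      MockProvider provider;
      EXPECT_CALL(provider, SetSend(42, true));
      provider.SetSend(42, true);  // Satisfies the expectation above.

      // From here on -- e.g. in a destructor -- SetSend must not be called.
      EXPECT_CALL(provider, SetSend(_, _)).Times(0);
    }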
diff --git a/talk/app/webrtc/statscollector.cc b/talk/app/webrtc/statscollector.cc
index 347a84640c..b514b42fee 100644
--- a/talk/app/webrtc/statscollector.cc
+++ b/talk/app/webrtc/statscollector.cc
@@ -115,17 +115,17 @@ void ExtractCommonReceiveProperties(const cricket::MediaReceiverInfo& info,
report->AddString(StatsReport::kStatsValueNameCodecName, info.codec_name);
}
-void SetAudioProcessingStats(StatsReport* report, int signal_level,
- bool typing_noise_detected, int echo_return_loss,
- int echo_return_loss_enhancement, int echo_delay_median_ms,
- float aec_quality_min, int echo_delay_std_ms) {
+void SetAudioProcessingStats(StatsReport* report,
+ bool typing_noise_detected,
+ int echo_return_loss,
+ int echo_return_loss_enhancement,
+ int echo_delay_median_ms,
+ float aec_quality_min,
+ int echo_delay_std_ms) {
report->AddBoolean(StatsReport::kStatsValueNameTypingNoiseState,
typing_noise_detected);
report->AddFloat(StatsReport::kStatsValueNameEchoCancellationQualityMin,
aec_quality_min);
- // Don't overwrite the previous signal level if it's not available now.
- if (signal_level >= 0)
- report->AddInt(StatsReport::kStatsValueNameAudioInputLevel, signal_level);
const IntForAdd ints[] = {
{ StatsReport::kStatsValueNameEchoReturnLoss, echo_return_loss },
{ StatsReport::kStatsValueNameEchoReturnLossEnhancement,
@@ -182,11 +182,14 @@ void ExtractStats(const cricket::VoiceReceiverInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VoiceSenderInfo& info, StatsReport* report) {
ExtractCommonSendProperties(info, report);
- SetAudioProcessingStats(report, info.audio_level, info.typing_noise_detected,
- info.echo_return_loss, info.echo_return_loss_enhancement,
- info.echo_delay_median_ms, info.aec_quality_min, info.echo_delay_std_ms);
+ SetAudioProcessingStats(
+ report, info.typing_noise_detected, info.echo_return_loss,
+ info.echo_return_loss_enhancement, info.echo_delay_median_ms,
+ info.aec_quality_min, info.echo_delay_std_ms);
+ RTC_DCHECK_GE(info.audio_level, 0);
const IntForAdd ints[] = {
+ { StatsReport::kStatsValueNameAudioInputLevel, info.audio_level},
{ StatsReport::kStatsValueNameJitterReceived, info.jitter_ms },
{ StatsReport::kStatsValueNamePacketsLost, info.packets_lost },
{ StatsReport::kStatsValueNamePacketsSent, info.packets_sent },
@@ -198,6 +201,8 @@ void ExtractStats(const cricket::VoiceSenderInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
ExtractCommonReceiveProperties(info, report);
+ report->AddString(StatsReport::kStatsValueNameCodecImplementationName,
+ info.decoder_implementation_name);
report->AddInt64(StatsReport::kStatsValueNameBytesReceived,
info.bytes_rcvd);
report->AddInt64(StatsReport::kStatsValueNameCaptureStartNtpTimeMs,
@@ -230,6 +235,8 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VideoSenderInfo& info, StatsReport* report) {
ExtractCommonSendProperties(info, report);
+ report->AddString(StatsReport::kStatsValueNameCodecImplementationName,
+ info.encoder_implementation_name);
report->AddBoolean(StatsReport::kStatsValueNameBandwidthLimitedResolution,
(info.adapt_reason & 0x2) > 0);
report->AddBoolean(StatsReport::kStatsValueNameCpuLimitedResolution,
@@ -730,17 +737,20 @@ void StatsCollector::ExtractSessionInfo() {
channel_report->AddId(StatsReport::kStatsValueNameRemoteCertificateId,
remote_cert_report_id);
}
- const std::string& srtp_cipher = channel_iter.srtp_cipher;
- if (!srtp_cipher.empty()) {
- channel_report->AddString(StatsReport::kStatsValueNameSrtpCipher,
- srtp_cipher);
+ int srtp_crypto_suite = channel_iter.srtp_crypto_suite;
+ if (srtp_crypto_suite != rtc::SRTP_INVALID_CRYPTO_SUITE &&
+ rtc::SrtpCryptoSuiteToName(srtp_crypto_suite).length()) {
+ channel_report->AddString(
+ StatsReport::kStatsValueNameSrtpCipher,
+ rtc::SrtpCryptoSuiteToName(srtp_crypto_suite));
}
- int ssl_cipher = channel_iter.ssl_cipher;
- if (ssl_cipher &&
- rtc::SSLStreamAdapter::GetSslCipherSuiteName(ssl_cipher).length()) {
+ int ssl_cipher_suite = channel_iter.ssl_cipher_suite;
+ if (ssl_cipher_suite != rtc::TLS_NULL_WITH_NULL_NULL &&
+ rtc::SSLStreamAdapter::SslCipherSuiteToName(ssl_cipher_suite)
+ .length()) {
channel_report->AddString(
StatsReport::kStatsValueNameDtlsCipher,
- rtc::SSLStreamAdapter::GetSslCipherSuiteName(ssl_cipher));
+ rtc::SSLStreamAdapter::SslCipherSuiteToName(ssl_cipher_suite));
}
int connection_id = 0;
@@ -888,21 +898,24 @@ void StatsCollector::UpdateReportFromAudioTrack(AudioTrackInterface* track,
RTC_DCHECK(pc_->session()->signaling_thread()->IsCurrent());
RTC_DCHECK(track != NULL);
- int signal_level = 0;
- if (!track->GetSignalLevel(&signal_level))
- signal_level = -1;
+ // Don't overwrite report values if they're not available.
+ int signal_level;
+ if (track->GetSignalLevel(&signal_level)) {
+ RTC_DCHECK_GE(signal_level, 0);
+ report->AddInt(StatsReport::kStatsValueNameAudioInputLevel, signal_level);
+ }
- rtc::scoped_refptr<AudioProcessorInterface> audio_processor(
- track->GetAudioProcessor());
+ auto audio_processor(track->GetAudioProcessor());
- AudioProcessorInterface::AudioProcessorStats stats;
- if (audio_processor.get())
+ if (audio_processor.get()) {
+ AudioProcessorInterface::AudioProcessorStats stats;
audio_processor->GetStats(&stats);
- SetAudioProcessingStats(report, signal_level, stats.typing_noise_detected,
- stats.echo_return_loss, stats.echo_return_loss_enhancement,
- stats.echo_delay_median_ms, stats.aec_quality_min,
- stats.echo_delay_std_ms);
+ SetAudioProcessingStats(
+ report, stats.typing_noise_detected, stats.echo_return_loss,
+ stats.echo_return_loss_enhancement, stats.echo_delay_median_ms,
+ stats.aec_quality_min, stats.echo_delay_std_ms);
+ }
}
bool StatsCollector::GetTrackIdBySsrc(uint32_t ssrc,
diff --git a/talk/app/webrtc/statscollector.h b/talk/app/webrtc/statscollector.h
index 18a345d71d..56db79de20 100644
--- a/talk/app/webrtc/statscollector.h
+++ b/talk/app/webrtc/statscollector.h
@@ -36,7 +36,6 @@
#include <vector>
#include "talk/app/webrtc/mediastreaminterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/statstypes.h"
#include "talk/app/webrtc/webrtcsession.h"
diff --git a/talk/app/webrtc/statscollector_unittest.cc b/talk/app/webrtc/statscollector_unittest.cc
index 9121c691b1..e7ee91190e 100644
--- a/talk/app/webrtc/statscollector_unittest.cc
+++ b/talk/app/webrtc/statscollector_unittest.cc
@@ -35,7 +35,6 @@
#include "talk/app/webrtc/peerconnectionfactory.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/mediastreaminterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/test/fakedatachannelprovider.h"
#include "talk/app/webrtc/videotrack.h"
@@ -683,8 +682,8 @@ class StatsCollectorTest : public testing::Test {
// Fake stats to process.
cricket::TransportChannelStats channel_stats;
channel_stats.component = 1;
- channel_stats.srtp_cipher = "the-srtp-cipher";
- channel_stats.ssl_cipher = TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA;
+ channel_stats.srtp_crypto_suite = rtc::SRTP_AES128_CM_SHA1_80;
+ channel_stats.ssl_cipher_suite = TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA;
cricket::TransportStats transport_stats;
transport_stats.transport_name = "audio";
@@ -697,8 +696,7 @@ class StatsCollectorTest : public testing::Test {
// Fake certificate to report
rtc::scoped_refptr<rtc::RTCCertificate> local_certificate(
rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::FakeSSLIdentity>(
- new rtc::FakeSSLIdentity(local_cert))
- .Pass()));
+ new rtc::FakeSSLIdentity(local_cert))));
// Configure MockWebRtcSession
EXPECT_CALL(session_,
@@ -747,18 +745,17 @@ class StatsCollectorTest : public testing::Test {
}
// Check negotiated ciphers.
- std::string dtls_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameDtlsCipher);
- EXPECT_EQ(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ std::string dtls_cipher_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameDtlsCipher);
+ EXPECT_EQ(rtc::SSLStreamAdapter::SslCipherSuiteToName(
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA),
- dtls_cipher);
- std::string srtp_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameSrtpCipher);
- EXPECT_EQ("the-srtp-cipher", srtp_cipher);
+ dtls_cipher_suite);
+ std::string srtp_crypto_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameSrtpCipher);
+ EXPECT_EQ(rtc::SrtpCryptoSuiteToName(rtc::SRTP_AES128_CM_SHA1_80),
+ srtp_crypto_suite);
}
cricket::FakeMediaEngine* media_engine_;
@@ -1407,16 +1404,14 @@ TEST_F(StatsCollectorTest, NoTransport) {
ASSERT_EQ(kNotFound, remote_certificate_id);
// Check that the negotiated ciphers are absent.
- std::string dtls_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameDtlsCipher);
- ASSERT_EQ(kNotFound, dtls_cipher);
- std::string srtp_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameSrtpCipher);
- ASSERT_EQ(kNotFound, srtp_cipher);
+ std::string dtls_cipher_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameDtlsCipher);
+ ASSERT_EQ(kNotFound, dtls_cipher_suite);
+ std::string srtp_crypto_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameSrtpCipher);
+ ASSERT_EQ(kNotFound, srtp_crypto_suite);
}
// This test verifies that the stats are generated correctly when the transport
diff --git a/talk/app/webrtc/statstypes.cc b/talk/app/webrtc/statstypes.cc
index e45833c668..19cb1f5d78 100644
--- a/talk/app/webrtc/statstypes.cc
+++ b/talk/app/webrtc/statstypes.cc
@@ -408,6 +408,8 @@ const char* StatsReport::Value::display_name() const {
return "state";
case kStatsValueNameDataChannelId:
return "datachannelid";
+ case kStatsValueNameCodecImplementationName:
+ return "codecImplementationName";
// 'goog' prefixed constants.
case kStatsValueNameAccelerateRate:
@@ -592,9 +594,6 @@ const char* StatsReport::Value::display_name() const {
return "googViewLimitedResolution";
case kStatsValueNameWritable:
return "googWritable";
- default:
- RTC_DCHECK(false);
- break;
}
return nullptr;
diff --git a/talk/app/webrtc/statstypes.h b/talk/app/webrtc/statstypes.h
index 7fa9f3212d..60439b9bc8 100644
--- a/talk/app/webrtc/statstypes.h
+++ b/talk/app/webrtc/statstypes.h
@@ -120,6 +120,7 @@ class StatsReport {
kStatsValueNameAudioOutputLevel,
kStatsValueNameBytesReceived,
kStatsValueNameBytesSent,
+ kStatsValueNameCodecImplementationName,
kStatsValueNameDataChannelId,
kStatsValueNamePacketsLost,
kStatsValueNamePacketsReceived,
diff --git a/talk/app/webrtc/test/DEPS b/talk/app/webrtc/test/DEPS
new file mode 100644
index 0000000000..a814b152f2
--- /dev/null
+++ b/talk/app/webrtc/test/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+  # Allow inclusion of Chrome's base/android headers, which are needed for
+  # accessing the JVM and the application context in gtest.
+ "+base/android",
+]
diff --git a/talk/app/webrtc/test/androidtestinitializer.cc b/talk/app/webrtc/test/androidtestinitializer.cc
new file mode 100644
index 0000000000..883c2d8178
--- /dev/null
+++ b/talk/app/webrtc/test/androidtestinitializer.cc
@@ -0,0 +1,74 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+
+#include <pthread.h>
+
+// Note: this dependency is dangerous since it reaches into Chromium's base.
+// There's a risk of e.g. macro clashes. This file may only be used in tests.
+// Since we use Chrome's build system for creating the gtest binary, this
+// should be fine.
+#include "base/android/context_utils.h"
+#include "base/android/jni_android.h"
+
+#include "talk/app/webrtc/java/jni/classreferenceholder.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/ssladapter.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+
+namespace webrtc {
+
+namespace {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+// There can only be one JNI_OnLoad in each binary. Since this is a gtest
+// C++ runner binary, we initialize the same global objects we normally
+// would if this were a Java binary.
+void EnsureInitializedOnce() {
+ RTC_CHECK(::base::android::IsVMInitialized());
+ JNIEnv* jni = ::base::android::AttachCurrentThread();
+ JavaVM* jvm = NULL;
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+ jobject context = ::base::android::GetApplicationContext();
+
+ RTC_CHECK_GE(webrtc_jni::InitGlobalJniVariables(jvm), 0);
+ RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
+ webrtc_jni::LoadGlobalClassReferenceHolder();
+
+ webrtc::VoiceEngine::SetAndroidObjects(jvm, context);
+}
+
+} // anonymous namespace
+
+void InitializeAndroidObjects() {
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
+
+} // namespace webrtc
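Intended usage, as a hypothetical fixture (not part of this patch): any
Android gtest that needs a PeerConnectionFactory calls
InitializeAndroidObjects() first; pthread_once makes repeated calls from
different tests in the same process safe:

    #include "talk/app/webrtc/test/androidtestinitializer.h"
    #include "testing/gtest/include/gtest/gtest.h"

    class AndroidInitializedTest : public testing::Test {
     protected:
      void SetUp() override {
        // Idempotent: pthread_once runs EnsureInitializedOnce() at most once
        // per process, however many tests call this.
        webrtc::InitializeAndroidObjects();
      }
    };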
diff --git a/talk/app/webrtc/mediastreamsignaling.cc b/talk/app/webrtc/test/androidtestinitializer.h
index b405273902..e6992825dd 100644
--- a/talk/app/webrtc/mediastreamsignaling.cc
+++ b/talk/app/webrtc/test/androidtestinitializer.h
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2012 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,6 +25,13 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "talk/app/webrtc/mediastreamsignaling.h"
+#ifndef TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
+#define TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
-// TODO(deadbeef): Remove this file once Chrome build files don't reference it.
+namespace webrtc {
+
+void InitializeAndroidObjects();
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index e2dc12375b..6b675a9395 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -58,7 +58,7 @@ class FakeAdmTest : public testing::Test,
int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -82,7 +82,7 @@ class FakeAdmTest : public testing::Test,
// ADM is pulling data.
int32_t NeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
diff --git a/talk/app/webrtc/test/fakedtlsidentitystore.h b/talk/app/webrtc/test/fakedtlsidentitystore.h
index 0f9bdb9e6c..98074c742a 100644
--- a/talk/app/webrtc/test/fakedtlsidentitystore.h
+++ b/talk/app/webrtc/test/fakedtlsidentitystore.h
@@ -29,41 +29,73 @@
#define TALK_APP_WEBRTC_TEST_FAKEDTLSIDENTITYSERVICE_H_
#include <string>
+#include <utility>
#include "talk/app/webrtc/dtlsidentitystore.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "webrtc/base/rtccertificate.h"
-static const char kRSA_PRIVATE_KEY_PEM[] =
- "-----BEGIN RSA PRIVATE KEY-----\n"
- "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMYRkbhmI7kVA/rM\n"
- "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
- "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
- "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAECgYAvgOs4FJcgvp+TuREx7YtiYVsH\n"
- "mwQPTum2z/8VzWGwR8BBHBvIpVe1MbD/Y4seyI2aco/7UaisatSgJhsU46/9Y4fq\n"
- "2TwXH9QANf4at4d9n/R6rzwpAJOpgwZgKvdQjkfrKTtgLV+/dawvpxUYkRH4JZM1\n"
- "CVGukMfKNrSVH4Ap4QJBAOJmGV1ASPnB4r4nc99at7JuIJmd7fmuVUwUgYi4XgaR\n"
- "WhScBsgYwZ/JoywdyZJgnbcrTDuVcWG56B3vXbhdpMsCQQDf9zeJrjnPZ3Cqm79y\n"
- "kdqANep0uwZciiNiWxsQrCHztywOvbFhdp8iYVFG9EK8DMY41Y5TxUwsHD+67zao\n"
- "ZNqJAkEA1suLUP/GvL8IwuRneQd2tWDqqRQ/Td3qq03hP7e77XtF/buya3Ghclo5\n"
- "54czUR89QyVfJEC6278nzA7n2h1uVQJAcG6mztNL6ja/dKZjYZye2CY44QjSlLo0\n"
- "MTgTSjdfg/28fFn2Jjtqf9Pi/X+50LWI/RcYMC2no606wRk9kyOuIQJBAK6VSAim\n"
- "1pOEjsYQn0X5KEIrz1G3bfCbB848Ime3U2/FWlCHMr6ch8kCZ5d1WUeJD3LbwMNG\n"
- "UCXiYxSsu20QNVw=\n"
- "-----END RSA PRIVATE KEY-----\n";
-
-static const char kCERT_PEM[] =
- "-----BEGIN CERTIFICATE-----\n"
- "MIIBmTCCAQKgAwIBAgIEbzBSAjANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZX\n"
- "ZWJSVEMwHhcNMTQwMTAyMTgyNDQ3WhcNMTQwMjAxMTgyNDQ3WjARMQ8wDQYDVQQD\n"
- "EwZXZWJSVEMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMYRkbhmI7kVA/rM\n"
- "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
- "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
- "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAUflI\n"
- "VUe5Krqf5RVa5C3u/UTAOAUJBiDS3VANTCLBxjuMsvqOG0WvaYWP3HYPgrz0jXK2\n"
- "LJE/mGw3MyFHEqi81jh95J+ypl6xKW6Rm8jKLR87gUvCaVYn/Z4/P3AqcQTB7wOv\n"
- "UD0A8qfhfDM+LK6rPAnCsVN0NRDY3jvd6rzix9M=\n"
- "-----END CERTIFICATE-----\n";
+static const struct {
+ const char* rsa_private_key_pem;
+ const char* cert_pem;
+} kKeysAndCerts[] = {
+ {"-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMYRkbhmI7kVA/rM\n"
+ "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+ "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+ "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAECgYAvgOs4FJcgvp+TuREx7YtiYVsH\n"
+ "mwQPTum2z/8VzWGwR8BBHBvIpVe1MbD/Y4seyI2aco/7UaisatSgJhsU46/9Y4fq\n"
+ "2TwXH9QANf4at4d9n/R6rzwpAJOpgwZgKvdQjkfrKTtgLV+/dawvpxUYkRH4JZM1\n"
+ "CVGukMfKNrSVH4Ap4QJBAOJmGV1ASPnB4r4nc99at7JuIJmd7fmuVUwUgYi4XgaR\n"
+ "WhScBsgYwZ/JoywdyZJgnbcrTDuVcWG56B3vXbhdpMsCQQDf9zeJrjnPZ3Cqm79y\n"
+ "kdqANep0uwZciiNiWxsQrCHztywOvbFhdp8iYVFG9EK8DMY41Y5TxUwsHD+67zao\n"
+ "ZNqJAkEA1suLUP/GvL8IwuRneQd2tWDqqRQ/Td3qq03hP7e77XtF/buya3Ghclo5\n"
+ "54czUR89QyVfJEC6278nzA7n2h1uVQJAcG6mztNL6ja/dKZjYZye2CY44QjSlLo0\n"
+ "MTgTSjdfg/28fFn2Jjtqf9Pi/X+50LWI/RcYMC2no606wRk9kyOuIQJBAK6VSAim\n"
+ "1pOEjsYQn0X5KEIrz1G3bfCbB848Ime3U2/FWlCHMr6ch8kCZ5d1WUeJD3LbwMNG\n"
+ "UCXiYxSsu20QNVw=\n"
+ "-----END RSA PRIVATE KEY-----\n",
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBmTCCAQKgAwIBAgIEbzBSAjANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZX\n"
+ "ZWJSVEMwHhcNMTQwMTAyMTgyNDQ3WhcNMTQwMjAxMTgyNDQ3WjARMQ8wDQYDVQQD\n"
+ "EwZXZWJSVEMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMYRkbhmI7kVA/rM\n"
+ "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+ "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+ "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAUflI\n"
+ "VUe5Krqf5RVa5C3u/UTAOAUJBiDS3VANTCLBxjuMsvqOG0WvaYWP3HYPgrz0jXK2\n"
+ "LJE/mGw3MyFHEqi81jh95J+ypl6xKW6Rm8jKLR87gUvCaVYn/Z4/P3AqcQTB7wOv\n"
+ "UD0A8qfhfDM+LK6rPAnCsVN0NRDY3jvd6rzix9M=\n"
+ "-----END CERTIFICATE-----\n"},
+ {"-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIICXQIBAAKBgQDeYqlyJ1wuiMsi905e3X81/WA/G3ym50PIDZBVtSwZi7JVQPgj\n"
+ "Bl8CPZMvDh9EwB4Ji9ytA8dZZbQ4WbJWPr73zPpJSCvQqz6sOXSlenBRi72acNaQ\n"
+ "sOR/qPvviJx5I6Hqo4qemfnjZhAW85a5BpgrAwKgMLIQTHCTLWwVSyrDrwIDAQAB\n"
+ "AoGARni9eY8/hv+SX+I+05EdXt6MQXNUbQ+cSykBNCfVccLzIFEWUQMT2IHqwl6X\n"
+ "ShIXcq7/n1QzOAEiuzixauM3YHg4xZ1Um2Ha9a7ig5Xg4v6b43bmMkNE6LkoAtYs\n"
+ "qnQdfMh442b1liDud6IMb1Qk0amt3fSrgRMc547TZQVx4QECQQDxUeDm94r3p4ng\n"
+ "5rCLLC1K5/6HSTZsh7jatKPlz7GfP/IZlYV7iE5784/n0wRiCjZOS7hQRy/8m2Gp\n"
+ "pf4aZq+DAkEA6+np4d36FYikydvUrupLT3FkdRHGn/v83qOll/VmeNh+L1xMZlIP\n"
+ "tM26hAXCcQb7O5+J9y3cx2CAQsBS11ZXZQJAfGgTo76WG9p5UEJdXUInD2jOZPwv\n"
+ "XIATolxh6kXKcijLLLlSmT7KB0inNYIpzkkpee+7U1d/u6B3FriGaSHq9QJBAM/J\n"
+ "ICnDdLCgwNvWVraVQC3BpwSB2pswvCFwq7py94V60XFvbw80Ogc6qIv98qvQxVlX\n"
+ "hJIEgA/PjEi+0ng94Q0CQQDm8XSDby35gmjO+6eRmJtAjtB7nguLvrPXM6CPXRmD\n"
+ "sRoBocpHw6j9UdzZ6qYG0FkdXZghezXFY58ro2BYYRR3\n"
+ "-----END RSA PRIVATE KEY-----\n",
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIICWDCCAcGgAwIBAgIJALgDjxMbBOhbMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"
+ "BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"
+ "aWRnaXRzIFB0eSBMdGQwHhcNMTUxMTEzMjIzMjEzWhcNMTYxMTEyMjIzMjEzWjBF\n"
+ "MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"
+ "ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB\n"
+ "gQDeYqlyJ1wuiMsi905e3X81/WA/G3ym50PIDZBVtSwZi7JVQPgjBl8CPZMvDh9E\n"
+ "wB4Ji9ytA8dZZbQ4WbJWPr73zPpJSCvQqz6sOXSlenBRi72acNaQsOR/qPvviJx5\n"
+ "I6Hqo4qemfnjZhAW85a5BpgrAwKgMLIQTHCTLWwVSyrDrwIDAQABo1AwTjAdBgNV\n"
+ "HQ4EFgQUx2tbJdlcSTCepn09UdYORXKuSTAwHwYDVR0jBBgwFoAUx2tbJdlcSTCe\n"
+ "pn09UdYORXKuSTAwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQAmp9Id\n"
+ "E716gHMqeBG4S2FCgVFCr0a0ugkaneQAN/c2L9CbMemEN9W6jvucUIVOtYd90dDW\n"
+ "lXuowWmT/JctPe3D2qt4yvYW3puECHk2tVQmrJOZiZiTRtWm6HxkmoUYHYp/DtaS\n"
+ "1Xe29gSTnZtI5sQCrGMzk3SGRSSs7ejLKiVDBQ==\n"
+ "-----END CERTIFICATE-----\n"}};
class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
public rtc::MessageHandler {
@@ -77,6 +109,9 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
should_fail_ = should_fail;
}
+ void use_original_key() { key_index_ = 0; }
+ void use_alternate_key() { key_index_ = 1; }
+
void RequestIdentity(
rtc::KeyType key_type,
const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>&
@@ -92,8 +127,9 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
static rtc::scoped_refptr<rtc::RTCCertificate> GenerateCertificate() {
std::string cert;
std::string key;
- rtc::SSLIdentity::PemToDer("CERTIFICATE", kCERT_PEM, &cert);
- rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", kRSA_PRIVATE_KEY_PEM, &key);
+ rtc::SSLIdentity::PemToDer("CERTIFICATE", kKeysAndCerts[0].cert_pem, &cert);
+ rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY",
+ kKeysAndCerts[0].rsa_private_key_pem, &key);
std::string pem_cert = rtc::SSLIdentity::DerToPem(
rtc::kPemTypeCertificate,
@@ -106,7 +142,7 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
rtc::scoped_ptr<rtc::SSLIdentity> identity(
rtc::SSLIdentity::FromPEMStrings(pem_key, pem_cert));
- return rtc::RTCCertificate::Create(identity.Pass());
+ return rtc::RTCCertificate::Create(std::move(identity));
}
private:
@@ -115,6 +151,11 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
MSG_FAILURE,
};
+ const char* get_key() {
+ return kKeysAndCerts[key_index_].rsa_private_key_pem;
+ }
+ const char* get_cert() { return kKeysAndCerts[key_index_].cert_pem; }
+
// rtc::MessageHandler implementation.
void OnMessage(rtc::Message* msg) {
MessageData* message_data = static_cast<MessageData*>(msg->pdata);
@@ -124,9 +165,8 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
case MSG_SUCCESS: {
std::string cert;
std::string key;
- rtc::SSLIdentity::PemToDer("CERTIFICATE", kCERT_PEM, &cert);
- rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", kRSA_PRIVATE_KEY_PEM,
- &key);
+ rtc::SSLIdentity::PemToDer("CERTIFICATE", get_cert(), &cert);
+ rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", get_key(), &key);
observer->OnSuccess(cert, key);
break;
}
@@ -138,6 +178,7 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
}
bool should_fail_;
+ int key_index_ = 0;
};
#endif // TALK_APP_WEBRTC_TEST_FAKEDTLSIDENTITYSERVICE_H_
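The second key/certificate pair above lets a test simulate a certificate
change, e.g. across renegotiation. A sketch of the intended flow (the store
and PeerConnection wiring are assumed to come from existing test setup;
error handling omitted):

    // Illustrative only.
    FakeDtlsIdentityStore* store = new FakeDtlsIdentityStore();
    rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> store_holder(store);

    store->use_original_key();   // Identity requests yield kKeysAndCerts[0].
    // ... create the PeerConnection with std::move(store_holder) and
    // negotiate the first session ...

    store->use_alternate_key();  // Later requests yield kKeysAndCerts[1].
    // ... renegotiate and verify that the reported certificate changed ...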
diff --git a/talk/app/webrtc/test/fakemediastreamsignaling.h b/talk/app/webrtc/test/fakemediastreamsignaling.h
deleted file mode 100644
index 562c4ad306..0000000000
--- a/talk/app/webrtc/test/fakemediastreamsignaling.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * libjingle
- * Copyright 2013 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
-#define TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
-
-#include "talk/app/webrtc/audiotrack.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
-#include "talk/app/webrtc/videotrack.h"
-
-static const char kStream1[] = "stream1";
-static const char kVideoTrack1[] = "video1";
-static const char kAudioTrack1[] = "audio1";
-
-static const char kStream2[] = "stream2";
-static const char kVideoTrack2[] = "video2";
-static const char kAudioTrack2[] = "audio2";
-
-class FakeMediaStreamSignaling : public webrtc::MediaStreamSignaling,
- public webrtc::MediaStreamSignalingObserver {
- public:
- explicit FakeMediaStreamSignaling(cricket::ChannelManager* channel_manager) :
- webrtc::MediaStreamSignaling(rtc::Thread::Current(), this,
- channel_manager) {
- }
-
- void SendAudioVideoStream1() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
- }
-
- void SendAudioVideoStream2() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
- }
-
- void SendAudioVideoStream1And2() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
- }
-
- void SendNothing() {
- ClearLocalStreams();
- }
-
- void UseOptionsAudioOnly() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, ""));
- }
-
- void UseOptionsVideoOnly() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, "", kVideoTrack2));
- }
-
- void ClearLocalStreams() {
- while (local_streams()->count() != 0) {
- RemoveLocalStream(local_streams()->at(0));
- }
- }
-
- // Implements MediaStreamSignalingObserver.
- virtual void OnAddRemoteStream(webrtc::MediaStreamInterface* stream) {}
- virtual void OnRemoveRemoteStream(webrtc::MediaStreamInterface* stream) {}
- virtual void OnAddDataChannel(webrtc::DataChannelInterface* data_channel) {}
- virtual void OnAddLocalAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnAddLocalVideoTrack(webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track,
- uint32_t ssrc) {}
- virtual void OnAddRemoteAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnAddRemoteVideoTrack(webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track,
- uint32_t ssrc) {}
- virtual void OnRemoveRemoteAudioTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track) {}
- virtual void OnRemoveRemoteVideoTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track) {}
- virtual void OnRemoveLocalAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnRemoveLocalVideoTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track) {}
- virtual void OnRemoveLocalStream(webrtc::MediaStreamInterface* stream) {}
-
- private:
- rtc::scoped_refptr<webrtc::MediaStreamInterface> CreateStream(
- const std::string& stream_label,
- const std::string& audio_track_id,
- const std::string& video_track_id) {
- rtc::scoped_refptr<webrtc::MediaStreamInterface> stream(
- webrtc::MediaStream::Create(stream_label));
-
- if (!audio_track_id.empty()) {
- rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- webrtc::AudioTrack::Create(audio_track_id, NULL));
- stream->AddTrack(audio_track);
- }
-
- if (!video_track_id.empty()) {
- rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
- webrtc::VideoTrack::Create(video_track_id, NULL));
- stream->AddTrack(video_track);
- }
- return stream;
- }
-};
-
-#endif // TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
diff --git a/talk/app/webrtc/test/peerconnectiontestwrapper.cc b/talk/app/webrtc/test/peerconnectiontestwrapper.cc
index 2eb24d9700..86b7842517 100644
--- a/talk/app/webrtc/test/peerconnectiontestwrapper.cc
+++ b/talk/app/webrtc/test/peerconnectiontestwrapper.cc
@@ -25,13 +25,15 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/fakeperiodicvideocapturer.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
#include "talk/app/webrtc/test/peerconnectiontestwrapper.h"
#include "talk/app/webrtc/videosourceinterface.h"
#include "webrtc/base/gunit.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
static const char kStreamLabelBase[] = "stream_label";
static const char kVideoTrackLabelBase[] = "video_track";
@@ -70,10 +72,8 @@ PeerConnectionTestWrapper::~PeerConnectionTestWrapper() {}
bool PeerConnectionTestWrapper::CreatePc(
const MediaConstraintsInterface* constraints) {
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
- if (!allocator_factory_) {
- return false;
- }
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
fake_audio_capture_module_ = FakeAudioCaptureModule::Create();
if (fake_audio_capture_module_ == NULL) {
@@ -87,17 +87,17 @@ bool PeerConnectionTestWrapper::CreatePc(
return false;
}
- // CreatePeerConnection with IceServers.
- webrtc::PeerConnectionInterface::IceServers ice_servers;
+ // CreatePeerConnection with RTCConfiguration.
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
webrtc::PeerConnectionInterface::IceServer ice_server;
ice_server.uri = "stun:stun.l.google.com:19302";
- ice_servers.push_back(ice_server);
+ config.servers.push_back(ice_server);
rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store(
rtc::SSLStreamAdapter::HaveDtlsSrtp() ?
new FakeDtlsIdentityStore() : nullptr);
peer_connection_ = peer_connection_factory_->CreatePeerConnection(
- ice_servers, constraints, allocator_factory_.get(),
- dtls_identity_store.Pass(), this);
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), this);
return peer_connection_.get() != NULL;
}
diff --git a/talk/app/webrtc/test/peerconnectiontestwrapper.h b/talk/app/webrtc/test/peerconnectiontestwrapper.h
index b65426326f..883f2f2454 100644
--- a/talk/app/webrtc/test/peerconnectiontestwrapper.h
+++ b/talk/app/webrtc/test/peerconnectiontestwrapper.h
@@ -34,11 +34,6 @@
#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
#include "webrtc/base/sigslot.h"
-namespace webrtc {
-class DtlsIdentityStoreInterface;
-class PortAllocatorFactoryInterface;
-}
-
class PeerConnectionTestWrapper
: public webrtc::PeerConnectionObserver,
public webrtc::CreateSessionDescriptionObserver,
@@ -110,8 +105,6 @@ class PeerConnectionTestWrapper
bool video, const webrtc::FakeConstraints& video_constraints);
std::string name_;
- rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
- allocator_factory_;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
peer_connection_factory_;
diff --git a/talk/app/webrtc/videosource.cc b/talk/app/webrtc/videosource.cc
index b33f5f9e13..4b371e3ed5 100644
--- a/talk/app/webrtc/videosource.cc
+++ b/talk/app/webrtc/videosource.cc
@@ -32,6 +32,7 @@
#include "talk/app/webrtc/mediaconstraintsinterface.h"
#include "talk/session/media/channelmanager.h"
+#include "webrtc/base/arraysize.h"
using cricket::CaptureState;
using webrtc::MediaConstraintsInterface;
@@ -267,11 +268,12 @@ const cricket::VideoFormat& GetBestCaptureFormat(
// Set |option| to the highest-priority value of |key| in the constraints.
// Return false if the key is mandatory, and the value is invalid.
bool ExtractOption(const MediaConstraintsInterface* all_constraints,
- const std::string& key, cricket::Settable<bool>* option) {
+ const std::string& key,
+ rtc::Optional<bool>* option) {
size_t mandatory = 0;
bool value;
if (FindConstraint(all_constraints, key, &value, &mandatory)) {
- option->Set(value);
+ *option = rtc::Optional<bool>(value);
return true;
}
@@ -302,8 +304,6 @@ class FrameInputWrapper : public cricket::VideoRenderer {
virtual ~FrameInputWrapper() {}
// VideoRenderer implementation.
- bool SetSize(int width, int height, int reserved) override { return true; }
-
bool RenderFrame(const cricket::VideoFrame* frame) override {
if (!capturer_->IsRunning()) {
return true;
@@ -329,21 +329,23 @@ namespace webrtc {
rtc::scoped_refptr<VideoSource> VideoSource::Create(
cricket::ChannelManager* channel_manager,
cricket::VideoCapturer* capturer,
- const webrtc::MediaConstraintsInterface* constraints) {
+ const webrtc::MediaConstraintsInterface* constraints,
+ bool remote) {
ASSERT(channel_manager != NULL);
ASSERT(capturer != NULL);
- rtc::scoped_refptr<VideoSource> source(
- new rtc::RefCountedObject<VideoSource>(channel_manager,
- capturer));
+ rtc::scoped_refptr<VideoSource> source(new rtc::RefCountedObject<VideoSource>(
+ channel_manager, capturer, remote));
source->Initialize(constraints);
return source;
}
VideoSource::VideoSource(cricket::ChannelManager* channel_manager,
- cricket::VideoCapturer* capturer)
+ cricket::VideoCapturer* capturer,
+ bool remote)
: channel_manager_(channel_manager),
video_capturer_(capturer),
- state_(kInitializing) {
+ state_(kInitializing),
+ remote_(remote) {
channel_manager_->SignalVideoCaptureStateChange.connect(
this, &VideoSource::OnStateChange);
}
@@ -368,7 +370,7 @@ void VideoSource::Initialize(
} else {
// The VideoCapturer implementation doesn't support capability
// enumeration. We need to guess what the camera supports.
- for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
+ for (int i = 0; i < arraysize(kVideoFormats); ++i) {
formats.push_back(cricket::VideoFormat(kVideoFormats[i]));
}
}
@@ -460,7 +462,9 @@ void VideoSource::OnStateChange(cricket::VideoCapturer* capturer,
}
void VideoSource::SetState(SourceState new_state) {
- if (VERIFY(state_ != new_state)) {
+ // TODO(hbos): Temporarily disabled VERIFY due to webrtc:4776.
+ // if (VERIFY(state_ != new_state)) {
+ if (state_ != new_state) {
state_ = new_state;
FireOnChanged();
}
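
The ARRAY_SIZE macro used above is replaced by arraysize() from webrtc/base/arraysize.h. A minimal sketch of the pattern, with an example array:

    #include "webrtc/base/arraysize.h"

    static const int kValues[] = {7, 8, 9};
    // arraysize() is a compile-time element count, like the old ARRAY_SIZE().
    for (size_t i = 0; i < arraysize(kValues); ++i) {
      // ... use kValues[i] ...
    }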
diff --git a/talk/app/webrtc/videosource.h b/talk/app/webrtc/videosource.h
index 8253cbac18..98c1e083a3 100644
--- a/talk/app/webrtc/videosource.h
+++ b/talk/app/webrtc/videosource.h
@@ -66,9 +66,12 @@ class VideoSource : public Notifier<VideoSourceInterface>,
static rtc::scoped_refptr<VideoSource> Create(
cricket::ChannelManager* channel_manager,
cricket::VideoCapturer* capturer,
- const webrtc::MediaConstraintsInterface* constraints);
+ const webrtc::MediaConstraintsInterface* constraints,
+ bool remote);
+
+ SourceState state() const override { return state_; }
+ bool remote() const override { return remote_; }
- virtual SourceState state() const { return state_; }
virtual const cricket::VideoOptions* options() const { return &options_; }
virtual cricket::VideoRenderer* FrameInput();
@@ -86,7 +89,8 @@ class VideoSource : public Notifier<VideoSourceInterface>,
protected:
VideoSource(cricket::ChannelManager* channel_manager,
- cricket::VideoCapturer* capturer);
+ cricket::VideoCapturer* capturer,
+ bool remote);
virtual ~VideoSource();
void Initialize(const webrtc::MediaConstraintsInterface* constraints);
@@ -104,6 +108,7 @@ class VideoSource : public Notifier<VideoSourceInterface>,
cricket::VideoFormat format_;
cricket::VideoOptions options_;
SourceState state_;
+ const bool remote_;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/videosource_unittest.cc b/talk/app/webrtc/videosource_unittest.cc
index 2efcc1d84e..6f1df3434e 100644
--- a/talk/app/webrtc/videosource_unittest.cc
+++ b/talk/app/webrtc/videosource_unittest.cc
@@ -144,9 +144,9 @@ class VideoSourceTest : public testing::Test {
void CreateVideoSource(
const webrtc::MediaConstraintsInterface* constraints) {
// VideoSource takes ownership of |capturer_|.
- source_ = VideoSource::Create(channel_manager_.get(),
- capturer_cleanup_.release(),
- constraints);
+ source_ =
+ VideoSource::Create(channel_manager_.get(), capturer_cleanup_.release(),
+ constraints, false);
ASSERT_TRUE(source_.get() != NULL);
EXPECT_EQ(capturer_, source_->GetVideoCapturer());
@@ -210,8 +210,7 @@ TEST_F(VideoSourceTest, StopRestart) {
// RemoteVideoCapturer and takes video frames from FrameInput.
TEST_F(VideoSourceTest, StartStopRemote) {
source_ = VideoSource::Create(channel_manager_.get(),
- new webrtc::RemoteVideoCapturer(),
- NULL);
+ new webrtc::RemoteVideoCapturer(), NULL, true);
ASSERT_TRUE(source_.get() != NULL);
EXPECT_TRUE(NULL != source_->GetVideoCapturer());
@@ -392,16 +391,14 @@ TEST_F(VideoSourceTest, SetValidOptionValues) {
CreateVideoSource(&constraints);
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, OptionNotSet) {
FakeConstraints constraints;
CreateVideoSource(&constraints);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, MandatoryOptionOverridesOptional) {
@@ -413,9 +410,8 @@ TEST_F(VideoSourceTest, MandatoryOptionOverridesOptional) {
CreateVideoSource(&constraints);
- bool value = false;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(true),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionKeyOptional) {
@@ -428,9 +424,8 @@ TEST_F(VideoSourceTest, InvalidOptionKeyOptional) {
EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
kMaxWaitMs);
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionKeyMandatory) {
@@ -443,8 +438,7 @@ TEST_F(VideoSourceTest, InvalidOptionKeyMandatory) {
EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
kMaxWaitMs);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
@@ -456,8 +450,7 @@ TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
kMaxWaitMs);
- bool value = false;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionValueMandatory) {
@@ -473,8 +466,7 @@ TEST_F(VideoSourceTest, InvalidOptionValueMandatory) {
EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
kMaxWaitMs);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, MixedOptionsAndConstraints) {
@@ -497,9 +489,8 @@ TEST_F(VideoSourceTest, MixedOptionsAndConstraints) {
EXPECT_EQ(288, format->height);
EXPECT_EQ(30, format->framerate());
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
// Tests that the source starts video with the default resolution for
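
These assertions follow the cricket::Settable<T> to rtc::Optional<T> migration. A minimal sketch of the old and new idioms, as used in this diff:

    rtc::Optional<bool> option;                  // unset by default
    option = rtc::Optional<bool>(true);          // set (was option.Set(true))
    if (option) {                                // test (was option.IsSet())
      bool value = *option;                      // read (was option.Get(&value))
    }
    rtc::Optional<int> limit;                    // still unset
    int effective = limit.value_or(-1);          // default (was GetWithDefaultIfUnset(-1))
    EXPECT_EQ(rtc::Optional<int>(), limit);      // equality also covers the unset state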
diff --git a/talk/app/webrtc/videosourceproxy.h b/talk/app/webrtc/videosourceproxy.h
index 677fa9cf0f..ce96e8e6d1 100644
--- a/talk/app/webrtc/videosourceproxy.h
+++ b/talk/app/webrtc/videosourceproxy.h
@@ -38,6 +38,7 @@ namespace webrtc {
// signaling thread.
BEGIN_PROXY_MAP(VideoSource)
PROXY_CONSTMETHOD0(SourceState, state)
+ PROXY_CONSTMETHOD0(bool, remote)
PROXY_METHOD0(cricket::VideoCapturer*, GetVideoCapturer)
PROXY_METHOD0(void, Stop)
PROXY_METHOD0(void, Restart)
diff --git a/talk/app/webrtc/videotrack.cc b/talk/app/webrtc/videotrack.cc
index 7c78aea91f..f138240068 100644
--- a/talk/app/webrtc/videotrack.cc
+++ b/talk/app/webrtc/videotrack.cc
@@ -31,7 +31,7 @@
namespace webrtc {
-static const char kVideoTrackKind[] = "video";
+const char MediaStreamTrackInterface::kVideoKind[] = "video";
VideoTrack::VideoTrack(const std::string& label,
VideoSourceInterface* video_source)
@@ -47,7 +47,7 @@ VideoTrack::~VideoTrack() {
}
std::string VideoTrack::kind() const {
- return kVideoTrackKind;
+ return kVideoKind;
}
void VideoTrack::AddRenderer(VideoRendererInterface* renderer) {
diff --git a/talk/app/webrtc/videotrack_unittest.cc b/talk/app/webrtc/videotrack_unittest.cc
index 609ee80ffc..013d925cd2 100644
--- a/talk/app/webrtc/videotrack_unittest.cc
+++ b/talk/app/webrtc/videotrack_unittest.cc
@@ -62,7 +62,7 @@ class VideoTrackTest : public testing::Test {
video_track_ = VideoTrack::Create(
kVideoTrackId,
VideoSource::Create(channel_manager_.get(),
- new webrtc::RemoteVideoCapturer(), NULL));
+ new webrtc::RemoteVideoCapturer(), NULL, true));
}
protected:
diff --git a/talk/app/webrtc/videotrackrenderers.cc b/talk/app/webrtc/videotrackrenderers.cc
index 3c47c6edab..3f9301b718 100644
--- a/talk/app/webrtc/videotrackrenderers.cc
+++ b/talk/app/webrtc/videotrackrenderers.cc
@@ -54,10 +54,6 @@ void VideoTrackRenderers::SetEnabled(bool enable) {
enabled_ = enable;
}
-bool VideoTrackRenderers::SetSize(int width, int height, int reserved) {
- return true;
-}
-
bool VideoTrackRenderers::RenderFrame(const cricket::VideoFrame* frame) {
rtc::CritScope cs(&critical_section_);
if (!enabled_) {
diff --git a/talk/app/webrtc/videotrackrenderers.h b/talk/app/webrtc/videotrackrenderers.h
index 15274a1530..3262e22dff 100644
--- a/talk/app/webrtc/videotrackrenderers.h
+++ b/talk/app/webrtc/videotrackrenderers.h
@@ -48,7 +48,6 @@ class VideoTrackRenderers : public cricket::VideoRenderer {
~VideoTrackRenderers();
// Implements cricket::VideoRenderer
- virtual bool SetSize(int width, int height, int reserved);
virtual bool RenderFrame(const cricket::VideoFrame* frame);
void AddRenderer(VideoRendererInterface* renderer);
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index 3fa9a7d469..e287e90916 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -45,6 +45,7 @@
#include "webrtc/p2p/base/constants.h"
#include "webrtc/p2p/base/port.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/messagedigest.h"
@@ -121,6 +122,7 @@ static const char kLineTypeAttributes = 'a';
static const char kAttributeGroup[] = "group";
static const char kAttributeMid[] = "mid";
static const char kAttributeRtcpMux[] = "rtcp-mux";
+static const char kAttributeRtcpReducedSize[] = "rtcp-rsize";
static const char kAttributeSsrc[] = "ssrc";
static const char kSsrcAttributeCname[] = "cname";
static const char kAttributeExtmap[] = "extmap";
@@ -138,8 +140,8 @@ static const char kAttributeCandidate[] = "candidate";
static const char kAttributeCandidateTyp[] = "typ";
static const char kAttributeCandidateRaddr[] = "raddr";
static const char kAttributeCandidateRport[] = "rport";
-static const char kAttributeCandidateUsername[] = "username";
-static const char kAttributeCandidatePassword[] = "password";
+static const char kAttributeCandidateUfrag[] = "ufrag";
+static const char kAttributeCandidatePwd[] = "pwd";
static const char kAttributeCandidateGeneration[] = "generation";
static const char kAttributeFingerprint[] = "fingerprint";
static const char kAttributeSetup[] = "setup";
@@ -260,6 +262,7 @@ static void BuildRtpMap(const MediaContentDescription* media_desc,
const MediaType media_type,
std::string* message);
static void BuildCandidate(const std::vector<Candidate>& candidates,
+ bool include_ufrag,
std::string* message);
static void BuildIceOptions(const std::vector<std::string>& transport_options,
std::string* message);
@@ -876,7 +879,7 @@ std::string SdpSerializeCandidate(
std::string message;
std::vector<cricket::Candidate> candidates;
candidates.push_back(candidate.candidate());
- BuildCandidate(candidates, &message);
+ BuildCandidate(candidates, true, &message);
// Per WebRTC draft section 4.8.1.1, the candidate-attribute is just
// "candidate:<candidate>", not "a=candidate:<blah>" followed by CRLF.
ASSERT(message.find("a=") == 0);
@@ -1070,10 +1073,9 @@ bool ParseCandidate(const std::string& message, Candidate* candidate,
}
// Extension
- // Empty string as the candidate username and password.
- // Will be updated later with the ice-ufrag and ice-pwd.
- // TODO: Remove the username/password extension, which is currently
- // kept for backwards compatibility.
+ // Though non-standard, we support the ICE ufrag and pwd being signaled on
+ // the candidate to avoid confusion about which generation a candidate
+ // belongs to when trickling multiple generations at the same time.
std::string username;
std::string password;
uint32_t generation = 0;
@@ -1084,9 +1086,9 @@ bool ParseCandidate(const std::string& message, Candidate* candidate,
if (!GetValueFromString(first_line, fields[++i], &generation, error)) {
return false;
}
- } else if (fields[i] == kAttributeCandidateUsername) {
+ } else if (fields[i] == kAttributeCandidateUfrag) {
username = fields[++i];
- } else if (fields[i] == kAttributeCandidatePassword) {
+ } else if (fields[i] == kAttributeCandidatePwd) {
password = fields[++i];
} else {
// Skip the unknown extension.
@@ -1283,8 +1285,9 @@ void BuildMediaDescription(const ContentInfo* content_info,
}
}
- // Build the a=candidate lines.
- BuildCandidate(candidates, message);
+ // Build the a=candidate lines. We omit the ufrag and pwd here, since the
+ // media-level ice-ufrag/ice-pwd attributes already carry them.
+ BuildCandidate(candidates, false, message);
// Use the transport_info to build the media level ice-ufrag and ice-pwd.
if (transport_info) {
@@ -1292,13 +1295,17 @@ void BuildMediaDescription(const ContentInfo* content_info,
// ice-pwd-att = "ice-pwd" ":" password
// ice-ufrag-att = "ice-ufrag" ":" ufrag
// ice-ufrag
- InitAttrLine(kAttributeIceUfrag, &os);
- os << kSdpDelimiterColon << transport_info->description.ice_ufrag;
- AddLine(os.str(), message);
+ if (!transport_info->description.ice_ufrag.empty()) {
+ InitAttrLine(kAttributeIceUfrag, &os);
+ os << kSdpDelimiterColon << transport_info->description.ice_ufrag;
+ AddLine(os.str(), message);
+ }
// ice-pwd
- InitAttrLine(kAttributeIcePwd, &os);
- os << kSdpDelimiterColon << transport_info->description.ice_pwd;
- AddLine(os.str(), message);
+ if (!transport_info->description.ice_pwd.empty()) {
+ InitAttrLine(kAttributeIcePwd, &os);
+ os << kSdpDelimiterColon << transport_info->description.ice_pwd;
+ AddLine(os.str(), message);
+ }
// draft-petithuguenin-mmusic-ice-attributes-level-03
BuildIceOptions(transport_info->description.transport_options, message);
@@ -1399,6 +1406,13 @@ void BuildRtpContentAttributes(
AddLine(os.str(), message);
}
+ // RFC 5506
+ // a=rtcp-rsize
+ if (media_desc->rtcp_reduced_size()) {
+ InitAttrLine(kAttributeRtcpReducedSize, &os);
+ AddLine(os.str(), message);
+ }
+
// RFC 4568
// a=crypto:<tag> <crypto-suite> <key-params> [<session-params>]
for (std::vector<CryptoParams>::const_iterator it =
@@ -1525,7 +1539,7 @@ bool IsFmtpParam(const std::string& name) {
kCodecParamMaxAverageBitrate, kCodecParamMaxPlaybackRate,
kCodecParamAssociatedPayloadType
};
- for (size_t i = 0; i < ARRAY_SIZE(kFmtpParams); ++i) {
+ for (size_t i = 0; i < arraysize(kFmtpParams); ++i) {
if (_stricmp(name.c_str(), kFmtpParams[i]) == 0) {
return true;
}
@@ -1708,6 +1722,7 @@ void BuildRtpMap(const MediaContentDescription* media_desc,
}
void BuildCandidate(const std::vector<Candidate>& candidates,
+ bool include_ufrag,
std::string* message) {
std::ostringstream os;
@@ -1757,6 +1772,9 @@ void BuildCandidate(const std::vector<Candidate>& candidates,
// Extensions
os << kAttributeCandidateGeneration << " " << it->generation();
+ if (include_ufrag && !it->username().empty()) {
+ os << " " << kAttributeCandidateUfrag << " " << it->username();
+ }
AddLine(os.str(), message);
}
@@ -2046,7 +2064,7 @@ static bool ParseDtlsSetup(const std::string& line,
struct StaticPayloadAudioCodec {
const char* name;
int clockrate;
- int channels;
+ size_t channels;
};
static const StaticPayloadAudioCodec kStaticPayloadAudioCodecs[] = {
{ "PCMU", 8000, 1 },
@@ -2082,10 +2100,10 @@ void MaybeCreateStaticPayloadAudioCodecs(
int payload_type = *it;
if (!media_desc->HasCodec(payload_type) &&
payload_type >= 0 &&
- payload_type < ARRAY_SIZE(kStaticPayloadAudioCodecs)) {
+ payload_type < arraysize(kStaticPayloadAudioCodecs)) {
std::string encoding_name = kStaticPayloadAudioCodecs[payload_type].name;
int clock_rate = kStaticPayloadAudioCodecs[payload_type].clockrate;
- int channels = kStaticPayloadAudioCodecs[payload_type].channels;
+ size_t channels = kStaticPayloadAudioCodecs[payload_type].channels;
media_desc->AddCodec(cricket::AudioCodec(payload_type, encoding_name,
clock_rate, 0, channels,
preference));
@@ -2552,6 +2570,8 @@ bool ParseContent(const std::string& message,
//
if (HasAttribute(line, kAttributeRtcpMux)) {
media_desc->set_rtcp_mux(true);
+ } else if (HasAttribute(line, kAttributeRtcpReducedSize)) {
+ media_desc->set_rtcp_reduced_size(true);
} else if (HasAttribute(line, kAttributeSsrcGroup)) {
if (!ParseSsrcGroupAttribute(line, &ssrc_groups, error)) {
return false;
@@ -2666,7 +2686,8 @@ bool ParseContent(const std::string& message,
// Update the candidates with the media level "ice-pwd" and "ice-ufrag".
for (Candidates::iterator it = candidates_orig.begin();
it != candidates_orig.end(); ++it) {
- ASSERT((*it).username().empty());
+ ASSERT((*it).username().empty() ||
+ (*it).username() == transport->ice_ufrag);
(*it).set_username(transport->ice_ufrag);
ASSERT((*it).password().empty());
(*it).set_password(transport->ice_pwd);
@@ -2817,7 +2838,7 @@ bool ParseCryptoAttribute(const std::string& line,
// Updates or creates a new codec entry in the audio description according to
// |name|, |clockrate|, |bitrate|, |channels| and |preference|.
void UpdateCodec(int payload_type, const std::string& name, int clockrate,
- int bitrate, int channels, int preference,
+ int bitrate, size_t channels, int preference,
AudioContentDescription* audio_desc) {
// Codec may already be populated with (only) optional parameters
// (from an fmtp).
@@ -2916,7 +2937,7 @@ bool ParseRtpmapAttribute(const std::string& line,
// of audio channels. This parameter is OPTIONAL and may be
// omitted if the number of channels is one, provided that no
// additional parameters are needed.
- int channels = 1;
+ size_t channels = 1;
if (codec_params.size() == 3) {
if (!GetValueFromString(line, codec_params[2], &channels, error)) {
return false;
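
To make the webrtcsdp.cc changes above concrete, here is a hypothetical serialized fragment (the address, port, and ufrag value are illustrative only). a=rtcp-rsize is emitted whenever rtcp_reduced_size() is set on the media description, and the trailing ufrag extension is appended only when include_ufrag is true, i.e. for individually serialized candidates:

    a=rtcp-mux
    a=rtcp-rsize
    a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host generation 2 ufrag abcd

Candidates embedded in a full session description omit the ufrag, since the media-level a=ice-ufrag/a=ice-pwd lines (now themselves conditional on non-empty values) already carry the credentials.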
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
index cb6a392ab4..15fc8083b4 100644
--- a/talk/app/webrtc/webrtcsdp_unittest.cc
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -30,6 +30,9 @@
#include <vector>
#include "talk/app/webrtc/jsepsessiondescription.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/webrtcsdp.h"
#include "talk/media/base/constants.h"
#include "webrtc/p2p/base/constants.h"
@@ -80,11 +83,13 @@ static const char kSessionTime[] = "t=0 0\r\n";
static const uint32_t kCandidatePriority = 2130706432U; // pref = 1.0
static const char kCandidateUfragVoice[] = "ufrag_voice";
static const char kCandidatePwdVoice[] = "pwd_voice";
+static const char kAttributeIceUfragVoice[] = "a=ice-ufrag:ufrag_voice\r\n";
static const char kAttributeIcePwdVoice[] = "a=ice-pwd:pwd_voice\r\n";
static const char kCandidateUfragVideo[] = "ufrag_video";
static const char kCandidatePwdVideo[] = "pwd_video";
static const char kCandidateUfragData[] = "ufrag_data";
static const char kCandidatePwdData[] = "pwd_data";
+static const char kAttributeIceUfragVideo[] = "a=ice-ufrag:ufrag_video\r\n";
static const char kAttributeIcePwdVideo[] = "a=ice-pwd:pwd_video\r\n";
static const uint32_t kCandidateGeneration = 2;
static const char kCandidateFoundation1[] = "a0+B/1";
@@ -153,6 +158,7 @@ static const char kSdpFullString[] =
"a=mid:audio_content_name\r\n"
"a=sendrecv\r\n"
"a=rtcp-mux\r\n"
+ "a=rtcp-rsize\r\n"
"a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
"inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
"dummy_session_params\r\n"
@@ -220,6 +226,7 @@ static const char kSdpString[] =
"a=mid:audio_content_name\r\n"
"a=sendrecv\r\n"
"a=rtcp-mux\r\n"
+ "a=rtcp-rsize\r\n"
"a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
"inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
"dummy_session_params\r\n"
@@ -394,9 +401,9 @@ static const char kRawIPV6Candidate[] =
"abcd::abcd::abcd::abcd::abcd::abcd::abcd::abcd 1234 typ host generation 2";
// One candidate reference string.
-static const char kSdpOneCandidateOldFormat[] =
+static const char kSdpOneCandidateWithUfragPwd[] =
"a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host network_name"
- " eth0 username user_rtp password password_rtp generation 2\r\n";
+ " eth0 ufrag user_rtp pwd password_rtp generation 2\r\n";
// Session id and version
static const char kSessionId[] = "18446744069414584320";
@@ -523,10 +530,14 @@ static void ReplaceDirection(cricket::MediaContentDirection direction,
static void ReplaceRejected(bool audio_rejected, bool video_rejected,
std::string* message) {
if (audio_rejected) {
- Replace("m=audio 2345", "m=audio 0", message);
+ Replace("m=audio 9", "m=audio 0", message);
+ Replace(kAttributeIceUfragVoice, "", message);
+ Replace(kAttributeIcePwdVoice, "", message);
}
if (video_rejected) {
- Replace("m=video 3457", "m=video 0", message);
+ Replace("m=video 9", "m=video 0", message);
+ Replace(kAttributeIceUfragVideo, "", message);
+ Replace(kAttributeIcePwdVideo, "", message);
}
}
@@ -536,6 +547,9 @@ class WebRtcSdpTest : public testing::Test {
public:
WebRtcSdpTest()
: jdesc_(kDummyString) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
// AudioContentDescription
audio_desc_ = CreateAudioContentDescription();
AudioCodec opus(111, "opus", 48000, 0, 2, 3);
@@ -704,6 +718,7 @@ class WebRtcSdpTest : public testing::Test {
AudioContentDescription* CreateAudioContentDescription() {
AudioContentDescription* audio = new AudioContentDescription();
audio->set_rtcp_mux(true);
+ audio->set_rtcp_reduced_size(true);
StreamParams audio_stream1;
audio_stream1.id = kAudioTrackId1;
audio_stream1.cname = kStream1Cname;
@@ -735,6 +750,9 @@ class WebRtcSdpTest : public testing::Test {
// rtcp_mux
EXPECT_EQ(cd1->rtcp_mux(), cd2->rtcp_mux());
+ // rtcp_reduced_size
+ EXPECT_EQ(cd1->rtcp_reduced_size(), cd2->rtcp_reduced_size());
+
// cryptos
EXPECT_EQ(cd1->cryptos().size(), cd2->cryptos().size());
if (cd1->cryptos().size() != cd2->cryptos().size()) {
@@ -979,6 +997,18 @@ class WebRtcSdpTest : public testing::Test {
desc_.AddTransportInfo(transport_info);
}
+ void SetIceUfragPwd(const std::string& content_name,
+ const std::string& ice_ufrag,
+ const std::string& ice_pwd) {
+ ASSERT_TRUE(desc_.GetTransportInfoByName(content_name) != NULL);
+ cricket::TransportInfo transport_info =
+ *(desc_.GetTransportInfoByName(content_name));
+ desc_.RemoveTransportInfoByName(content_name);
+ transport_info.description.ice_ufrag = ice_ufrag;
+ transport_info.description.ice_pwd = ice_pwd;
+ desc_.AddTransportInfo(transport_info);
+ }
+
void AddFingerprint() {
desc_.RemoveTransportInfoByName(kAudioContentName);
desc_.RemoveTransportInfoByName(kVideoContentName);
@@ -1050,15 +1080,22 @@ class WebRtcSdpTest : public testing::Test {
audio_desc_);
desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
video_desc_);
- std::string new_sdp = kSdpFullString;
+ SetIceUfragPwd(kAudioContentName,
+ audio_rejected ? "" : kCandidateUfragVoice,
+ audio_rejected ? "" : kCandidatePwdVoice);
+ SetIceUfragPwd(kVideoContentName,
+ video_rejected ? "" : kCandidateUfragVideo,
+ video_rejected ? "" : kCandidatePwdVideo);
+
+ std::string new_sdp = kSdpString;
ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
- if (!jdesc_.Initialize(desc_.Copy(),
- jdesc_.session_id(),
- jdesc_.session_version())) {
+ JsepSessionDescription jdesc_no_candidates(kDummyString);
+ if (!jdesc_no_candidates.Initialize(desc_.Copy(), kSessionId,
+ kSessionVersion)) {
return false;
}
- std::string message = webrtc::SdpSerialize(jdesc_);
+ std::string message = webrtc::SdpSerialize(jdesc_no_candidates);
EXPECT_EQ(new_sdp, message);
return true;
}
@@ -1121,11 +1158,11 @@ class WebRtcSdpTest : public testing::Test {
}
bool TestDeserializeRejected(bool audio_rejected, bool video_rejected) {
- std::string new_sdp = kSdpFullString;
+ std::string new_sdp = kSdpString;
ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
JsepSessionDescription new_jdesc(JsepSessionDescription::kOffer);
-
EXPECT_TRUE(SdpDeserialize(new_sdp, &new_jdesc));
+
audio_desc_ = static_cast<AudioContentDescription*>(
audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(
@@ -1136,12 +1173,18 @@ class WebRtcSdpTest : public testing::Test {
audio_desc_);
desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
video_desc_);
- if (!jdesc_.Initialize(desc_.Copy(),
- jdesc_.session_id(),
- jdesc_.session_version())) {
+ SetIceUfragPwd(kAudioContentName,
+ audio_rejected ? "" : kCandidateUfragVoice,
+ audio_rejected ? "" : kCandidatePwdVoice);
+ SetIceUfragPwd(kVideoContentName,
+ video_rejected ? "" : kCandidateUfragVideo,
+ video_rejected ? "" : kCandidatePwdVideo);
+ JsepSessionDescription jdesc_no_candidates(kDummyString);
+ if (!jdesc_no_candidates.Initialize(desc_.Copy(), jdesc_.session_id(),
+ jdesc_.session_version())) {
return false;
}
- EXPECT_TRUE(CompareSessionDescription(jdesc_, new_jdesc));
+ EXPECT_TRUE(CompareSessionDescription(jdesc_no_candidates, new_jdesc));
return true;
}
@@ -1540,8 +1583,8 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithFingerprintNoCryptos) {
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithoutCandidates) {
// JsepSessionDescription with desc but without candidates.
JsepSessionDescription jdesc_no_candidates(kDummyString);
- ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(),
- kSessionId, kSessionVersion));
+ ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(), kSessionId,
+ kSessionVersion));
std::string message = webrtc::SdpSerialize(jdesc_no_candidates);
EXPECT_EQ(std::string(kSdpString), message);
}
@@ -1721,6 +1764,13 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmap) {
TEST_F(WebRtcSdpTest, SerializeCandidates) {
std::string message = webrtc::SdpSerializeCandidate(*jcandidate_);
EXPECT_EQ(std::string(kRawCandidate), message);
+
+ Candidate candidate_with_ufrag(candidates_.front());
+ candidate_with_ufrag.set_username("ABC");
+ jcandidate_.reset(new JsepIceCandidate(std::string("audio_content_name"), 0,
+ candidate_with_ufrag));
+ message = webrtc::SdpSerializeCandidate(*jcandidate_);
+ EXPECT_EQ(std::string(kRawCandidate) + " ufrag ABC", message);
}
// TODO(mallinath) : Enable this test once WebRTCSdp capable of parsing
@@ -2317,9 +2367,10 @@ TEST_F(WebRtcSdpTest, DeserializeCandidateWithDifferentTransport) {
EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));
}
-TEST_F(WebRtcSdpTest, DeserializeCandidateOldFormat) {
+TEST_F(WebRtcSdpTest, DeserializeCandidateWithUfragPwd) {
JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);
- EXPECT_TRUE(SdpDeserializeCandidate(kSdpOneCandidateOldFormat,&jcandidate));
+ EXPECT_TRUE(
+ SdpDeserializeCandidate(kSdpOneCandidateWithUfragPwd, &jcandidate));
EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
Candidate ref_candidate = jcandidate_->candidate();
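
The ReplaceRejected() and SetIceUfragPwd() updates above encode the expected serialization of a rejected m= section: the port becomes 0 and, with this change, the a=ice-ufrag/a=ice-pwd lines disappear because empty transport credentials are no longer serialized. Schematically, for rejected audio (payload and transport details elided):

    m=audio 9 ...               ->  m=audio 0 ...
    a=ice-ufrag:ufrag_voice     ->  (removed)
    a=ice-pwd:pwd_voice         ->  (removed)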
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
index 95abeab77a..d8f76379c1 100644
--- a/talk/app/webrtc/webrtcsession.cc
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -30,13 +30,13 @@
#include <limits.h>
#include <algorithm>
-#include <vector>
#include <set>
+#include <utility>
+#include <vector>
#include "talk/app/webrtc/jsepicecandidate.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediaconstraintsinterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/sctputils.h"
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
@@ -45,6 +45,7 @@
#include "talk/session/media/channel.h"
#include "talk/session/media/channelmanager.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/helpers.h"
@@ -441,10 +442,11 @@ static std::string MakeTdErrorString(const std::string& desc) {
// Set |option| to the highest-priority value of |key| in the optional
// constraints if the key is found and has a valid value.
-template<typename T>
+template <typename T>
static void SetOptionFromOptionalConstraint(
const MediaConstraintsInterface* constraints,
- const std::string& key, cricket::Settable<T>* option) {
+ const std::string& key,
+ rtc::Optional<T>* option) {
if (!constraints) {
return;
}
@@ -452,7 +454,7 @@ static void SetOptionFromOptionalConstraint(
T value;
if (constraints->GetOptional().FindFirst(key, &string_value)) {
if (rtc::FromString(string_value, &value)) {
- option->Set(value);
+ *option = rtc::Optional<T>(value);
}
}
}
@@ -492,9 +494,13 @@ class IceRestartAnswerLatch {
}
}
+ // This method has two purposes: 1. Return whether |new_desc| requests
+ // an ICE restart (i.e., new ufrag/pwd). 2. If it requests an ICE restart
+ // and it is an offer, remember this in |ice_restart_| so that the next
+ // local answer is created with a new ufrag and pwd.
bool CheckForRemoteIceRestart(const SessionDescriptionInterface* old_desc,
const SessionDescriptionInterface* new_desc) {
- if (!old_desc || new_desc->type() != SessionDescriptionInterface::kOffer) {
+ if (!old_desc) {
return false;
}
const SessionDescription* new_sd = new_desc->description();
@@ -520,7 +526,9 @@ class IceRestartAnswerLatch {
new_transport_desc->ice_ufrag,
new_transport_desc->ice_pwd)) {
LOG(LS_INFO) << "Remote peer request ice restart.";
- ice_restart_ = true;
+ if (new_desc->type() == SessionDescriptionInterface::kOffer) {
+ ice_restart_ = true;
+ }
return true;
}
}
@@ -593,6 +601,8 @@ bool WebRtcSession::Initialize(
const PeerConnectionInterface::RTCConfiguration& rtc_configuration) {
bundle_policy_ = rtc_configuration.bundle_policy;
rtcp_mux_policy_ = rtc_configuration.rtcp_mux_policy;
+ video_options_.disable_prerenderer_smoothing =
+ rtc::Optional<bool>(rtc_configuration.disable_prerenderer_smoothing);
transport_controller_->SetSslMaxProtocolVersion(options.ssl_max_version);
// Obtain a certificate from RTCConfiguration if any were provided (optional).
@@ -644,8 +654,8 @@ bool WebRtcSession::Initialize(
constraints,
MediaConstraintsInterface::kEnableDscp,
&value, NULL)) {
- audio_options_.dscp.Set(value);
- video_options_.dscp.Set(value);
+ audio_options_.dscp = rtc::Optional<bool>(value);
+ video_options_.dscp = rtc::Optional<bool>(value);
}
// Find Suspend Below Min Bitrate constraint.
@@ -654,7 +664,7 @@ bool WebRtcSession::Initialize(
MediaConstraintsInterface::kEnableVideoSuspendBelowMinBitrate,
&value,
NULL)) {
- video_options_.suspend_below_min_bitrate.Set(value);
+ video_options_.suspend_below_min_bitrate = rtc::Optional<bool>(value);
}
SetOptionFromOptionalConstraint(constraints,
@@ -684,12 +694,10 @@ bool WebRtcSession::Initialize(
SetOptionFromOptionalConstraint(constraints,
MediaConstraintsInterface::kNumUnsignalledRecvStreams,
&video_options_.unsignalled_recv_stream_limit);
- if (video_options_.unsignalled_recv_stream_limit.IsSet()) {
- int stream_limit;
- video_options_.unsignalled_recv_stream_limit.Get(&stream_limit);
- stream_limit = std::min(kMaxUnsignalledRecvStreams, stream_limit);
- stream_limit = std::max(0, stream_limit);
- video_options_.unsignalled_recv_stream_limit.Set(stream_limit);
+ if (video_options_.unsignalled_recv_stream_limit) {
+ video_options_.unsignalled_recv_stream_limit = rtc::Optional<int>(
+ std::max(0, std::min(kMaxUnsignalledRecvStreams,
+ *video_options_.unsignalled_recv_stream_limit)));
}
SetOptionFromOptionalConstraint(constraints,
@@ -700,22 +708,12 @@ bool WebRtcSession::Initialize(
MediaConstraintsInterface::kCombinedAudioVideoBwe,
&audio_options_.combined_audio_video_bwe);
- audio_options_.audio_jitter_buffer_max_packets.Set(
- rtc_configuration.audio_jitter_buffer_max_packets);
+ audio_options_.audio_jitter_buffer_max_packets =
+ rtc::Optional<int>(rtc_configuration.audio_jitter_buffer_max_packets);
- audio_options_.audio_jitter_buffer_fast_accelerate.Set(
+ audio_options_.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(
rtc_configuration.audio_jitter_buffer_fast_accelerate);
- const cricket::VideoCodec default_codec(
- JsepSessionDescription::kDefaultVideoCodecId,
- JsepSessionDescription::kDefaultVideoCodecName,
- JsepSessionDescription::kMaxVideoCodecWidth,
- JsepSessionDescription::kMaxVideoCodecHeight,
- JsepSessionDescription::kDefaultVideoCodecFramerate,
- JsepSessionDescription::kDefaultVideoCodecPreference);
- channel_manager_->SetDefaultVideoEncoderConfig(
- cricket::VideoEncoderConfig(default_codec));
-
if (!dtls_enabled_) {
// Construct with DTLS disabled.
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
@@ -726,7 +724,7 @@ bool WebRtcSession::Initialize(
// Use the |dtls_identity_store| to generate a certificate.
RTC_DCHECK(dtls_identity_store);
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
- signaling_thread(), channel_manager_, dtls_identity_store.Pass(),
+ signaling_thread(), channel_manager_, std::move(dtls_identity_store),
this, id()));
} else {
// Use the already generated certificate.
@@ -744,12 +742,6 @@ bool WebRtcSession::Initialize(
port_allocator()->set_candidate_filter(
ConvertIceTransportTypeToCandidateFilter(rtc_configuration.type));
- if (rtc_configuration.enable_localhost_ice_candidate) {
- port_allocator()->set_flags(
- port_allocator()->flags() |
- cricket::PORTALLOCATOR_ENABLE_LOCALHOST_CANDIDATE);
- }
-
return true;
}
@@ -769,14 +761,20 @@ cricket::SecurePolicy WebRtcSession::SdesPolicy() const {
return webrtc_session_desc_factory_->SdesPolicy();
}
-bool WebRtcSession::GetSslRole(rtc::SSLRole* role) {
+bool WebRtcSession::GetSslRole(const std::string& transport_name,
+ rtc::SSLRole* role) {
if (!local_desc_ || !remote_desc_) {
LOG(LS_INFO) << "Local and Remote descriptions must be applied to get "
<< "SSL Role of the session.";
return false;
}
- return transport_controller_->GetSslRole(role);
+ return transport_controller_->GetSslRole(transport_name, role);
+}
+
+bool WebRtcSession::GetSslRole(const cricket::BaseChannel* channel,
+ rtc::SSLRole* role) {
+ return channel && GetSslRole(channel->transport_name(), role);
}
void WebRtcSession::CreateOffer(
@@ -978,15 +976,12 @@ bool WebRtcSession::UpdateSessionState(
return BadPranswerSdp(source, GetSessionErrorMsg(), err_desc);
}
} else if (action == kAnswer) {
- if (!PushdownTransportDescription(source, cricket::CA_ANSWER, &td_err)) {
- return BadAnswerSdp(source, MakeTdErrorString(td_err), err_desc);
- }
const cricket::ContentGroup* local_bundle =
local_desc_->description()->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
const cricket::ContentGroup* remote_bundle =
remote_desc_->description()->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
if (local_bundle && remote_bundle) {
- // The answerer decides the transport to bundle on
+ // The answerer decides the transport to bundle on.
const cricket::ContentGroup* answer_bundle =
(source == cricket::CS_LOCAL ? local_bundle : remote_bundle);
if (!EnableBundle(*answer_bundle)) {
@@ -994,6 +989,11 @@ bool WebRtcSession::UpdateSessionState(
return BadAnswerSdp(source, kEnableBundleFailed, err_desc);
}
}
+ // Only push down the transport description after enabling BUNDLE; we don't
+ // want to push down a description on a transport about to be destroyed.
+ if (!PushdownTransportDescription(source, cricket::CA_ANSWER, &td_err)) {
+ return BadAnswerSdp(source, MakeTdErrorString(td_err), err_desc);
+ }
EnableChannels();
SetState(STATE_INPROGRESS);
if (!PushdownMediaDescription(cricket::CA_ANSWER, source, err_desc)) {
@@ -1250,6 +1250,8 @@ cricket::IceConfig WebRtcSession::ParseIceConfig(
const PeerConnectionInterface::RTCConfiguration& config) const {
cricket::IceConfig ice_config;
ice_config.receiving_timeout_ms = config.ice_connection_receiving_timeout;
+ ice_config.backup_connection_ping_interval =
+ config.ice_backup_candidate_pair_ping_interval;
ice_config.gather_continually = (config.continual_gathering_policy ==
PeerConnectionInterface::GATHER_CONTINUALLY);
return ice_config;
@@ -1326,6 +1328,15 @@ void WebRtcSession::SetAudioPlayoutVolume(uint32_t ssrc, double volume) {
}
}
+void WebRtcSession::SetRawAudioSink(uint32_t ssrc,
+ rtc::scoped_ptr<AudioSinkInterface> sink) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_)
+ return;
+
+ voice_channel_->SetRawAudioSink(ssrc, std::move(sink));
+}
+
bool WebRtcSession::SetCaptureDevice(uint32_t ssrc,
cricket::VideoCapturer* camera) {
ASSERT(signaling_thread()->IsCurrent());
@@ -1409,8 +1420,7 @@ bool WebRtcSession::InsertDtmf(const std::string& track_id,
LOG(LS_ERROR) << "InsertDtmf: Track does not exist: " << track_id;
return false;
}
- if (!voice_channel_->InsertDtmf(send_ssrc, code, duration,
- cricket::DF_SEND)) {
+ if (!voice_channel_->InsertDtmf(send_ssrc, code, duration)) {
LOG(LS_ERROR) << "Failed to insert DTMF to channel.";
return false;
}
@@ -1747,7 +1757,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstVideoContent(desc);
if ((!video_info || video_info->rejected) && video_channel_) {
SignalVideoChannelDestroyed();
- const std::string content_name = video_channel_->content_name();
channel_manager_->DestroyVideoChannel(video_channel_.release());
}
@@ -1755,7 +1764,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstAudioContent(desc);
if ((!voice_info || voice_info->rejected) && voice_channel_) {
SignalVoiceChannelDestroyed();
- const std::string content_name = voice_channel_->content_name();
channel_manager_->DestroyVoiceChannel(voice_channel_.release());
}
@@ -1763,7 +1771,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstDataContent(desc);
if ((!data_info || data_info->rejected) && data_channel_) {
SignalDataChannelDestroyed();
- const std::string content_name = data_channel_->content_name();
channel_manager_->DestroyDataChannel(data_channel_.release());
}
}
@@ -2164,9 +2171,10 @@ void WebRtcSession::ReportNegotiatedCiphers(
return;
}
- const std::string& srtp_cipher = stats.channel_stats[0].srtp_cipher;
- int ssl_cipher = stats.channel_stats[0].ssl_cipher;
- if (srtp_cipher.empty() && !ssl_cipher) {
+ int srtp_crypto_suite = stats.channel_stats[0].srtp_crypto_suite;
+ int ssl_cipher_suite = stats.channel_stats[0].ssl_cipher_suite;
+ if (srtp_crypto_suite == rtc::SRTP_INVALID_CRYPTO_SUITE &&
+ ssl_cipher_suite == rtc::TLS_NULL_WITH_NULL_NULL) {
return;
}
@@ -2186,12 +2194,13 @@ void WebRtcSession::ReportNegotiatedCiphers(
return;
}
- if (!srtp_cipher.empty()) {
- metrics_observer_->IncrementSparseEnumCounter(
- srtp_counter_type, rtc::GetSrtpCryptoSuiteFromName(srtp_cipher));
+ if (srtp_crypto_suite != rtc::SRTP_INVALID_CRYPTO_SUITE) {
+ metrics_observer_->IncrementSparseEnumCounter(srtp_counter_type,
+ srtp_crypto_suite);
}
- if (ssl_cipher) {
- metrics_observer_->IncrementSparseEnumCounter(ssl_counter_type, ssl_cipher);
+ if (ssl_cipher_suite != rtc::TLS_NULL_WITH_NULL_NULL) {
+ metrics_observer_->IncrementSparseEnumCounter(ssl_counter_type,
+ ssl_cipher_suite);
}
}
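
A minimal usage sketch for the new SetRawAudioSink() above; MyAudioSink is a hypothetical implementation of the AudioSinkInterface from webrtc/audio/audio_sink.h, not part of this change:

    // Must run on the signaling thread, per the ASSERT above.
    rtc::scoped_ptr<webrtc::AudioSinkInterface> sink(new MyAudioSink());
    session->SetRawAudioSink(ssrc, std::move(sink));
    // Silently a no-op when no voice channel exists; otherwise ownership of
    // the sink moves to the voice channel for the given SSRC.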
diff --git a/talk/app/webrtc/webrtcsession.h b/talk/app/webrtc/webrtcsession.h
index d9c40d1a83..b79e0ec270 100644
--- a/talk/app/webrtc/webrtcsession.h
+++ b/talk/app/webrtc/webrtcsession.h
@@ -38,11 +38,11 @@
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/statstypes.h"
#include "talk/media/base/mediachannel.h"
-#include "webrtc/p2p/base/transportcontroller.h"
#include "talk/session/media/mediasession.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/base/transportcontroller.h"
namespace cricket {
@@ -204,7 +204,11 @@ class WebRtcSession : public AudioProviderInterface,
cricket::SecurePolicy SdesPolicy() const;
// Get current ssl role from transport.
- bool GetSslRole(rtc::SSLRole* role);
+ bool GetSslRole(const std::string& transport_name, rtc::SSLRole* role);
+
+ // Get current SSL role for this channel's transport.
+ // If |transport| is null, returns false.
+ bool GetSslRole(const cricket::BaseChannel* channel, rtc::SSLRole* role);
void CreateOffer(
CreateSessionDescriptionObserver* observer,
@@ -250,6 +254,8 @@ class WebRtcSession : public AudioProviderInterface,
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer) override;
void SetAudioPlayoutVolume(uint32_t ssrc, double volume) override;
+ void SetRawAudioSink(uint32_t ssrc,
+ rtc::scoped_ptr<AudioSinkInterface> sink) override;
// Implements VideoMediaProviderInterface.
bool SetCaptureDevice(uint32_t ssrc, cricket::VideoCapturer* camera) override;
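
And a usage sketch for the two GetSslRole() overloads declared above, mirroring how the session description factory at the end of this diff consumes them per transport:

    rtc::SSLRole role;
    // The channel overload forwards channel->transport_name() and returns
    // false for a null channel, so callers need no separate null check.
    if (session->GetSslRole(session->voice_channel(), &role)) {
      bool prefer_passive = (rtc::SSL_SERVER == role);
      // ... feed prefer_passive into the audio transport options ...
    }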
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index 3eb46f1d3c..e81b8b5b54 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -25,6 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <utility>
#include <vector>
#include "talk/app/webrtc/audiotrack.h"
@@ -33,7 +34,6 @@
#include "talk/app/webrtc/jsepicecandidate.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/peerconnection.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/sctputils.h"
#include "talk/app/webrtc/streamcollection.h"
#include "talk/app/webrtc/streamcollection.h"
@@ -72,8 +72,6 @@
return; \
}
-using cricket::DF_PLAY;
-using cricket::DF_SEND;
using cricket::FakeVoiceMediaChannel;
using cricket::TransportInfo;
using rtc::SocketAddress;
@@ -173,15 +171,6 @@ static const char kAudioTrack2[] = "audio2";
enum RTCCertificateGenerationMethod { ALREADY_GENERATED, DTLS_IDENTITY_STORE };
-// Add some extra |newlines| to the |message| after |line|.
-static void InjectAfter(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- const std::string tmp = line + newlines;
- rtc::replace_substrs(line.c_str(), line.length(), tmp.c_str(), tmp.length(),
- message);
-}
-
class MockIceObserver : public webrtc::IceObserver {
public:
MockIceObserver()
@@ -428,7 +417,7 @@ class WebRtcSessionTest
observer_.ice_gathering_state_);
EXPECT_TRUE(session_->Initialize(options_, constraints_.get(),
- dtls_identity_store.Pass(),
+ std::move(dtls_identity_store),
rtc_configuration));
session_->set_metrics_observer(metrics_observer_);
}
@@ -479,7 +468,7 @@ class WebRtcSessionTest
} else {
RTC_CHECK(false);
}
- Init(dtls_identity_store.Pass(), configuration);
+ Init(std::move(dtls_identity_store), configuration);
}
// Init with DTLS with a store that will fail to generate a certificate.
@@ -488,7 +477,7 @@ class WebRtcSessionTest
new FakeDtlsIdentityStore());
dtls_identity_store->set_should_fail(true);
PeerConnectionInterface::RTCConfiguration configuration;
- Init(dtls_identity_store.Pass(), configuration);
+ Init(std::move(dtls_identity_store), configuration);
}
void InitWithDtmfCodec() {
@@ -726,9 +715,9 @@ class WebRtcSessionTest
std::string identity_name = "WebRTC" +
rtc::ToString(rtc::CreateRandomId());
// Confirmed to work with KT_RSA and KT_ECDSA.
- tdesc_factory_->set_certificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(rtc::SSLIdentity::Generate(
- identity_name, rtc::KT_DEFAULT)).Pass()));
+ tdesc_factory_->set_certificate(
+ rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::SSLIdentity>(
+ rtc::SSLIdentity::Generate(identity_name, rtc::KT_DEFAULT))));
tdesc_factory_->set_secure(cricket::SEC_REQUIRED);
}
@@ -789,7 +778,7 @@ class WebRtcSessionTest
ASSERT_TRUE(video_channel_ != NULL);
const cricket::VideoOptions& video_options = video_channel_->options();
EXPECT_EQ(value_expected,
- video_options.unsignalled_recv_stream_limit.GetWithDefaultIfUnset(-1));
+ video_options.unsignalled_recv_stream_limit.value_or(-1));
}
void CompareIceUfragAndPassword(const cricket::SessionDescription* desc1,
@@ -1442,12 +1431,12 @@ class WebRtcSessionTest
}
void ConfigureAllocatorWithTurn() {
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
+ cricket::RelayServerConfig turn_server(cricket::RELAY_TURN);
cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
- relay_server.credentials = credentials;
- relay_server.ports.push_back(cricket::ProtocolAddress(
- kTurnUdpIntAddr, cricket::PROTO_UDP, false));
- allocator_->AddRelay(relay_server);
+ turn_server.credentials = credentials;
+ turn_server.ports.push_back(
+ cricket::ProtocolAddress(kTurnUdpIntAddr, cricket::PROTO_UDP, false));
+ allocator_->AddTurnServer(turn_server);
allocator_->set_step_delay(cricket::kMinimumStepDelay);
allocator_->set_flags(cricket::PORTALLOCATOR_DISABLE_TCP);
}
@@ -1968,6 +1957,67 @@ TEST_P(WebRtcSessionTest, TestCreateAnswerReceiveOfferWithoutEncryption) {
SetLocalDescriptionWithoutError(answer);
}
+// Test that we can create and set an answer correctly when different
+// SSL roles have been negotiated for different transports.
+// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=4525
+TEST_P(WebRtcSessionTest, TestCreateAnswerWithDifferentSslRoles) {
+ SendAudioVideoStream1();
+ InitWithDtls(GetParam());
+ SetFactoryDtlsSrtp();
+
+ SessionDescriptionInterface* offer = CreateOffer();
+ SetLocalDescriptionWithoutError(offer);
+
+ cricket::MediaSessionOptions options;
+ options.recv_video = true;
+
+ // First, negotiate different SSL roles.
+ SessionDescriptionInterface* answer =
+ CreateRemoteAnswer(offer, options, cricket::SEC_DISABLED);
+ TransportInfo* audio_transport_info =
+ answer->description()->GetTransportInfoByName("audio");
+ audio_transport_info->description.connection_role =
+ cricket::CONNECTIONROLE_ACTIVE;
+ TransportInfo* video_transport_info =
+ answer->description()->GetTransportInfoByName("video");
+ video_transport_info->description.connection_role =
+ cricket::CONNECTIONROLE_PASSIVE;
+ SetRemoteDescriptionWithoutError(answer);
+
+ // Now create an offer in the reverse direction, and ensure the initial
+ // offerer responds with an answer with correct SSL roles.
+ offer = CreateRemoteOfferWithVersion(options, cricket::SEC_DISABLED,
+ kSessionVersion,
+ session_->remote_description());
+ SetRemoteDescriptionWithoutError(offer);
+
+ answer = CreateAnswer(nullptr);
+ audio_transport_info = answer->description()->GetTransportInfoByName("audio");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ audio_transport_info->description.connection_role);
+ video_transport_info = answer->description()->GetTransportInfoByName("video");
+ EXPECT_EQ(cricket::CONNECTIONROLE_ACTIVE,
+ video_transport_info->description.connection_role);
+ SetLocalDescriptionWithoutError(answer);
+
+ // Lastly, start BUNDLE-ing on "audio", expecting that the "passive" role of
+ // audio is transferred over to video in the answer that completes the BUNDLE
+ // negotiation.
+ options.bundle_enabled = true;
+ offer = CreateRemoteOfferWithVersion(options, cricket::SEC_DISABLED,
+ kSessionVersion,
+ session_->remote_description());
+ SetRemoteDescriptionWithoutError(offer);
+ answer = CreateAnswer(nullptr);
+ audio_transport_info = answer->description()->GetTransportInfoByName("audio");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ audio_transport_info->description.connection_role);
+ video_transport_info = answer->description()->GetTransportInfoByName("video");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ video_transport_info->description.connection_role);
+ SetLocalDescriptionWithoutError(answer);
+}
+
TEST_F(WebRtcSessionTest, TestSetLocalOfferTwice) {
Init();
SendNothing();
@@ -2809,10 +2859,9 @@ TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionInvalidIceCredentials) {
EXPECT_FALSE(session_->SetRemoteDescription(modified_offer, &error));
}
-// Test that if the remote description indicates the peer requested ICE restart
-// (via a new ufrag or pwd), the old ICE candidates are not copied,
-// and vice versa.
-TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionWithIceRestart) {
+// Test that if the remote offer indicates the peer requested ICE restart (via
+// a new ufrag or pwd), the old ICE candidates are not copied, and vice versa.
+TEST_F(WebRtcSessionTest, TestSetRemoteOfferWithIceRestart) {
Init();
scoped_ptr<SessionDescriptionInterface> offer(CreateRemoteOffer());
@@ -2866,6 +2915,64 @@ TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionWithIceRestart) {
EXPECT_EQ(0, session_->remote_description()->candidates(0)->count());
}
+// Test that if the remote answer indicates the peer requested ICE restart (via
+// a new ufrag or pwd), the old ICE candidates are not copied, and vice versa.
+TEST_F(WebRtcSessionTest, TestSetRemoteAnswerWithIceRestart) {
+ Init();
+ SessionDescriptionInterface* offer = CreateOffer();
+ SetLocalDescriptionWithoutError(offer);
+ scoped_ptr<SessionDescriptionInterface> answer(CreateRemoteAnswer(offer));
+
+ // Create the first answer.
+ std::string sdp;
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012345",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer1 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ cricket::Candidate candidate1(1, "udp", rtc::SocketAddress("1.1.1.1", 5000),
+ 0, "", "", "relay", 0, "");
+ JsepIceCandidate ice_candidate1(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer1->AddCandidate(&ice_candidate1));
+ SetRemoteDescriptionWithoutError(answer1);
+ EXPECT_EQ(1, session_->remote_description()->candidates(0)->count());
+
+ // The second answer has the same ufrag and pwd but different address.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012345",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer2 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ candidate1.set_address(rtc::SocketAddress("1.1.1.1", 6000));
+ JsepIceCandidate ice_candidate2(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer2->AddCandidate(&ice_candidate2));
+ SetRemoteDescriptionWithoutError(answer2);
+ EXPECT_EQ(2, session_->remote_description()->candidates(0)->count());
+
+ // The third answer has a different ufrag and different address.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012333",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer3 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ candidate1.set_address(rtc::SocketAddress("1.1.1.1", 7000));
+ JsepIceCandidate ice_candidate3(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer3->AddCandidate(&ice_candidate3));
+ SetRemoteDescriptionWithoutError(answer3);
+ EXPECT_EQ(1, session_->remote_description()->candidates(0)->count());
+
+ // The fourth answer has no candidate but a different ufrag/pwd.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012444",
+ "abcdefghijklmnopqrstuvyz", &sdp);
+ SessionDescriptionInterface* offer4 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ SetRemoteDescriptionWithoutError(offer4);
+ EXPECT_EQ(0, session_->remote_description()->candidates(0)->count());
+}
+
// Test that candidates sent to the "video" transport do not get pushed down to
// the "audio" transport channel when bundling.
TEST_F(WebRtcSessionTest, TestIgnoreCandidatesForUnusedTransportWhenBundling) {
@@ -3297,20 +3404,18 @@ TEST_F(WebRtcSessionTest, SetAudioSend) {
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
cricket::AudioOptions options;
- options.echo_cancellation.Set(true);
+ options.echo_cancellation = rtc::Optional<bool>(true);
rtc::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
session_->SetAudioSend(send_ssrc, false, options, renderer.get());
EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
- EXPECT_FALSE(channel->options().echo_cancellation.IsSet());
+ EXPECT_EQ(rtc::Optional<bool>(), channel->options().echo_cancellation);
EXPECT_TRUE(renderer->sink() != NULL);
// This will trigger SetSink(NULL) to the |renderer|.
session_->SetAudioSend(send_ssrc, true, options, NULL);
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
- bool value;
- EXPECT_TRUE(channel->options().echo_cancellation.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(true), channel->options().echo_cancellation);
EXPECT_TRUE(renderer->sink() == NULL);
}
@@ -3387,7 +3492,6 @@ TEST_F(WebRtcSessionTest, InsertDtmf) {
EXPECT_EQ(0U, channel->dtmf_info_queue().size());
// Insert DTMF
- const int expected_flags = DF_SEND;
const int expected_duration = 90;
session_->InsertDtmf(kAudioTrack1, 0, expected_duration);
session_->InsertDtmf(kAudioTrack1, 1, expected_duration);
@@ -3397,11 +3501,11 @@ TEST_F(WebRtcSessionTest, InsertDtmf) {
ASSERT_EQ(3U, channel->dtmf_info_queue().size());
const uint32_t send_ssrc = channel->send_streams()[0].first_ssrc();
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[0], send_ssrc, 0,
- expected_duration, expected_flags));
+ expected_duration));
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[1], send_ssrc, 1,
- expected_duration, expected_flags));
+ expected_duration));
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[2], send_ssrc, 2,
- expected_duration, expected_flags));
+ expected_duration));
}
// This test verifies the |initial_offerer| flag when session initiates the
@@ -3582,7 +3686,9 @@ TEST_F(WebRtcSessionTest, TestCreateAnswerWithNewUfragAndPassword) {
SetLocalDescriptionWithoutError(answer.release());
// Receive an offer with new ufrag and password.
- options.transport_options.ice_restart = true;
+ options.audio_transport_options.ice_restart = true;
+ options.video_transport_options.ice_restart = true;
+ options.data_transport_options.ice_restart = true;
rtc::scoped_ptr<JsepSessionDescription> updated_offer1(
CreateRemoteOffer(options, session_->remote_description()));
SetRemoteDescriptionWithoutError(updated_offer1.release());
@@ -3613,7 +3719,9 @@ TEST_F(WebRtcSessionTest, TestCreateAnswerWithOldUfragAndPassword) {
SetLocalDescriptionWithoutError(answer.release());
// Receive an offer without changed ufrag or password.
- options.transport_options.ice_restart = false;
+ options.audio_transport_options.ice_restart = false;
+ options.video_transport_options.ice_restart = false;
+ options.data_transport_options.ice_restart = false;
rtc::scoped_ptr<JsepSessionDescription> updated_offer2(
CreateRemoteOffer(options, session_->remote_description()));
SetRemoteDescriptionWithoutError(updated_offer2.release());
@@ -3993,10 +4101,8 @@ TEST_F(WebRtcSessionTest, TestDscpConstraint) {
ASSERT_TRUE(voice_channel_ != NULL);
const cricket::AudioOptions& audio_options = voice_channel_->options();
const cricket::VideoOptions& video_options = video_channel_->options();
- EXPECT_TRUE(audio_options.dscp.IsSet());
- EXPECT_TRUE(audio_options.dscp.GetWithDefaultIfUnset(false));
- EXPECT_TRUE(video_options.dscp.IsSet());
- EXPECT_TRUE(video_options.dscp.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), audio_options.dscp);
+ EXPECT_EQ(rtc::Optional<bool>(true), video_options.dscp);
}
TEST_F(WebRtcSessionTest, TestSuspendBelowMinBitrateConstraint) {
@@ -4014,8 +4120,7 @@ TEST_F(WebRtcSessionTest, TestSuspendBelowMinBitrateConstraint) {
ASSERT_TRUE(video_channel_ != NULL);
const cricket::VideoOptions& video_options = video_channel_->options();
- EXPECT_TRUE(
- video_options.suspend_below_min_bitrate.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), video_options.suspend_below_min_bitrate);
}
TEST_F(WebRtcSessionTest, TestNumUnsignalledRecvStreamsConstraint) {
@@ -4042,8 +4147,7 @@ TEST_F(WebRtcSessionTest, TestCombinedAudioVideoBweConstraint) {
ASSERT_TRUE(voice_channel_ != NULL);
const cricket::AudioOptions& audio_options = voice_channel_->options();
- EXPECT_TRUE(
- audio_options.combined_audio_video_bwe.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), audio_options.combined_audio_video_bwe);
}
// Tests that we can renegotiate new media content with ICE candidates in the
diff --git a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
index 25965af79d..f08b77eb40 100644
--- a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
+++ b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/dtlsidentitystore.h"
#include "talk/app/webrtc/jsep.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
@@ -99,12 +101,12 @@ void WebRtcIdentityRequestObserver::OnSuccess(
der_private_key.length());
rtc::scoped_ptr<rtc::SSLIdentity> identity(
rtc::SSLIdentity::FromPEMStrings(pem_key, pem_cert));
- SignalCertificateReady(rtc::RTCCertificate::Create(identity.Pass()));
+ SignalCertificateReady(rtc::RTCCertificate::Create(std::move(identity)));
}
void WebRtcIdentityRequestObserver::OnSuccess(
rtc::scoped_ptr<rtc::SSLIdentity> identity) {
- SignalCertificateReady(rtc::RTCCertificate::Create(identity.Pass()));
+ SignalCertificateReady(rtc::RTCCertificate::Create(std::move(identity)));
}
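
The Pass()-to-std::move changes here follow one mechanical rule: rtc::scoped_ptr's deprecated Pass() becomes a cast to rvalue via std::move. A minimal sketch of the idiom; TakeIdentity() is a hypothetical consumer:

#include <utility>

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/sslidentity.h"

// Hypothetical sink that takes ownership of the identity.
void TakeIdentity(rtc::scoped_ptr<rtc::SSLIdentity> identity);

void HandIdentityOver(rtc::scoped_ptr<rtc::SSLIdentity> identity) {
  // Was: TakeIdentity(identity.Pass());
  // std::move turns |identity| into an rvalue, so ownership transfers via
  // the move constructor; |identity| is null after the call.
  TakeIdentity(std::move(identity));
}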
// static
@@ -143,7 +145,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory(
// to just use a random number as session id and start version from
// |kInitSessionVersion|.
session_version_(kInitSessionVersion),
- dtls_identity_store_(dtls_identity_store.Pass()),
+ dtls_identity_store_(std::move(dtls_identity_store)),
identity_request_observer_(identity_request_observer),
session_(session),
session_id_(session_id),
@@ -177,7 +179,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory(
: WebRtcSessionDescriptionFactory(
signaling_thread,
channel_manager,
- dtls_identity_store.Pass(),
+ std::move(dtls_identity_store),
new rtc::RefCountedObject<WebRtcIdentityRequestObserver>(),
session,
session_id,
@@ -390,7 +392,9 @@ void WebRtcSessionDescriptionFactory::InternalCreateOffer(
return;
}
if (session_->local_description() &&
- !request.options.transport_options.ice_restart) {
+ !request.options.audio_transport_options.ice_restart &&
+ !request.options.video_transport_options.ice_restart &&
+ !request.options.data_transport_options.ice_restart) {
// Include all local ice candidates in the SessionDescription unless
// an ice restart has been requested.
CopyCandidatesFromSessionDescription(session_->local_description(), offer);
@@ -403,12 +407,25 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
// According to http://tools.ietf.org/html/rfc5245#section-9.2.1.1
// an answer should also contain new ice ufrag and password if an offer has
// been received with new ufrag and password.
- request.options.transport_options.ice_restart = session_->IceRestartPending();
+ request.options.audio_transport_options.ice_restart =
+ session_->IceRestartPending();
+ request.options.video_transport_options.ice_restart =
+ session_->IceRestartPending();
+ request.options.data_transport_options.ice_restart =
+ session_->IceRestartPending();
// We should pass current ssl role to the transport description factory, if
// there is already an existing ongoing session.
rtc::SSLRole ssl_role;
- if (session_->GetSslRole(&ssl_role)) {
- request.options.transport_options.prefer_passive_role =
+ if (session_->GetSslRole(session_->voice_channel(), &ssl_role)) {
+ request.options.audio_transport_options.prefer_passive_role =
+ (rtc::SSL_SERVER == ssl_role);
+ }
+ if (session_->GetSslRole(session_->video_channel(), &ssl_role)) {
+ request.options.video_transport_options.prefer_passive_role =
+ (rtc::SSL_SERVER == ssl_role);
+ }
+ if (session_->GetSslRole(session_->data_channel(), &ssl_role)) {
+ request.options.data_transport_options.prefer_passive_role =
(rtc::SSL_SERVER == ssl_role);
}
@@ -437,7 +454,9 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
return;
}
if (session_->local_description() &&
- !request.options.transport_options.ice_restart) {
+ !request.options.audio_transport_options.ice_restart &&
+ !request.options.video_transport_options.ice_restart &&
+ !request.options.data_transport_options.ice_restart) {
// Include all local ice candidates in the SessionDescription unless
// the remote peer has requested an ice restart.
CopyCandidatesFromSessionDescription(session_->local_description(), answer);
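
With transport_options now split per media type, a whole-session ICE restart means setting all three flags, and cached candidates may be reused only when none of them is set. A minimal sketch of the pattern, assuming cricket::MediaSessionOptions from talk/session/media/mediasession.h carries the audio/video/data TransportOptions fields used above:

#include "talk/session/media/mediasession.h"

// Request an ICE restart on every transport for the next offer.
void RequestFullIceRestart(cricket::MediaSessionOptions* options) {
  options->audio_transport_options.ice_restart = true;
  options->video_transport_options.ice_restart = true;
  options->data_transport_options.ice_restart = true;
}

// Candidates from the previous description may be copied only when no
// transport is restarting, mirroring InternalCreateOffer/Answer above.
bool CanCopyCandidates(const cricket::MediaSessionOptions& options) {
  return !options.audio_transport_options.ice_restart &&
         !options.video_transport_options.ice_restart &&
         !options.data_transport_options.ice_restart;
}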
diff --git a/talk/build/common.gypi b/talk/build/common.gypi
index 36a96c5c55..061b06ba70 100644
--- a/talk/build/common.gypi
+++ b/talk/build/common.gypi
@@ -41,7 +41,6 @@
],
# Disable these to not build components which can be externally provided.
'build_expat%': 1,
- 'build_icu%': 1,
'build_json%': 1,
'build_libsrtp%': 1,
'build_libyuv%': 1,
@@ -61,17 +60,9 @@
'../../webrtc',
],
'defines': [
- 'EXPAT_RELATIVE_PATH',
- 'FEATURE_ENABLE_VOICEMAIL',
- 'GTEST_RELATIVE_PATH',
- 'JSONCPP_RELATIVE_PATH',
- 'LOGGING=1',
'SRTP_RELATIVE_PATH',
# Feature selection
- 'FEATURE_ENABLE_SSL',
- 'FEATURE_ENABLE_VOICEMAIL',
- 'FEATURE_ENABLE_PSTN',
'HAVE_SCTP',
'HAVE_SRTP',
'HAVE_WEBRTC_VIDEO',
@@ -80,7 +71,6 @@
'conditions': [
['OS=="linux"', {
'defines': [
- 'LINUX',
'WEBRTC_LINUX',
],
# Remove Chromium's disabling of the -Wformat warning.
@@ -112,7 +102,6 @@
}],
['OS=="mac"', {
'defines': [
- 'OSX',
'WEBRTC_MAC',
],
}],
@@ -129,7 +118,6 @@
}],
['OS=="ios"', {
'defines': [
- 'IOS',
'WEBRTC_MAC',
'WEBRTC_IOS',
],
diff --git a/talk/build/merge_ios_libs.gyp b/talk/build/merge_ios_libs.gyp
index 0c7114da14..f7e4875eba 100644
--- a/talk/build/merge_ios_libs.gyp
+++ b/talk/build/merge_ios_libs.gyp
@@ -27,7 +27,7 @@
{
'includes': ['common.gypi',],
'conditions': [
- ['OS=="ios" or (OS=="mac" and mac_sdk>="10.8")', {
+ ['OS=="ios" or OS=="mac"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_objc_no_op',
diff --git a/talk/codereview.settings b/talk/codereview.settings
index 97bee14549..c441cc61bc 100644
--- a/talk/codereview.settings
+++ b/talk/codereview.settings
@@ -1,4 +1,5 @@
-Creating CLs from this location is not supported!
-Please create a full WebRTC checkout using 'fetch webrtc'
-or by cloning https://chromium.googlesource.com/external/webrtc
+Creating CLs from this location is not supported! Please make sure the current
+working directory is the parent directory of this directory.
+If you're working with a Chromium checkout, you'll have to create a full WebRTC
+checkout and upload a CL from that. See http://www.webrtc.org for instructions.
diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp
index 81d723a0d9..6e0f8a3424 100755
--- a/talk/libjingle.gyp
+++ b/talk/libjingle.gyp
@@ -43,8 +43,8 @@
['OS=="linux" or OS=="android"', {
'targets': [
{
- 'target_name': 'libjingle_peerconnection_so',
- 'type': 'shared_library',
+ 'target_name': 'libjingle_peerconnection_jni',
+ 'type': 'static_library',
'dependencies': [
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:field_trial_default',
'libjingle_peerconnection',
@@ -62,11 +62,55 @@
'<(libyuv_dir)/include',
],
'conditions': [
- ['build_icu==1', {
+ ['OS=="linux"', {
+ 'include_dirs': [
+ '<(java_home)/include',
+ '<(java_home)/include/linux',
+ ],
+ }],
+ ['build_json==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
+ ],
+ 'export_dependent_settings': [
+ '<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
],
}],
+ ['OS=="android"', {
+ 'sources': [
+ 'app/webrtc/androidvideocapturer.cc',
+ 'app/webrtc/androidvideocapturer.h',
+ 'app/webrtc/java/jni/androidmediacodeccommon.h',
+ 'app/webrtc/java/jni/androidmediadecoder_jni.cc',
+ 'app/webrtc/java/jni/androidmediadecoder_jni.h',
+ 'app/webrtc/java/jni/androidmediaencoder_jni.cc',
+ 'app/webrtc/java/jni/androidmediaencoder_jni.h',
+ 'app/webrtc/java/jni/androidnetworkmonitor_jni.cc',
+ 'app/webrtc/java/jni/androidnetworkmonitor_jni.h',
+ 'app/webrtc/java/jni/androidvideocapturer_jni.cc',
+ 'app/webrtc/java/jni/androidvideocapturer_jni.h',
+ 'app/webrtc/java/jni/surfacetexturehelper_jni.cc',
+ 'app/webrtc/java/jni/surfacetexturehelper_jni.h',
+ ]
+ }],
+ ],
+ },
+ {
+ 'target_name': 'libjingle_peerconnection_so',
+ 'type': 'shared_library',
+ 'dependencies': [
+ 'libjingle_peerconnection',
+ 'libjingle_peerconnection_jni',
+ ],
+ 'sources': [
+ 'app/webrtc/java/jni/jni_onload.cc',
+ ],
+ 'variables': {
+ # This library uses native JNI exports; tell GYP so that the
+ # required symbols will be kept.
+ 'use_native_jni_exports': 1,
+ },
+ 'conditions': [
['OS=="linux"', {
'defines': [
'HAVE_GTK',
@@ -86,30 +130,6 @@
}],
],
}],
- ['OS=="android"', {
- 'sources': [
- 'app/webrtc/java/jni/androidvideocapturer_jni.cc',
- 'app/webrtc/java/jni/androidvideocapturer_jni.h',
- ],
- 'variables': {
- # This library uses native JNI exports; tell GYP so that the
- # required symbols will be kept.
- 'use_native_jni_exports': 1,
- },
- }],
- ['OS=="android" and build_with_chromium==0', {
- 'sources': [
- 'app/webrtc/java/jni/androidmediacodeccommon.h',
- 'app/webrtc/java/jni/androidmediadecoder_jni.cc',
- 'app/webrtc/java/jni/androidmediadecoder_jni.h',
- 'app/webrtc/java/jni/androidmediaencoder_jni.cc',
- 'app/webrtc/java/jni/androidmediaencoder_jni.h',
- 'app/webrtc/java/jni/androidnetworkmonitor_jni.cc',
- 'app/webrtc/java/jni/androidnetworkmonitor_jni.h',
- 'app/webrtc/java/jni/surfacetexturehelper_jni.cc',
- 'app/webrtc/java/jni/surfacetexturehelper_jni.h',
- ]
- }],
],
},
{
@@ -154,6 +174,8 @@
'app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java',
'app/webrtc/java/android/org/webrtc/CameraEnumerator.java',
'app/webrtc/java/android/org/webrtc/EglBase.java',
+ 'app/webrtc/java/android/org/webrtc/EglBase10.java',
+ 'app/webrtc/java/android/org/webrtc/EglBase14.java',
'app/webrtc/java/android/org/webrtc/GlRectDrawer.java',
'app/webrtc/java/android/org/webrtc/GlShader.java',
'app/webrtc/java/android/org/webrtc/GlUtil.java',
@@ -232,6 +254,12 @@
'libjingle_peerconnection_so',
],
'variables': {
+ # Designate as Chromium code and point to our lint settings to
+ # enable linting of the WebRTC code (this is the only way to make
+ # lint_action invoke the Android linter).
+ 'android_manifest_path': '<(webrtc_root)/build/android/AndroidManifest.xml',
+ 'suppressions_file': '<(webrtc_root)/build/android/suppressions.xml',
+ 'chromium_code': 1,
'java_in_dir': 'app/webrtc/java',
'webrtc_base_dir': '<(webrtc_root)/base',
'webrtc_modules_dir': '<(webrtc_root)/modules',
@@ -246,7 +274,7 @@
}, # libjingle_peerconnection_java
]
}],
- ['OS=="ios" or (OS=="mac" and target_arch!="ia32" and mac_sdk>="10.7")', {
+ ['OS=="ios" or (OS=="mac" and target_arch!="ia32")', {
# The >= 10.7 above is required for ARC.
'targets': [
{
@@ -354,6 +382,9 @@
# common.gypi enables this for mac but we want this to be disabled
# like it is for ios.
'CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS': 'NO',
+ # Disabled because it fails when compiled with -Wall; see
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5397
+ 'WARNING_CFLAGS': ['-Wno-unused-property-ivar'],
},
'conditions': [
['OS=="ios"', {
@@ -366,6 +397,9 @@
'app/webrtc/objc/public/RTCEAGLVideoView.h',
'app/webrtc/objc/public/RTCAVFoundationVideoSource.h',
],
+ 'dependencies': [
+ '<(webrtc_root)/base/base.gyp:rtc_base_objc',
+ ],
'link_settings': {
'xcode_settings': {
'OTHER_LDFLAGS': [
@@ -534,7 +568,7 @@
'include_dirs': [
# TODO(jiayl): move this into the direct_dependent_settings of
# usrsctp.gyp.
- '<(DEPTH)/third_party/usrsctp',
+ '<(DEPTH)/third_party/usrsctp/usrsctplib',
],
'dependencies': [
'<(DEPTH)/third_party/usrsctp/usrsctp.gyp:usrsctplib',
@@ -678,10 +712,16 @@
'include_dirs': [
'<(DEPTH)/testing/gtest/include',
],
+ 'include_dirs!': [
+ '<(DEPTH)/webrtc',
+ ],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/testing/gtest/include',
],
+ 'include_dirs!': [
+ '<(DEPTH)/webrtc',
+ ],
},
'sources': [
'session/media/audiomonitor.cc',
@@ -725,7 +765,6 @@
'app/webrtc/dtmfsender.cc',
'app/webrtc/dtmfsender.h',
'app/webrtc/dtmfsenderinterface.h',
- 'app/webrtc/fakeportallocatorfactory.h',
'app/webrtc/jsep.h',
'app/webrtc/jsepicecandidate.cc',
'app/webrtc/jsepicecandidate.h',
@@ -740,10 +779,10 @@
'app/webrtc/mediastream.cc',
'app/webrtc/mediastream.h',
'app/webrtc/mediastreaminterface.h',
+ 'app/webrtc/mediastreamobserver.cc',
+ 'app/webrtc/mediastreamobserver.h',
'app/webrtc/mediastreamprovider.h',
'app/webrtc/mediastreamproxy.h',
- 'app/webrtc/mediastreamsignaling.cc',
- 'app/webrtc/mediastreamsignaling.h',
'app/webrtc/mediastreamtrack.h',
'app/webrtc/mediastreamtrackproxy.h',
'app/webrtc/notifier.h',
@@ -754,8 +793,6 @@
'app/webrtc/peerconnectionfactoryproxy.h',
'app/webrtc/peerconnectioninterface.h',
'app/webrtc/peerconnectionproxy.h',
- 'app/webrtc/portallocatorfactory.cc',
- 'app/webrtc/portallocatorfactory.h',
'app/webrtc/proxy.h',
'app/webrtc/remoteaudiosource.cc',
'app/webrtc/remoteaudiosource.h',
@@ -789,14 +826,6 @@
'app/webrtc/webrtcsessiondescriptionfactory.cc',
'app/webrtc/webrtcsessiondescriptionfactory.h',
],
- 'conditions': [
- ['OS=="android" and build_with_chromium==0', {
- 'sources': [
- 'app/webrtc/androidvideocapturer.h',
- 'app/webrtc/androidvideocapturer.cc',
- ],
- }],
- ],
}, # target libjingle_peerconnection
],
}
diff --git a/talk/libjingle_tests.gyp b/talk/libjingle_tests.gyp
index 41b38b345d..1dc3649186 100755
--- a/talk/libjingle_tests.gyp
+++ b/talk/libjingle_tests.gyp
@@ -91,15 +91,15 @@
'media/base/videocapturer_unittest.cc',
'media/base/videocommon_unittest.cc',
'media/base/videoengine_unittest.h',
+ 'media/base/videoframe_unittest.h',
'media/devices/dummydevicemanager_unittest.cc',
'media/devices/filevideocapturer_unittest.cc',
'media/sctp/sctpdataengine_unittest.cc',
'media/webrtc/simulcast_unittest.cc',
+ 'media/webrtc/webrtcmediaengine_unittest.cc',
'media/webrtc/webrtcvideocapturer_unittest.cc',
- 'media/base/videoframe_unittest.h',
'media/webrtc/webrtcvideoframe_unittest.cc',
'media/webrtc/webrtcvideoframefactory_unittest.cc',
-
# Disabled because some tests fail.
# TODO(ronghuawu): Reenable these tests.
# 'media/devices/devicemanager_unittest.cc',
@@ -128,6 +128,17 @@
},
},
}],
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
# Disable warnings that cause errors when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-unused-function',
+ ],
+ },
+ },
+ },],
['OS=="ios"', {
'sources!': [
'media/sctp/sctpdataengine_unittest.cc',
@@ -176,7 +187,7 @@
}, # target libjingle_p2p_unittest
{
'target_name': 'libjingle_peerconnection_unittest',
- 'type': 'executable',
+ 'type': '<(gtest_target_type)',
'dependencies': [
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
@@ -207,7 +218,6 @@
# 'app/webrtc/peerconnectionproxy_unittest.cc',
'app/webrtc/remotevideocapturer_unittest.cc',
'app/webrtc/rtpsenderreceiver_unittest.cc',
- 'app/webrtc/sctputils.cc',
'app/webrtc/statscollector_unittest.cc',
'app/webrtc/test/fakeaudiocapturemodule.cc',
'app/webrtc/test/fakeaudiocapturemodule.h',
@@ -215,7 +225,6 @@
'app/webrtc/test/fakeconstraints.h',
'app/webrtc/test/fakedatachannelprovider.h',
'app/webrtc/test/fakedtlsidentitystore.h',
- 'app/webrtc/test/fakemediastreamsignaling.h',
'app/webrtc/test/fakeperiodicvideocapturer.h',
'app/webrtc/test/fakevideotrackrenderer.h',
'app/webrtc/test/mockpeerconnectionobservers.h',
@@ -229,17 +238,25 @@
],
'conditions': [
['OS=="android"', {
- # We want gmock features that use tr1::tuple, but we currently
- # don't support the variadic templates used by libstdc++'s
- # implementation. gmock supports this scenario by providing its
- # own implementation but we must opt in to it.
- 'defines': [
- 'GTEST_USE_OWN_TR1_TUPLE=1',
- # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
- # gmock r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
- # automatically on android, so it has to be set explicitly here.
- 'GTEST_HAS_TR1_TUPLE=1',
- ],
+ 'sources': [
+ 'app/webrtc/test/androidtestinitializer.cc',
+ 'app/webrtc/test/androidtestinitializer.h',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
+ 'libjingle.gyp:libjingle_peerconnection_jni',
+ ],
+ }],
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
# Disable warnings that cause errors when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-unused-function',
+ ],
+ },
+ },
}],
],
}, # target libjingle_peerconnection_unittest
@@ -333,7 +350,7 @@
},
], # targets
}], # OS=="android"
- ['OS=="ios" or (OS=="mac" and target_arch!="ia32" and mac_sdk>="10.7")', {
+ ['OS=="ios" or (OS=="mac" and target_arch!="ia32")', {
# The >=10.7 above is required to make ARC link cleanly (e.g. as
# opposed to _compile_ cleanly, which the library under test
# does just fine on 10.6 too).
@@ -378,7 +395,7 @@
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:field_trial_default',
'<(DEPTH)/third_party/ocmock/ocmock.gyp:ocmock',
- '<(webrtc_root)/libjingle_examples.gyp:apprtc_signaling',
+ '<(webrtc_root)/webrtc_examples.gyp:apprtc_signaling',
],
'sources': [
'app/webrtc/objctests/mac/main.mm',
@@ -394,6 +411,17 @@
}, # target apprtc_signaling_gunit_test
],
}],
+ ['OS=="android"', {
+ 'targets': [
+ {
+ 'target_name': 'libjingle_peerconnection_unittest_apk_target',
+ 'type': 'none',
+ 'dependencies': [
+ '<(DEPTH)/webrtc/build/apk_tests.gyp:libjingle_peerconnection_unittest_apk',
+ ],
+ },
+ ],
+ }],
['test_isolation_mode != "noop"', {
'targets': [
{
diff --git a/talk/media/base/audiorenderer.h b/talk/media/base/audiorenderer.h
index 229c36e8b1..a42cd7de8f 100644
--- a/talk/media/base/audiorenderer.h
+++ b/talk/media/base/audiorenderer.h
@@ -41,7 +41,7 @@ class AudioRenderer {
virtual void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) = 0;
// Called when the AudioRenderer is going away.
diff --git a/talk/media/base/capturemanager_unittest.cc b/talk/media/base/capturemanager_unittest.cc
index e9903425b8..84086abae4 100644
--- a/talk/media/base/capturemanager_unittest.cc
+++ b/talk/media/base/capturemanager_unittest.cc
@@ -29,6 +29,7 @@
#include "talk/media/base/fakevideocapturer.h"
#include "talk/media/base/fakevideorenderer.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/sigslot.h"
@@ -57,7 +58,7 @@ class CaptureManagerTest : public ::testing::Test, public sigslot::has_slots<> {
}
void PopulateSupportedFormats() {
std::vector<cricket::VideoFormat> formats;
- for (int i = 0; i < ARRAY_SIZE(kCameraFormats); ++i) {
+ for (int i = 0; i < arraysize(kCameraFormats); ++i) {
formats.push_back(cricket::VideoFormat(kCameraFormats[i]));
}
video_capturer_.ResetSupportedFormats(formats);
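
arraysize() (webrtc/base/arraysize.h) replaces the ARRAY_SIZE macro with a type-safe, compile-time count: it is computed from the array's static type and refuses to compile for pointers, which sizeof-division silently accepts. A minimal sketch:

#include "webrtc/base/arraysize.h"

static const int kFormats[] = {480, 720, 1080};

size_t CountFormats() {
  // arraysize(kFormats) == 3, evaluated at compile time.
  // arraysize(ptr) would fail to compile for a pointer argument.
  return arraysize(kFormats);
}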
diff --git a/talk/media/base/codec.cc b/talk/media/base/codec.cc
index 5b747d1917..59708b37dd 100644
--- a/talk/media/base/codec.cc
+++ b/talk/media/base/codec.cc
@@ -163,13 +163,15 @@ void Codec::IntersectFeedbackParams(const Codec& other) {
feedback_params.Intersect(other.feedback_params);
}
-AudioCodec::AudioCodec(int pt,
- const std::string& nm,
- int cr,
- int br,
- int cs,
- int pr)
- : Codec(pt, nm, cr, pr), bitrate(br), channels(cs) {
+AudioCodec::AudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ int bitrate,
+ size_t channels,
+ int preference)
+ : Codec(id, name, clockrate, preference),
+ bitrate(bitrate),
+ channels(channels) {
}
AudioCodec::AudioCodec() : Codec(), bitrate(0), channels(0) {
@@ -219,20 +221,20 @@ std::string VideoCodec::ToString() const {
return os.str();
}
-VideoCodec::VideoCodec(int pt,
- const std::string& nm,
- int w,
- int h,
- int fr,
- int pr)
- : Codec(pt, nm, kVideoCodecClockrate, pr),
- width(w),
- height(h),
- framerate(fr) {
+VideoCodec::VideoCodec(int id,
+ const std::string& name,
+ int width,
+ int height,
+ int framerate,
+ int preference)
+ : Codec(id, name, kVideoCodecClockrate, preference),
+ width(width),
+ height(height),
+ framerate(framerate) {
}
-VideoCodec::VideoCodec(int pt, const std::string& nm)
- : Codec(pt, nm, kVideoCodecClockrate, 0),
+VideoCodec::VideoCodec(int id, const std::string& name)
+ : Codec(id, name, kVideoCodecClockrate, 0),
width(0),
height(0),
framerate(0) {
@@ -334,6 +336,11 @@ bool HasRemb(const VideoCodec& codec) {
FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
}
+bool HasTransportCc(const VideoCodec& codec) {
+ return codec.HasFeedbackParam(
+ FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
+}
+
bool CodecNamesEq(const std::string& name1, const std::string& name2) {
return _stricmp(name1.c_str(), name2.c_str()) == 0;
}
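
HasTransportCc() mirrors HasNack() and HasRemb(): each simply asks the codec for an empty-valued rtcp-fb parameter. A minimal sketch of advertising and detecting transport-cc on a codec, assuming Codec::AddFeedbackParam() as declared next to HasFeedbackParam() in codec.h:

#include "talk/media/base/codec.h"
#include "talk/media/base/constants.h"

bool OffersTransportCc() {
  cricket::VideoCodec vp8(100, "VP8", 640, 480, 30, 0);
  // Equivalent to signaling "a=rtcp-fb:100 transport-cc" for this codec.
  vp8.AddFeedbackParam(cricket::FeedbackParam(
      cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty));
  return cricket::HasTransportCc(vp8);  // true
}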
diff --git a/talk/media/base/codec.h b/talk/media/base/codec.h
index 3bb08e7c7a..da78e1c627 100644
--- a/talk/media/base/codec.h
+++ b/talk/media/base/codec.h
@@ -128,10 +128,15 @@ struct Codec {
struct AudioCodec : public Codec {
int bitrate;
- int channels;
+ size_t channels;
// Creates a codec with the given parameters.
- AudioCodec(int pt, const std::string& nm, int cr, int br, int cs, int pr);
+ AudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ int bitrate,
+ size_t channels,
+ int preference);
// Creates an empty codec.
AudioCodec();
AudioCodec(const AudioCodec& c);
@@ -161,8 +166,13 @@ struct VideoCodec : public Codec {
int framerate;
// Creates a codec with the given parameters.
- VideoCodec(int pt, const std::string& nm, int w, int h, int fr, int pr);
- VideoCodec(int pt, const std::string& nm);
+ VideoCodec(int id,
+ const std::string& name,
+ int width,
+ int height,
+ int framerate,
+ int preference);
+ VideoCodec(int id, const std::string& name);
// Creates an empty codec.
VideoCodec();
VideoCodec(const VideoCodec& c);
@@ -209,50 +219,6 @@ struct DataCodec : public Codec {
std::string ToString() const;
};
-struct VideoEncoderConfig {
- static const int kDefaultMaxThreads = -1;
- static const int kDefaultCpuProfile = -1;
-
- VideoEncoderConfig()
- : max_codec(),
- num_threads(kDefaultMaxThreads),
- cpu_profile(kDefaultCpuProfile) {
- }
-
- VideoEncoderConfig(const VideoCodec& c)
- : max_codec(c),
- num_threads(kDefaultMaxThreads),
- cpu_profile(kDefaultCpuProfile) {
- }
-
- VideoEncoderConfig(const VideoCodec& c, int t, int p)
- : max_codec(c),
- num_threads(t),
- cpu_profile(p) {
- }
-
- VideoEncoderConfig& operator=(const VideoEncoderConfig& config) {
- max_codec = config.max_codec;
- num_threads = config.num_threads;
- cpu_profile = config.cpu_profile;
- return *this;
- }
-
- bool operator==(const VideoEncoderConfig& config) const {
- return max_codec == config.max_codec &&
- num_threads == config.num_threads &&
- cpu_profile == config.cpu_profile;
- }
-
- bool operator!=(const VideoEncoderConfig& config) const {
- return !(*this == config);
- }
-
- VideoCodec max_codec;
- int num_threads;
- int cpu_profile;
-};
-
// Get the codec setting associated with |payload_type|. If there
// is no codec associated with that payload type it returns false.
template <class Codec>
@@ -271,6 +237,7 @@ bool FindCodecById(const std::vector<Codec>& codecs,
bool CodecNamesEq(const std::string& name1, const std::string& name2);
bool HasNack(const VideoCodec& codec);
bool HasRemb(const VideoCodec& codec);
+bool HasTransportCc(const VideoCodec& codec);
} // namespace cricket
diff --git a/talk/media/base/codec_unittest.cc b/talk/media/base/codec_unittest.cc
index 7bd3735a9b..b2aff507ea 100644
--- a/talk/media/base/codec_unittest.cc
+++ b/talk/media/base/codec_unittest.cc
@@ -33,7 +33,6 @@ using cricket::Codec;
using cricket::DataCodec;
using cricket::FeedbackParam;
using cricket::VideoCodec;
-using cricket::VideoEncoderConfig;
using cricket::kCodecParamAssociatedPayloadType;
using cricket::kCodecParamMaxBitrate;
using cricket::kCodecParamMinBitrate;
@@ -214,54 +213,6 @@ TEST_F(CodecTest, TestVideoCodecMatches) {
EXPECT_FALSE(c1.Matches(VideoCodec(95, "V", 640, 400, 15, 0)));
}
-TEST_F(CodecTest, TestVideoEncoderConfigOperators) {
- VideoEncoderConfig c1(VideoCodec(
- 96, "SVC", 320, 200, 30, 3), 1, 2);
- VideoEncoderConfig c2(VideoCodec(
- 95, "SVC", 320, 200, 30, 3), 1, 2);
- VideoEncoderConfig c3(VideoCodec(
- 96, "xxx", 320, 200, 30, 3), 1, 2);
- VideoEncoderConfig c4(VideoCodec(
- 96, "SVC", 120, 200, 30, 3), 1, 2);
- VideoEncoderConfig c5(VideoCodec(
- 96, "SVC", 320, 100, 30, 3), 1, 2);
- VideoEncoderConfig c6(VideoCodec(
- 96, "SVC", 320, 200, 10, 3), 1, 2);
- VideoEncoderConfig c7(VideoCodec(
- 96, "SVC", 320, 200, 30, 1), 1, 2);
- VideoEncoderConfig c8(VideoCodec(
- 96, "SVC", 320, 200, 30, 3), 0, 2);
- VideoEncoderConfig c9(VideoCodec(
- 96, "SVC", 320, 200, 30, 3), 1, 1);
- EXPECT_TRUE(c1 != c2);
- EXPECT_TRUE(c1 != c2);
- EXPECT_TRUE(c1 != c3);
- EXPECT_TRUE(c1 != c4);
- EXPECT_TRUE(c1 != c5);
- EXPECT_TRUE(c1 != c6);
- EXPECT_TRUE(c1 != c7);
- EXPECT_TRUE(c1 != c8);
- EXPECT_TRUE(c1 != c9);
-
- VideoEncoderConfig c10;
- VideoEncoderConfig c11(VideoCodec(
- 0, "", 0, 0, 0, 0));
- VideoEncoderConfig c12(VideoCodec(
- 0, "", 0, 0, 0, 0),
- VideoEncoderConfig::kDefaultMaxThreads,
- VideoEncoderConfig::kDefaultCpuProfile);
- VideoEncoderConfig c13 = c1;
- VideoEncoderConfig c14(VideoCodec(
- 0, "", 0, 0, 0, 0), 0, 0);
-
- EXPECT_TRUE(c11 == c10);
- EXPECT_TRUE(c12 == c10);
- EXPECT_TRUE(c13 != c10);
- EXPECT_TRUE(c13 == c1);
- EXPECT_TRUE(c14 != c11);
- EXPECT_TRUE(c14 != c12);
-}
-
TEST_F(CodecTest, TestDataCodecMatches) {
// Test a codec with a static payload type.
DataCodec c0(95, "D", 0);
diff --git a/talk/media/base/constants.cc b/talk/media/base/constants.cc
index 4063004968..2361be6f50 100644
--- a/talk/media/base/constants.cc
+++ b/talk/media/base/constants.cc
@@ -90,6 +90,7 @@ const int kPreferredUseInbandFec = 0;
const char kRtcpFbParamNack[] = "nack";
const char kRtcpFbNackParamPli[] = "pli";
const char kRtcpFbParamRemb[] = "goog-remb";
+const char kRtcpFbParamTransportCc[] = "transport-cc";
const char kRtcpFbParamCcm[] = "ccm";
const char kRtcpFbCcmParamFir[] = "fir";
diff --git a/talk/media/base/constants.h b/talk/media/base/constants.h
index b6a9e5681f..706a7bdc87 100644
--- a/talk/media/base/constants.h
+++ b/talk/media/base/constants.h
@@ -107,6 +107,9 @@ extern const char kRtcpFbNackParamPli[];
// rtcp-fb messages according to
// http://tools.ietf.org/html/draft-alvestrand-rmcat-remb-00
extern const char kRtcpFbParamRemb[];
+// rtcp-fb messages according to
+// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
+extern const char kRtcpFbParamTransportCc[];
// ccm submessages according to RFC 5104
extern const char kRtcpFbParamCcm[];
extern const char kRtcpFbCcmParamFir[];
diff --git a/talk/media/base/cryptoparams.h b/talk/media/base/cryptoparams.h
index 9dd1db5166..589953db3e 100644
--- a/talk/media/base/cryptoparams.h
+++ b/talk/media/base/cryptoparams.h
@@ -35,8 +35,10 @@ namespace cricket {
// Parameters for SRTP negotiation, as described in RFC 4568.
struct CryptoParams {
CryptoParams() : tag(0) {}
- CryptoParams(int t, const std::string& cs,
- const std::string& kp, const std::string& sp)
+ CryptoParams(int t,
+ const std::string& cs,
+ const std::string& kp,
+ const std::string& sp)
: tag(t), cipher_suite(cs), key_params(kp), session_params(sp) {}
bool Matches(const CryptoParams& params) const {
diff --git a/talk/media/base/executablehelpers.h b/talk/media/base/executablehelpers.h
index 401890f4e8..dd165c25da 100644
--- a/talk/media/base/executablehelpers.h
+++ b/talk/media/base/executablehelpers.h
@@ -28,7 +28,7 @@
#ifndef TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
#define TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
-#ifdef OSX
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
#include <mach-o/dyld.h>
#endif
@@ -62,15 +62,15 @@ inline Pathname GetExecutablePath() {
#else // UNICODE
rtc::Pathname path(exe_path_buffer);
#endif // UNICODE
-#elif defined(OSX) || defined(LINUX)
+#elif (defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)) || defined(WEBRTC_LINUX)
char exe_path_buffer[kMaxExePathSize];
-#ifdef OSX
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
uint32_t copied_length = kMaxExePathSize - 1;
if (_NSGetExecutablePath(exe_path_buffer, &copied_length) == -1) {
LOG(LS_ERROR) << "Buffer too small";
return rtc::Pathname();
}
-#elif defined LINUX
+#elif defined WEBRTC_LINUX
int32_t copied_length = kMaxExePathSize - 1;
const char* kProcExeFmt = "/proc/%d/exe";
char proc_exe_link[40];
@@ -86,11 +86,11 @@ inline Pathname GetExecutablePath() {
return rtc::Pathname();
}
exe_path_buffer[copied_length] = '\0';
-#endif // LINUX
+#endif // WEBRTC_LINUX
rtc::Pathname path(exe_path_buffer);
-#else // Android || IOS
+#else // Android || iOS
rtc::Pathname path;
-#endif // OSX || LINUX
+#endif // Mac || Linux
return path;
}
diff --git a/talk/media/base/fakemediaengine.h b/talk/media/base/fakemediaengine.h
index a6fa960dee..149704f92d 100644
--- a/talk/media/base/fakemediaengine.h
+++ b/talk/media/base/fakemediaengine.h
@@ -38,9 +38,10 @@
#include "talk/media/base/mediaengine.h"
#include "talk/media/base/rtputils.h"
#include "talk/media/base/streamparams.h"
-#include "webrtc/p2p/base/sessiondescription.h"
+#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/stringutils.h"
+#include "webrtc/p2p/base/sessiondescription.h"
namespace cricket {
@@ -229,15 +230,13 @@ template <class Base> class RtpHelper : public Base {
class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
public:
struct DtmfInfo {
- DtmfInfo(uint32_t ssrc, int event_code, int duration, int flags)
+ DtmfInfo(uint32_t ssrc, int event_code, int duration)
: ssrc(ssrc),
event_code(event_code),
- duration(duration),
- flags(flags) {}
+ duration(duration) {}
uint32_t ssrc;
int event_code;
int duration;
- int flags;
};
explicit FakeVoiceMediaChannel(FakeVoiceEngine* engine,
const AudioOptions& options)
@@ -321,9 +320,8 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
}
virtual bool InsertDtmf(uint32_t ssrc,
int event_code,
- int duration,
- int flags) {
- dtmf_info_queue_.push_back(DtmfInfo(ssrc, event_code, duration, flags));
+ int duration) {
+ dtmf_info_queue_.push_back(DtmfInfo(ssrc, event_code, duration));
return true;
}
@@ -349,6 +347,12 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
virtual bool GetStats(VoiceMediaInfo* info) { return false; }
+ virtual void SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ sink_ = std::move(sink);
+ }
+
private:
class VoiceChannelAudioSink : public AudioRenderer::Sink {
public:
@@ -364,7 +368,7 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) override {}
void OnClose() override { renderer_ = NULL; }
AudioRenderer* renderer() const { return renderer_; }
@@ -421,16 +425,16 @@ class FakeVoiceMediaChannel : public RtpHelper<VoiceMediaChannel> {
int time_since_last_typing_;
AudioOptions options_;
std::map<uint32_t, VoiceChannelAudioSink*> local_renderers_;
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink_;
};
// A helper function to compare the FakeVoiceMediaChannel::DtmfInfo.
inline bool CompareDtmfInfo(const FakeVoiceMediaChannel::DtmfInfo& info,
uint32_t ssrc,
int event_code,
- int duration,
- int flags) {
+ int duration) {
return (info.duration == duration && info.event_code == event_code &&
- info.flags == flags && info.ssrc == ssrc);
+ info.ssrc == ssrc);
}
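
With the flags argument gone, a DTMF test now records and compares just (ssrc, event, duration). A minimal sketch against the fake channel above:

#include "talk/media/base/fakemediaengine.h"
#include "webrtc/base/gunit.h"

// Queue two events on the fake channel and verify what it recorded.
void CheckDtmfQueue(cricket::FakeVoiceMediaChannel* channel,
                    uint32_t send_ssrc) {
  const int kDurationMs = 90;
  EXPECT_TRUE(channel->InsertDtmf(send_ssrc, 0, kDurationMs));  // event "0"
  EXPECT_TRUE(channel->InsertDtmf(send_ssrc, 1, kDurationMs));  // event "1"
  ASSERT_EQ(2U, channel->dtmf_info_queue().size());
  EXPECT_TRUE(cricket::CompareDtmfInfo(channel->dtmf_info_queue()[0],
                                       send_ssrc, 0, kDurationMs));
  EXPECT_TRUE(cricket::CompareDtmfInfo(channel->dtmf_info_queue()[1],
                                       send_ssrc, 1, kDurationMs));
}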
class FakeVideoMediaChannel : public RtpHelper<VideoMediaChannel> {
@@ -694,33 +698,23 @@ class FakeDataMediaChannel : public RtpHelper<DataMediaChannel> {
class FakeBaseEngine {
public:
FakeBaseEngine()
- : loglevel_(-1),
- options_changed_(false),
+ : options_changed_(false),
fail_create_channel_(false) {}
- void SetLogging(int level, const char* filter) {
- loglevel_ = level;
- logfilter_ = filter;
- }
-
void set_fail_create_channel(bool fail) { fail_create_channel_ = fail; }
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() const {
- return rtp_header_extensions_;
- }
+ RtpCapabilities GetCapabilities() const { return capabilities_; }
void set_rtp_header_extensions(
const std::vector<RtpHeaderExtension>& extensions) {
- rtp_header_extensions_ = extensions;
+ capabilities_.header_extensions = extensions;
}
protected:
- int loglevel_;
- std::string logfilter_;
// Flag used by optionsmessagehandler_unittest for checking whether any
// relevant setting has been updated.
// TODO(thaloun): Replace with explicit checks of before & after values.
bool options_changed_;
bool fail_create_channel_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
+ RtpCapabilities capabilities_;
};
class FakeVoiceEngine : public FakeBaseEngine {
@@ -733,14 +727,8 @@ class FakeVoiceEngine : public FakeBaseEngine {
}
bool Init(rtc::Thread* worker_thread) { return true; }
void Terminate() {}
- webrtc::VoiceEngine* GetVoE() { return nullptr; }
- AudioOptions GetOptions() const {
- return options_;
- }
- bool SetOptions(const AudioOptions& options) {
- options_ = options;
- options_changed_ = true;
- return true;
+ rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const {
+ return rtc::scoped_refptr<webrtc::AudioState>();
}
VoiceMediaChannel* CreateChannel(webrtc::Call* call,
@@ -763,21 +751,12 @@ class FakeVoiceEngine : public FakeBaseEngine {
const std::vector<AudioCodec>& codecs() { return codecs_; }
void SetCodecs(const std::vector<AudioCodec> codecs) { codecs_ = codecs; }
- bool SetDevices(const Device* in_device, const Device* out_device) {
- in_device_ = (in_device) ? in_device->name : "";
- out_device_ = (out_device) ? out_device->name : "";
- options_changed_ = true;
- return true;
- }
-
bool GetOutputVolume(int* level) {
*level = output_volume_;
return true;
}
-
bool SetOutputVolume(int level) {
output_volume_ = level;
- options_changed_ = true;
return true;
}
@@ -795,9 +774,6 @@ class FakeVoiceEngine : public FakeBaseEngine {
std::vector<FakeVoiceMediaChannel*> channels_;
std::vector<AudioCodec> codecs_;
int output_volume_;
- std::string in_device_;
- std::string out_device_;
- AudioOptions options_;
friend class FakeMediaEngine;
};
@@ -815,13 +791,6 @@ class FakeVideoEngine : public FakeBaseEngine {
options_changed_ = true;
return true;
}
- bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
- default_encoder_config_ = config;
- return true;
- }
- const VideoEncoderConfig& default_encoder_config() const {
- return default_encoder_config_;
- }
VideoMediaChannel* CreateChannel(webrtc::Call* call,
const VideoOptions& options) {
@@ -864,7 +833,6 @@ class FakeVideoEngine : public FakeBaseEngine {
private:
std::vector<FakeVideoMediaChannel*> channels_;
std::vector<VideoCodec> codecs_;
- VideoEncoderConfig default_encoder_config_;
std::string in_device_;
bool capture_;
VideoOptions options_;
@@ -875,10 +843,7 @@ class FakeVideoEngine : public FakeBaseEngine {
class FakeMediaEngine :
public CompositeMediaEngine<FakeVoiceEngine, FakeVideoEngine> {
public:
- FakeMediaEngine() {
- voice_ = FakeVoiceEngine();
- video_ = FakeVideoEngine();
- }
+ FakeMediaEngine() {}
virtual ~FakeMediaEngine() {}
void SetAudioCodecs(const std::vector<AudioCodec>& codecs) {
@@ -904,24 +869,13 @@ class FakeMediaEngine :
return video_.GetChannel(index);
}
- AudioOptions audio_options() const { return voice_.options_; }
int output_volume() const { return voice_.output_volume_; }
- const VideoEncoderConfig& default_video_encoder_config() const {
- return video_.default_encoder_config_;
- }
- const std::string& audio_in_device() const { return voice_.in_device_; }
- const std::string& audio_out_device() const { return voice_.out_device_; }
- int voice_loglevel() const { return voice_.loglevel_; }
- const std::string& voice_logfilter() const { return voice_.logfilter_; }
- int video_loglevel() const { return video_.loglevel_; }
- const std::string& video_logfilter() const { return video_.logfilter_; }
bool capture() const { return video_.capture_; }
bool options_changed() const {
- return voice_.options_changed_ || video_.options_changed_;
+ return video_.options_changed_;
}
void clear_options_changed() {
video_.options_changed_ = false;
- voice_.options_changed_ = false;
}
void set_fail_create_channel(bool fail) {
voice_.set_fail_create_channel(fail);
diff --git a/talk/media/base/mediachannel.h b/talk/media/base/mediachannel.h
index 14660847fa..f6fb77d8a6 100644
--- a/talk/media/base/mediachannel.h
+++ b/talk/media/base/mediachannel.h
@@ -38,6 +38,7 @@
#include "webrtc/base/buffer.h"
#include "webrtc/base/dscp.h"
#include "webrtc/base/logging.h"
+#include "webrtc/base/optional.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/socket.h"
#include "webrtc/base/window.h"
@@ -50,88 +51,30 @@ class RateLimiter;
class Timing;
}
+namespace webrtc {
+class AudioSinkInterface;
+}
+
namespace cricket {
class AudioRenderer;
-struct RtpHeader;
class ScreencastId;
-struct VideoFormat;
class VideoCapturer;
class VideoRenderer;
+struct RtpHeader;
+struct VideoFormat;
const int kMinRtpHeaderExtensionId = 1;
const int kMaxRtpHeaderExtensionId = 255;
const int kScreencastDefaultFps = 5;
-// Used in AudioOptions and VideoOptions to signify "unset" values.
-template <class T>
-class Settable {
- public:
- Settable() : set_(false), val_() {}
- explicit Settable(T val) : set_(true), val_(val) {}
-
- bool IsSet() const {
- return set_;
- }
-
- bool Get(T* out) const {
- *out = val_;
- return set_;
- }
-
- T GetWithDefaultIfUnset(const T& default_value) const {
- return set_ ? val_ : default_value;
- }
-
- void Set(T val) {
- set_ = true;
- val_ = val;
- }
-
- void Clear() {
- Set(T());
- set_ = false;
- }
-
- void SetFrom(const Settable<T>& o) {
- // Set this value based on the value of o, iff o is set. If this value is
- // set and o is unset, the current value will be unchanged.
- T val;
- if (o.Get(&val)) {
- Set(val);
- }
- }
-
- std::string ToString() const {
- return set_ ? rtc::ToString(val_) : "";
- }
-
- bool operator==(const Settable<T>& o) const {
- // Equal if both are unset with any value or both set with the same value.
- return (set_ == o.set_) && (!set_ || (val_ == o.val_));
- }
-
- bool operator!=(const Settable<T>& o) const {
- return !operator==(o);
- }
-
- protected:
- void InitializeValue(const T &val) {
- val_ = val;
- }
-
- private:
- bool set_;
- T val_;
-};
-
template <class T>
-static std::string ToStringIfSet(const char* key, const Settable<T>& val) {
+static std::string ToStringIfSet(const char* key, const rtc::Optional<T>& val) {
std::string str;
- if (val.IsSet()) {
+ if (val) {
str = key;
str += ": ";
- str += val.ToString();
+ str += rtc::ToString(*val);
str += ", ";
}
return str;
@@ -157,32 +100,32 @@ static std::string VectorToString(const std::vector<T>& vals) {
// but some things currently still use flags.
struct AudioOptions {
void SetAll(const AudioOptions& change) {
- echo_cancellation.SetFrom(change.echo_cancellation);
- auto_gain_control.SetFrom(change.auto_gain_control);
- noise_suppression.SetFrom(change.noise_suppression);
- highpass_filter.SetFrom(change.highpass_filter);
- stereo_swapping.SetFrom(change.stereo_swapping);
- audio_jitter_buffer_max_packets.SetFrom(
- change.audio_jitter_buffer_max_packets);
- audio_jitter_buffer_fast_accelerate.SetFrom(
- change.audio_jitter_buffer_fast_accelerate);
- typing_detection.SetFrom(change.typing_detection);
- aecm_generate_comfort_noise.SetFrom(change.aecm_generate_comfort_noise);
- conference_mode.SetFrom(change.conference_mode);
- adjust_agc_delta.SetFrom(change.adjust_agc_delta);
- experimental_agc.SetFrom(change.experimental_agc);
- extended_filter_aec.SetFrom(change.extended_filter_aec);
- delay_agnostic_aec.SetFrom(change.delay_agnostic_aec);
- experimental_ns.SetFrom(change.experimental_ns);
- aec_dump.SetFrom(change.aec_dump);
- tx_agc_target_dbov.SetFrom(change.tx_agc_target_dbov);
- tx_agc_digital_compression_gain.SetFrom(
- change.tx_agc_digital_compression_gain);
- tx_agc_limiter.SetFrom(change.tx_agc_limiter);
- recording_sample_rate.SetFrom(change.recording_sample_rate);
- playout_sample_rate.SetFrom(change.playout_sample_rate);
- dscp.SetFrom(change.dscp);
- combined_audio_video_bwe.SetFrom(change.combined_audio_video_bwe);
+ SetFrom(&echo_cancellation, change.echo_cancellation);
+ SetFrom(&auto_gain_control, change.auto_gain_control);
+ SetFrom(&noise_suppression, change.noise_suppression);
+ SetFrom(&highpass_filter, change.highpass_filter);
+ SetFrom(&stereo_swapping, change.stereo_swapping);
+ SetFrom(&audio_jitter_buffer_max_packets,
+ change.audio_jitter_buffer_max_packets);
+ SetFrom(&audio_jitter_buffer_fast_accelerate,
+ change.audio_jitter_buffer_fast_accelerate);
+ SetFrom(&typing_detection, change.typing_detection);
+ SetFrom(&aecm_generate_comfort_noise, change.aecm_generate_comfort_noise);
+ SetFrom(&conference_mode, change.conference_mode);
+ SetFrom(&adjust_agc_delta, change.adjust_agc_delta);
+ SetFrom(&experimental_agc, change.experimental_agc);
+ SetFrom(&extended_filter_aec, change.extended_filter_aec);
+ SetFrom(&delay_agnostic_aec, change.delay_agnostic_aec);
+ SetFrom(&experimental_ns, change.experimental_ns);
+ SetFrom(&aec_dump, change.aec_dump);
+ SetFrom(&tx_agc_target_dbov, change.tx_agc_target_dbov);
+ SetFrom(&tx_agc_digital_compression_gain,
+ change.tx_agc_digital_compression_gain);
+ SetFrom(&tx_agc_limiter, change.tx_agc_limiter);
+ SetFrom(&recording_sample_rate, change.recording_sample_rate);
+ SetFrom(&playout_sample_rate, change.playout_sample_rate);
+ SetFrom(&dscp, change.dscp);
+ SetFrom(&combined_audio_video_bwe, change.combined_audio_video_bwe);
}
bool operator==(const AudioOptions& o) const {
@@ -247,39 +190,47 @@ struct AudioOptions {
// Audio processing that attempts to filter away the output signal from
// later inbound pickup.
- Settable<bool> echo_cancellation;
+ rtc::Optional<bool> echo_cancellation;
// Audio processing to adjust the sensitivity of the local mic dynamically.
- Settable<bool> auto_gain_control;
+ rtc::Optional<bool> auto_gain_control;
// Audio processing to filter out background noise.
- Settable<bool> noise_suppression;
+ rtc::Optional<bool> noise_suppression;
// Audio processing to remove background noise of lower frequencies.
- Settable<bool> highpass_filter;
+ rtc::Optional<bool> highpass_filter;
// Audio processing to swap the left and right channels.
- Settable<bool> stereo_swapping;
+ rtc::Optional<bool> stereo_swapping;
// Audio receiver jitter buffer (NetEq) max capacity in number of packets.
- Settable<int> audio_jitter_buffer_max_packets;
+ rtc::Optional<int> audio_jitter_buffer_max_packets;
// Audio receiver jitter buffer (NetEq) fast accelerate mode.
- Settable<bool> audio_jitter_buffer_fast_accelerate;
+ rtc::Optional<bool> audio_jitter_buffer_fast_accelerate;
// Audio processing to detect typing.
- Settable<bool> typing_detection;
- Settable<bool> aecm_generate_comfort_noise;
- Settable<bool> conference_mode;
- Settable<int> adjust_agc_delta;
- Settable<bool> experimental_agc;
- Settable<bool> extended_filter_aec;
- Settable<bool> delay_agnostic_aec;
- Settable<bool> experimental_ns;
- Settable<bool> aec_dump;
+ rtc::Optional<bool> typing_detection;
+ rtc::Optional<bool> aecm_generate_comfort_noise;
+ rtc::Optional<bool> conference_mode;
+ rtc::Optional<int> adjust_agc_delta;
+ rtc::Optional<bool> experimental_agc;
+ rtc::Optional<bool> extended_filter_aec;
+ rtc::Optional<bool> delay_agnostic_aec;
+ rtc::Optional<bool> experimental_ns;
+ rtc::Optional<bool> aec_dump;
// Note that tx_agc_* only applies to non-experimental AGC.
- Settable<uint16_t> tx_agc_target_dbov;
- Settable<uint16_t> tx_agc_digital_compression_gain;
- Settable<bool> tx_agc_limiter;
- Settable<uint32_t> recording_sample_rate;
- Settable<uint32_t> playout_sample_rate;
+ rtc::Optional<uint16_t> tx_agc_target_dbov;
+ rtc::Optional<uint16_t> tx_agc_digital_compression_gain;
+ rtc::Optional<bool> tx_agc_limiter;
+ rtc::Optional<uint32_t> recording_sample_rate;
+ rtc::Optional<uint32_t> playout_sample_rate;
// Set DSCP value for packets sent from the audio channel.
- Settable<bool> dscp;
+ rtc::Optional<bool> dscp;
// Enable combined audio+bandwidth BWE.
- Settable<bool> combined_audio_video_bwe;
+ rtc::Optional<bool> combined_audio_video_bwe;
+
+ private:
+ template <typename T>
+ static void SetFrom(rtc::Optional<T>* s, const rtc::Optional<T>& o) {
+ if (o) {
+ *s = o;
+ }
+ }
};
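
The private SetFrom() helper is what gives SetAll() its overlay semantics: a set Optional in |change| overwrites, an unset one leaves the existing value alone, matching the old Settable::SetFrom contract. A minimal sketch:

#include "talk/media/base/mediachannel.h"

void MergeAudioOptionsSketch() {
  cricket::AudioOptions base;
  base.echo_cancellation = rtc::Optional<bool>(true);
  base.auto_gain_control = rtc::Optional<bool>(true);

  cricket::AudioOptions change;
  change.auto_gain_control = rtc::Optional<bool>(false);  // set: overrides
  // change.echo_cancellation is left unset: |base|'s value survives.

  base.SetAll(change);
  // Now: echo_cancellation == Optional<bool>(true),
  //      auto_gain_control == Optional<bool>(false).
}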
// Options that can be applied to a VideoMediaChannel or a VideoMediaEngine.
@@ -287,38 +238,41 @@ struct AudioOptions {
// We are moving all of the setting of options to structs like this,
// but some things currently still use flags.
struct VideoOptions {
- VideoOptions() {
- process_adaptation_threshhold.Set(kProcessCpuThreshold);
- system_low_adaptation_threshhold.Set(kLowSystemCpuThreshold);
- system_high_adaptation_threshhold.Set(kHighSystemCpuThreshold);
- unsignalled_recv_stream_limit.Set(kNumDefaultUnsignalledVideoRecvStreams);
- }
+ VideoOptions()
+ : process_adaptation_threshhold(kProcessCpuThreshold),
+ system_low_adaptation_threshhold(kLowSystemCpuThreshold),
+ system_high_adaptation_threshhold(kHighSystemCpuThreshold),
+ unsignalled_recv_stream_limit(kNumDefaultUnsignalledVideoRecvStreams) {}
void SetAll(const VideoOptions& change) {
- adapt_input_to_cpu_usage.SetFrom(change.adapt_input_to_cpu_usage);
- adapt_cpu_with_smoothing.SetFrom(change.adapt_cpu_with_smoothing);
- video_adapt_third.SetFrom(change.video_adapt_third);
- video_noise_reduction.SetFrom(change.video_noise_reduction);
- video_start_bitrate.SetFrom(change.video_start_bitrate);
- cpu_overuse_detection.SetFrom(change.cpu_overuse_detection);
- cpu_underuse_threshold.SetFrom(change.cpu_underuse_threshold);
- cpu_overuse_threshold.SetFrom(change.cpu_overuse_threshold);
- cpu_underuse_encode_rsd_threshold.SetFrom(
- change.cpu_underuse_encode_rsd_threshold);
- cpu_overuse_encode_rsd_threshold.SetFrom(
- change.cpu_overuse_encode_rsd_threshold);
- cpu_overuse_encode_usage.SetFrom(change.cpu_overuse_encode_usage);
- conference_mode.SetFrom(change.conference_mode);
- process_adaptation_threshhold.SetFrom(change.process_adaptation_threshhold);
- system_low_adaptation_threshhold.SetFrom(
- change.system_low_adaptation_threshhold);
- system_high_adaptation_threshhold.SetFrom(
- change.system_high_adaptation_threshhold);
- dscp.SetFrom(change.dscp);
- suspend_below_min_bitrate.SetFrom(change.suspend_below_min_bitrate);
- unsignalled_recv_stream_limit.SetFrom(change.unsignalled_recv_stream_limit);
- use_simulcast_adapter.SetFrom(change.use_simulcast_adapter);
- screencast_min_bitrate.SetFrom(change.screencast_min_bitrate);
+ SetFrom(&adapt_input_to_cpu_usage, change.adapt_input_to_cpu_usage);
+ SetFrom(&adapt_cpu_with_smoothing, change.adapt_cpu_with_smoothing);
+ SetFrom(&video_adapt_third, change.video_adapt_third);
+ SetFrom(&video_noise_reduction, change.video_noise_reduction);
+ SetFrom(&video_start_bitrate, change.video_start_bitrate);
+ SetFrom(&cpu_overuse_detection, change.cpu_overuse_detection);
+ SetFrom(&cpu_underuse_threshold, change.cpu_underuse_threshold);
+ SetFrom(&cpu_overuse_threshold, change.cpu_overuse_threshold);
+ SetFrom(&cpu_underuse_encode_rsd_threshold,
+ change.cpu_underuse_encode_rsd_threshold);
+ SetFrom(&cpu_overuse_encode_rsd_threshold,
+ change.cpu_overuse_encode_rsd_threshold);
+ SetFrom(&cpu_overuse_encode_usage, change.cpu_overuse_encode_usage);
+ SetFrom(&conference_mode, change.conference_mode);
+ SetFrom(&process_adaptation_threshhold,
+ change.process_adaptation_threshhold);
+ SetFrom(&system_low_adaptation_threshhold,
+ change.system_low_adaptation_threshhold);
+ SetFrom(&system_high_adaptation_threshhold,
+ change.system_high_adaptation_threshhold);
+ SetFrom(&dscp, change.dscp);
+ SetFrom(&suspend_below_min_bitrate, change.suspend_below_min_bitrate);
+ SetFrom(&unsignalled_recv_stream_limit,
+ change.unsignalled_recv_stream_limit);
+ SetFrom(&use_simulcast_adapter, change.use_simulcast_adapter);
+ SetFrom(&screencast_min_bitrate, change.screencast_min_bitrate);
+ SetFrom(&disable_prerenderer_smoothing,
+ change.disable_prerenderer_smoothing);
}
bool operator==(const VideoOptions& o) const {
@@ -345,7 +299,8 @@ struct VideoOptions {
suspend_below_min_bitrate == o.suspend_below_min_bitrate &&
unsignalled_recv_stream_limit == o.unsignalled_recv_stream_limit &&
use_simulcast_adapter == o.use_simulcast_adapter &&
- screencast_min_bitrate == o.screencast_min_bitrate;
+ screencast_min_bitrate == o.screencast_min_bitrate &&
+ disable_prerenderer_smoothing == o.disable_prerenderer_smoothing;
}
std::string ToString() const {
@@ -381,56 +336,71 @@ struct VideoOptions {
}
// Enable CPU adaptation?
- Settable<bool> adapt_input_to_cpu_usage;
+ rtc::Optional<bool> adapt_input_to_cpu_usage;
// Enable CPU adaptation smoothing?
- Settable<bool> adapt_cpu_with_smoothing;
+ rtc::Optional<bool> adapt_cpu_with_smoothing;
// Enable video adapt third?
- Settable<bool> video_adapt_third;
+ rtc::Optional<bool> video_adapt_third;
// Enable denoising?
- Settable<bool> video_noise_reduction;
+ rtc::Optional<bool> video_noise_reduction;
// Experimental: Enable WebRtc higher start bitrate?
- Settable<int> video_start_bitrate;
+ rtc::Optional<int> video_start_bitrate;
// Enable WebRTC CPU overuse detection, a newer version of the CPU
// adaptation algorithm. When set, this option overrides
// |adapt_input_to_cpu_usage|.
- Settable<bool> cpu_overuse_detection;
+ rtc::Optional<bool> cpu_overuse_detection;
// Low threshold (t1) for cpu overuse adaptation. (Adapt up)
// Metric: encode usage (m1). m1 < t1 => underuse.
- Settable<int> cpu_underuse_threshold;
+ rtc::Optional<int> cpu_underuse_threshold;
// High threshold (t1) for cpu overuse adaptation. (Adapt down)
// Metric: encode usage (m1). m1 > t1 => overuse.
- Settable<int> cpu_overuse_threshold;
+ rtc::Optional<int> cpu_overuse_threshold;
// Low threshold (t2) for cpu overuse adaptation. (Adapt up)
// Metric: relative standard deviation of encode time (m2).
// Optional threshold. If set, (m1 < t1 && m2 < t2) => underuse.
// Note: t2 will have no effect if t1 is not set.
- Settable<int> cpu_underuse_encode_rsd_threshold;
+ rtc::Optional<int> cpu_underuse_encode_rsd_threshold;
// High threshold (t2) for cpu overuse adaptation. (Adapt down)
// Metric: relative standard deviation of encode time (m2).
// Optional threshold. If set, (m1 > t1 || m2 > t2) => overuse.
// Note: t2 will have no effect if t1 is not set.
- Settable<int> cpu_overuse_encode_rsd_threshold;
+ rtc::Optional<int> cpu_overuse_encode_rsd_threshold;
// Use encode usage for cpu detection.
- Settable<bool> cpu_overuse_encode_usage;
+ rtc::Optional<bool> cpu_overuse_encode_usage;
// Use conference mode?
- Settable<bool> conference_mode;
+ rtc::Optional<bool> conference_mode;
// Threshold for process CPU adaptation. (Process limit)
- Settable<float> process_adaptation_threshhold;
+ rtc::Optional<float> process_adaptation_threshhold;
// Low threshold for CPU adaptation. (Adapt up)
- Settable<float> system_low_adaptation_threshhold;
+ rtc::Optional<float> system_low_adaptation_threshhold;
// High threshold for CPU adaptation. (Adapt down)
- Settable<float> system_high_adaptation_threshhold;
+ rtc::Optional<float> system_high_adaptation_threshhold;
// Set DSCP value for packets sent from the video channel.
- Settable<bool> dscp;
+ rtc::Optional<bool> dscp;
// Enable WebRTC suspension of video. No video frames will be sent when the
// bitrate is below the configured minimum bitrate.
- Settable<bool> suspend_below_min_bitrate;
+ rtc::Optional<bool> suspend_below_min_bitrate;
// Limit on the number of early receive channels that can be created.
- Settable<int> unsignalled_recv_stream_limit;
+ rtc::Optional<int> unsignalled_recv_stream_limit;
// Enable use of simulcast adapter.
- Settable<bool> use_simulcast_adapter;
+ rtc::Optional<bool> use_simulcast_adapter;
// Force screencast to use a minimum bitrate
- Settable<int> screencast_min_bitrate;
+ rtc::Optional<int> screencast_min_bitrate;
+ // Set to true if the renderer has its own frame-selection algorithm.
+ // If true, WebRTC hands each frame over as soon as possible, without
+ // delay, and rendering smoothness is entirely the renderer's
+ // responsibility; if false, WebRTC delays frame release in order to
+ // increase rendering smoothness.
+ rtc::Optional<bool> disable_prerenderer_smoothing;
+
+ private:
+ template <typename T>
+ static void SetFrom(rtc::Optional<T>* s, const rtc::Optional<T>& o) {
+ if (o) {
+ *s = o;
+ }
+ }
};
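
Reading the new field follows the usual Optional pattern: unset means WebRTC keeps its default smoothing behavior. A minimal sketch for a renderer that does its own frame selection:

#include "talk/media/base/mediachannel.h"

bool PrerendererSmoothingDisabled() {
  cricket::VideoOptions options;
  // A renderer with its own frame-selection algorithm wants frames handed
  // over immediately, taking on smoothing duty itself.
  options.disable_prerenderer_smoothing = rtc::Optional<bool>(true);
  // Unset reads as "keep WebRTC's smoothing": check engagement explicitly.
  return options.disable_prerenderer_smoothing &&
         *options.disable_prerenderer_smoothing;
}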
struct RtpHeaderExtension {
@@ -447,8 +417,8 @@ struct RtpHeaderExtension {
std::string ToString() const {
std::ostringstream ost;
ost << "{";
- ost << "id: , " << id;
ost << "uri: " << uri;
+ ost << ", id: " << id;
ost << "}";
return ost.str();
}
@@ -481,12 +451,6 @@ enum VoiceMediaChannelOptions {
OPT_AGC_MINUS_10DB = 0x80000000
};
-// DTMF flags to control if a DTMF tone should be played and/or sent.
-enum DtmfFlags {
- DF_PLAY = 0x01,
- DF_SEND = 0x02,
-};
-
class MediaChannel : public sigslot::has_slots<> {
public:
class NetworkInterface {
@@ -593,7 +557,6 @@ class MediaChannel : public sigslot::has_slots<> {
enum SendFlags {
SEND_NOTHING,
- SEND_RINGBACKTONE,
SEND_MICROPHONE
};
@@ -820,6 +783,7 @@ struct VideoSenderInfo : public MediaSenderInfo {
}
std::vector<SsrcGroup> ssrc_groups;
+ std::string encoder_implementation_name;
int packets_cached;
int firs_rcvd;
int plis_rcvd;
@@ -865,6 +829,7 @@ struct VideoReceiverInfo : public MediaReceiverInfo {
}
std::vector<SsrcGroup> ssrc_groups;
+ std::string decoder_implementation_name;
int packets_concealed;
int firs_sent;
int plis_sent;
@@ -968,9 +933,13 @@ struct DataMediaInfo {
std::vector<DataReceiverInfo> receivers;
};
+struct RtcpParameters {
+ bool reduced_size = false;
+};
+
template <class Codec>
struct RtpParameters {
- virtual std::string ToString() {
+ virtual std::string ToString() const {
std::ostringstream ost;
ost << "{";
ost << "codecs: " << VectorToString(codecs) << ", ";
@@ -982,11 +951,12 @@ struct RtpParameters {
std::vector<Codec> codecs;
std::vector<RtpHeaderExtension> extensions;
// TODO(pthatcher): Add streams.
+ RtcpParameters rtcp;
};
template <class Codec, class Options>
struct RtpSendParameters : RtpParameters<Codec> {
- std::string ToString() override {
+ std::string ToString() const override {
std::ostringstream ost;
ost << "{";
ost << "codecs: " << VectorToString(this->codecs) << ", ";
@@ -1056,18 +1026,18 @@ class VoiceMediaChannel : public MediaChannel {
// Set speaker output volume of the specified ssrc.
virtual bool SetOutputVolume(uint32_t ssrc, double volume) = 0;
// Returns whether the telephone-event has been negotiated.
- virtual bool CanInsertDtmf() { return false; }
- // Send and/or play a DTMF |event| according to the |flags|.
- // The DTMF out-of-band signal will be used on sending.
+ virtual bool CanInsertDtmf() = 0;
+ // Send a DTMF |event|. The DTMF out-of-band signal will be used.
// The |ssrc| should be either 0 or a valid send stream ssrc.
// The valid values for |event| are 0 to 15, corresponding to
// DTMF events 0-9, *, #, A-D.
- virtual bool InsertDtmf(uint32_t ssrc,
- int event,
- int duration,
- int flags) = 0;
+ virtual bool InsertDtmf(uint32_t ssrc, int event, int duration) = 0;
// Gets quality stats for the channel.
virtual bool GetStats(VoiceMediaInfo* info) = 0;
+
+ virtual void SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) = 0;
};
struct VideoSendParameters : RtpSendParameters<VideoCodec, VideoOptions> {
@@ -1194,13 +1164,13 @@ struct SendDataParams {
enum SendDataResult { SDR_SUCCESS, SDR_ERROR, SDR_BLOCK };
struct DataOptions {
- std::string ToString() {
+ std::string ToString() const {
return "{}";
}
};
struct DataSendParameters : RtpSendParameters<DataCodec, DataOptions> {
- std::string ToString() {
+ std::string ToString() const {
std::ostringstream ost;
// Options and extensions aren't used.
ost << "{";
diff --git a/talk/media/base/mediaengine.h b/talk/media/base/mediaengine.h
index 1a992d7d4a..467614bb3e 100644
--- a/talk/media/base/mediaengine.h
+++ b/talk/media/base/mediaengine.h
@@ -28,7 +28,7 @@
#ifndef TALK_MEDIA_BASE_MEDIAENGINE_H_
#define TALK_MEDIA_BASE_MEDIAENGINE_H_
-#ifdef OSX
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
#include <CoreAudio/CoreAudio.h>
#endif
@@ -40,8 +40,8 @@
#include "talk/media/base/mediacommon.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videocommon.h"
-#include "talk/media/base/voiceprocessor.h"
#include "talk/media/devices/devicemanager.h"
+#include "webrtc/audio_state.h"
#include "webrtc/base/fileutils.h"
#include "webrtc/base/sigslotrepeater.h"
@@ -51,13 +51,16 @@
namespace webrtc {
class Call;
-class VoiceEngine;
}
namespace cricket {
class VideoCapturer;
+struct RtpCapabilities {
+ std::vector<RtpHeaderExtension> header_extensions;
+};
+
// MediaEngineInterface is an abstraction of a media engine which can be
// subclassed to support different media componentry backends.
// It supports voice and video operations in the same class to facilitate
@@ -72,7 +75,7 @@ class MediaEngineInterface {
// Shuts down the engine.
virtual void Terminate() = 0;
// TODO(solenberg): Remove once VoE API refactoring is done.
- virtual webrtc::VoiceEngine* GetVoE() = 0;
+ virtual rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const = 0;
// MediaChannel creation
// Creates a voice media channel. Returns NULL on failure.
@@ -85,20 +88,6 @@ class MediaEngineInterface {
webrtc::Call* call,
const VideoOptions& options) = 0;
- // Configuration
- // Gets global audio options.
- virtual AudioOptions GetAudioOptions() const = 0;
- // Sets global audio options. "options" are from AudioOptions, above.
- virtual bool SetAudioOptions(const AudioOptions& options) = 0;
- // Sets the default (maximum) codec/resolution and encoder option to capture
- // and encode video.
- virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config)
- = 0;
-
- // Device selection
- virtual bool SetSoundDevices(const Device* in_device,
- const Device* out_device) = 0;
-
// Device configuration
// Gets the current speaker volume, as a value between 0 and 255.
virtual bool GetOutputVolume(int* level) = 0;
@@ -109,15 +98,9 @@ class MediaEngineInterface {
virtual int GetInputLevel() = 0;
virtual const std::vector<AudioCodec>& audio_codecs() = 0;
- virtual const std::vector<RtpHeaderExtension>&
- audio_rtp_header_extensions() = 0;
+ virtual RtpCapabilities GetAudioCapabilities() = 0;
virtual const std::vector<VideoCodec>& video_codecs() = 0;
- virtual const std::vector<RtpHeaderExtension>&
- video_rtp_header_extensions() = 0;
-
- // Logging control
- virtual void SetVoiceLogging(int min_sev, const char* filter) = 0;
- virtual void SetVideoLogging(int min_sev, const char* filter) = 0;
+ virtual RtpCapabilities GetVideoCapabilities() = 0;
// Starts AEC dump using existing file.
virtual bool StartAecDump(rtc::PlatformFile file) = 0;
@@ -167,8 +150,8 @@ class CompositeMediaEngine : public MediaEngineInterface {
voice_.Terminate();
}
- virtual webrtc::VoiceEngine* GetVoE() {
- return voice_.GetVoE();
+ virtual rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const {
+ return voice_.GetAudioState();
}
virtual VoiceMediaChannel* CreateChannel(webrtc::Call* call,
const AudioOptions& options) {
@@ -179,21 +162,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
return video_.CreateChannel(call, options);
}
- virtual AudioOptions GetAudioOptions() const {
- return voice_.GetOptions();
- }
- virtual bool SetAudioOptions(const AudioOptions& options) {
- return voice_.SetOptions(options);
- }
- virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
- return video_.SetDefaultEncoderConfig(config);
- }
-
- virtual bool SetSoundDevices(const Device* in_device,
- const Device* out_device) {
- return voice_.SetDevices(in_device, out_device);
- }
-
virtual bool GetOutputVolume(int* level) {
return voice_.GetOutputVolume(level);
}
@@ -207,21 +175,14 @@ class CompositeMediaEngine : public MediaEngineInterface {
virtual const std::vector<AudioCodec>& audio_codecs() {
return voice_.codecs();
}
- virtual const std::vector<RtpHeaderExtension>& audio_rtp_header_extensions() {
- return voice_.rtp_header_extensions();
+ virtual RtpCapabilities GetAudioCapabilities() {
+ return voice_.GetCapabilities();
}
virtual const std::vector<VideoCodec>& video_codecs() {
return video_.codecs();
}
- virtual const std::vector<RtpHeaderExtension>& video_rtp_header_extensions() {
- return video_.rtp_header_extensions();
- }
-
- virtual void SetVoiceLogging(int min_sev, const char* filter) {
- voice_.SetLogging(min_sev, filter);
- }
- virtual void SetVideoLogging(int min_sev, const char* filter) {
- video_.SetLogging(min_sev, filter);
+ virtual RtpCapabilities GetVideoCapabilities() {
+ return video_.GetCapabilities();
}
virtual bool StartAecDump(rtc::PlatformFile file) {
@@ -243,70 +204,6 @@ class CompositeMediaEngine : public MediaEngineInterface {
VIDEO video_;
};
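On the consumer side, the two removed per-media extension getters collapse into one aggregate query; a hypothetical caller might look like this (engine pointer and logging are illustrative):

    // Sketch: enumerate supported audio RTP header extensions.
    cricket::RtpCapabilities caps = engine->GetAudioCapabilities();
    for (const cricket::RtpHeaderExtension& extension : caps.header_extensions) {
      LOG(LS_INFO) << "audio RTP extension: " << extension.uri
                   << " (id " << extension.id << ")";
    }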
-// NullVoiceEngine can be used with CompositeMediaEngine in the case where only
-// a video engine is desired.
-class NullVoiceEngine {
- public:
- bool Init(rtc::Thread* worker_thread) { return true; }
- void Terminate() {}
- // If you need this to return an actual channel, use FakeMediaEngine instead.
- VoiceMediaChannel* CreateChannel(const AudioOptions& options) {
- return nullptr;
- }
- AudioOptions GetOptions() const { return AudioOptions(); }
- bool SetOptions(const AudioOptions& options) { return true; }
- bool SetDevices(const Device* in_device, const Device* out_device) {
- return true;
- }
- bool GetOutputVolume(int* level) {
- *level = 0;
- return true;
- }
- bool SetOutputVolume(int level) { return true; }
- int GetInputLevel() { return 0; }
- const std::vector<AudioCodec>& codecs() { return codecs_; }
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
- return rtp_header_extensions_;
- }
- void SetLogging(int min_sev, const char* filter) {}
- bool StartAecDump(rtc::PlatformFile file) { return false; }
- bool StartRtcEventLog(rtc::PlatformFile file) { return false; }
- void StopRtcEventLog() {}
-
- private:
- std::vector<AudioCodec> codecs_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
-};
-
-// NullVideoEngine can be used with CompositeMediaEngine in the case where only
-// a voice engine is desired.
-class NullVideoEngine {
- public:
- bool Init(rtc::Thread* worker_thread) { return true; }
- void Terminate() {}
- // If you need this to return an actual channel, use FakeMediaEngine instead.
- VideoMediaChannel* CreateChannel(
- const VideoOptions& options,
- VoiceMediaChannel* voice_media_channel) {
- return NULL;
- }
- bool SetOptions(const VideoOptions& options) { return true; }
- bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
- return true;
- }
- const std::vector<VideoCodec>& codecs() { return codecs_; }
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
- return rtp_header_extensions_;
- }
- void SetLogging(int min_sev, const char* filter) {}
-
- private:
- std::vector<VideoCodec> codecs_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
-};
-
-typedef CompositeMediaEngine<NullVoiceEngine, NullVideoEngine> NullMediaEngine;
-
enum DataChannelType {
DCT_NONE = 0,
DCT_RTP = 1,
diff --git a/talk/media/base/streamparams_unittest.cc b/talk/media/base/streamparams_unittest.cc
index a9e1ce3531..a0164733d4 100644
--- a/talk/media/base/streamparams_unittest.cc
+++ b/talk/media/base/streamparams_unittest.cc
@@ -27,6 +27,7 @@
#include "talk/media/base/streamparams.h"
#include "talk/media/base/testutils.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/gunit.h"
static const uint32_t kSsrcs1[] = {1};
@@ -54,8 +55,8 @@ TEST(SsrcGroup, EqualNotEqual) {
cricket::SsrcGroup("abc", MAKE_VECTOR(kSsrcs2)),
};
- for (size_t i = 0; i < ARRAY_SIZE(ssrc_groups); ++i) {
- for (size_t j = 0; j < ARRAY_SIZE(ssrc_groups); ++j) {
+ for (size_t i = 0; i < arraysize(ssrc_groups); ++i) {
+ for (size_t j = 0; j < arraysize(ssrc_groups); ++j) {
EXPECT_EQ((ssrc_groups[i] == ssrc_groups[j]), (i == j));
EXPECT_EQ((ssrc_groups[i] != ssrc_groups[j]), (i != j));
}
@@ -92,7 +93,7 @@ TEST(StreamParams, CreateLegacy) {
TEST(StreamParams, HasSsrcGroup) {
cricket::StreamParams sp =
- CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
EXPECT_EQ(2U, sp.ssrcs.size());
EXPECT_EQ(kSsrcs2[0], sp.first_ssrc());
EXPECT_TRUE(sp.has_ssrcs());
@@ -107,7 +108,7 @@ TEST(StreamParams, HasSsrcGroup) {
TEST(StreamParams, GetSsrcGroup) {
cricket::StreamParams sp =
- CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
EXPECT_EQ(NULL, sp.get_ssrc_group("xyz"));
EXPECT_EQ(&sp.ssrc_groups[0], sp.get_ssrc_group("XYZ"));
}
@@ -116,17 +117,17 @@ TEST(StreamParams, EqualNotEqual) {
cricket::StreamParams l1 = cricket::StreamParams::CreateLegacy(1);
cricket::StreamParams l2 = cricket::StreamParams::CreateLegacy(2);
cricket::StreamParams sg1 =
- CreateStreamParamsWithSsrcGroup("ABC", kSsrcs1, ARRAY_SIZE(kSsrcs1));
+ CreateStreamParamsWithSsrcGroup("ABC", kSsrcs1, arraysize(kSsrcs1));
cricket::StreamParams sg2 =
- CreateStreamParamsWithSsrcGroup("ABC", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("ABC", kSsrcs2, arraysize(kSsrcs2));
cricket::StreamParams sg3 =
- CreateStreamParamsWithSsrcGroup("Abc", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("Abc", kSsrcs2, arraysize(kSsrcs2));
cricket::StreamParams sg4 =
- CreateStreamParamsWithSsrcGroup("abc", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("abc", kSsrcs2, arraysize(kSsrcs2));
cricket::StreamParams sps[] = {l1, l2, sg1, sg2, sg3, sg4};
- for (size_t i = 0; i < ARRAY_SIZE(sps); ++i) {
- for (size_t j = 0; j < ARRAY_SIZE(sps); ++j) {
+ for (size_t i = 0; i < arraysize(sps); ++i) {
+ for (size_t j = 0; j < arraysize(sps); ++j) {
EXPECT_EQ((sps[i] == sps[j]), (i == j));
EXPECT_EQ((sps[i] != sps[j]), (i != j));
}
@@ -195,7 +196,7 @@ TEST(StreamParams, GetPrimaryAndFidSsrcs) {
TEST(StreamParams, ToString) {
cricket::StreamParams sp =
- CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, ARRAY_SIZE(kSsrcs2));
+ CreateStreamParamsWithSsrcGroup("XYZ", kSsrcs2, arraysize(kSsrcs2));
EXPECT_STREQ("{ssrcs:[1,2];ssrc_groups:{semantics:XYZ;ssrcs:[1,2]};}",
sp.ToString().c_str());
}
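The ARRAY_SIZE to arraysize substitutions in this file and below are mechanical, but the motivation is worth stating: arraysize (webrtc/base/arraysize.h) is built on a sizeof-of-a-function-template idiom, so unlike the plain sizeof-based macro it refuses to compile when handed a pointer rather than a real array. A standalone illustration, not part of the patch:

    #include "webrtc/base/arraysize.h"

    static const int kValues[] = {1, 2, 3};

    size_t CountValues() {
      // Computed at compile time; arraysize(some_pointer) would not compile.
      return arraysize(kValues);
    }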
diff --git a/talk/media/base/testutils.cc b/talk/media/base/testutils.cc
index 3b1fcf0513..49a78e63dd 100644
--- a/talk/media/base/testutils.cc
+++ b/talk/media/base/testutils.cc
@@ -132,8 +132,8 @@ const RawRtcpPacket RtpTestUtility::kTestRawRtcpPackets[] = {
};
size_t RtpTestUtility::GetTestPacketCount() {
- return std::min(ARRAY_SIZE(kTestRawRtpPackets),
- ARRAY_SIZE(kTestRawRtcpPackets));
+ return std::min(arraysize(kTestRawRtpPackets),
+ arraysize(kTestRawRtcpPackets));
}
bool RtpTestUtility::WriteTestPackets(size_t count,
diff --git a/talk/media/base/testutils.h b/talk/media/base/testutils.h
index cb4146d707..20c0d62ab7 100644
--- a/talk/media/base/testutils.h
+++ b/talk/media/base/testutils.h
@@ -35,6 +35,7 @@
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videocommon.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/window.h"
@@ -54,7 +55,7 @@ namespace cricket {
template <class T> inline std::vector<T> MakeVector(const T a[], size_t s) {
return std::vector<T>(a, a + s);
}
-#define MAKE_VECTOR(a) cricket::MakeVector(a, ARRAY_SIZE(a))
+#define MAKE_VECTOR(a) cricket::MakeVector(a, arraysize(a))
struct RtpDumpPacket;
class RtpDumpWriter;
diff --git a/talk/media/base/videocapturer.cc b/talk/media/base/videocapturer.cc
index ca4b9069f1..d525a4188e 100644
--- a/talk/media/base/videocapturer.cc
+++ b/talk/media/base/videocapturer.cc
@@ -59,7 +59,7 @@ enum {
};
static const int64_t kMaxDistance = ~(static_cast<int64_t>(1) << 63);
-#ifdef LINUX
+#ifdef WEBRTC_LINUX
static const int kYU12Penalty = 16; // Needs to be higher than MJPG index.
#endif
static const int kDefaultScreencastFps = 5;
@@ -82,7 +82,7 @@ CapturedFrame::CapturedFrame()
pixel_height(0),
time_stamp(0),
data_size(0),
- rotation(0),
+ rotation(webrtc::kVideoRotation_0),
data(NULL) {}
// TODO(fbarchard): Remove this function once lmimediaengine stops using it.
@@ -94,11 +94,6 @@ bool CapturedFrame::GetDataSize(uint32_t* size) const {
return true;
}
-webrtc::VideoRotation CapturedFrame::GetRotation() const {
- ASSERT(rotation == 0 || rotation == 90 || rotation == 180 || rotation == 270);
- return static_cast<webrtc::VideoRotation>(rotation);
-}
-
/////////////////////////////////////////////////////////////////////
// Implementation of class VideoCapturer
/////////////////////////////////////////////////////////////////////
@@ -126,7 +121,6 @@ void VideoCapturer::Construct() {
SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured);
scaled_width_ = 0;
scaled_height_ = 0;
- screencast_max_pixels_ = 0;
muted_ = false;
black_frame_count_down_ = kNumBlackFramesOnMute;
enable_video_adapter_ = true;
@@ -365,16 +359,11 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
if (IsScreencast()) {
int scaled_width, scaled_height;
- if (screencast_max_pixels_ > 0) {
- ComputeScaleMaxPixels(captured_frame->width, captured_frame->height,
- screencast_max_pixels_, &scaled_width, &scaled_height);
- } else {
- int desired_screencast_fps = capture_format_.get() ?
- VideoFormat::IntervalToFps(capture_format_->interval) :
- kDefaultScreencastFps;
- ComputeScale(captured_frame->width, captured_frame->height,
- desired_screencast_fps, &scaled_width, &scaled_height);
- }
+ int desired_screencast_fps = capture_format_.get() ?
+ VideoFormat::IntervalToFps(capture_format_->interval) :
+ kDefaultScreencastFps;
+ ComputeScale(captured_frame->width, captured_frame->height,
+ desired_screencast_fps, &scaled_width, &scaled_height);
if (FOURCC_ARGB == captured_frame->fourcc &&
(scaled_width != captured_frame->width ||
@@ -605,7 +594,7 @@ int64_t VideoCapturer::GetFormatDistance(const VideoFormat& desired,
for (size_t i = 0; i < preferred_fourccs.size(); ++i) {
if (supported_fourcc == CanonicalFourCC(preferred_fourccs[i])) {
delta_fourcc = i;
-#ifdef LINUX
+#ifdef WEBRTC_LINUX
// For HD, avoid YU12, which is a software conversion and has two bugs
// (b/7326348, b/6960899). Re-enable when fixed.
if (supported.height >= 720 && (supported_fourcc == FOURCC_YU12 ||
diff --git a/talk/media/base/videocapturer.h b/talk/media/base/videocapturer.h
index 0a11ed09c1..a13c201b8b 100644
--- a/talk/media/base/videocapturer.h
+++ b/talk/media/base/videocapturer.h
@@ -78,10 +78,6 @@ struct CapturedFrame {
// fourcc. Return true if succeeded.
bool GetDataSize(uint32_t* size) const;
- // TODO(guoweis): Change the type of |rotation| from int to
- // webrtc::VideoRotation once chromium gets the code.
- webrtc::VideoRotation GetRotation() const;
-
// The width and height of the captured frame could be different from those
// of VideoFormat. Once the first frame is captured, the width, height,
// fourcc, pixel_width, and pixel_height should keep the same over frames.
@@ -90,15 +86,11 @@ struct CapturedFrame {
uint32_t fourcc; // compression
uint32_t pixel_width; // width of a pixel, default is 1
uint32_t pixel_height; // height of a pixel, default is 1
- // TODO(magjed): |elapsed_time| is deprecated - remove once not used anymore.
- int64_t elapsed_time;
int64_t time_stamp; // timestamp of when the frame was captured, in unix
// time with nanosecond units.
uint32_t data_size; // number of bytes of the frame data
- // TODO(guoweis): This can't be converted to VideoRotation yet as it's
- // used by chrome now.
- int rotation; // rotation in degrees of the frame (0, 90, 180, 270)
+ webrtc::VideoRotation rotation; // rotation in degrees of the frame.
void* data; // pointer to the frame data. This object allocates the
// memory or points to an existing memory.
@@ -270,17 +262,6 @@ class VideoCapturer
sigslot::signal2<VideoCapturer*, const VideoFrame*,
sigslot::multi_threaded_local> SignalVideoFrame;
- // If 'screencast_max_pixels' is set greater than zero, screencasts will be
- // scaled to be no larger than this value.
- // If set to zero, the max pixels will be limited to
- // Retina MacBookPro 15" resolution of 2880 x 1800.
- // For high fps, maximum pixels limit is set based on common 24" monitor
- // resolution of 2048 x 1280.
- int screencast_max_pixels() const { return screencast_max_pixels_; }
- void set_screencast_max_pixels(int p) {
- screencast_max_pixels_ = std::max(0, p);
- }
-
// If true, run video adaptation. By default, video adaptation is enabled
// and users must call video_adapter()->OnOutputFormatRequest()
// to receive frames.
@@ -377,7 +358,6 @@ class VideoCapturer
bool square_pixel_aspect_ratio_; // Enable scaling to square pixels.
int scaled_width_; // Current output size from ComputeScale.
int scaled_height_;
- int screencast_max_pixels_; // Downscale screencasts further if requested.
bool muted_;
int black_frame_count_down_;
diff --git a/talk/media/base/videocapturer_unittest.cc b/talk/media/base/videocapturer_unittest.cc
index 359fe9552a..6d1d8aa395 100644
--- a/talk/media/base/videocapturer_unittest.cc
+++ b/talk/media/base/videocapturer_unittest.cc
@@ -196,39 +196,6 @@ TEST_F(VideoCapturerTest, CameraOffOnMute) {
EXPECT_EQ(33, video_frames_received());
}
-TEST_F(VideoCapturerTest, ScreencastScaledMaxPixels) {
- capturer_.SetScreencast(true);
-
- int kWidth = 1280;
- int kHeight = 720;
-
- // Screencasts usually have large weird dimensions and are ARGB.
- std::vector<cricket::VideoFormat> formats;
- formats.push_back(cricket::VideoFormat(kWidth, kHeight,
- cricket::VideoFormat::FpsToInterval(5), cricket::FOURCC_ARGB));
- formats.push_back(cricket::VideoFormat(2 * kWidth, 2 * kHeight,
- cricket::VideoFormat::FpsToInterval(5), cricket::FOURCC_ARGB));
- capturer_.ResetSupportedFormats(formats);
-
-
- EXPECT_EQ(0, capturer_.screencast_max_pixels());
- EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
- 2 * kWidth,
- 2 * kHeight,
- cricket::VideoFormat::FpsToInterval(30),
- cricket::FOURCC_ARGB)));
- EXPECT_TRUE(capturer_.IsRunning());
- EXPECT_EQ(0, renderer_.num_rendered_frames());
- renderer_.SetSize(2 * kWidth, 2 * kHeight, 0);
- EXPECT_TRUE(capturer_.CaptureFrame());
- EXPECT_EQ(1, renderer_.num_rendered_frames());
-
- capturer_.set_screencast_max_pixels(kWidth * kHeight);
- renderer_.SetSize(kWidth, kHeight, 0);
- EXPECT_TRUE(capturer_.CaptureFrame());
- EXPECT_EQ(2, renderer_.num_rendered_frames());
-}
-
TEST_F(VideoCapturerTest, ScreencastScaledOddWidth) {
capturer_.SetScreencast(true);
diff --git a/talk/media/base/videocommon.cc b/talk/media/base/videocommon.cc
index 7b6aac206b..faf6450b56 100644
--- a/talk/media/base/videocommon.cc
+++ b/talk/media/base/videocommon.cc
@@ -31,6 +31,7 @@
#include <math.h>
#include <sstream>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
namespace cricket {
@@ -58,7 +59,7 @@ static const FourCCAliasEntry kFourCCAliases[] = {
};
uint32_t CanonicalFourCC(uint32_t fourcc) {
- for (int i = 0; i < ARRAY_SIZE(kFourCCAliases); ++i) {
+ for (int i = 0; i < arraysize(kFourCCAliases); ++i) {
if (kFourCCAliases[i].alias == fourcc) {
return kFourCCAliases[i].canonical;
}
@@ -75,7 +76,7 @@ static float kScaleFactors[] = {
1.f / 16.f // 1/16 scale.
};
-static const int kNumScaleFactors = ARRAY_SIZE(kScaleFactors);
+static const int kNumScaleFactors = arraysize(kScaleFactors);
// Finds the scale factor that, when applied to width and height, produces
// fewer than num_pixels.
@@ -106,9 +107,6 @@ void ComputeScaleMaxPixels(int frame_width, int frame_height, int max_pixels,
ASSERT(scaled_width != NULL);
ASSERT(scaled_height != NULL);
ASSERT(max_pixels > 0);
- // For VP8 the values for max width and height can be found here
- // webrtc/src/video_engine/vie_defines.h (kViEMaxCodecWidth and
- // kViEMaxCodecHeight)
const int kMaxWidth = 4096;
const int kMaxHeight = 3072;
int new_frame_width = frame_width;
diff --git a/talk/media/base/videoengine_unittest.h b/talk/media/base/videoengine_unittest.h
index d89b3e6f43..d7fa00d558 100644
--- a/talk/media/base/videoengine_unittest.h
+++ b/talk/media/base/videoengine_unittest.h
@@ -126,327 +126,6 @@ class VideoEngineOverride : public T {
}
};
-template<class E>
-class VideoEngineTest : public testing::Test {
- protected:
- // Tests starting and stopping the engine, and creating a channel.
- void StartupShutdown() {
- EXPECT_TRUE(engine_.Init(rtc::Thread::Current()));
- cricket::VideoMediaChannel* channel = engine_.CreateChannel(NULL);
- EXPECT_TRUE(channel != NULL);
- delete channel;
- engine_.Terminate();
- }
-
- void ConstrainNewCodecBody() {
- cricket::VideoCodec empty, in, out;
- cricket::VideoCodec max_settings(engine_.codecs()[0].id,
- engine_.codecs()[0].name,
- 1280, 800, 30, 0);
-
- // set max settings of 1280x800x30
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // don't constrain the max resolution
- in = max_settings;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // constrain resolution greater than the max and wider aspect,
- // picking best aspect (16:10)
- in.width = 1380;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 720, 30);
-
- // constrain resolution greater than the max and narrow aspect,
- // picking best aspect (16:9)
- in.width = 1280;
- in.height = 740;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 720, 30);
-
- // constrain resolution greater than the max, picking equal aspect (4:3)
- in.width = 1280;
- in.height = 960;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 800, 30);
-
- // constrain resolution greater than the max, picking equal aspect (16:10)
- in.width = 1280;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 800, 30);
-
- // reduce max settings to 640x480x30
- max_settings.width = 640;
- max_settings.height = 480;
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // don't constrain the max resolution
- in = max_settings;
- in.width = 640;
- in.height = 480;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // keep 16:10 if they request it
- in.height = 400;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // don't constrain lesser 4:3 resolutions
- in.width = 320;
- in.height = 240;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // don't constrain lesser 16:10 resolutions
- in.width = 320;
- in.height = 200;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // requested resolution of 0x0 succeeds
- in.width = 0;
- in.height = 0;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // constrain resolution lesser than the max and wider aspect,
- // picking best aspect (16:9)
- in.width = 350;
- in.height = 201;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 320, 180, 30);
-
- // constrain resolution greater than the max and narrow aspect,
- // picking best aspect (4:3)
- in.width = 350;
- in.height = 300;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 320, 240, 30);
-
- // constrain resolution greater than the max and wider aspect,
- // picking best aspect (16:9)
- in.width = 1380;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 360, 30);
-
- // constrain resolution greater than the max and narrow aspect,
- // picking best aspect (4:3)
- in.width = 1280;
- in.height = 900;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 480, 30);
-
- // constrain resolution greater than the max, picking equal aspect (4:3)
- in.width = 1280;
- in.height = 960;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 480, 30);
-
- // constrain resolution greater than the max, picking equal aspect (16:10)
- in.width = 1280;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- // constrain res & fps greater than the max
- in.framerate = 50;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- // reduce max settings to 160x100x10
- max_settings.width = 160;
- max_settings.height = 100;
- max_settings.framerate = 10;
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // constrain res & fps to new max
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 160, 100, 10);
-
- // allow 4:3 "comparable" resolutions
- in.width = 160;
- in.height = 120;
- in.framerate = 10;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 160, 120, 10);
- }
-
- // This is the new way of constraining codec size, where we no longer maintain
- // a list of the supported formats. Instead, CanSendCodec will just downscale
- // the resolution by 2 until the width is below clamp.
- void ConstrainNewCodec2Body() {
- cricket::VideoCodec empty, in, out;
- cricket::VideoCodec max_settings(engine_.codecs()[0].id,
- engine_.codecs()[0].name,
- 1280, 800, 30, 0);
-
- // Set max settings of 1280x800x30
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // Don't constrain the max resolution
- in = max_settings;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // Constrain resolution greater than the max width.
- in.width = 1380;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 690, 400, 30);
-
- // Don't constrain resolution when only the height is greater than max.
- in.width = 960;
- in.height = 1280;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 960, 1280, 30);
-
- // Don't constrain smaller format.
- in.width = 640;
- in.height = 480;
- EXPECT_TRUE(engine_.CanSendCodec(in, empty, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 480, 30);
- }
-
- void ConstrainRunningCodecBody() {
- cricket::VideoCodec in, out, current;
- cricket::VideoCodec max_settings(engine_.codecs()[0].id,
- engine_.codecs()[0].name,
- 1280, 800, 30, 0);
-
- // set max settings of 1280x960x30
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // establish current call at 1280x800x30 (16:10)
- current = max_settings;
- current.height = 800;
-
- // Don't constrain current resolution
- in = current;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // requested resolution of 0x0 succeeds
- in.width = 0;
- in.height = 0;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // Reduce an intermediate resolution down to the next lowest one, preserving
- // aspect ratio.
- in.width = 800;
- in.height = 600;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- // Clamping by aspect ratio, but still never return a dimension higher than
- // requested.
- in.width = 1280;
- in.height = 720;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 720, 30);
-
- in.width = 1279;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 960, 600, 30);
-
- in.width = 1281;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 720, 30);
-
- // Clamp large resolutions down, always preserving aspect
- in.width = 1920;
- in.height = 1080;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 800, 30);
-
- in.width = 1921;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 800, 30);
-
- in.width = 1919;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 1280, 800, 30);
-
- // reduce max settings to 640x480x30
- max_settings.width = 640;
- max_settings.height = 480;
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // establish current call at 640x400x30 (16:10)
- current = max_settings;
- current.height = 400;
-
- // Don't constrain current resolution
- in = current;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // requested resolution of 0x0 succeeds
- in.width = 0;
- in.height = 0;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED2(IsEqualCodec, out, in);
-
- // Reduce an intermediate resolution down to the next lowest one, preserving
- // aspect ratio.
- in.width = 400;
- in.height = 300;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 320, 200, 30);
-
- // Clamping by aspect ratio, but still never return a dimension higher than
- // requested.
- in.width = 640;
- in.height = 360;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 360, 30);
-
- in.width = 639;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 480, 300, 30);
-
- in.width = 641;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 360, 30);
-
- // Clamp large resolutions down, always preserving aspect
- in.width = 1280;
- in.height = 800;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- in.width = 1281;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- in.width = 1279;
- EXPECT_TRUE(engine_.CanSendCodec(in, current, &out));
- EXPECT_PRED4(IsEqualRes, out, 640, 400, 30);
-
- // Should fail for any that are smaller than our supported formats
- in.width = 80;
- in.height = 80;
- EXPECT_FALSE(engine_.CanSendCodec(in, current, &out));
-
- in.height = 50;
- EXPECT_FALSE(engine_.CanSendCodec(in, current, &out));
- }
-
- VideoEngineOverride<E> engine_;
- rtc::scoped_ptr<cricket::FakeVideoCapturer> video_capturer_;
-};
-
template<class E, class C>
class VideoMediaChannelTest : public testing::Test,
public sigslot::has_slots<> {
@@ -875,7 +554,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(DefaultCodec());
- parameters.options.conference_mode.Set(true);
+ parameters.options.conference_mode = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->AddRecvStream(
@@ -926,7 +605,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(SetOneCodec(DefaultCodec()));
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(DefaultCodec());
- parameters.options.conference_mode.Set(true);
+ parameters.options.conference_mode = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc)));
@@ -1009,8 +688,10 @@ class VideoMediaChannelTest : public testing::Test,
rtc::scoped_ptr<const rtc::Buffer> p(GetRtpPacket(0));
ParseRtpPacket(p.get(), NULL, NULL, NULL, NULL, &ssrc, NULL);
EXPECT_EQ(kSsrc, ssrc);
- EXPECT_EQ(NumRtpPackets(), NumRtpPackets(ssrc));
- EXPECT_EQ(NumRtpBytes(), NumRtpBytes(ssrc));
+ // Packets are being paced out, so these can mismatch between the first and
+ // second call to NumRtpPackets until pending packets are paced out.
+ EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(ssrc), kTimeout);
+ EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(ssrc), kTimeout);
EXPECT_EQ(1, NumSentSsrcs());
EXPECT_EQ(0, NumRtpPackets(kSsrc - 1));
EXPECT_EQ(0, NumRtpBytes(kSsrc - 1));
@@ -1031,8 +712,10 @@ class VideoMediaChannelTest : public testing::Test,
rtc::scoped_ptr<const rtc::Buffer> p(GetRtpPacket(0));
ParseRtpPacket(p.get(), NULL, NULL, NULL, NULL, &ssrc, NULL);
EXPECT_EQ(999u, ssrc);
- EXPECT_EQ(NumRtpPackets(), NumRtpPackets(ssrc));
- EXPECT_EQ(NumRtpBytes(), NumRtpBytes(ssrc));
+ // Packets are being paced out, so these can mismatch between the first and
+ // second call to NumRtpPackets until pending packets are paced out.
+ EXPECT_EQ_WAIT(NumRtpPackets(), NumRtpPackets(ssrc), kTimeout);
+ EXPECT_EQ_WAIT(NumRtpBytes(), NumRtpBytes(ssrc), kTimeout);
EXPECT_EQ(1, NumSentSsrcs());
EXPECT_EQ(0, NumRtpPackets(kSsrc));
EXPECT_EQ(0, NumRtpBytes(kSsrc));
@@ -1236,7 +919,7 @@ class VideoMediaChannelTest : public testing::Test,
EXPECT_TRUE(SetDefaultCodec());
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(DefaultCodec());
- parameters.options.conference_mode.Set(true);
+ parameters.options.conference_mode = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_TRUE(SetSend(true));
EXPECT_TRUE(channel_->AddRecvStream(
@@ -1746,8 +1429,8 @@ class VideoMediaChannelTest : public testing::Test,
// Tests that we can send and receive frames with early receive.
void TwoStreamsSendAndUnsignalledRecv(const cricket::VideoCodec& codec) {
cricket::VideoSendParameters parameters;
- parameters.options.conference_mode.Set(true);
- parameters.options.unsignalled_recv_stream_limit.Set(1);
+ parameters.options.conference_mode = rtc::Optional<bool>(true);
+ parameters.options.unsignalled_recv_stream_limit = rtc::Optional<int>(1);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
SetUpSecondStreamWithNoRecv();
// Test sending and receiving on first stream.
@@ -1780,8 +1463,8 @@ class VideoMediaChannelTest : public testing::Test,
void TwoStreamsAddAndRemoveUnsignalledRecv(
const cricket::VideoCodec& codec) {
cricket::VideoOptions vmo;
- vmo.conference_mode.Set(true);
- vmo.unsignalled_recv_stream_limit.Set(1);
+ vmo.conference_mode = rtc::Optional<bool>(true);
+ vmo.unsignalled_recv_stream_limit = rtc::Optional<int>(1);
EXPECT_TRUE(channel_->SetOptions(vmo));
SetUpSecondStreamWithNoRecv();
// Sending and receiving on first stream.
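The Settable-style writes (conference_mode.Set(true)) replaced above follow rtc::Optional semantics; a minimal sketch of the new read/write pattern, assuming the value_or() accessor from webrtc/base/optional.h:

    #include "webrtc/base/optional.h"

    cricket::VideoOptions options;
    options.conference_mode = rtc::Optional<bool>(true);
    // Unset fields read back through a caller-supplied default.
    bool conference = options.conference_mode.value_or(false);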
diff --git a/talk/media/base/videoframe.cc b/talk/media/base/videoframe.cc
index 2b604b085b..3e4d60a258 100644
--- a/talk/media/base/videoframe.cc
+++ b/talk/media/base/videoframe.cc
@@ -33,6 +33,7 @@
#include "libyuv/planar_functions.h"
#include "libyuv/scale.h"
#include "talk/media/base/videocommon.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
@@ -318,7 +319,7 @@ bool VideoFrame::Validate(uint32_t fourcc,
}
// TODO(fbarchard): Make function to dump information about frames.
uint8_t four_samples[4] = {0, 0, 0, 0};
- for (size_t i = 0; i < ARRAY_SIZE(four_samples) && i < sample_size; ++i) {
+ for (size_t i = 0; i < arraysize(four_samples) && i < sample_size; ++i) {
four_samples[i] = sample[i];
}
if (sample_size < expected_size) {
diff --git a/talk/media/base/videoframe.h b/talk/media/base/videoframe.h
index 217732fa18..f81c678d61 100644
--- a/talk/media/base/videoframe.h
+++ b/talk/media/base/videoframe.h
@@ -30,7 +30,7 @@
#include "webrtc/base/basictypes.h"
#include "webrtc/base/stream.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/common_video/rotation.h"
namespace cricket {
diff --git a/talk/media/base/videoframefactory.cc b/talk/media/base/videoframefactory.cc
index dfd97c6faa..fb81096c31 100644
--- a/talk/media/base/videoframefactory.cc
+++ b/talk/media/base/videoframefactory.cc
@@ -51,8 +51,8 @@ VideoFrame* VideoFrameFactory::CreateAliasedFrame(
// If the frame is rotated, we need to switch the width and height.
if (apply_rotation_ &&
- (input_frame->GetRotation() == webrtc::kVideoRotation_90 ||
- input_frame->GetRotation() == webrtc::kVideoRotation_270)) {
+ (input_frame->rotation == webrtc::kVideoRotation_90 ||
+ input_frame->rotation == webrtc::kVideoRotation_270)) {
std::swap(output_width, output_height);
}
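With |rotation| now a webrtc::VideoRotation instead of a raw int, callers can branch on the enum directly, as CreateAliasedFrame does above; a small hypothetical helper in the same spirit:

    // Sketch: true if the frame's buffer dimensions need swapping
    // before display.
    bool NeedsDimensionSwap(const cricket::CapturedFrame& frame) {
      return frame.rotation == webrtc::kVideoRotation_90 ||
             frame.rotation == webrtc::kVideoRotation_270;
    }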
diff --git a/talk/media/base/videorenderer.h b/talk/media/base/videorenderer.h
index 0a0ee51817..a18c4e3c29 100644
--- a/talk/media/base/videorenderer.h
+++ b/talk/media/base/videorenderer.h
@@ -42,11 +42,12 @@ class VideoFrame;
class VideoRenderer {
public:
virtual ~VideoRenderer() {}
- // Called when the video has changed size. This is also used as an
- // initialization method to set the UI size before any video frame
- // rendered. webrtc::ExternalRenderer's FrameSizeChange will invoke this when
- // it's called or later when a VideoRenderer is attached.
- virtual bool SetSize(int width, int height, int reserved) = 0;
+ // Called when the video has changed size.
+ // TODO(nisse): This method is not really used, and should be
+ // deleted. Provide a default do-nothing implementation, to ease the
+ // transition as the method is deleted in subclasses, in particular,
+ // chrome's MockVideoRenderer class.
+ virtual bool SetSize(int width, int height, int reserved) { return true; }
// Called when a new frame is available for display.
virtual bool RenderFrame(const VideoFrame *frame) = 0;
diff --git a/talk/media/devices/carbonvideorenderer.cc b/talk/media/devices/carbonvideorenderer.cc
index 846135d925..b711ae4fbd 100644
--- a/talk/media/devices/carbonvideorenderer.cc
+++ b/talk/media/devices/carbonvideorenderer.cc
@@ -40,7 +40,6 @@ CarbonVideoRenderer::CarbonVideoRenderer(int x, int y)
image_height_(0),
x_(x),
y_(y),
- image_ref_(NULL),
window_ref_(NULL) {
}
diff --git a/talk/media/devices/carbonvideorenderer.h b/talk/media/devices/carbonvideorenderer.h
index 52c974060c..e8329ea031 100644
--- a/talk/media/devices/carbonvideorenderer.h
+++ b/talk/media/devices/carbonvideorenderer.h
@@ -65,7 +65,6 @@ class CarbonVideoRenderer : public VideoRenderer {
int image_height_;
int x_;
int y_;
- CGImageRef image_ref_;
WindowRef window_ref_;
};
diff --git a/talk/media/devices/devicemanager.cc b/talk/media/devices/devicemanager.cc
index 1d7ac5baf1..eca14a5def 100644
--- a/talk/media/devices/devicemanager.cc
+++ b/talk/media/devices/devicemanager.cc
@@ -123,7 +123,7 @@ bool DeviceManager::GetAudioOutputDevice(const std::string& name, Device* out) {
bool DeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
devices->clear();
-#if defined(ANDROID) || defined(IOS)
+#if defined(ANDROID) || defined(WEBRTC_IOS)
// On Android and iOS, we treat the camera(s) as a single device. Even if
// there are multiple cameras, that's abstracted away at a higher level.
Device dev("camera", "1"); // name and ID
diff --git a/talk/media/devices/devicemanager_unittest.cc b/talk/media/devices/devicemanager_unittest.cc
index f259c7d0d3..606a05e7c3 100644
--- a/talk/media/devices/devicemanager_unittest.cc
+++ b/talk/media/devices/devicemanager_unittest.cc
@@ -39,6 +39,7 @@
#include "talk/media/base/videocapturerfactory.h"
#include "talk/media/devices/filevideocapturer.h"
#include "talk/media/devices/v4llookup.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/fileutils.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/logging.h"
@@ -47,10 +48,10 @@
#include "webrtc/base/stream.h"
#include "webrtc/base/windowpickerfactory.h"
-#ifdef LINUX
+#ifdef WEBRTC_LINUX
// TODO(juberti): Figure out why this doesn't compile on Windows.
#include "webrtc/base/fileutils_mock.h"
-#endif // LINUX
+#endif // WEBRTC_LINUX
using rtc::Pathname;
using rtc::FileTimeType;
@@ -269,22 +270,22 @@ TEST(DeviceManagerTest, VerifyFilterDevices) {
"device5",
};
std::vector<Device> devices;
- for (int i = 0; i < ARRAY_SIZE(kTotalDevicesName); ++i) {
+ for (int i = 0; i < arraysize(kTotalDevicesName); ++i) {
devices.push_back(Device(kTotalDevicesName[i], i));
}
EXPECT_TRUE(CompareDeviceList(devices, kTotalDevicesName,
- ARRAY_SIZE(kTotalDevicesName)));
+ arraysize(kTotalDevicesName)));
// Return false if given NULL as the exclusion list.
EXPECT_TRUE(DeviceManager::FilterDevices(&devices, NULL));
// The devices should not change.
EXPECT_TRUE(CompareDeviceList(devices, kTotalDevicesName,
- ARRAY_SIZE(kTotalDevicesName)));
+ arraysize(kTotalDevicesName)));
EXPECT_TRUE(DeviceManager::FilterDevices(&devices, kFilteredDevicesName));
EXPECT_TRUE(CompareDeviceList(devices, kDevicesName,
- ARRAY_SIZE(kDevicesName)));
+ arraysize(kDevicesName)));
}
-#ifdef LINUX
+#ifdef WEBRTC_LINUX
class FakeV4LLookup : public cricket::V4LLookup {
public:
explicit FakeV4LLookup(std::vector<std::string> device_paths)
@@ -376,7 +377,7 @@ TEST(DeviceManagerTest, GetVideoCaptureDevices_KUnknown) {
EXPECT_EQ("/dev/video0", video_ins.at(0).name);
EXPECT_EQ("/dev/video5", video_ins.at(1).name);
}
-#endif // LINUX
+#endif // WEBRTC_LINUX
// TODO(noahric): These are flaky on windows on headless machines.
#ifndef WIN32
diff --git a/talk/media/devices/fakedevicemanager.h b/talk/media/devices/fakedevicemanager.h
index a4b2b86e44..77a83424b2 100644
--- a/talk/media/devices/fakedevicemanager.h
+++ b/talk/media/devices/fakedevicemanager.h
@@ -156,7 +156,7 @@ class FakeDeviceManager : public DeviceManagerInterface {
return true;
}
-#ifdef OSX
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
bool QtKitToSgDevice(const std::string& qtkit_name, Device* out) {
out->name = qtkit_name;
out->id = "sg:" + qtkit_name;
diff --git a/talk/media/devices/mobiledevicemanager.cc b/talk/media/devices/mobiledevicemanager.cc
index 2a886a36d4..5739c7e8d6 100644
--- a/talk/media/devices/mobiledevicemanager.cc
+++ b/talk/media/devices/mobiledevicemanager.cc
@@ -27,7 +27,7 @@
#include "talk/media/devices/devicemanager.h"
#include "webrtc/base/arraysize.h"
-#include "webrtc/modules/video_capture/include/video_capture_factory.h"
+#include "webrtc/modules/video_capture/video_capture_factory.h"
namespace cricket {
diff --git a/talk/media/devices/v4llookup.h b/talk/media/devices/v4llookup.h
index 1bed90b650..5c53ede99f 100644
--- a/talk/media/devices/v4llookup.h
+++ b/talk/media/devices/v4llookup.h
@@ -37,7 +37,7 @@
#include <string>
-#ifdef LINUX
+#ifdef WEBRTC_LINUX
namespace cricket {
class V4LLookup {
public:
@@ -66,5 +66,5 @@ class V4LLookup {
} // namespace cricket
-#endif // LINUX
+#endif // WEBRTC_LINUX
#endif // TALK_MEDIA_DEVICES_V4LLOOKUP_H_
diff --git a/talk/media/devices/videorendererfactory.h b/talk/media/devices/videorendererfactory.h
index 416f05b297..b7128f625d 100644
--- a/talk/media/devices/videorendererfactory.h
+++ b/talk/media/devices/videorendererfactory.h
@@ -32,9 +32,9 @@
#define TALK_MEDIA_DEVICES_VIDEORENDERERFACTORY_H_
#include "talk/media/base/videorenderer.h"
-#if defined(LINUX) && defined(HAVE_GTK)
+#if defined(WEBRTC_LINUX) && defined(HAVE_GTK)
#include "talk/media/devices/gtkvideorenderer.h"
-#elif defined(OSX) && !defined(CARBON_DEPRECATED)
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) && !defined(CARBON_DEPRECATED)
#include "talk/media/devices/carbonvideorenderer.h"
#elif defined(WIN32)
#include "talk/media/devices/gdivideorenderer.h"
@@ -45,9 +45,10 @@ namespace cricket {
class VideoRendererFactory {
public:
static VideoRenderer* CreateGuiVideoRenderer(int x, int y) {
- #if defined(LINUX) && defined(HAVE_GTK)
+ #if defined(WEBRTC_LINUX) && defined(HAVE_GTK)
return new GtkVideoRenderer(x, y);
- #elif defined(OSX) && !defined(CARBON_DEPRECATED)
+ #elif defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) && \
+ !defined(CARBON_DEPRECATED)
CarbonVideoRenderer* renderer = new CarbonVideoRenderer(x, y);
// Needs to be initialized on the main thread.
if (renderer->Initialize()) {
diff --git a/talk/media/devices/win32devicemanager.cc b/talk/media/devices/win32devicemanager.cc
index 1b9e9d86f6..f34e3c44eb 100644
--- a/talk/media/devices/win32devicemanager.cc
+++ b/talk/media/devices/win32devicemanager.cc
@@ -48,6 +48,7 @@ EXTERN_C const PROPERTYKEY PKEY_AudioEndpoint_GUID = { {
} }, 4
};
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/base/thread.h"
@@ -148,7 +149,7 @@ bool Win32DeviceManager::GetDefaultVideoCaptureDevice(Device* device) {
*device = devices[0];
for (size_t i = 0; i < devices.size(); ++i) {
if (strnicmp(devices[i].id.c_str(), kUsbDevicePathPrefix,
- ARRAY_SIZE(kUsbDevicePathPrefix) - 1) == 0) {
+ arraysize(kUsbDevicePathPrefix) - 1) == 0) {
*device = devices[i];
break;
}
diff --git a/talk/media/sctp/sctpdataengine.cc b/talk/media/sctp/sctpdataengine.cc
index c88882d42d..3753cd22c0 100644
--- a/talk/media/sctp/sctpdataengine.cc
+++ b/talk/media/sctp/sctpdataengine.cc
@@ -36,6 +36,7 @@
#include "talk/media/base/constants.h"
#include "talk/media/base/streamparams.h"
#include "usrsctplib/usrsctp.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/helpers.h"
#include "webrtc/base/logging.h"
@@ -76,7 +77,7 @@ std::string ListFlags(int flags) {
MAKEFLAG(SCTP_STREAM_CHANGE_DENIED)
};
#undef MAKEFLAG
- for (int i = 0; i < ARRAY_SIZE(flaginfo); ++i) {
+ for (int i = 0; i < arraysize(flaginfo); ++i) {
if (flags & flaginfo[i].value) {
if (!first) result << " | ";
result << flaginfo[i].name;
@@ -473,7 +474,7 @@ bool SctpDataMediaChannel::OpenSctpSocket() {
struct sctp_event event = {0};
event.se_assoc_id = SCTP_ALL_ASSOC;
event.se_on = 1;
- for (size_t i = 0; i < ARRAY_SIZE(event_types); i++) {
+ for (size_t i = 0; i < arraysize(event_types); i++) {
event.se_type = event_types[i];
if (usrsctp_setsockopt(sock_, IPPROTO_SCTP, SCTP_EVENT, &event,
sizeof(event)) < 0) {
@@ -728,7 +729,13 @@ bool SctpDataMediaChannel::AddStream(const StreamParams& stream) {
}
const uint32_t ssrc = stream.first_ssrc();
- if (open_streams_.find(ssrc) != open_streams_.end()) {
+ if (ssrc >= cricket::kMaxSctpSid) {
+ LOG(LS_WARNING) << debug_name_ << "->Add(Send|Recv)Stream(...): "
+ << "Not adding data stream '" << stream.id
+ << "' with ssrc=" << ssrc
+ << " because stream ssrc is too high.";
+ return false;
+ } else if (open_streams_.find(ssrc) != open_streams_.end()) {
LOG(LS_WARNING) << debug_name_ << "->Add(Send|Recv)Stream(...): "
<< "Not adding data stream '" << stream.id
<< "' with ssrc=" << ssrc
diff --git a/talk/media/sctp/sctpdataengine_unittest.cc b/talk/media/sctp/sctpdataengine_unittest.cc
index 4706368b9d..d673c69c98 100644
--- a/talk/media/sctp/sctpdataengine_unittest.cc
+++ b/talk/media/sctp/sctpdataengine_unittest.cc
@@ -270,12 +270,14 @@ class SctpDataMediaChannelTest : public testing::Test,
ProcessMessagesUntilIdle();
}
- void AddStream(int ssrc) {
+ bool AddStream(int ssrc) {
+ bool ret = true;
cricket::StreamParams p(cricket::StreamParams::CreateLegacy(ssrc));
- chan1_->AddSendStream(p);
- chan1_->AddRecvStream(p);
- chan2_->AddSendStream(p);
- chan2_->AddRecvStream(p);
+ ret = ret && chan1_->AddSendStream(p);
+ ret = ret && chan1_->AddRecvStream(p);
+ ret = ret && chan2_->AddSendStream(p);
+ ret = ret && chan2_->AddRecvStream(p);
+ return ret;
}
cricket::SctpDataMediaChannel* CreateChannel(
@@ -504,6 +506,12 @@ TEST_F(SctpDataMediaChannelTest, EngineSignalsRightChannel) {
EXPECT_GT(channel1_ready_to_send_count(), prior_count);
}
+TEST_F(SctpDataMediaChannelTest, RefusesHighNumberedChannels) {
+ SetupConnectedChannels();
+ EXPECT_TRUE(AddStream(1022));
+ EXPECT_FALSE(AddStream(1023));
+}
+
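Judging by this test, cricket::kMaxSctpSid is 1023, so the guard added in sctpdataengine.cc leaves SIDs 0 through 1022 usable. Restated as a tiny predicate (a sketch, not code from the patch):

    // Valid SCTP SIDs are [0, kMaxSctpSid), i.e. 0..1022 here.
    bool IsUsableSctpSid(uint32_t sid) {
      return sid < cricket::kMaxSctpSid;
    }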
// Flaky on Linux and Windows. See webrtc:4453.
#if defined(WEBRTC_WIN) || defined(WEBRTC_LINUX)
#define MAYBE_ReusesAStream DISABLED_ReusesAStream
diff --git a/talk/media/webrtc/fakewebrtccall.cc b/talk/media/webrtc/fakewebrtccall.cc
index d86bfb553c..d50a53cb63 100644
--- a/talk/media/webrtc/fakewebrtccall.cc
+++ b/talk/media/webrtc/fakewebrtccall.cc
@@ -28,10 +28,12 @@
#include "talk/media/webrtc/fakewebrtccall.h"
#include <algorithm>
+#include <utility>
#include "talk/media/base/rtputils.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/gunit.h"
+#include "webrtc/audio/audio_sink.h"
namespace cricket {
FakeAudioSendStream::FakeAudioSendStream(
@@ -39,14 +41,27 @@ FakeAudioSendStream::FakeAudioSendStream(
RTC_DCHECK(config.voe_channel_id != -1);
}
+const webrtc::AudioSendStream::Config&
+ FakeAudioSendStream::GetConfig() const {
+ return config_;
+}
+
void FakeAudioSendStream::SetStats(
const webrtc::AudioSendStream::Stats& stats) {
stats_ = stats;
}
-const webrtc::AudioSendStream::Config&
- FakeAudioSendStream::GetConfig() const {
- return config_;
+FakeAudioSendStream::TelephoneEvent
+ FakeAudioSendStream::GetLatestTelephoneEvent() const {
+ return latest_telephone_event_;
+}
+
+bool FakeAudioSendStream::SendTelephoneEvent(int payload_type, uint8_t event,
+ uint32_t duration_ms) {
+ latest_telephone_event_.payload_type = payload_type;
+ latest_telephone_event_.event_code = event;
+ latest_telephone_event_.duration_ms = duration_ms;
+ return true;
}
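Because the fake stores the most recent event, a test can assert on exactly what the channel forwarded; a hypothetical assertion sequence (fixture and values invented for illustration):

    FakeAudioSendStream::TelephoneEvent ev;
    fake_stream->SendTelephoneEvent(126, 9, 400);  // |fake_stream| assumed
    ev = fake_stream->GetLatestTelephoneEvent();
    EXPECT_EQ(126, ev.payload_type);
    EXPECT_EQ(9, ev.event_code);
    EXPECT_EQ(400u, ev.duration_ms);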
webrtc::AudioSendStream::Stats FakeAudioSendStream::GetStats() const {
@@ -77,6 +92,11 @@ webrtc::AudioReceiveStream::Stats FakeAudioReceiveStream::GetStats() const {
return stats_;
}
+void FakeAudioReceiveStream::SetSink(
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ sink_ = std::move(sink);
+}
+
FakeVideoSendStream::FakeVideoSendStream(
const webrtc::VideoSendStream::Config& config,
const webrtc::VideoEncoderConfig& encoder_config)
diff --git a/talk/media/webrtc/fakewebrtccall.h b/talk/media/webrtc/fakewebrtccall.h
index 88edc60d78..3528c7a7b1 100644
--- a/talk/media/webrtc/fakewebrtccall.h
+++ b/talk/media/webrtc/fakewebrtccall.h
@@ -47,14 +47,19 @@
#include "webrtc/video_send_stream.h"
namespace cricket {
-
-class FakeAudioSendStream : public webrtc::AudioSendStream {
+class FakeAudioSendStream final : public webrtc::AudioSendStream {
public:
- explicit FakeAudioSendStream(
- const webrtc::AudioSendStream::Config& config);
+ struct TelephoneEvent {
+ int payload_type = -1;
+ uint8_t event_code = 0;
+ uint32_t duration_ms = 0;
+ };
+
+ explicit FakeAudioSendStream(const webrtc::AudioSendStream::Config& config);
const webrtc::AudioSendStream::Config& GetConfig() const;
void SetStats(const webrtc::AudioSendStream::Stats& stats);
+ TelephoneEvent GetLatestTelephoneEvent() const;
private:
// webrtc::SendStream implementation.
@@ -66,13 +71,16 @@ class FakeAudioSendStream : public webrtc::AudioSendStream {
}
// webrtc::AudioSendStream implementation.
+ bool SendTelephoneEvent(int payload_type, uint8_t event,
+ uint32_t duration_ms) override;
webrtc::AudioSendStream::Stats GetStats() const override;
+ TelephoneEvent latest_telephone_event_;
webrtc::AudioSendStream::Config config_;
webrtc::AudioSendStream::Stats stats_;
};
-class FakeAudioReceiveStream : public webrtc::AudioReceiveStream {
+class FakeAudioReceiveStream final : public webrtc::AudioReceiveStream {
public:
explicit FakeAudioReceiveStream(
const webrtc::AudioReceiveStream::Config& config);
@@ -98,14 +106,16 @@ class FakeAudioReceiveStream : public webrtc::AudioReceiveStream {
// webrtc::AudioReceiveStream implementation.
webrtc::AudioReceiveStream::Stats GetStats() const override;
+ void SetSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) override;
webrtc::AudioReceiveStream::Config config_;
webrtc::AudioReceiveStream::Stats stats_;
int received_packets_;
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink_;
};
-class FakeVideoSendStream : public webrtc::VideoSendStream,
- public webrtc::VideoCaptureInput {
+class FakeVideoSendStream final : public webrtc::VideoSendStream,
+ public webrtc::VideoCaptureInput {
public:
FakeVideoSendStream(const webrtc::VideoSendStream::Config& config,
const webrtc::VideoEncoderConfig& encoder_config);
@@ -153,7 +163,7 @@ class FakeVideoSendStream : public webrtc::VideoSendStream,
webrtc::VideoSendStream::Stats stats_;
};
-class FakeVideoReceiveStream : public webrtc::VideoReceiveStream {
+class FakeVideoReceiveStream final : public webrtc::VideoReceiveStream {
public:
explicit FakeVideoReceiveStream(
const webrtc::VideoReceiveStream::Config& config);
@@ -188,7 +198,7 @@ class FakeVideoReceiveStream : public webrtc::VideoReceiveStream {
webrtc::VideoReceiveStream::Stats stats_;
};
-class FakeCall : public webrtc::Call, public webrtc::PacketReceiver {
+class FakeCall final : public webrtc::Call, public webrtc::PacketReceiver {
public:
explicit FakeCall(const webrtc::Call::Config& config);
~FakeCall() override;
diff --git a/talk/media/webrtc/fakewebrtcvideoengine.h b/talk/media/webrtc/fakewebrtcvideoengine.h
index 8e4c7c87f8..e0d4db52f8 100644
--- a/talk/media/webrtc/fakewebrtcvideoengine.h
+++ b/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -41,7 +41,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_error_codes.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/video_decoder.h"
#include "webrtc/video_encoder.h"
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index 2405e07b5f..65ba927cc5 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -41,19 +41,11 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/config.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
namespace cricket {
-static const char kFakeDefaultDeviceName[] = "Fake Default";
-static const int kFakeDefaultDeviceId = -1;
-static const char kFakeDeviceName[] = "Fake Device";
-#ifdef WIN32
-static const int kFakeDeviceId = 0;
-#else
-static const int kFakeDeviceId = 1;
-#endif
-
static const int kOpusBandwidthNb = 4000;
static const int kOpusBandwidthMb = 6000;
static const int kOpusBandwidthWb = 8000;
@@ -63,18 +55,6 @@ static const int kOpusBandwidthFb = 20000;
#define WEBRTC_CHECK_CHANNEL(channel) \
if (channels_.find(channel) == channels_.end()) return -1;
-#define WEBRTC_ASSERT_CHANNEL(channel) \
- RTC_DCHECK(channels_.find(channel) != channels_.end());
-
-// Verify the header extension ID, if enabled, is within the bounds specified in
-// [RFC5285]: 1-14 inclusive.
-#define WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id) \
- do { \
- if (enable && (id < 1 || id > 14)) { \
- return -1; \
- } \
- } while (0);
-
class FakeAudioProcessing : public webrtc::AudioProcessing {
public:
FakeAudioProcessing() : experimental_ns_enabled_(false) {}
@@ -94,11 +74,13 @@ class FakeAudioProcessing : public webrtc::AudioProcessing {
experimental_ns_enabled_ = config.Get<webrtc::ExperimentalNs>().enabled;
}
+ WEBRTC_STUB_CONST(input_sample_rate_hz, ());
WEBRTC_STUB_CONST(proc_sample_rate_hz, ());
WEBRTC_STUB_CONST(proc_split_sample_rate_hz, ());
- WEBRTC_STUB_CONST(num_input_channels, ());
- WEBRTC_STUB_CONST(num_output_channels, ());
- WEBRTC_STUB_CONST(num_reverse_channels, ());
+ size_t num_input_channels() const override { return 0; }
+ size_t num_proc_channels() const override { return 0; }
+ size_t num_output_channels() const override { return 0; }
+ size_t num_reverse_channels() const override { return 0; }
WEBRTC_VOID_STUB(set_output_will_be_muted, (bool muted));
WEBRTC_STUB(ProcessStream, (webrtc::AudioFrame* frame));
WEBRTC_STUB(ProcessStream, (
@@ -156,20 +138,11 @@ class FakeAudioProcessing : public webrtc::AudioProcessing {
class FakeWebRtcVoiceEngine
: public webrtc::VoEAudioProcessing,
- public webrtc::VoEBase, public webrtc::VoECodec, public webrtc::VoEDtmf,
+ public webrtc::VoEBase, public webrtc::VoECodec,
public webrtc::VoEHardware,
public webrtc::VoENetwork, public webrtc::VoERTP_RTCP,
public webrtc::VoEVolumeControl {
public:
- struct DtmfInfo {
- DtmfInfo()
- : dtmf_event_code(-1),
- dtmf_out_of_band(false),
- dtmf_length_ms(-1) {}
- int dtmf_event_code;
- bool dtmf_out_of_band;
- int dtmf_length_ms;
- };
struct Channel {
explicit Channel()
: external_transport(false),
@@ -184,15 +157,11 @@ class FakeWebRtcVoiceEngine
nack(false),
cn8_type(13),
cn16_type(105),
- dtmf_type(106),
red_type(117),
nack_max_packets(0),
send_ssrc(0),
- send_audio_level_ext_(-1),
- receive_audio_level_ext_(-1),
- send_absolute_sender_time_ext_(-1),
- receive_absolute_sender_time_ext_(-1),
associate_send_channel(-1),
+ recv_codecs(),
neteq_capacity(-1),
neteq_fast_accelerate(false) {
memset(&send_codec, 0, sizeof(send_codec));
@@ -209,16 +178,10 @@ class FakeWebRtcVoiceEngine
bool nack;
int cn8_type;
int cn16_type;
- int dtmf_type;
int red_type;
int nack_max_packets;
uint32_t send_ssrc;
- int send_audio_level_ext_;
- int receive_audio_level_ext_;
- int send_absolute_sender_time_ext_;
- int receive_absolute_sender_time_ext_;
int associate_send_channel;
- DtmfInfo dtmf_info;
std::vector<webrtc::CodecInst> recv_codecs;
webrtc::CodecInst send_codec;
webrtc::PacketTime last_rtp_packet_time;
@@ -227,13 +190,10 @@ class FakeWebRtcVoiceEngine
bool neteq_fast_accelerate;
};
- FakeWebRtcVoiceEngine(const cricket::AudioCodec* const* codecs,
- int num_codecs)
+ FakeWebRtcVoiceEngine()
: inited_(false),
last_channel_(-1),
fail_create_channel_(false),
- codecs_(codecs),
- num_codecs_(num_codecs),
num_set_send_codecs_(0),
ec_enabled_(false),
ec_metrics_enabled_(false),
@@ -255,26 +215,13 @@ class FakeWebRtcVoiceEngine
memset(&agc_config_, 0, sizeof(agc_config_));
}
~FakeWebRtcVoiceEngine() {
- // Ought to have all been deleted by the WebRtcVoiceMediaChannel
- // destructors, but just in case ...
- for (std::map<int, Channel*>::const_iterator i = channels_.begin();
- i != channels_.end(); ++i) {
- delete i->second;
- }
+ RTC_CHECK(channels_.empty());
}
bool ec_metrics_enabled() const { return ec_metrics_enabled_; }
bool IsInited() const { return inited_; }
int GetLastChannel() const { return last_channel_; }
- int GetChannelFromLocalSsrc(uint32_t local_ssrc) const {
- for (std::map<int, Channel*>::const_iterator iter = channels_.begin();
- iter != channels_.end(); ++iter) {
- if (local_ssrc == iter->second->send_ssrc)
- return iter->first;
- }
- return -1;
- }
int GetNumChannels() const { return static_cast<int>(channels_.size()); }
uint32_t GetLocalSSRC(int channel) {
return channels_[channel]->send_ssrc;
@@ -307,7 +254,7 @@ class FakeWebRtcVoiceEngine
return channels_[channel]->nack_max_packets;
}
const webrtc::PacketTime& GetLastRtpPacketTime(int channel) {
- WEBRTC_ASSERT_CHANNEL(channel);
+ RTC_DCHECK(channels_.find(channel) != channels_.end());
return channels_[channel]->last_rtp_packet_time;
}
int GetSendCNPayloadType(int channel, bool wideband) {
@@ -315,9 +262,6 @@ class FakeWebRtcVoiceEngine
channels_[channel]->cn16_type :
channels_[channel]->cn8_type;
}
- int GetSendTelephoneEventPayloadType(int channel) {
- return channels_[channel]->dtmf_type;
- }
int GetSendREDPayloadType(int channel) {
return channels_[channel]->red_type;
}
@@ -351,11 +295,8 @@ class FakeWebRtcVoiceEngine
return -1;
}
Channel* ch = new Channel();
- for (int i = 0; i < NumOfCodecs(); ++i) {
- webrtc::CodecInst codec;
- GetCodec(i, codec);
- ch->recv_codecs.push_back(codec);
- }
+ auto db = webrtc::acm2::RentACodec::Database();
+ ch->recv_codecs.assign(db.begin(), db.end());
if (config.Get<webrtc::NetEqCapacityConfig>().enabled) {
ch->neteq_capacity = config.Get<webrtc::NetEqCapacityConfig>().capacity;
}
@@ -364,24 +305,6 @@ class FakeWebRtcVoiceEngine
channels_[++last_channel_] = ch;
return last_channel_;
}
- int GetSendRtpExtensionId(int channel, const std::string& extension) {
- WEBRTC_ASSERT_CHANNEL(channel);
- if (extension == kRtpAudioLevelHeaderExtension) {
- return channels_[channel]->send_audio_level_ext_;
- } else if (extension == kRtpAbsoluteSenderTimeHeaderExtension) {
- return channels_[channel]->send_absolute_sender_time_ext_;
- }
- return -1;
- }
- int GetReceiveRtpExtensionId(int channel, const std::string& extension) {
- WEBRTC_ASSERT_CHANNEL(channel);
- if (extension == kRtpAudioLevelHeaderExtension) {
- return channels_[channel]->receive_audio_level_ext_;
- } else if (extension == kRtpAbsoluteSenderTimeHeaderExtension) {
- return channels_[channel]->receive_absolute_sender_time_ext_;
- }
- return -1;
- }
int GetNumSetSendCodecs() const { return num_set_send_codecs_; }
@@ -473,22 +396,8 @@ class FakeWebRtcVoiceEngine
webrtc::RtcEventLog* GetEventLog() { return nullptr; }
// webrtc::VoECodec
- WEBRTC_FUNC(NumOfCodecs, ()) {
- return num_codecs_;
- }
- WEBRTC_FUNC(GetCodec, (int index, webrtc::CodecInst& codec)) {
- if (index < 0 || index >= NumOfCodecs()) {
- return -1;
- }
- const cricket::AudioCodec& c(*codecs_[index]);
- codec.pltype = c.id;
- rtc::strcpyn(codec.plname, sizeof(codec.plname), c.name.c_str());
- codec.plfreq = c.clockrate;
- codec.pacsize = 0;
- codec.channels = c.channels;
- codec.rate = c.bitrate;
- return 0;
- }
+ WEBRTC_STUB(NumOfCodecs, ());
+ WEBRTC_STUB(GetCodec, (int index, webrtc::CodecInst& codec));
WEBRTC_FUNC(SetSendCodec, (int channel, const webrtc::CodecInst& codec)) {
WEBRTC_CHECK_CHANNEL(channel);
// To match the behavior of the real implementation.
@@ -526,16 +435,17 @@ class FakeWebRtcVoiceEngine
}
}
// Otherwise try to find this codec and update its payload type.
+ int result = -1; // not found
for (std::vector<webrtc::CodecInst>::iterator it = ch->recv_codecs.begin();
it != ch->recv_codecs.end(); ++it) {
if (strcmp(it->plname, codec.plname) == 0 &&
- it->plfreq == codec.plfreq) {
+ it->plfreq == codec.plfreq &&
+ it->channels == codec.channels) {
it->pltype = codec.pltype;
- it->channels = codec.channels;
- return 0;
+ result = 0;
}
}
- return -1; // not found
+ return result;
}
WEBRTC_FUNC(SetSendCNPayloadType, (int channel, int type,
webrtc::PayloadFrequencies frequency)) {
@@ -620,46 +530,11 @@ class FakeWebRtcVoiceEngine
return 0;
}
- // webrtc::VoEDtmf
- WEBRTC_FUNC(SendTelephoneEvent, (int channel, int event_code,
- bool out_of_band = true, int length_ms = 160, int attenuation_db = 10)) {
- channels_[channel]->dtmf_info.dtmf_event_code = event_code;
- channels_[channel]->dtmf_info.dtmf_out_of_band = out_of_band;
- channels_[channel]->dtmf_info.dtmf_length_ms = length_ms;
- return 0;
- }
-
- WEBRTC_FUNC(SetSendTelephoneEventPayloadType,
- (int channel, unsigned char type)) {
- channels_[channel]->dtmf_type = type;
- return 0;
- };
- WEBRTC_STUB(GetSendTelephoneEventPayloadType,
- (int channel, unsigned char& type));
-
- WEBRTC_STUB(SetDtmfFeedbackStatus, (bool enable, bool directFeedback));
- WEBRTC_STUB(GetDtmfFeedbackStatus, (bool& enabled, bool& directFeedback));
-
- WEBRTC_FUNC(PlayDtmfTone,
- (int event_code, int length_ms = 200, int attenuation_db = 10)) {
- dtmf_info_.dtmf_event_code = event_code;
- dtmf_info_.dtmf_length_ms = length_ms;
- return 0;
- }
-
// webrtc::VoEHardware
- WEBRTC_FUNC(GetNumOfRecordingDevices, (int& num)) {
- return GetNumDevices(num);
- }
- WEBRTC_FUNC(GetNumOfPlayoutDevices, (int& num)) {
- return GetNumDevices(num);
- }
- WEBRTC_FUNC(GetRecordingDeviceName, (int i, char* name, char* guid)) {
- return GetDeviceName(i, name, guid);
- }
- WEBRTC_FUNC(GetPlayoutDeviceName, (int i, char* name, char* guid)) {
- return GetDeviceName(i, name, guid);
- }
+ WEBRTC_STUB(GetNumOfRecordingDevices, (int& num));
+ WEBRTC_STUB(GetNumOfPlayoutDevices, (int& num));
+ WEBRTC_STUB(GetRecordingDeviceName, (int i, char* name, char* guid));
+ WEBRTC_STUB(GetPlayoutDeviceName, (int i, char* name, char* guid));
WEBRTC_STUB(SetRecordingDevice, (int, webrtc::StereoChannel));
WEBRTC_STUB(SetPlayoutDevice, (int));
WEBRTC_STUB(SetAudioDeviceLayer, (webrtc::AudioLayers));
@@ -729,35 +604,14 @@ class FakeWebRtcVoiceEngine
}
WEBRTC_STUB(GetLocalSSRC, (int channel, unsigned int& ssrc));
WEBRTC_STUB(GetRemoteSSRC, (int channel, unsigned int& ssrc));
- WEBRTC_FUNC(SetSendAudioLevelIndicationStatus, (int channel, bool enable,
- unsigned char id)) {
- WEBRTC_CHECK_CHANNEL(channel);
- WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id);
- channels_[channel]->send_audio_level_ext_ = (enable) ? id : -1;
- return 0;
- }
- WEBRTC_FUNC(SetReceiveAudioLevelIndicationStatus, (int channel, bool enable,
- unsigned char id)) {
- WEBRTC_CHECK_CHANNEL(channel);
- WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id);
- channels_[channel]->receive_audio_level_ext_ = (enable) ? id : -1;
- return 0;
- }
- WEBRTC_FUNC(SetSendAbsoluteSenderTimeStatus, (int channel, bool enable,
- unsigned char id)) {
- WEBRTC_CHECK_CHANNEL(channel);
- WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id);
- channels_[channel]->send_absolute_sender_time_ext_ = (enable) ? id : -1;
- return 0;
- }
- WEBRTC_FUNC(SetReceiveAbsoluteSenderTimeStatus, (int channel, bool enable,
- unsigned char id)) {
- WEBRTC_CHECK_CHANNEL(channel);
- WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id);
- channels_[channel]->receive_absolute_sender_time_ext_ = (enable) ? id : -1;
- return 0;
- }
-
+ WEBRTC_STUB(SetSendAudioLevelIndicationStatus, (int channel, bool enable,
+ unsigned char id));
+ WEBRTC_STUB(SetReceiveAudioLevelIndicationStatus, (int channel, bool enable,
+ unsigned char id));
+ WEBRTC_STUB(SetSendAbsoluteSenderTimeStatus, (int channel, bool enable,
+ unsigned char id));
+ WEBRTC_STUB(SetReceiveAbsoluteSenderTimeStatus, (int channel, bool enable,
+ unsigned char id));
WEBRTC_STUB(SetRTCPStatus, (int channel, bool enable));
WEBRTC_STUB(GetRTCPStatus, (int channel, bool& enabled));
WEBRTC_STUB(SetRTCP_CNAME, (int channel, const char cname[256]));
@@ -776,22 +630,12 @@ class FakeWebRtcVoiceEngine
unsigned int& discardedPackets));
WEBRTC_STUB(GetRTCPStatistics, (int channel, webrtc::CallStatistics& stats));
WEBRTC_FUNC(SetREDStatus, (int channel, bool enable, int redPayloadtype)) {
- return SetFECStatus(channel, enable, redPayloadtype);
- }
- // TODO(minyue): remove the below function when transition to SetREDStatus
- // is finished.
- WEBRTC_FUNC(SetFECStatus, (int channel, bool enable, int redPayloadtype)) {
WEBRTC_CHECK_CHANNEL(channel);
channels_[channel]->red = enable;
channels_[channel]->red_type = redPayloadtype;
return 0;
}
WEBRTC_FUNC(GetREDStatus, (int channel, bool& enable, int& redPayloadtype)) {
- return GetFECStatus(channel, enable, redPayloadtype);
- }
- // TODO(minyue): remove the below function when transition to GetREDStatus
- // is finished.
- WEBRTC_FUNC(GetFECStatus, (int channel, bool& enable, int& redPayloadtype)) {
WEBRTC_CHECK_CHANNEL(channel);
enable = channels_[channel]->red;
redPayloadtype = channels_[channel]->red_type;
@@ -937,15 +781,6 @@ class FakeWebRtcVoiceEngine
void EnableStereoChannelSwapping(bool enable) {
stereo_swapping_enabled_ = enable;
}
- bool WasSendTelephoneEventCalled(int channel, int event_code, int length_ms) {
- return (channels_[channel]->dtmf_info.dtmf_event_code == event_code &&
- channels_[channel]->dtmf_info.dtmf_out_of_band == true &&
- channels_[channel]->dtmf_info.dtmf_length_ms == length_ms);
- }
- bool WasPlayDtmfToneCalled(int event_code, int length_ms) {
- return (dtmf_info_.dtmf_event_code == event_code &&
- dtmf_info_.dtmf_length_ms == length_ms);
- }
int GetNetEqCapacity() const {
auto ch = channels_.find(last_channel_);
ASSERT(ch != channels_.end());
@@ -958,47 +793,10 @@ class FakeWebRtcVoiceEngine
}
private:
- int GetNumDevices(int& num) {
-#ifdef WIN32
- num = 1;
-#else
- // On non-Windows platforms VE adds a special entry for the default device,
- // so if there is one physical device then there are two entries in the
- // list.
- num = 2;
-#endif
- return 0;
- }
-
- int GetDeviceName(int i, char* name, char* guid) {
- const char *s;
-#ifdef WIN32
- if (0 == i) {
- s = kFakeDeviceName;
- } else {
- return -1;
- }
-#else
- // See comment above.
- if (0 == i) {
- s = kFakeDefaultDeviceName;
- } else if (1 == i) {
- s = kFakeDeviceName;
- } else {
- return -1;
- }
-#endif
- strcpy(name, s);
- guid[0] = '\0';
- return 0;
- }
-
bool inited_;
int last_channel_;
std::map<int, Channel*> channels_;
bool fail_create_channel_;
- const cricket::AudioCodec* const* codecs_;
- int num_codecs_;
int num_set_send_codecs_; // how many times we call SetSendCodec().
bool ec_enabled_;
bool ec_metrics_enabled_;
@@ -1018,12 +816,9 @@ class FakeWebRtcVoiceEngine
int send_fail_channel_;
int recording_sample_rate_;
int playout_sample_rate_;
- DtmfInfo dtmf_info_;
FakeAudioProcessing audio_processing_;
};
-#undef WEBRTC_CHECK_HEADER_EXTENSION_ID
-
} // namespace cricket
#endif // TALK_SESSION_PHONE_FAKEWEBRTCVOICEENGINE_H_
diff --git a/talk/media/webrtc/simulcast.cc b/talk/media/webrtc/simulcast.cc
index f55d9606a5..b67a363a76 100755
--- a/talk/media/webrtc/simulcast.cc
+++ b/talk/media/webrtc/simulcast.cc
@@ -29,9 +29,11 @@
#include "talk/media/base/streamparams.h"
#include "talk/media/webrtc/simulcast.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/field_trial.h"
+
namespace cricket {
struct SimulcastFormat {
@@ -93,7 +95,7 @@ void MaybeExchangeWidthHeight(int* width, int* height) {
int FindSimulcastFormatIndex(int width, int height) {
MaybeExchangeWidthHeight(&width, &height);
- for (int i = 0; i < ARRAY_SIZE(kSimulcastFormats); ++i) {
+ for (int i = 0; i < arraysize(kSimulcastFormats); ++i) {
if (width >= kSimulcastFormats[i].width &&
height >= kSimulcastFormats[i].height) {
return i;
@@ -105,7 +107,7 @@ int FindSimulcastFormatIndex(int width, int height) {
int FindSimulcastFormatIndex(int width, int height, size_t max_layers) {
MaybeExchangeWidthHeight(&width, &height);
- for (int i = 0; i < ARRAY_SIZE(kSimulcastFormats); ++i) {
+ for (int i = 0; i < arraysize(kSimulcastFormats); ++i) {
if (width >= kSimulcastFormats[i].width &&
height >= kSimulcastFormats[i].height &&
max_layers == kSimulcastFormats[i].max_layers) {
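Editor's note: throughout this patch, the ARRAY_SIZE macro is replaced by arraysize from the newly included webrtc/base/arraysize.h. A minimal sketch of the usual definition of this idiom (as in Chromium's base; assumed for reference, not copied from this change) — unlike a plain sizeof-division macro, it refuses to compile when handed a pointer, because only a real array can bind to the reference parameter:

    #include <cstddef>

    // Declared but never defined; its only purpose is to carry the element
    // count N in the return type, char (&)[N].
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];

    // sizeof(char[N]) == N, so this expands to a compile-time element count.
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    // Usage sketch:
    //   static const int kFormats[] = {4, 8, 15};
    //   static_assert(arraysize(kFormats) == 3, "counted at compile time");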
diff --git a/talk/media/webrtc/webrtcmediaengine.cc b/talk/media/webrtc/webrtcmediaengine.cc
index af202bd613..31e5025a55 100644
--- a/talk/media/webrtc/webrtcmediaengine.cc
+++ b/talk/media/webrtc/webrtcmediaengine.cc
@@ -26,6 +26,9 @@
*/
#include "talk/media/webrtc/webrtcmediaengine.h"
+
+#include <algorithm>
+
#include "talk/media/webrtc/webrtcvideoengine2.h"
#include "talk/media/webrtc/webrtcvoiceengine.h"
@@ -68,44 +71,85 @@ MediaEngineInterface* WebRtcMediaEngineFactory::Create(
return CreateWebRtcMediaEngine(adm, encoder_factory, decoder_factory);
}
-const char* kBweExtensionPriorities[] = {
- kRtpTransportSequenceNumberHeaderExtension,
- kRtpAbsoluteSenderTimeHeaderExtension, kRtpTimestampOffsetHeaderExtension};
-
-const size_t kBweExtensionPrioritiesLength =
- ARRAY_SIZE(kBweExtensionPriorities);
+namespace {
+// Of the mutually exclusive extensions that are present, keep only the one
+// with the highest priority.
+void DiscardRedundantExtensions(
+ std::vector<webrtc::RtpExtension>* extensions,
+ rtc::ArrayView<const char*> extensions_decreasing_prio) {
+ RTC_DCHECK(extensions);
+ bool found = false;
+ for (const char* name : extensions_decreasing_prio) {
+ auto it = std::find_if(extensions->begin(), extensions->end(),
+ [name](const webrtc::RtpExtension& rhs) {
+ return rhs.name == name;
+ });
+ if (it != extensions->end()) {
+ if (found) {
+ extensions->erase(it);
+ }
+ found = true;
+ }
+ }
+}
+} // namespace
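Editor's note: to make the pruning rule concrete, a hypothetical walk-through (URIs shortened for readability; the C array converts to rtc::ArrayView exactly as in the call site later in this file). The first priority-list name found in the vector is kept; every later match is erased:

    std::vector<webrtc::RtpExtension> exts = {
        {"abs-send-time", 3}, {"toffset", 9}, {"transport-seq", 5}};
    const char* kPrios[] = {"transport-seq", "abs-send-time", "toffset"};
    DiscardRedundantExtensions(&exts, kPrios);
    // exts now holds only {"transport-seq", 5}: it is found first (highest
    // priority), so the later matches for "abs-send-time" and "toffset" are
    // erased. Extensions not named in kPrios would be left untouched.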
-int GetPriority(const RtpHeaderExtension& extension,
- const char* extension_prios[],
- size_t extension_prios_length) {
- for (size_t i = 0; i < extension_prios_length; ++i) {
- if (extension.uri == extension_prios[i])
- return static_cast<int>(i);
+bool ValidateRtpExtensions(const std::vector<RtpHeaderExtension>& extensions) {
+ bool id_used[14] = {false};
+ for (const auto& extension : extensions) {
+ if (extension.id <= 0 || extension.id >= 15) {
+ LOG(LS_ERROR) << "Bad RTP extension ID: " << extension.ToString();
+ return false;
+ }
+ if (id_used[extension.id - 1]) {
+ LOG(LS_ERROR) << "Duplicate RTP extension ID: " << extension.ToString();
+ return false;
+ }
+ id_used[extension.id - 1] = true;
}
- return -1;
+ return true;
}
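Editor's note: a short usage sketch of the rule this enforces (one-byte RTP header extensions per RFC 5285: IDs 1-14, each used at most once; URIs hypothetical):

    std::vector<cricket::RtpHeaderExtension> exts;
    exts.push_back(cricket::RtpHeaderExtension("urn:example:a", 1));   // lowest valid ID
    exts.push_back(cricket::RtpHeaderExtension("urn:example:b", 14));  // highest valid ID
    bool ok = cricket::ValidateRtpExtensions(exts);   // true
    exts.push_back(cricket::RtpHeaderExtension("urn:example:c", 14));  // reuses ID 14
    bool dup = cricket::ValidateRtpExtensions(exts);  // false, logs the duplicate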
-std::vector<RtpHeaderExtension> FilterRedundantRtpExtensions(
+std::vector<webrtc::RtpExtension> FilterRtpExtensions(
const std::vector<RtpHeaderExtension>& extensions,
- const char* extension_prios[],
- size_t extension_prios_length) {
- if (extensions.empty())
- return std::vector<RtpHeaderExtension>();
- std::vector<RtpHeaderExtension> filtered;
- std::map<int, const RtpHeaderExtension*> sorted;
- for (auto& extension : extensions) {
- int priority =
- GetPriority(extension, extension_prios, extension_prios_length);
- if (priority == -1) {
- filtered.push_back(extension);
- continue;
+ bool (*supported)(const std::string&),
+ bool filter_redundant_extensions) {
+ RTC_DCHECK(ValidateRtpExtensions(extensions));
+ RTC_DCHECK(supported);
+ std::vector<webrtc::RtpExtension> result;
+
+ // Ignore any extensions that we don't recognize.
+ for (const auto& extension : extensions) {
+ if (supported(extension.uri)) {
+ result.push_back({extension.uri, extension.id});
} else {
- sorted[priority] = &extension;
+ LOG(LS_WARNING) << "Unsupported RTP extension: " << extension.ToString();
}
}
- if (!sorted.empty())
- filtered.push_back(*sorted.begin()->second);
- return filtered;
-}
+ // Sort by name, ascending, so that the same set of extensions specified in
+ // a different order compares equal later (avoiding a needless stream
+ // reconfiguration) and so that std::unique can be used below.
+ std::sort(result.begin(), result.end(),
+ [](const webrtc::RtpExtension& lhs, const webrtc::RtpExtension& rhs) {
+ return lhs.name < rhs.name;
+ });
+
+ // Remove unnecessary extensions (used on send side).
+ if (filter_redundant_extensions) {
+ auto it = std::unique(result.begin(), result.end(),
+ [](const webrtc::RtpExtension& lhs, const webrtc::RtpExtension& rhs) {
+ return lhs.name == rhs.name;
+ });
+ result.erase(it, result.end());
+
+ // Keep just the highest priority extension of any in the following list.
+ static const char* kBweExtensionPriorities[] = {
+ kRtpTransportSequenceNumberHeaderExtension,
+ kRtpAbsoluteSenderTimeHeaderExtension,
+ kRtpTimestampOffsetHeaderExtension
+ };
+ DiscardRedundantExtensions(&result, kBweExtensionPriorities);
+ }
+
+ return result;
+}
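Editor's note: putting the pieces together, a hypothetical end-to-end sketch of the send-side path (the 'supported' predicate accepts everything here; the new unit tests below exercise the same behavior):

    bool AcceptAll(const std::string&) { return true; }

    std::vector<cricket::RtpHeaderExtension> exts;
    exts.push_back(cricket::RtpHeaderExtension("urn:example:b", 2));
    exts.push_back(cricket::RtpHeaderExtension("urn:example:a", 7));
    exts.push_back(cricket::RtpHeaderExtension("urn:example:a", 3));
    std::vector<webrtc::RtpExtension> out =
        cricket::FilterRtpExtensions(exts, &AcceptAll, true);
    // out: one "urn:example:a" entry, then "urn:example:b" (sorted by name,
    // duplicates collapsed). Which ID survives an equal-name run is whatever
    // std::unique sees first after the (unstable) sort.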
} // namespace cricket
diff --git a/talk/media/webrtc/webrtcmediaengine.h b/talk/media/webrtc/webrtcmediaengine.h
index 8d7540404d..831d0725e8 100644
--- a/talk/media/webrtc/webrtcmediaengine.h
+++ b/talk/media/webrtc/webrtcmediaengine.h
@@ -28,7 +28,11 @@
#ifndef TALK_MEDIA_WEBRTCMEDIAENGINE_H_
#define TALK_MEDIA_WEBRTCMEDIAENGINE_H_
+#include <string>
+#include <vector>
+
#include "talk/media/base/mediaengine.h"
+#include "webrtc/config.h"
namespace webrtc {
class AudioDeviceModule;
@@ -48,13 +52,18 @@ class WebRtcMediaEngineFactory {
WebRtcVideoDecoderFactory* decoder_factory);
};
-extern const char* kBweExtensionPriorities[];
-extern const size_t kBweExtensionPrioritiesLength;
+// Verify that extension IDs are within 1-byte extension range and are not
+// overlapping.
+bool ValidateRtpExtensions(const std::vector<RtpHeaderExtension>& extensions);
-std::vector<RtpHeaderExtension> FilterRedundantRtpExtensions(
+// Convert a list of cricket::RtpHeaderExtensions to webrtc::RtpExtensions,
+// discarding any extension not accepted by the 'supported' predicate.
+// Duplicate extensions are removed if 'filter_redundant_extensions' is set,
+// as are mutually exclusive extensions (see implementation for details).
+std::vector<webrtc::RtpExtension> FilterRtpExtensions(
const std::vector<RtpHeaderExtension>& extensions,
- const char* extension_prios[],
- size_t extension_prios_length);
+ bool (*supported)(const std::string&),
+ bool filter_redundant_extensions);
} // namespace cricket
diff --git a/talk/media/webrtc/webrtcmediaengine_unittest.cc b/talk/media/webrtc/webrtcmediaengine_unittest.cc
new file mode 100644
index 0000000000..7c80e77301
--- /dev/null
+++ b/talk/media/webrtc/webrtcmediaengine_unittest.cc
@@ -0,0 +1,205 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "talk/media/webrtc/webrtcmediaengine.h"
+
+namespace cricket {
+namespace {
+
+std::vector<RtpHeaderExtension> MakeUniqueExtensions() {
+ std::vector<RtpHeaderExtension> result;
+ char name[] = "a";
+ for (int i = 0; i < 7; ++i) {
+ result.push_back(RtpHeaderExtension(name, 1 + i));
+ name[0]++;
+ result.push_back(RtpHeaderExtension(name, 14 - i));
+ name[0]++;
+ }
+ return result;
+}
+
+std::vector<RtpHeaderExtension> MakeRedundantExtensions() {
+ std::vector<RtpHeaderExtension> result;
+ char name[] = "a";
+ for (int i = 0; i < 7; ++i) {
+ result.push_back(RtpHeaderExtension(name, 1 + i));
+ result.push_back(RtpHeaderExtension(name, 14 - i));
+ name[0]++;
+ }
+ return result;
+}
+
+bool SupportedExtensions1(const std::string& name) {
+ return name == "c" || name == "i";
+}
+
+bool SupportedExtensions2(const std::string& name) {
+ return name != "a" && name != "n";
+}
+
+bool IsSorted(const std::vector<webrtc::RtpExtension>& extensions) {
+ const std::string* last = nullptr;
+ for (const auto& extension : extensions) {
+ if (last && *last > extension.name) {
+ return false;
+ }
+ last = &extension.name;
+ }
+ return true;
+}
+} // namespace
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_EmptyList) {
+ std::vector<RtpHeaderExtension> extensions;
+ EXPECT_TRUE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_AllGood) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ EXPECT_TRUE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_Low) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpHeaderExtension("foo", 0));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OutOfRangeId_High) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpHeaderExtension("foo", 15));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_StartOfSet) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpHeaderExtension("foo", 1));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, ValidateRtpExtensions_OverlappingIds_EndOfSet) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ extensions.push_back(RtpHeaderExtension("foo", 14));
+ EXPECT_FALSE(ValidateRtpExtensions(extensions));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_EmptyList) {
+ std::vector<RtpHeaderExtension> extensions;
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions1, true);
+ EXPECT_EQ(0, filtered.size());
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_IncludeOnlySupported) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions1, false);
+ EXPECT_EQ(2, filtered.size());
+ EXPECT_EQ("c", filtered[0].name);
+ EXPECT_EQ("i", filtered[1].name);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_1) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, false);
+ EXPECT_EQ(12, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_SortedByName_2) {
+ std::vector<RtpHeaderExtension> extensions = MakeUniqueExtensions();
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true);
+ EXPECT_EQ(12, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_DontRemoveRedundant) {
+ std::vector<RtpHeaderExtension> extensions = MakeRedundantExtensions();
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, false);
+ EXPECT_EQ(12, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_EQ(filtered[0].name, filtered[1].name);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundant) {
+ std::vector<RtpHeaderExtension> extensions = MakeRedundantExtensions();
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true);
+ EXPECT_EQ(6, filtered.size());
+ EXPECT_TRUE(IsSorted(filtered));
+ EXPECT_NE(filtered[0].name, filtered[1].name);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_1) {
+ std::vector<RtpHeaderExtension> extensions;
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTransportSequenceNumberHeaderExtension, 3));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 9));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension, 6));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTransportSequenceNumberHeaderExtension, 1));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 14));
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true);
+ EXPECT_EQ(1, filtered.size());
+ EXPECT_EQ(kRtpTransportSequenceNumberHeaderExtension, filtered[0].name);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_2) {
+ std::vector<RtpHeaderExtension> extensions;
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 1));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension, 14));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 7));
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true);
+ EXPECT_EQ(1, filtered.size());
+ EXPECT_EQ(kRtpAbsoluteSenderTimeHeaderExtension, filtered[0].name);
+}
+
+TEST(WebRtcMediaEngineTest, FilterRtpExtensions_RemoveRedundantBwe_3) {
+ std::vector<RtpHeaderExtension> extensions;
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 2));
+ extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension, 14));
+ std::vector<webrtc::RtpExtension> filtered =
+ FilterRtpExtensions(extensions, SupportedExtensions2, true);
+ EXPECT_EQ(1, filtered.size());
+ EXPECT_EQ(kRtpTimestampOffsetHeaderExtension, filtered[0].name);
+}
+} // namespace cricket
diff --git a/talk/media/webrtc/webrtcvideocapturer.cc b/talk/media/webrtc/webrtcvideocapturer.cc
index 7d72128d61..ee4db5b1d2 100644
--- a/talk/media/webrtc/webrtcvideocapturer.cc
+++ b/talk/media/webrtc/webrtcvideocapturer.cc
@@ -34,6 +34,7 @@
#ifdef HAVE_WEBRTC_VIDEO
#include "talk/media/webrtc/webrtcvideoframe.h"
#include "talk/media/webrtc/webrtcvideoframefactory.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
@@ -43,7 +44,7 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/base/win32.h" // Need this to #include the impl files.
-#include "webrtc/modules/video_capture/include/video_capture_factory.h"
+#include "webrtc/modules/video_capture/video_capture_factory.h"
#include "webrtc/system_wrappers/include/field_trial.h"
namespace cricket {
@@ -83,7 +84,7 @@ class WebRtcVcmFactory : public WebRtcVcmFactoryInterface {
static bool CapabilityToFormat(const webrtc::VideoCaptureCapability& cap,
VideoFormat* format) {
uint32_t fourcc = 0;
- for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ for (size_t i = 0; i < arraysize(kSupportedFourCCs); ++i) {
if (kSupportedFourCCs[i].webrtc_type == cap.rawType) {
fourcc = kSupportedFourCCs[i].fourcc;
break;
@@ -103,7 +104,7 @@ static bool CapabilityToFormat(const webrtc::VideoCaptureCapability& cap,
static bool FormatToCapability(const VideoFormat& format,
webrtc::VideoCaptureCapability* cap) {
webrtc::RawVideoType webrtc_type = webrtc::kVideoUnknown;
- for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ for (size_t i = 0; i < arraysize(kSupportedFourCCs); ++i) {
if (kSupportedFourCCs[i].fourcc == format.fourcc) {
webrtc_type = kSupportedFourCCs[i].webrtc_type;
break;
@@ -171,8 +172,8 @@ bool WebRtcVideoCapturer::Init(const Device& device) {
bool found = false;
for (int index = 0; index < num_cams; ++index) {
char vcm_name[256];
- if (info->GetDeviceName(index, vcm_name, ARRAY_SIZE(vcm_name),
- vcm_id, ARRAY_SIZE(vcm_id)) != -1) {
+ if (info->GetDeviceName(index, vcm_name, arraysize(vcm_name), vcm_id,
+ arraysize(vcm_id)) != -1) {
if (device.name == reinterpret_cast<char*>(vcm_name)) {
found = true;
break;
@@ -349,6 +350,7 @@ void WebRtcVideoCapturer::Stop() {
SetCaptureFormat(NULL);
start_thread_ = nullptr;
+ SetCaptureState(CS_STOPPED);
}
bool WebRtcVideoCapturer::IsRunning() {
@@ -361,7 +363,7 @@ bool WebRtcVideoCapturer::GetPreferredFourccs(std::vector<uint32_t>* fourccs) {
}
fourccs->clear();
- for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ for (size_t i = 0; i < arraysize(kSupportedFourCCs); ++i) {
fourccs->push_back(kSupportedFourCCs[i].fourcc);
}
return true;
diff --git a/talk/media/webrtc/webrtcvideocapturer.h b/talk/media/webrtc/webrtcvideocapturer.h
index 0a99884fe1..591e46f629 100644
--- a/talk/media/webrtc/webrtcvideocapturer.h
+++ b/talk/media/webrtc/webrtcvideocapturer.h
@@ -39,7 +39,7 @@
#include "webrtc/base/messagehandler.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture.h"
namespace cricket {
diff --git a/talk/media/webrtc/webrtcvideocapturer_unittest.cc b/talk/media/webrtc/webrtcvideocapturer_unittest.cc
index d560fc554e..85db32e7d2 100644
--- a/talk/media/webrtc/webrtcvideocapturer_unittest.cc
+++ b/talk/media/webrtc/webrtcvideocapturer_unittest.cc
@@ -111,6 +111,7 @@ TEST_F(WebRtcVideoCapturerTest, TestCapture) {
capturer_->Stop();
EXPECT_FALSE(capturer_->IsRunning());
EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL);
+ EXPECT_EQ_WAIT(cricket::CS_STOPPED, listener_.last_capture_state(), 1000);
}
TEST_F(WebRtcVideoCapturerTest, TestCaptureVcm) {
diff --git a/talk/media/webrtc/webrtcvideoengine2.cc b/talk/media/webrtc/webrtcvideoengine2.cc
index bcd513ee2d..55c07426d0 100644
--- a/talk/media/webrtc/webrtcvideoengine2.cc
+++ b/talk/media/webrtc/webrtcvideoengine2.cc
@@ -152,9 +152,7 @@ bool CodecIsInternallySupported(const std::string& codec_name) {
return true;
}
if (CodecNamesEq(codec_name, kVp9CodecName)) {
- const std::string group_name =
- webrtc::field_trial::FindFullName("WebRTC-SupportVP9");
- return group_name == "Enabled" || group_name == "EnabledByFlag";
+ return true;
}
if (CodecNamesEq(codec_name, kH264CodecName)) {
return webrtc::H264Encoder::IsSupported() &&
@@ -168,6 +166,8 @@ void AddDefaultFeedbackParams(VideoCodec* codec) {
codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamNack, kRtcpFbNackParamPli));
codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
+ codec->AddFeedbackParam(
+ FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
}
static VideoCodec MakeVideoCodecWithDefaultFeedbackParams(int payload_type,
@@ -243,20 +243,6 @@ static bool ValidateStreamParams(const StreamParams& sp) {
return true;
}
-static std::string RtpExtensionsToString(
- const std::vector<RtpHeaderExtension>& extensions) {
- std::stringstream out;
- out << '{';
- for (size_t i = 0; i < extensions.size(); ++i) {
- out << "{" << extensions[i].uri << ": " << extensions[i].id << "}";
- if (i != extensions.size() - 1) {
- out << ", ";
- }
- }
- out << '}';
- return out.str();
-}
-
inline const webrtc::RtpExtension* FindHeaderExtension(
const std::vector<webrtc::RtpExtension>& extensions,
const std::string& name) {
@@ -303,7 +289,8 @@ static void MergeFecConfig(const webrtc::FecConfig& other,
// Returns true if the given codec is disallowed from doing simulcast.
bool IsCodecBlacklistedForSimulcast(const std::string& codec_name) {
- return CodecNamesEq(codec_name, kH264CodecName);
+ return CodecNamesEq(codec_name, kH264CodecName) ||
+ CodecNamesEq(codec_name, kVp9CodecName);
}
// The selected thresholds for QVGA and VGA corresponded to a QP around 10.
@@ -339,13 +326,13 @@ static const int kDefaultRtcpReceiverReportSsrc = 1;
std::vector<VideoCodec> DefaultVideoCodecList() {
std::vector<VideoCodec> codecs;
+ codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultVp8PlType,
+ kVp8CodecName));
if (CodecIsInternallySupported(kVp9CodecName)) {
codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultVp9PlType,
kVp9CodecName));
// TODO(andresp): Add rtx codec for vp9 and verify it works.
}
- codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultVp8PlType,
- kVp8CodecName));
if (CodecIsInternallySupported(kH264CodecName)) {
codecs.push_back(MakeVideoCodecWithDefaultFeedbackParams(kDefaultH264PlType,
kH264CodecName));
@@ -357,72 +344,6 @@ std::vector<VideoCodec> DefaultVideoCodecList() {
return codecs;
}
-static bool FindFirstMatchingCodec(const std::vector<VideoCodec>& codecs,
- const VideoCodec& requested_codec,
- VideoCodec* matching_codec) {
- for (size_t i = 0; i < codecs.size(); ++i) {
- if (requested_codec.Matches(codecs[i])) {
- *matching_codec = codecs[i];
- return true;
- }
- }
- return false;
-}
-
-static bool ValidateRtpHeaderExtensionIds(
- const std::vector<RtpHeaderExtension>& extensions) {
- std::set<int> extensions_used;
- for (size_t i = 0; i < extensions.size(); ++i) {
- if (extensions[i].id <= 0 || extensions[i].id >= 15 ||
- !extensions_used.insert(extensions[i].id).second) {
- LOG(LS_ERROR) << "RTP extensions are with incorrect or duplicate ids.";
- return false;
- }
- }
- return true;
-}
-
-static bool CompareRtpHeaderExtensionIds(
- const webrtc::RtpExtension& extension1,
- const webrtc::RtpExtension& extension2) {
- // Sorting on ID is sufficient, more than one extension per ID is unsupported.
- return extension1.id > extension2.id;
-}
-
-static std::vector<webrtc::RtpExtension> FilterRtpExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- std::vector<webrtc::RtpExtension> webrtc_extensions;
- for (size_t i = 0; i < extensions.size(); ++i) {
- // Unsupported extensions will be ignored.
- if (webrtc::RtpExtension::IsSupportedForVideo(extensions[i].uri)) {
- webrtc_extensions.push_back(webrtc::RtpExtension(
- extensions[i].uri, extensions[i].id));
- } else {
- LOG(LS_WARNING) << "Unsupported RTP extension: " << extensions[i].uri;
- }
- }
-
- // Sort filtered headers to make sure that they can later be compared
- // regardless of in which order they were entered.
- std::sort(webrtc_extensions.begin(), webrtc_extensions.end(),
- CompareRtpHeaderExtensionIds);
- return webrtc_extensions;
-}
-
-static bool RtpExtensionsHaveChanged(
- const std::vector<webrtc::RtpExtension>& before,
- const std::vector<webrtc::RtpExtension>& after) {
- if (before.size() != after.size())
- return true;
- for (size_t i = 0; i < before.size(); ++i) {
- if (before[i].id != after[i].id)
- return true;
- if (before[i].name != after[i].name)
- return true;
- }
- return false;
-}
-
std::vector<webrtc::VideoStream>
WebRtcVideoChannel2::WebRtcVideoSendStream::CreateSimulcastVideoStreams(
const VideoCodec& codec,
@@ -489,7 +410,8 @@ void* WebRtcVideoChannel2::WebRtcVideoSendStream::ConfigureVideoEncoderSettings(
denoising = false;
} else {
// Use codec default if video_noise_reduction is unset.
- codec_default_denoising = !options.video_noise_reduction.Get(&denoising);
+ codec_default_denoising = !options.video_noise_reduction;
+ denoising = options.video_noise_reduction.value_or(false);
}
if (CodecNamesEq(codec.name, kVp8CodecName)) {
@@ -554,20 +476,6 @@ WebRtcVideoEngine2::WebRtcVideoEngine2()
external_encoder_factory_(NULL) {
LOG(LS_INFO) << "WebRtcVideoEngine2::WebRtcVideoEngine2()";
video_codecs_ = GetSupportedCodecs();
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension,
- kRtpTimestampOffsetHeaderExtensionDefaultId));
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
- kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpVideoRotationHeaderExtension,
- kRtpVideoRotationHeaderExtensionDefaultId));
- if (webrtc::field_trial::FindFullName("WebRTC-SendSideBwe") == "Enabled") {
- rtp_header_extensions_.push_back(RtpHeaderExtension(
- kRtpTransportSequenceNumberHeaderExtension,
- kRtpTransportSequenceNumberHeaderExtensionDefaultId));
- }
}
WebRtcVideoEngine2::~WebRtcVideoEngine2() {
@@ -579,29 +487,6 @@ void WebRtcVideoEngine2::Init() {
initialized_ = true;
}
-bool WebRtcVideoEngine2::SetDefaultEncoderConfig(
- const VideoEncoderConfig& config) {
- const VideoCodec& codec = config.max_codec;
- bool supports_codec = false;
- for (size_t i = 0; i < video_codecs_.size(); ++i) {
- if (CodecNamesEq(video_codecs_[i].name, codec.name)) {
- video_codecs_[i].width = codec.width;
- video_codecs_[i].height = codec.height;
- video_codecs_[i].framerate = codec.framerate;
- supports_codec = true;
- break;
- }
- }
-
- if (!supports_codec) {
- LOG(LS_ERROR) << "SetDefaultEncoderConfig, codec not supported: "
- << codec.ToString();
- return false;
- }
-
- return true;
-}
-
WebRtcVideoChannel2* WebRtcVideoEngine2::CreateChannel(
webrtc::Call* call,
const VideoOptions& options) {
@@ -615,19 +500,23 @@ const std::vector<VideoCodec>& WebRtcVideoEngine2::codecs() const {
return video_codecs_;
}
-const std::vector<RtpHeaderExtension>&
-WebRtcVideoEngine2::rtp_header_extensions() const {
- return rtp_header_extensions_;
-}
-
-void WebRtcVideoEngine2::SetLogging(int min_sev, const char* filter) {
- // TODO(pbos): Set up logging.
- LOG(LS_VERBOSE) << "SetLogging: " << min_sev << '"' << filter << '"';
- // if min_sev == -1, we keep the current log level.
- if (min_sev < 0) {
- RTC_DCHECK(min_sev == -1);
- return;
+RtpCapabilities WebRtcVideoEngine2::GetCapabilities() const {
+ RtpCapabilities capabilities;
+ capabilities.header_extensions.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension,
+ kRtpTimestampOffsetHeaderExtensionDefaultId));
+ capabilities.header_extensions.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
+ kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
+ capabilities.header_extensions.push_back(
+ RtpHeaderExtension(kRtpVideoRotationHeaderExtension,
+ kRtpVideoRotationHeaderExtensionDefaultId));
+ if (webrtc::field_trial::FindFullName("WebRTC-SendSideBwe") == "Enabled") {
+ capabilities.header_extensions.push_back(RtpHeaderExtension(
+ kRtpTransportSequenceNumberHeaderExtension,
+ kRtpTransportSequenceNumberHeaderExtensionDefaultId));
}
+ return capabilities;
}
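Editor's note: the default header extensions that used to be pushed into rtp_header_extensions_ in the constructor are now computed on demand. A hypothetical caller sketch:

    cricket::WebRtcVideoEngine2 engine;
    cricket::RtpCapabilities caps = engine.GetCapabilities();
    // caps.header_extensions lists toffset, abs-send-time and video-rotation
    // with their default IDs; transport-wide sequence numbers appear only
    // when the WebRTC-SendSideBwe field trial reports "Enabled".
    for (const cricket::RtpHeaderExtension& ext : caps.header_extensions) {
      LOG(LS_INFO) << ext.uri << " -> " << ext.id;
    }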
void WebRtcVideoEngine2::SetExternalDecoderFactory(
@@ -677,48 +566,6 @@ bool WebRtcVideoEngine2::FindCodec(const VideoCodec& in) {
return false;
}
-// Tells whether the |requested| codec can be transmitted or not. If it can be
-// transmitted |out| is set with the best settings supported. Aspect ratio will
-// be set as close to |current|'s as possible. If not set |requested|'s
-// dimensions will be used for aspect ratio matching.
-bool WebRtcVideoEngine2::CanSendCodec(const VideoCodec& requested,
- const VideoCodec& current,
- VideoCodec* out) {
- RTC_DCHECK(out != NULL);
-
- if (requested.width != requested.height &&
- (requested.height == 0 || requested.width == 0)) {
- // 0xn and nx0 are invalid resolutions.
- return false;
- }
-
- VideoCodec matching_codec;
- if (!FindFirstMatchingCodec(video_codecs_, requested, &matching_codec)) {
- // Codec not supported.
- return false;
- }
-
- out->id = requested.id;
- out->name = requested.name;
- out->preference = requested.preference;
- out->params = requested.params;
- out->framerate = std::min(requested.framerate, matching_codec.framerate);
- out->params = requested.params;
- out->feedback_params = requested.feedback_params;
- out->width = requested.width;
- out->height = requested.height;
- if (requested.width == 0 && requested.height == 0) {
- return true;
- }
-
- while (out->width > matching_codec.width) {
- out->width /= 2;
- out->height /= 2;
- }
-
- return out->width > 0 && out->height > 0;
-}
-
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
bool WebRtcVideoEngine2::ShouldIgnoreTrace(const std::string& trace) {
@@ -777,7 +624,8 @@ WebRtcVideoChannel2::WebRtcVideoChannel2(
RTC_DCHECK(thread_checker_.CalledOnValidThread());
SetDefaultOptions();
options_.SetAll(options);
- options_.cpu_overuse_detection.Get(&signal_cpu_adaptation_);
+ if (options_.cpu_overuse_detection)
+ signal_cpu_adaptation_ = *options_.cpu_overuse_detection;
rtcp_receiver_report_ssrc_ = kDefaultRtcpReceiverReportSsrc;
sending_ = false;
default_send_ssrc_ = 0;
@@ -785,10 +633,10 @@ WebRtcVideoChannel2::WebRtcVideoChannel2(
}
void WebRtcVideoChannel2::SetDefaultOptions() {
- options_.cpu_overuse_detection.Set(true);
- options_.dscp.Set(false);
- options_.suspend_below_min_bitrate.Set(false);
- options_.screencast_min_bitrate.Set(0);
+ options_.cpu_overuse_detection = rtc::Optional<bool>(true);
+ options_.dscp = rtc::Optional<bool>(false);
+ options_.suspend_below_min_bitrate = rtc::Optional<bool>(false);
+ options_.screencast_min_bitrate = rtc::Optional<int>(0);
}
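Editor's note: SetDefaultOptions illustrates the Settable&lt;T&gt; to rtc::Optional&lt;T&gt; migration applied throughout this change: out-parameter Get() calls become a boolean test plus dereference, and GetWithDefaultIfUnset() becomes value_or(). A minimal sketch of the pattern (assuming rtc::Optional from webrtc/base/optional.h):

    void OptionSketch(const rtc::Optional<bool>& flag) {
      // Old style: bool v; if (flag.Get(&v)) { ...use v... }
      if (flag) {
        bool v = *flag;  // engaged: dereference for the value
        (void)v;
      }
      // Old style: flag.GetWithDefaultIfUnset(false)
      bool with_default = flag.value_or(false);
      (void)with_default;
    }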
WebRtcVideoChannel2::~WebRtcVideoChannel2() {
@@ -863,19 +711,43 @@ bool WebRtcVideoChannel2::ReceiveCodecsHaveChanged(
}
bool WebRtcVideoChannel2::SetSendParameters(const VideoSendParameters& params) {
+ TRACE_EVENT0("webrtc", "WebRtcVideoChannel2::SetSendParameters");
+ LOG(LS_INFO) << "SetSendParameters: " << params.ToString();
// TODO(pbos): Refactor this to only recreate the send streams once
// instead of 4 times.
- return (SetSendCodecs(params.codecs) &&
- SetSendRtpHeaderExtensions(params.extensions) &&
- SetMaxSendBandwidth(params.max_bandwidth_bps) &&
- SetOptions(params.options));
+ if (!SetSendCodecs(params.codecs) ||
+ !SetSendRtpHeaderExtensions(params.extensions) ||
+ !SetMaxSendBandwidth(params.max_bandwidth_bps) ||
+ !SetOptions(params.options)) {
+ return false;
+ }
+ if (send_params_.rtcp.reduced_size != params.rtcp.reduced_size) {
+ rtc::CritScope stream_lock(&stream_crit_);
+ for (auto& kv : send_streams_) {
+ kv.second->SetSendParameters(params);
+ }
+ }
+ send_params_ = params;
+ return true;
}
bool WebRtcVideoChannel2::SetRecvParameters(const VideoRecvParameters& params) {
+ TRACE_EVENT0("webrtc", "WebRtcVideoChannel2::SetRecvParameters");
+ LOG(LS_INFO) << "SetRecvParameters: " << params.ToString();
// TODO(pbos): Refactor this to only recreate the recv streams once
// instead of twice.
- return (SetRecvCodecs(params.codecs) &&
- SetRecvRtpHeaderExtensions(params.extensions));
+ if (!SetRecvCodecs(params.codecs) ||
+ !SetRecvRtpHeaderExtensions(params.extensions)) {
+ return false;
+ }
+ if (recv_params_.rtcp.reduced_size != params.rtcp.reduced_size) {
+ rtc::CritScope stream_lock(&stream_crit_);
+ for (auto& kv : receive_streams_) {
+ kv.second->SetRecvParameters(params);
+ }
+ }
+ recv_params_ = params;
+ return true;
}
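Editor's note: both SetSendParameters and SetRecvParameters translate the reduced-size RTCP flag the same way, and they only touch existing streams when the flag actually changes. The ternaries used later in this patch are equivalent to this hypothetical helper:

    webrtc::RtcpMode RtcpModeFromReducedSize(bool reduced_size) {
      return reduced_size ? webrtc::RtcpMode::kReducedSize
                          : webrtc::RtcpMode::kCompound;
    }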
std::string WebRtcVideoChannel2::CodecSettingsVectorToString(
@@ -952,15 +824,15 @@ bool WebRtcVideoChannel2::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
LOG(LS_INFO) << "Using codec: " << supported_codecs.front().codec.ToString();
- VideoCodecSettings old_codec;
- if (send_codec_.Get(&old_codec) && supported_codecs.front() == old_codec) {
+ if (send_codec_ && supported_codecs.front() == *send_codec_) {
LOG(LS_INFO) << "Ignore call to SetSendCodecs because first supported "
"codec hasn't changed.";
// Using same codec, avoid reconfiguring.
return true;
}
- send_codec_.Set(supported_codecs.front());
+ send_codec_ = rtc::Optional<WebRtcVideoChannel2::VideoCodecSettings>(
+ supported_codecs.front());
rtc::CritScope stream_lock(&stream_crit_);
LOG(LS_INFO) << "Change the send codec because SetSendCodecs has a different "
@@ -969,12 +841,15 @@ bool WebRtcVideoChannel2::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
RTC_DCHECK(kv.second != nullptr);
kv.second->SetCodec(supported_codecs.front());
}
- LOG(LS_INFO) << "SetNackAndRemb on all the receive streams because the send "
- "codec has changed.";
+ LOG(LS_INFO)
+ << "SetFeedbackOptions on all the receive streams because the send "
+ "codec has changed.";
for (auto& kv : receive_streams_) {
RTC_DCHECK(kv.second != nullptr);
- kv.second->SetNackAndRemb(HasNack(supported_codecs.front().codec),
- HasRemb(supported_codecs.front().codec));
+ kv.second->SetFeedbackParameters(
+ HasNack(supported_codecs.front().codec),
+ HasRemb(supported_codecs.front().codec),
+ HasTransportCc(supported_codecs.front().codec));
}
// TODO(holmer): Changing the codec parameters shouldn't necessarily mean that
@@ -1006,12 +881,11 @@ bool WebRtcVideoChannel2::SetSendCodecs(const std::vector<VideoCodec>& codecs) {
}
bool WebRtcVideoChannel2::GetSendCodec(VideoCodec* codec) {
- VideoCodecSettings codec_settings;
- if (!send_codec_.Get(&codec_settings)) {
+ if (!send_codec_) {
LOG(LS_VERBOSE) << "GetSendCodec: No send codec set.";
return false;
}
- *codec = codec_settings.codec;
+ *codec = send_codec_->codec;
return true;
}
@@ -1028,7 +902,7 @@ bool WebRtcVideoChannel2::SetSendStreamFormat(uint32_t ssrc,
bool WebRtcVideoChannel2::SetSend(bool send) {
LOG(LS_VERBOSE) << "SetSend: " << (send ? "true" : "false");
- if (send && !send_codec_.IsSet()) {
+ if (send && !send_codec_) {
LOG(LS_ERROR) << "SetSend(true) called before setting codec.";
return false;
}
@@ -1094,15 +968,10 @@ bool WebRtcVideoChannel2::AddSendStream(const StreamParams& sp) {
webrtc::VideoSendStream::Config config(this);
config.overuse_callback = this;
- WebRtcVideoSendStream* stream =
- new WebRtcVideoSendStream(call_,
- sp,
- config,
- external_encoder_factory_,
- options_,
- bitrate_config_.max_bitrate_bps,
- send_codec_,
- send_rtp_extensions_);
+ WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
+ call_, sp, config, external_encoder_factory_, options_,
+ bitrate_config_.max_bitrate_bps, send_codec_, send_rtp_extensions_,
+ send_params_);
uint32_t ssrc = sp.first_ssrc();
RTC_DCHECK(ssrc != 0);
@@ -1224,15 +1093,13 @@ bool WebRtcVideoChannel2::AddRecvStream(const StreamParams& sp,
// Set up A/V sync group based on sync label.
config.sync_group = sp.sync_label;
- config.rtp.remb = false;
- VideoCodecSettings send_codec;
- if (send_codec_.Get(&send_codec)) {
- config.rtp.remb = HasRemb(send_codec.codec);
- }
+ config.rtp.remb = send_codec_ ? HasRemb(send_codec_->codec) : false;
+ config.rtp.transport_cc =
+ send_codec_ ? HasTransportCc(send_codec_->codec) : false;
receive_streams_[ssrc] = new WebRtcVideoReceiveStream(
call_, sp, config, external_decoder_factory_, default_stream,
- recv_codecs_);
+ recv_codecs_, options_.disable_prerenderer_smoothing.value_or(false));
return true;
}
@@ -1246,6 +1113,9 @@ void WebRtcVideoChannel2::ConfigureReceiverRtp(
config->rtp.local_ssrc = rtcp_receiver_report_ssrc_;
config->rtp.extensions = recv_rtp_extensions_;
+ config->rtp.rtcp_mode = recv_params_.rtcp.reduced_size
+ ? webrtc::RtcpMode::kReducedSize
+ : webrtc::RtcpMode::kCompound;
// TODO(pbos): This protection is against setting the same local ssrc as
// remote which is not permitted by the lower-level API. RTCP requires a
@@ -1482,12 +1352,14 @@ void WebRtcVideoChannel2::OnRtcpReceived(
const rtc::PacketTime& packet_time) {
const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
packet_time.not_before);
- if (call_->Receiver()->DeliverPacket(
- webrtc::MediaType::VIDEO,
- reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
- webrtc_packet_time) != webrtc::PacketReceiver::DELIVERY_OK) {
- LOG(LS_WARNING) << "Failed to deliver RTCP packet.";
- }
+ // TODO(pbos): Check webrtc::PacketReceiver::DELIVERY_OK once we deliver
+ // both audio and video on the same path. Since BundleFilter no longer
+ // filters RTCP, incoming RTCP packets may have been destined for audio,
+ // so logging delivery failures here would spam the log.
+ call_->Receiver()->DeliverPacket(
+ webrtc::MediaType::VIDEO,
+ reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
+ webrtc_packet_time);
}
void WebRtcVideoChannel2::OnReadyToSend(bool ready) {
@@ -1512,20 +1384,17 @@ bool WebRtcVideoChannel2::MuteStream(uint32_t ssrc, bool mute) {
bool WebRtcVideoChannel2::SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
TRACE_EVENT0("webrtc", "WebRtcVideoChannel2::SetRecvRtpHeaderExtensions");
- LOG(LS_INFO) << "SetRecvRtpHeaderExtensions: "
- << RtpExtensionsToString(extensions);
- if (!ValidateRtpHeaderExtensionIds(extensions))
+ if (!ValidateRtpExtensions(extensions)) {
return false;
-
- std::vector<webrtc::RtpExtension> filtered_extensions =
- FilterRtpExtensions(extensions);
- if (!RtpExtensionsHaveChanged(recv_rtp_extensions_, filtered_extensions)) {
+ }
+ std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
+ extensions, webrtc::RtpExtension::IsSupportedForVideo, false);
+ if (recv_rtp_extensions_ == filtered_extensions) {
LOG(LS_INFO) << "Ignoring call to SetRecvRtpHeaderExtensions because "
"header extensions haven't changed.";
return true;
}
-
- recv_rtp_extensions_ = filtered_extensions;
+ recv_rtp_extensions_.swap(filtered_extensions);
rtc::CritScope stream_lock(&stream_crit_);
for (std::map<uint32_t, WebRtcVideoReceiveStream*>::iterator it =
@@ -1539,21 +1408,17 @@ bool WebRtcVideoChannel2::SetRecvRtpHeaderExtensions(
bool WebRtcVideoChannel2::SetSendRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
TRACE_EVENT0("webrtc", "WebRtcVideoChannel2::SetSendRtpHeaderExtensions");
- LOG(LS_INFO) << "SetSendRtpHeaderExtensions: "
- << RtpExtensionsToString(extensions);
- if (!ValidateRtpHeaderExtensionIds(extensions))
+ if (!ValidateRtpExtensions(extensions)) {
return false;
-
- std::vector<webrtc::RtpExtension> filtered_extensions =
- FilterRtpExtensions(FilterRedundantRtpExtensions(
- extensions, kBweExtensionPriorities, kBweExtensionPrioritiesLength));
- if (!RtpExtensionsHaveChanged(send_rtp_extensions_, filtered_extensions)) {
- LOG(LS_INFO) << "Ignoring call to SetSendRtpHeaderExtensions because "
+ }
+ std::vector<webrtc::RtpExtension> filtered_extensions = FilterRtpExtensions(
+ extensions, webrtc::RtpExtension::IsSupportedForVideo, true);
+ if (send_rtp_extensions_ == filtered_extensions) {
+ LOG(LS_INFO) << "Ignoring call to SetRecvRtpHeaderExtensions because "
"header extensions haven't changed.";
return true;
}
-
- send_rtp_extensions_ = filtered_extensions;
+ send_rtp_extensions_.swap(filtered_extensions);
const webrtc::RtpExtension* cvo_extension = FindHeaderExtension(
send_rtp_extensions_, kRtpVideoRotationHeaderExtension);
@@ -1612,11 +1477,11 @@ bool WebRtcVideoChannel2::SetOptions(const VideoOptions& options) {
}
{
rtc::CritScope lock(&capturer_crit_);
- options_.cpu_overuse_detection.Get(&signal_cpu_adaptation_);
+ if (options_.cpu_overuse_detection)
+ signal_cpu_adaptation_ = *options_.cpu_overuse_detection;
}
- rtc::DiffServCodePoint dscp = options_.dscp.GetWithDefaultIfUnset(false)
- ? rtc::DSCP_AF41
- : rtc::DSCP_DEFAULT;
+ rtc::DiffServCodePoint dscp =
+ options_.dscp.value_or(false) ? rtc::DSCP_AF41 : rtc::DSCP_DEFAULT;
MediaChannel::SetDscp(dscp);
rtc::CritScope stream_lock(&stream_crit_);
for (std::map<uint32_t, WebRtcVideoSendStream*>::iterator it =
@@ -1708,12 +1573,11 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::VideoSendStreamParameters::
const webrtc::VideoSendStream::Config& config,
const VideoOptions& options,
int max_bitrate_bps,
- const Settable<VideoCodecSettings>& codec_settings)
+ const rtc::Optional<VideoCodecSettings>& codec_settings)
: config(config),
options(options),
max_bitrate_bps(max_bitrate_bps),
- codec_settings(codec_settings) {
-}
+ codec_settings(codec_settings) {}
WebRtcVideoChannel2::WebRtcVideoSendStream::AllocatedEncoder::AllocatedEncoder(
webrtc::VideoEncoder* encoder,
@@ -1737,8 +1601,11 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::WebRtcVideoSendStream(
WebRtcVideoEncoderFactory* external_encoder_factory,
const VideoOptions& options,
int max_bitrate_bps,
- const Settable<VideoCodecSettings>& codec_settings,
- const std::vector<webrtc::RtpExtension>& rtp_extensions)
+ const rtc::Optional<VideoCodecSettings>& codec_settings,
+ const std::vector<webrtc::RtpExtension>& rtp_extensions,
+ // TODO(deadbeef): Don't duplicate information between send_params,
+ // rtp_extensions, options, etc.
+ const VideoSendParameters& send_params)
: ssrcs_(sp.ssrcs),
ssrc_groups_(sp.ssrc_groups),
call_(call),
@@ -1759,10 +1626,12 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::WebRtcVideoSendStream(
&parameters_.config.rtp.rtx.ssrcs);
parameters_.config.rtp.c_name = sp.cname;
parameters_.config.rtp.extensions = rtp_extensions;
+ parameters_.config.rtp.rtcp_mode = send_params.rtcp.reduced_size
+ ? webrtc::RtcpMode::kReducedSize
+ : webrtc::RtcpMode::kCompound;
- VideoCodecSettings params;
- if (codec_settings.Get(&params)) {
- SetCodec(params);
+ if (codec_settings) {
+ SetCodec(*codec_settings);
}
}
@@ -1940,11 +1809,10 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetApplyRotation(
void WebRtcVideoChannel2::WebRtcVideoSendStream::SetOptions(
const VideoOptions& options) {
rtc::CritScope cs(&lock_);
- VideoCodecSettings codec_settings;
- if (parameters_.codec_settings.Get(&codec_settings)) {
+ if (parameters_.codec_settings) {
LOG(LS_INFO) << "SetCodecAndOptions because of SetOptions; options="
<< options.ToString();
- SetCodecAndOptions(codec_settings, options);
+ SetCodecAndOptions(*parameters_.codec_settings, options);
} else {
parameters_.options = options;
}
@@ -2049,10 +1917,12 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetCodecAndOptions(
parameters_.config.rtp.nack.rtp_history_ms =
HasNack(codec_settings.codec) ? kNackHistoryMs : 0;
- options.suspend_below_min_bitrate.Get(
- &parameters_.config.suspend_below_min_bitrate);
+ RTC_CHECK(options.suspend_below_min_bitrate);
+ parameters_.config.suspend_below_min_bitrate =
+ *options.suspend_below_min_bitrate;
- parameters_.codec_settings.Set(codec_settings);
+ parameters_.codec_settings =
+ rtc::Optional<WebRtcVideoChannel2::VideoCodecSettings>(codec_settings);
parameters_.options = options;
LOG(LS_INFO)
@@ -2075,17 +1945,27 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetRtpExtensions(
}
}
+void WebRtcVideoChannel2::WebRtcVideoSendStream::SetSendParameters(
+ const VideoSendParameters& send_params) {
+ rtc::CritScope cs(&lock_);
+ parameters_.config.rtp.rtcp_mode = send_params.rtcp.reduced_size
+ ? webrtc::RtcpMode::kReducedSize
+ : webrtc::RtcpMode::kCompound;
+ if (stream_ != nullptr) {
+ LOG(LS_INFO) << "RecreateWebRtcStream (send) because of SetSendParameters";
+ RecreateWebRtcStream();
+ }
+}
+
webrtc::VideoEncoderConfig
WebRtcVideoChannel2::WebRtcVideoSendStream::CreateVideoEncoderConfig(
const Dimensions& dimensions,
const VideoCodec& codec) const {
webrtc::VideoEncoderConfig encoder_config;
if (dimensions.is_screencast) {
- int screencast_min_bitrate_kbps;
- parameters_.options.screencast_min_bitrate.Get(
- &screencast_min_bitrate_kbps);
+ RTC_CHECK(parameters_.options.screencast_min_bitrate);
encoder_config.min_transmit_bitrate_bps =
- screencast_min_bitrate_kbps * 1000;
+ *parameters_.options.screencast_min_bitrate * 1000;
encoder_config.content_type =
webrtc::VideoEncoderConfig::ContentType::kScreen;
} else {
@@ -2121,7 +2001,7 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::CreateVideoEncoderConfig(
parameters_.max_bitrate_bps, stream_count);
// Conference mode screencast uses 2 temporal layers split at 100kbit.
- if (parameters_.options.conference_mode.GetWithDefaultIfUnset(false) &&
+ if (parameters_.options.conference_mode.value_or(false) &&
dimensions.is_screencast && encoder_config.streams.size() == 1) {
ScreenshareLayerConfig config = ScreenshareLayerConfig::GetDefault();
@@ -2156,8 +2036,8 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetDimensions(
RTC_DCHECK(!parameters_.encoder_config.streams.empty());
- VideoCodecSettings codec_settings;
- parameters_.codec_settings.Get(&codec_settings);
+ RTC_CHECK(parameters_.codec_settings);
+ VideoCodecSettings codec_settings = *parameters_.codec_settings;
webrtc::VideoEncoderConfig encoder_config =
CreateVideoEncoderConfig(last_dimensions_, codec_settings.codec);
@@ -2202,9 +2082,8 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::GetVideoSenderInfo() {
for (uint32_t ssrc : parameters_.config.rtp.ssrcs)
info.add_ssrc(ssrc);
- VideoCodecSettings codec_settings;
- if (parameters_.codec_settings.Get(&codec_settings))
- info.codec_name = codec_settings.codec.name;
+ if (parameters_.codec_settings)
+ info.codec_name = parameters_.codec_settings->codec.name;
for (size_t i = 0; i < parameters_.encoder_config.streams.size(); ++i) {
if (i == parameters_.encoder_config.streams.size() - 1) {
info.preferred_bitrate +=
@@ -2238,6 +2117,15 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::GetVideoSenderInfo() {
}
}
}
+
+ // Get bandwidth limitation info from stream_->GetStats().
+ // The input resolution (the output of the video_adapter) can be scaled down
+ // further, or higher video layer(s) can be dropped, due to bitrate
+ // constraints. Note: adapt_changes only includes changes made by the
+ // video_adapter.
+ if (stats.bw_limited_resolution)
+ info.adapt_reason |= CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH;
+
+ info.encoder_implementation_name = stats.encoder_implementation_name;
info.ssrc_groups = ssrc_groups_;
info.framerate_input = stats.input_frame_rate;
info.framerate_sent = stats.encode_frame_rate;
@@ -2316,11 +2204,10 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::RecreateWebRtcStream() {
call_->DestroyVideoSendStream(stream_);
}
- VideoCodecSettings codec_settings;
- parameters_.codec_settings.Get(&codec_settings);
+ RTC_CHECK(parameters_.codec_settings);
parameters_.encoder_config.encoder_specific_settings =
ConfigureVideoEncoderSettings(
- codec_settings.codec, parameters_.options,
+ parameters_.codec_settings->codec, parameters_.options,
parameters_.encoder_config.content_type ==
webrtc::VideoEncoderConfig::ContentType::kScreen);
@@ -2345,7 +2232,8 @@ WebRtcVideoChannel2::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
const webrtc::VideoReceiveStream::Config& config,
WebRtcVideoDecoderFactory* external_decoder_factory,
bool default_stream,
- const std::vector<VideoCodecSettings>& recv_codecs)
+ const std::vector<VideoCodecSettings>& recv_codecs,
+ bool disable_prerenderer_smoothing)
: call_(call),
ssrcs_(sp.ssrcs),
ssrc_groups_(sp.ssrc_groups),
@@ -2353,6 +2241,7 @@ WebRtcVideoChannel2::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
default_stream_(default_stream),
config_(config),
external_decoder_factory_(external_decoder_factory),
+ disable_prerenderer_smoothing_(disable_prerenderer_smoothing),
renderer_(NULL),
last_width_(-1),
last_height_(-1),
@@ -2457,10 +2346,10 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetRecvCodecs(
config_.rtp.nack.rtp_history_ms =
HasNack(recv_codecs.begin()->codec) ? kNackHistoryMs : 0;
- ClearDecoders(&old_decoders);
LOG(LS_INFO) << "RecreateWebRtcStream (recv) because of SetRecvCodecs: "
<< CodecSettingsVectorToString(recv_codecs);
RecreateWebRtcStream();
+ ClearDecoders(&old_decoders);
}
void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetLocalSsrc(
@@ -2482,20 +2371,28 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetLocalSsrc(
RecreateWebRtcStream();
}
-void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetNackAndRemb(
- bool nack_enabled, bool remb_enabled) {
+void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetFeedbackParameters(
+ bool nack_enabled,
+ bool remb_enabled,
+ bool transport_cc_enabled) {
int nack_history_ms = nack_enabled ? kNackHistoryMs : 0;
if (config_.rtp.nack.rtp_history_ms == nack_history_ms &&
- config_.rtp.remb == remb_enabled) {
- LOG(LS_INFO) << "Ignoring call to SetNackAndRemb because parameters are "
- "unchanged; nack=" << nack_enabled
- << ", remb=" << remb_enabled;
+ config_.rtp.remb == remb_enabled &&
+ config_.rtp.transport_cc == transport_cc_enabled) {
+ LOG(LS_INFO)
+ << "Ignoring call to SetFeedbackParameters because parameters are "
+ "unchanged; nack="
+ << nack_enabled << ", remb=" << remb_enabled
+ << ", transport_cc=" << transport_cc_enabled;
return;
}
config_.rtp.remb = remb_enabled;
config_.rtp.nack.rtp_history_ms = nack_history_ms;
- LOG(LS_INFO) << "RecreateWebRtcStream (recv) because of SetNackAndRemb; nack="
- << nack_enabled << ", remb=" << remb_enabled;
+ config_.rtp.transport_cc = transport_cc_enabled;
+ LOG(LS_INFO)
+ << "RecreateWebRtcStream (recv) because of SetFeedbackParameters; nack="
+ << nack_enabled << ", remb=" << remb_enabled
+ << ", transport_cc=" << transport_cc_enabled;
RecreateWebRtcStream();
}
@@ -2506,6 +2403,15 @@ void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetRtpExtensions(
RecreateWebRtcStream();
}
+void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetRecvParameters(
+ const VideoRecvParameters& recv_params) {
+ config_.rtp.rtcp_mode = recv_params.rtcp.reduced_size
+ ? webrtc::RtcpMode::kReducedSize
+ : webrtc::RtcpMode::kCompound;
+ LOG(LS_INFO) << "RecreateWebRtcStream (recv) because of SetRecvParameters";
+ RecreateWebRtcStream();
+}
+
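
The new SetSendParameters/SetRecvParameters methods both derive the stream's RTCP mode from the reduced_size flag. A minimal usage sketch, assuming a configured WebRtcVideoChannel2* named channel (the entry points and parameter types are from this change; the surrounding channel setup is not shown):

  cricket::VideoSendParameters send_params;
  send_params.rtcp.reduced_size = true;  // -> webrtc::RtcpMode::kReducedSize
  channel->SetSendParameters(send_params);

  cricket::VideoRecvParameters recv_params;
  recv_params.rtcp.reduced_size = true;
  channel->SetRecvParameters(recv_params);

With reduced_size left false, both paths fall back to webrtc::RtcpMode::kCompound; in either case existing streams are recreated so the new mode takes effect.
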
void WebRtcVideoChannel2::WebRtcVideoReceiveStream::RecreateWebRtcStream() {
if (stream_ != NULL) {
call_->DestroyVideoReceiveStream(stream_);
@@ -2560,6 +2466,11 @@ bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::IsTextureSupported() const {
return true;
}
+bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::SmoothsRenderedFrames()
+ const {
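+ // When prerenderer smoothing is disabled we claim to smooth rendered frames
+ // ourselves, which makes the VideoReceiveStream skip its own smoothing.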
+ return disable_prerenderer_smoothing_;
+}
+
bool WebRtcVideoChannel2::WebRtcVideoReceiveStream::IsDefaultStream() const {
return default_stream_;
}
@@ -2607,6 +2518,7 @@ WebRtcVideoChannel2::WebRtcVideoReceiveStream::GetVideoReceiverInfo() {
info.ssrc_groups = ssrc_groups_;
info.add_ssrc(config_.rtp.remote_ssrc);
webrtc::VideoReceiveStream::Stats stats = stream_->GetStats();
+ info.decoder_implementation_name = stats.decoder_implementation_name;
info.bytes_rcvd = stats.rtp_stats.transmitted.payload_bytes +
stats.rtp_stats.transmitted.header_bytes +
stats.rtp_stats.transmitted.padding_bytes;
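
Since GetVideoSenderInfo can now report bandwidth limitation on top of CPU adaptation, adapt_reason is best read as a bitmask. A short sketch under that reading, using the CoordinatedVideoAdapter constants referenced above (info is a cricket::VideoMediaInfo filled in by GetStats, as in the unit tests below):

  int reason = info.senders[0].adapt_reason;
  bool cpu_limited =
      (reason & CoordinatedVideoAdapter::ADAPTREASON_CPU) != 0;
  bool bandwidth_limited =
      (reason & CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH) != 0;

The GetStatsTracksAdaptationAndBandwidthStats test added below exercises exactly this case, with both reasons reported at once.
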
diff --git a/talk/media/webrtc/webrtcvideoengine2.h b/talk/media/webrtc/webrtcvideoengine2.h
index 7096135cdd..1b8da16368 100644
--- a/talk/media/webrtc/webrtcvideoengine2.h
+++ b/talk/media/webrtc/webrtcvideoengine2.h
@@ -112,14 +112,11 @@ class WebRtcVideoEngine2 {
// Basic video engine implementation.
void Init();
- bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);
-
WebRtcVideoChannel2* CreateChannel(webrtc::Call* call,
const VideoOptions& options);
const std::vector<VideoCodec>& codecs() const;
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
- void SetLogging(int min_sev, const char* filter);
+ RtpCapabilities GetCapabilities() const;
// Set a WebRtcVideoDecoderFactory for external decoding. Video engine does
// not take the ownership of |decoder_factory|. The caller needs to make sure
@@ -134,9 +131,6 @@ class WebRtcVideoEngine2 {
bool EnableTimedRender();
bool FindCodec(const VideoCodec& in);
- bool CanSendCodec(const VideoCodec& in,
- const VideoCodec& current,
- VideoCodec* out);
// Check whether the supplied trace should be ignored.
bool ShouldIgnoreTrace(const std::string& trace);
@@ -144,7 +138,6 @@ class WebRtcVideoEngine2 {
std::vector<VideoCodec> GetSupportedCodecs() const;
std::vector<VideoCodec> video_codecs_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
bool initialized_;
@@ -250,14 +243,18 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
WebRtcVideoEncoderFactory* external_encoder_factory,
const VideoOptions& options,
int max_bitrate_bps,
- const Settable<VideoCodecSettings>& codec_settings,
- const std::vector<webrtc::RtpExtension>& rtp_extensions);
+ const rtc::Optional<VideoCodecSettings>& codec_settings,
+ const std::vector<webrtc::RtpExtension>& rtp_extensions,
+ const VideoSendParameters& send_params);
~WebRtcVideoSendStream();
void SetOptions(const VideoOptions& options);
void SetCodec(const VideoCodecSettings& codec);
void SetRtpExtensions(
const std::vector<webrtc::RtpExtension>& rtp_extensions);
+ // TODO(deadbeef): Move logic from SetCodec/SetRtpExtensions/etc.
+ // into this method. Currently this method only sets the RTCP mode.
+ void SetSendParameters(const VideoSendParameters& send_params);
void InputFrame(VideoCapturer* capturer, const VideoFrame* frame);
bool SetCapturer(VideoCapturer* capturer);
@@ -286,11 +283,11 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
const webrtc::VideoSendStream::Config& config,
const VideoOptions& options,
int max_bitrate_bps,
- const Settable<VideoCodecSettings>& codec_settings);
+ const rtc::Optional<VideoCodecSettings>& codec_settings);
webrtc::VideoSendStream::Config config;
VideoOptions options;
int max_bitrate_bps;
- Settable<VideoCodecSettings> codec_settings;
+ rtc::Optional<VideoCodecSettings> codec_settings;
// Sent resolutions + bitrates etc. by the underlying VideoSendStream,
// typically changes when setting a new resolution or reconfiguring
// bitrates.
@@ -395,19 +392,26 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
const webrtc::VideoReceiveStream::Config& config,
WebRtcVideoDecoderFactory* external_decoder_factory,
bool default_stream,
- const std::vector<VideoCodecSettings>& recv_codecs);
+ const std::vector<VideoCodecSettings>& recv_codecs,
+ bool disable_prerenderer_smoothing);
~WebRtcVideoReceiveStream();
const std::vector<uint32_t>& GetSsrcs() const;
void SetLocalSsrc(uint32_t local_ssrc);
- void SetNackAndRemb(bool nack_enabled, bool remb_enabled);
+ void SetFeedbackParameters(bool nack_enabled,
+ bool remb_enabled,
+ bool transport_cc_enabled);
void SetRecvCodecs(const std::vector<VideoCodecSettings>& recv_codecs);
void SetRtpExtensions(const std::vector<webrtc::RtpExtension>& extensions);
+ // TODO(deadbeef): Move logic from SetRecvCodecs/SetRtpExtensions/etc.
+ // into this method. Currently this method only sets the RTCP mode.
+ void SetRecvParameters(const VideoRecvParameters& recv_params);
void RenderFrame(const webrtc::VideoFrame& frame,
int time_to_render_ms) override;
bool IsTextureSupported() const override;
+ bool SmoothsRenderedFrames() const override;
bool IsDefaultStream() const;
void SetRenderer(cricket::VideoRenderer* renderer);
@@ -448,6 +452,8 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
WebRtcVideoDecoderFactory* const external_decoder_factory_;
std::vector<AllocatedDecoder> allocated_decoders_;
+ const bool disable_prerenderer_smoothing_;
+
rtc::CriticalSection renderer_lock_;
cricket::VideoRenderer* renderer_ GUARDED_BY(renderer_lock_);
int last_width_ GUARDED_BY(renderer_lock_);
@@ -512,7 +518,7 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
std::set<uint32_t> send_ssrcs_ GUARDED_BY(stream_crit_);
std::set<uint32_t> receive_ssrcs_ GUARDED_BY(stream_crit_);
- Settable<VideoCodecSettings> send_codec_;
+ rtc::Optional<VideoCodecSettings> send_codec_;
std::vector<webrtc::RtpExtension> send_rtp_extensions_;
WebRtcVideoEncoderFactory* const external_encoder_factory_;
@@ -521,6 +527,10 @@ class WebRtcVideoChannel2 : public rtc::MessageHandler,
std::vector<webrtc::RtpExtension> recv_rtp_extensions_;
webrtc::Call::Config::BitrateConfig bitrate_config_;
VideoOptions options_;
+ // TODO(deadbeef): Don't duplicate information between
+ // send_params/recv_params, rtp_extensions, options, etc.
+ VideoSendParameters send_params_;
+ VideoRecvParameters recv_params_;
};
} // namespace cricket
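
The header changes above complete the migration from Settable<VideoCodecSettings> to rtc::Optional<VideoCodecSettings>. For reference, a minimal sketch of the rtc::Optional idioms the rewritten call sites rely on (values are illustrative):

  rtc::Optional<bool> flag;                   // default-constructed: unset
  flag = rtc::Optional<bool>(true);           // assign a value
  if (flag) {                                 // presence test
    bool v = *flag;                           // dereference only when set
  }
  bool with_default = flag.value_or(false);   // read with a fallback

Unlike Settable's Get()/GetWithDefaultIfUnset(), this makes the set/unset state explicit at every use site.
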
diff --git a/talk/media/webrtc/webrtcvideoengine2_unittest.cc b/talk/media/webrtc/webrtcvideoengine2_unittest.cc
index c0cd2ffa50..41e04a9fa7 100644
--- a/talk/media/webrtc/webrtcvideoengine2_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoengine2_unittest.cc
@@ -75,6 +75,8 @@ void VerifyCodecHasDefaultFeedbackParams(const cricket::VideoCodec& codec) {
EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty)));
EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
+ cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codec.HasFeedbackParam(cricket::FeedbackParam(
cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir)));
}
@@ -205,26 +207,6 @@ TEST_F(WebRtcVideoEngine2Test, FindCodec) {
EXPECT_TRUE(engine_.FindCodec(rtx));
}
-TEST_F(WebRtcVideoEngine2Test, SetDefaultEncoderConfigPreservesFeedbackParams) {
- cricket::VideoCodec max_settings(
- engine_.codecs()[0].id, engine_.codecs()[0].name,
- engine_.codecs()[0].width / 2, engine_.codecs()[0].height / 2, 30, 0);
- // This codec shouldn't have NACK by default or the test is pointless.
- EXPECT_FALSE(max_settings.HasFeedbackParam(
- FeedbackParam(kRtcpFbParamNack, kParamValueEmpty)));
- // The engine should by default have it however.
- EXPECT_TRUE(engine_.codecs()[0].HasFeedbackParam(
- FeedbackParam(kRtcpFbParamNack, kParamValueEmpty)));
-
- // Set constrained max codec settings.
- EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
- cricket::VideoEncoderConfig(max_settings)));
-
- // Verify that feedback parameters are retained.
- EXPECT_TRUE(engine_.codecs()[0].HasFeedbackParam(
- FeedbackParam(kRtcpFbParamNack, kParamValueEmpty)));
-}
-
TEST_F(WebRtcVideoEngine2Test, DefaultRtxCodecHasAssociatedPayloadTypeSet) {
std::vector<VideoCodec> engine_codecs = engine_.codecs();
for (size_t i = 0; i < engine_codecs.size(); ++i) {
@@ -240,11 +222,11 @@ TEST_F(WebRtcVideoEngine2Test, DefaultRtxCodecHasAssociatedPayloadTypeSet) {
}
TEST_F(WebRtcVideoEngine2Test, SupportsTimestampOffsetHeaderExtension) {
- std::vector<RtpHeaderExtension> extensions = engine_.rtp_header_extensions();
- ASSERT_FALSE(extensions.empty());
- for (size_t i = 0; i < extensions.size(); ++i) {
- if (extensions[i].uri == kRtpTimestampOffsetHeaderExtension) {
- EXPECT_EQ(kRtpTimestampOffsetHeaderExtensionDefaultId, extensions[i].id);
+ RtpCapabilities capabilities = engine_.GetCapabilities();
+ ASSERT_FALSE(capabilities.header_extensions.empty());
+ for (const RtpHeaderExtension& extension : capabilities.header_extensions) {
+ if (extension.uri == kRtpTimestampOffsetHeaderExtension) {
+ EXPECT_EQ(kRtpTimestampOffsetHeaderExtensionDefaultId, extension.id);
return;
}
}
@@ -252,12 +234,11 @@ TEST_F(WebRtcVideoEngine2Test, SupportsTimestampOffsetHeaderExtension) {
}
TEST_F(WebRtcVideoEngine2Test, SupportsAbsoluteSenderTimeHeaderExtension) {
- std::vector<RtpHeaderExtension> extensions = engine_.rtp_header_extensions();
- ASSERT_FALSE(extensions.empty());
- for (size_t i = 0; i < extensions.size(); ++i) {
- if (extensions[i].uri == kRtpAbsoluteSenderTimeHeaderExtension) {
- EXPECT_EQ(kRtpAbsoluteSenderTimeHeaderExtensionDefaultId,
- extensions[i].id);
+ RtpCapabilities capabilities = engine_.GetCapabilities();
+ ASSERT_FALSE(capabilities.header_extensions.empty());
+ for (const RtpHeaderExtension& extension : capabilities.header_extensions) {
+ if (extension.uri == kRtpAbsoluteSenderTimeHeaderExtension) {
+ EXPECT_EQ(kRtpAbsoluteSenderTimeHeaderExtensionDefaultId, extension.id);
return;
}
}
@@ -272,12 +253,12 @@ class WebRtcVideoEngine2WithSendSideBweTest : public WebRtcVideoEngine2Test {
TEST_F(WebRtcVideoEngine2WithSendSideBweTest,
SupportsTransportSequenceNumberHeaderExtension) {
- std::vector<RtpHeaderExtension> extensions = engine_.rtp_header_extensions();
- ASSERT_FALSE(extensions.empty());
- for (size_t i = 0; i < extensions.size(); ++i) {
- if (extensions[i].uri == kRtpTransportSequenceNumberHeaderExtension) {
+ RtpCapabilities capabilities = engine_.GetCapabilities();
+ ASSERT_FALSE(capabilities.header_extensions.empty());
+ for (const RtpHeaderExtension& extension : capabilities.header_extensions) {
+ if (extension.uri == kRtpTransportSequenceNumberHeaderExtension) {
EXPECT_EQ(kRtpTransportSequenceNumberHeaderExtensionDefaultId,
- extensions[i].id);
+ extension.id);
return;
}
}
@@ -285,11 +266,11 @@ TEST_F(WebRtcVideoEngine2WithSendSideBweTest,
}
TEST_F(WebRtcVideoEngine2Test, SupportsVideoRotationHeaderExtension) {
- std::vector<RtpHeaderExtension> extensions = engine_.rtp_header_extensions();
- ASSERT_FALSE(extensions.empty());
- for (size_t i = 0; i < extensions.size(); ++i) {
- if (extensions[i].uri == kRtpVideoRotationHeaderExtension) {
- EXPECT_EQ(kRtpVideoRotationHeaderExtensionDefaultId, extensions[i].id);
+ RtpCapabilities capabilities = engine_.GetCapabilities();
+ ASSERT_FALSE(capabilities.header_extensions.empty());
+ for (const RtpHeaderExtension& extension : capabilities.header_extensions) {
+ if (extension.uri == kRtpVideoRotationHeaderExtension) {
+ EXPECT_EQ(kRtpVideoRotationHeaderExtensionDefaultId, extension.id);
return;
}
}
@@ -794,17 +775,6 @@ TEST_F(WebRtcVideoEngine2Test, RegisterExternalH264DecoderIfSupported) {
ASSERT_EQ(1u, decoder_factory.decoders().size());
}
-class WebRtcVideoEngine2BaseTest
- : public VideoEngineTest<cricket::WebRtcVideoEngine2> {
- protected:
- typedef VideoEngineTest<cricket::WebRtcVideoEngine2> Base;
-};
-
-#define WEBRTC_ENGINE_BASE_TEST(test) \
- TEST_F(WebRtcVideoEngine2BaseTest, test) { Base::test##Body(); }
-
-WEBRTC_ENGINE_BASE_TEST(ConstrainNewCodec2);
-
class WebRtcVideoChannel2BaseTest
: public VideoMediaChannelTest<WebRtcVideoEngine2, WebRtcVideoChannel2> {
protected:
@@ -894,7 +864,10 @@ TEST_F(WebRtcVideoChannel2BaseTest, TwoStreamsReUseFirstStream) {
Base::TwoStreamsReUseFirstStream(kVp8Codec);
}
+// Disabled for TSan: https://bugs.chromium.org/p/webrtc/issues/detail?id=4963
+#if !defined(THREAD_SANITIZER)
WEBRTC_BASE_TEST(SendManyResizeOnce);
+#endif // THREAD_SANITIZER
// TODO(pbos): Enable and figure out why this fails (or should work).
TEST_F(WebRtcVideoChannel2BaseTest, DISABLED_SendVp8HdAndReceiveAdaptedVp8Vga) {
@@ -1097,7 +1070,7 @@ class WebRtcVideoChannel2Test : public WebRtcVideoEngine2Test {
FakeVideoSendStream* SetDenoisingOption(
const cricket::VideoSendParameters& parameters, bool enabled) {
cricket::VideoSendParameters params = parameters;
- params.options.video_noise_reduction.Set(enabled);
+ params.options.video_noise_reduction = rtc::Optional<bool>(enabled);
channel_->SetSendParameters(params);
return fake_call_->GetVideoSendStreams().back();
}
@@ -1148,7 +1121,7 @@ TEST_F(WebRtcVideoChannel2Test, RecvStreamWithSimAndRtx) {
parameters.codecs = engine_.codecs();
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_TRUE(channel_->SetSend(true));
- parameters.options.conference_mode.Set(true);
+ parameters.options.conference_mode = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
// Send side.
@@ -1451,6 +1424,11 @@ TEST_F(WebRtcVideoChannel2Test, RembIsEnabledByDefault) {
EXPECT_TRUE(stream->GetConfig().rtp.remb);
}
+TEST_F(WebRtcVideoChannel2Test, TransportCcIsEnabledByDefault) {
+ FakeVideoReceiveStream* stream = AddRecvStream();
+ EXPECT_TRUE(stream->GetConfig().rtp.transport_cc);
+}
+
TEST_F(WebRtcVideoChannel2Test, RembCanBeEnabledAndDisabled) {
FakeVideoReceiveStream* stream = AddRecvStream();
EXPECT_TRUE(stream->GetConfig().rtp.remb);
@@ -1471,6 +1449,27 @@ TEST_F(WebRtcVideoChannel2Test, RembCanBeEnabledAndDisabled) {
EXPECT_TRUE(stream->GetConfig().rtp.remb);
}
+TEST_F(WebRtcVideoChannel2Test, TransportCcCanBeEnabledAndDisabled) {
+ FakeVideoReceiveStream* stream = AddRecvStream();
+ EXPECT_TRUE(stream->GetConfig().rtp.transport_cc);
+
+ // Verify that transport cc feedback is turned off when send(!) codecs without
+ // transport cc feedback are set.
+ cricket::VideoSendParameters parameters;
+ parameters.codecs.push_back(kVp8Codec);
+ EXPECT_TRUE(parameters.codecs[0].feedback_params.params().empty());
+ EXPECT_TRUE(channel_->SetSendParameters(parameters));
+ stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_FALSE(stream->GetConfig().rtp.transport_cc);
+
+ // Verify that transport cc feedback is turned on when setting default codecs
+ // since the default codecs have transport cc feedback enabled.
+ parameters.codecs = engine_.codecs();
+ EXPECT_TRUE(channel_->SetSendParameters(parameters));
+ stream = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_TRUE(stream->GetConfig().rtp.transport_cc);
+}
+
TEST_F(WebRtcVideoChannel2Test, NackIsEnabledByDefault) {
VerifyCodecHasDefaultFeedbackParams(default_codec_);
@@ -1558,7 +1557,8 @@ TEST_F(WebRtcVideoChannel2Test, UsesCorrectSettingsForScreencast) {
cricket::VideoCodec codec = kVp8Codec360p;
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(codec);
- parameters.options.screencast_min_bitrate.Set(kScreenshareMinBitrateKbps);
+ parameters.options.screencast_min_bitrate =
+ rtc::Optional<int>(kScreenshareMinBitrateKbps);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
AddSendStream();
@@ -1612,7 +1612,7 @@ TEST_F(WebRtcVideoChannel2Test,
ConferenceModeScreencastConfiguresTemporalLayer) {
static const int kConferenceScreencastTemporalBitrateBps =
ScreenshareLayerConfig::GetDefault().tl0_bitrate_kbps * 1000;
- send_parameters_.options.conference_mode.Set(true);
+ send_parameters_.options.conference_mode = rtc::Optional<bool>(true);
channel_->SetSendParameters(send_parameters_);
AddSendStream();
@@ -1659,13 +1659,15 @@ TEST_F(WebRtcVideoChannel2Test, SuspendBelowMinBitrateDisabledByDefault) {
}
TEST_F(WebRtcVideoChannel2Test, SetOptionsWithSuspendBelowMinBitrate) {
- send_parameters_.options.suspend_below_min_bitrate.Set(true);
+ send_parameters_.options.suspend_below_min_bitrate =
+ rtc::Optional<bool>(true);
channel_->SetSendParameters(send_parameters_);
FakeVideoSendStream* stream = AddSendStream();
EXPECT_TRUE(stream->GetConfig().suspend_below_min_bitrate);
- send_parameters_.options.suspend_below_min_bitrate.Set(false);
+ send_parameters_.options.suspend_below_min_bitrate =
+ rtc::Optional<bool>(false);
channel_->SetSendParameters(send_parameters_);
stream = fake_call_->GetVideoSendStreams()[0];
@@ -1853,7 +1855,7 @@ void WebRtcVideoChannel2Test::TestCpuAdaptation(bool enable_overuse,
cricket::VideoSendParameters parameters;
parameters.codecs.push_back(codec);
if (!enable_overuse) {
- parameters.options.cpu_overuse_detection.Set(false);
+ parameters.options.cpu_overuse_detection = rtc::Optional<bool>(false);
}
EXPECT_TRUE(channel_->SetSendParameters(parameters));
@@ -2375,19 +2377,55 @@ TEST_F(WebRtcVideoChannel2Test, TestSetDscpOptions) {
cricket::VideoSendParameters parameters = send_parameters_;
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_EQ(rtc::DSCP_NO_CHANGE, network_interface->dscp());
- parameters.options.dscp.Set(true);
+ parameters.options.dscp = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
EXPECT_EQ(rtc::DSCP_AF41, network_interface->dscp());
// Verify previous value is not modified if dscp option is not set.
cricket::VideoSendParameters parameters1 = send_parameters_;
EXPECT_TRUE(channel_->SetSendParameters(parameters1));
EXPECT_EQ(rtc::DSCP_AF41, network_interface->dscp());
- parameters1.options.dscp.Set(false);
+ parameters1.options.dscp = rtc::Optional<bool>(false);
EXPECT_TRUE(channel_->SetSendParameters(parameters1));
EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface->dscp());
channel_->SetInterface(NULL);
}
+// This test verifies that the RTCP reduced size mode is properly applied to
+// send video streams.
+TEST_F(WebRtcVideoChannel2Test, TestSetSendRtcpReducedSize) {
+ // Create stream, expecting that default mode is "compound".
+ FakeVideoSendStream* stream1 = AddSendStream();
+ EXPECT_EQ(webrtc::RtcpMode::kCompound, stream1->GetConfig().rtp.rtcp_mode);
+
+ // Now enable reduced size mode.
+ send_parameters_.rtcp.reduced_size = true;
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
+ stream1 = fake_call_->GetVideoSendStreams()[0];
+ EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream1->GetConfig().rtp.rtcp_mode);
+
+ // Create a new stream and ensure it picks up the reduced size mode.
+ FakeVideoSendStream* stream2 = AddSendStream();
+ EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream2->GetConfig().rtp.rtcp_mode);
+}
+
+// This test verifies that the RTCP reduced size mode is properly applied to
+// receive video streams.
+TEST_F(WebRtcVideoChannel2Test, TestSetRecvRtcpReducedSize) {
+ // Create stream, expecting that default mode is "compound".
+ FakeVideoReceiveStream* stream1 = AddRecvStream();
+ EXPECT_EQ(webrtc::RtcpMode::kCompound, stream1->GetConfig().rtp.rtcp_mode);
+
+ // Now enable reduced size mode.
+ recv_parameters_.rtcp.reduced_size = true;
+ EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
+ stream1 = fake_call_->GetVideoReceiveStreams()[0];
+ EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream1->GetConfig().rtp.rtcp_mode);
+
+ // Create a new stream and ensure it picks up the reduced size mode.
+ FakeVideoReceiveStream* stream2 = AddRecvStream();
+ EXPECT_EQ(webrtc::RtcpMode::kReducedSize, stream2->GetConfig().rtp.rtcp_mode);
+}
+
TEST_F(WebRtcVideoChannel2Test, OnReadyToSendSignalsNetworkState) {
EXPECT_EQ(webrtc::kNetworkUp, fake_call_->GetNetworkState());
@@ -2410,6 +2448,18 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsReportsSentCodecName) {
EXPECT_EQ(kVp8Codec.name, info.senders[0].codec_name);
}
+TEST_F(WebRtcVideoChannel2Test, GetStatsReportsEncoderImplementationName) {
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Stats stats;
+ stats.encoder_implementation_name = "encoder_implementation_name";
+ stream->SetStats(stats);
+
+ cricket::VideoMediaInfo info;
+ ASSERT_TRUE(channel_->GetStats(&info));
+ EXPECT_EQ(stats.encoder_implementation_name,
+ info.senders[0].encoder_implementation_name);
+}
+
TEST_F(WebRtcVideoChannel2Test, GetStatsReportsCpuOveruseMetrics) {
FakeVideoSendStream* stream = AddSendStream();
webrtc::VideoSendStream::Stats stats;
@@ -2460,7 +2510,7 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->SetSend(true));
// Verify that the CpuOveruseObserver is registered and trigger downgrade.
- parameters.options.cpu_overuse_detection.Set(true);
+ parameters.options.cpu_overuse_detection = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(parameters));
// Trigger overuse.
@@ -2518,6 +2568,87 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationStats) {
EXPECT_TRUE(channel_->SetCapturer(kSsrcs3[0], NULL));
}
+TEST_F(WebRtcVideoChannel2Test, GetStatsTracksAdaptationAndBandwidthStats) {
+ AddSendStream(cricket::CreateSimStreamParams("cname", MAKE_VECTOR(kSsrcs3)));
+
+ // Capture format VGA.
+ cricket::FakeVideoCapturer video_capturer_vga;
+ const std::vector<cricket::VideoFormat>* formats =
+ video_capturer_vga.GetSupportedFormats();
+ cricket::VideoFormat capture_format_vga = (*formats)[1];
+ EXPECT_EQ(cricket::CS_RUNNING, video_capturer_vga.Start(capture_format_vga));
+ EXPECT_TRUE(channel_->SetCapturer(kSsrcs3[0], &video_capturer_vga));
+ EXPECT_TRUE(video_capturer_vga.CaptureFrame());
+
+ cricket::VideoCodec send_codec(100, "VP8", 640, 480, 30, 0);
+ cricket::VideoSendParameters parameters;
+ parameters.codecs.push_back(send_codec);
+ EXPECT_TRUE(channel_->SetSendParameters(parameters));
+ EXPECT_TRUE(channel_->SetSend(true));
+
+ // Verify that the CpuOveruseObserver is registered and trigger downgrade.
+ parameters.options.cpu_overuse_detection = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(parameters));
+
+ // Trigger overuse -> adapt CPU.
+ ASSERT_EQ(1u, fake_call_->GetVideoSendStreams().size());
+ webrtc::LoadObserver* overuse_callback =
+ fake_call_->GetVideoSendStreams().front()->GetConfig().overuse_callback;
+ ASSERT_TRUE(overuse_callback != NULL);
+ overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kOveruse);
+ EXPECT_TRUE(video_capturer_vga.CaptureFrame());
+ cricket::VideoMediaInfo info;
+ EXPECT_TRUE(channel_->GetStats(&info));
+ ASSERT_EQ(1U, info.senders.size());
+ EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU,
+ info.senders[0].adapt_reason);
+
+ // Set bandwidth limitation stats for the stream -> adapt CPU + BW.
+ webrtc::VideoSendStream::Stats stats;
+ stats.bw_limited_resolution = true;
+ fake_call_->GetVideoSendStreams().front()->SetStats(stats);
+ info.Clear();
+ EXPECT_TRUE(channel_->GetStats(&info));
+ ASSERT_EQ(1U, info.senders.size());
+ EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_CPU +
+ CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
+ info.senders[0].adapt_reason);
+
+ // Trigger upgrade -> adapt BW.
+ overuse_callback->OnLoadUpdate(webrtc::LoadObserver::kUnderuse);
+ EXPECT_TRUE(video_capturer_vga.CaptureFrame());
+ info.Clear();
+ EXPECT_TRUE(channel_->GetStats(&info));
+ ASSERT_EQ(1U, info.senders.size());
+ EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
+ info.senders[0].adapt_reason);
+
+ // Reset bandwidth limitation state -> adapt NONE.
+ stats.bw_limited_resolution = false;
+ fake_call_->GetVideoSendStreams().front()->SetStats(stats);
+ info.Clear();
+ EXPECT_TRUE(channel_->GetStats(&info));
+ ASSERT_EQ(1U, info.senders.size());
+ EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_NONE,
+ info.senders[0].adapt_reason);
+
+ EXPECT_TRUE(channel_->SetCapturer(kSsrcs3[0], NULL));
+}
+
+TEST_F(WebRtcVideoChannel2Test,
+ GetStatsTranslatesBandwidthLimitedResolutionCorrectly) {
+ FakeVideoSendStream* stream = AddSendStream();
+ webrtc::VideoSendStream::Stats stats;
+ stats.bw_limited_resolution = true;
+ stream->SetStats(stats);
+
+ cricket::VideoMediaInfo info;
+ EXPECT_TRUE(channel_->GetStats(&info));
+ ASSERT_EQ(1U, info.senders.size());
+ EXPECT_EQ(CoordinatedVideoAdapter::ADAPTREASON_BANDWIDTH,
+ info.senders[0].adapt_reason);
+}
+
TEST_F(WebRtcVideoChannel2Test,
GetStatsTranslatesSendRtcpPacketTypesCorrectly) {
FakeVideoSendStream* stream = AddSendStream();
@@ -2561,6 +2692,7 @@ TEST_F(WebRtcVideoChannel2Test,
TEST_F(WebRtcVideoChannel2Test, GetStatsTranslatesDecodeStatsCorrectly) {
FakeVideoReceiveStream* stream = AddRecvStream();
webrtc::VideoReceiveStream::Stats stats;
+ stats.decoder_implementation_name = "decoder_implementation_name";
stats.decode_ms = 2;
stats.max_decode_ms = 3;
stats.current_delay_ms = 4;
@@ -2572,6 +2704,8 @@ TEST_F(WebRtcVideoChannel2Test, GetStatsTranslatesDecodeStatsCorrectly) {
cricket::VideoMediaInfo info;
ASSERT_TRUE(channel_->GetStats(&info));
+ EXPECT_EQ(stats.decoder_implementation_name,
+ info.receivers[0].decoder_implementation_name);
EXPECT_EQ(stats.decode_ms, info.receivers[0].decode_ms);
EXPECT_EQ(stats.max_decode_ms, info.receivers[0].max_decode_ms);
EXPECT_EQ(stats.current_delay_ms, info.receivers[0].current_delay_ms);
diff --git a/talk/media/webrtc/webrtcvideoframe.cc b/talk/media/webrtc/webrtcvideoframe.cc
index 7da7e3b7fb..fcc991c753 100644
--- a/talk/media/webrtc/webrtcvideoframe.cc
+++ b/talk/media/webrtc/webrtcvideoframe.cc
@@ -56,17 +56,6 @@ WebRtcVideoFrame::WebRtcVideoFrame(
rotation_(rotation) {
}
-WebRtcVideoFrame::WebRtcVideoFrame(
- const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
- int64_t elapsed_time_ns,
- int64_t time_stamp_ns)
- : video_frame_buffer_(buffer),
- pixel_width_(1),
- pixel_height_(1),
- time_stamp_ns_(time_stamp_ns),
- rotation_(webrtc::kVideoRotation_0) {
-}
-
WebRtcVideoFrame::~WebRtcVideoFrame() {}
bool WebRtcVideoFrame::Init(uint32_t format,
@@ -90,13 +79,7 @@ bool WebRtcVideoFrame::Init(const CapturedFrame* frame, int dw, int dh,
return Reset(frame->fourcc, frame->width, frame->height, dw, dh,
static_cast<uint8_t*>(frame->data), frame->data_size,
frame->pixel_width, frame->pixel_height, frame->time_stamp,
- frame->GetRotation(), apply_rotation);
-}
-
-bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
- size_t pixel_height, int64_t,
- int64_t time_stamp_ns) {
- return InitToBlack(w, h, pixel_width, pixel_height, time_stamp_ns);
+ frame->rotation, apply_rotation);
}
bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
diff --git a/talk/media/webrtc/webrtcvideoframe.h b/talk/media/webrtc/webrtcvideoframe.h
index 0928c59324..827cf28821 100644
--- a/talk/media/webrtc/webrtcvideoframe.h
+++ b/talk/media/webrtc/webrtcvideoframe.h
@@ -33,7 +33,7 @@
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
namespace cricket {
@@ -46,11 +46,6 @@ class WebRtcVideoFrame : public VideoFrame {
int64_t time_stamp_ns,
webrtc::VideoRotation rotation);
- // TODO(guoweis): Remove this when chrome code base is updated.
- WebRtcVideoFrame(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
- int64_t elapsed_time_ns,
- int64_t time_stamp_ns);
-
~WebRtcVideoFrame();
// Creates a frame from a raw sample with FourCC "format" and size "w" x "h".
@@ -74,10 +69,6 @@ class WebRtcVideoFrame : public VideoFrame {
void InitToEmptyBuffer(int w, int h, size_t pixel_width, size_t pixel_height,
int64_t time_stamp_ns);
- // TODO(magjed): Remove once Chromium is updated.
- bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height,
- int64_t elapsed_time_ns, int64_t time_stamp_ns);
-
bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height,
int64_t time_stamp_ns) override;
diff --git a/talk/media/webrtc/webrtcvoe.h b/talk/media/webrtc/webrtcvoe.h
index db6a64a1fe..aa705a014d 100644
--- a/talk/media/webrtc/webrtcvoe.h
+++ b/talk/media/webrtc/webrtcvoe.h
@@ -36,7 +36,6 @@
#include "webrtc/voice_engine/include/voe_audio_processing.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_dtmf.h"
#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/include/voe_hardware.h"
#include "webrtc/voice_engine/include/voe_network.h"
@@ -91,14 +90,13 @@ class VoEWrapper {
public:
VoEWrapper()
: engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
- base_(engine_), codec_(engine_), dtmf_(engine_),
+ base_(engine_), codec_(engine_),
hw_(engine_), network_(engine_),
rtp_(engine_), volume_(engine_) {
}
VoEWrapper(webrtc::VoEAudioProcessing* processing,
webrtc::VoEBase* base,
webrtc::VoECodec* codec,
- webrtc::VoEDtmf* dtmf,
webrtc::VoEHardware* hw,
webrtc::VoENetwork* network,
webrtc::VoERTP_RTCP* rtp,
@@ -107,7 +105,6 @@ class VoEWrapper {
processing_(processing),
base_(base),
codec_(codec),
- dtmf_(dtmf),
hw_(hw),
network_(network),
rtp_(rtp),
@@ -118,7 +115,6 @@ class VoEWrapper {
webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
webrtc::VoEBase* base() const { return base_.get(); }
webrtc::VoECodec* codec() const { return codec_.get(); }
- webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
webrtc::VoEHardware* hw() const { return hw_.get(); }
webrtc::VoENetwork* network() const { return network_.get(); }
webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
@@ -130,29 +126,11 @@ class VoEWrapper {
scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
scoped_voe_ptr<webrtc::VoEBase> base_;
scoped_voe_ptr<webrtc::VoECodec> codec_;
- scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
scoped_voe_ptr<webrtc::VoEHardware> hw_;
scoped_voe_ptr<webrtc::VoENetwork> network_;
scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
};
-
-// Adds indirection to static WebRtc functions, allowing them to be mocked.
-class VoETraceWrapper {
- public:
- virtual ~VoETraceWrapper() {}
-
- virtual int SetTraceFilter(const unsigned int filter) {
- return webrtc::VoiceEngine::SetTraceFilter(filter);
- }
- virtual int SetTraceFile(const char* fileNameUTF8) {
- return webrtc::VoiceEngine::SetTraceFile(fileNameUTF8);
- }
- virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
- return webrtc::VoiceEngine::SetTraceCallback(callback);
- }
-};
-
} // namespace cricket
#endif // TALK_MEDIA_WEBRTCVOE_H_
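
With VoETraceWrapper removed, trace configuration now calls the static webrtc::Trace API directly, as the webrtcvoiceengine.cc hunks below do. A sketch of the pattern (the filter value here is illustrative; the calls are the ones this change uses):

  webrtc::Trace::set_level_filter(webrtc::kTraceWarning | webrtc::kTraceError);
  webrtc::Trace::SetTraceCallback(callback);   // e.g. |this| in the engine ctor
  webrtc::Trace::SetTraceCallback(nullptr);    // detach again on teardown

The trade-off, which the deleted wrapper's own comment ("allowing them to be mocked") spelled out, is that these statics can no longer be mocked in tests.
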
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index 27ca1deb2d..9192b72539 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -42,7 +42,10 @@
#include "talk/media/base/audiorenderer.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/streamparams.h"
+#include "talk/media/webrtc/webrtcmediaengine.h"
#include "talk/media/webrtc/webrtcvoe.h"
+#include "webrtc/audio/audio_sink.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/base64.h"
#include "webrtc/base/byteorder.h"
#include "webrtc/base/common.h"
@@ -52,53 +55,26 @@
#include "webrtc/base/stringutils.h"
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/common.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/field_trial.h"
+#include "webrtc/system_wrappers/include/trace.h"
namespace cricket {
namespace {
-const int kMaxNumPacketSize = 6;
-struct CodecPref {
- const char* name;
- int clockrate;
- int channels;
- int payload_type;
- bool is_multi_rate;
- int packet_sizes_ms[kMaxNumPacketSize];
-};
-// Note: keep the supported packet sizes in ascending order.
-const CodecPref kCodecPrefs[] = {
- { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } },
- { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } },
- { kIsacCodecName, 32000, 1, 104, true, { 30 } },
- // G722 should be advertised as 8000 Hz because of the RFC "bug".
- { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } },
- { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } },
- { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } },
- { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } },
- { kCnCodecName, 32000, 1, 106, false, { } },
- { kCnCodecName, 16000, 1, 105, false, { } },
- { kCnCodecName, 8000, 1, 13, false, { } },
- { kRedCodecName, 8000, 1, 127, false, { } },
- { kDtmfCodecName, 8000, 1, 126, false, { } },
-};
+const int kDefaultTraceFilter = webrtc::kTraceNone | webrtc::kTraceTerseInfo |
+ webrtc::kTraceWarning | webrtc::kTraceError |
+ webrtc::kTraceCritical;
+const int kElevatedTraceFilter = kDefaultTraceFilter | webrtc::kTraceStateInfo |
+ webrtc::kTraceInfo;
-// For Linux/Mac, using the default device is done by specifying index 0 for
-// VoE 4.0 and not -1 (which was the case for VoE 3.5).
-//
// On Windows Vista and newer, Microsoft introduced the concept of "Default
// Communications Device". This means that there are two types of default
// devices (old Wave Audio style default and Default Communications Device).
//
// On Windows systems that only support the Wave Audio style default, use
// either -1 or 0 to select the default device.
-//
-// On Windows systems which support both "Default Communication Device" and
-// old Wave Audio style default, use -1 for Default Communications Device and
-// -2 for Wave Audio style default, which is what we want to use for clips.
-// It's not clear yet whether the -2 index is handled properly on other OSes.
-
#ifdef WIN32
const int kDefaultAudioDeviceId = -1;
#else
@@ -150,6 +126,12 @@ const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump";
const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
#endif
+// Constants from voice_engine_defines.h.
+const int kMinTelephoneEventCode = 0; // RFC4733 (Section 2.3.1)
+const int kMaxTelephoneEventCode = 255;
+const int kMinTelephoneEventDuration = 100;
+const int kMaxTelephoneEventDuration = 60000; // Actual limit is 2^16
+
bool ValidateStreamParams(const StreamParams& sp) {
if (sp.ssrcs.empty()) {
LOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString();
@@ -177,32 +159,6 @@ std::string ToString(const webrtc::CodecInst& codec) {
return ss.str();
}
-void LogMultiline(rtc::LoggingSeverity sev, char* text) {
- const char* delim = "\r\n";
- for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
- LOG_V(sev) << tok;
- }
-}
-
-// Severity is an integer because it is assumed to come from the command line.
-int SeverityToFilter(int severity) {
- int filter = webrtc::kTraceNone;
- switch (severity) {
- case rtc::LS_VERBOSE:
- filter |= webrtc::kTraceAll;
- FALLTHROUGH();
- case rtc::LS_INFO:
- filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
- FALLTHROUGH();
- case rtc::LS_WARNING:
- filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
- FALLTHROUGH();
- case rtc::LS_ERROR:
- filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
- }
- return filter;
-}
-
bool IsCodec(const AudioCodec& codec, const char* ref_name) {
return (_stricmp(codec.name.c_str(), ref_name) == 0);
}
@@ -211,19 +167,9 @@ bool IsCodec(const webrtc::CodecInst& codec, const char* ref_name) {
return (_stricmp(codec.plname, ref_name) == 0);
}
-bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
- for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
- if (IsCodec(codec, kCodecPrefs[i].name) &&
- kCodecPrefs[i].clockrate == codec.plfreq) {
- return kCodecPrefs[i].is_multi_rate;
- }
- }
- return false;
-}
-
bool FindCodec(const std::vector<AudioCodec>& codecs,
- const AudioCodec& codec,
- AudioCodec* found_codec) {
+ const AudioCodec& codec,
+ AudioCodec* found_codec) {
for (const AudioCodec& c : codecs) {
if (c.Matches(codec)) {
if (found_codec != NULL) {
@@ -253,38 +199,8 @@ bool IsNackEnabled(const AudioCodec& codec) {
kParamValueEmpty));
}
-int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) {
- int selected_packet_size_ms = codec_pref.packet_sizes_ms[0];
- for (int packet_size_ms : codec_pref.packet_sizes_ms) {
- if (packet_size_ms && packet_size_ms <= ptime_ms) {
- selected_packet_size_ms = packet_size_ms;
- }
- }
- return selected_packet_size_ms;
-}
-
-// If the AudioCodec param kCodecParamPTime is set, then we will set it to codec
-// pacsize if it's valid, or we will pick the next smallest value we support.
-// TODO(Brave): Query supported packet sizes from ACM when the API is ready.
-bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) {
- for (const CodecPref& codec_pref : kCodecPrefs) {
- if ((IsCodec(*codec, codec_pref.name) &&
- codec_pref.clockrate == codec->plfreq) ||
- IsCodec(*codec, kG722CodecName)) {
- int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms);
- if (packet_size_ms) {
- // Convert unit from milli-seconds to samples.
- codec->pacsize = (codec->plfreq / 1000) * packet_size_ms;
- return true;
- }
- }
- }
- return false;
-}
-
// Return true if codec.params[feature] == "1", false otherwise.
-bool IsCodecFeatureEnabled(const AudioCodec& codec,
- const char* feature) {
+bool IsCodecFeatureEnabled(const AudioCodec& codec, const char* feature) {
int value;
return codec.GetParam(feature, &value) && value == 1;
}
@@ -351,109 +267,29 @@ void GetOpusConfig(const AudioCodec& codec, webrtc::CodecInst* voe_codec,
voe_codec->rate = GetOpusBitrate(codec, *max_playback_rate);
}
-// Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC
-// which says that G722 should be advertised as 8 kHz although it is a 16 kHz
-// codec.
-void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) {
- if (IsCodec(*voe_codec, kG722CodecName)) {
- // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine
- // has changed, and this special case is no longer needed.
- RTC_DCHECK(voe_codec->plfreq != new_plfreq);
- voe_codec->plfreq = new_plfreq;
- }
-}
-
-// Gets the default set of options applied to the engine. Historically, these
-// were supplied as a combination of flags from the channel manager (ec, agc,
-// ns, and highpass) and the rest hardcoded in InitInternal.
-AudioOptions GetDefaultEngineOptions() {
- AudioOptions options;
- options.echo_cancellation.Set(true);
- options.auto_gain_control.Set(true);
- options.noise_suppression.Set(true);
- options.highpass_filter.Set(true);
- options.stereo_swapping.Set(false);
- options.audio_jitter_buffer_max_packets.Set(50);
- options.audio_jitter_buffer_fast_accelerate.Set(false);
- options.typing_detection.Set(true);
- options.adjust_agc_delta.Set(0);
- options.experimental_agc.Set(false);
- options.extended_filter_aec.Set(false);
- options.delay_agnostic_aec.Set(false);
- options.experimental_ns.Set(false);
- options.aec_dump.Set(false);
- return options;
-}
-
-std::string GetEnableString(bool enable) {
- return enable ? "enable" : "disable";
-}
-} // namespace {
-
-WebRtcVoiceEngine::WebRtcVoiceEngine()
- : voe_wrapper_(new VoEWrapper()),
- tracing_(new VoETraceWrapper()),
- adm_(NULL),
- log_filter_(SeverityToFilter(kDefaultLogSeverity)),
- is_dumping_aec_(false) {
- Construct();
-}
-
-WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
- VoETraceWrapper* tracing)
- : voe_wrapper_(voe_wrapper),
- tracing_(tracing),
- adm_(NULL),
- log_filter_(SeverityToFilter(kDefaultLogSeverity)),
- is_dumping_aec_(false) {
- Construct();
-}
-
-void WebRtcVoiceEngine::Construct() {
- SetTraceFilter(log_filter_);
- initialized_ = false;
- LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
- SetTraceOptions("");
- if (tracing_->SetTraceCallback(this) == -1) {
- LOG_RTCERR0(SetTraceCallback);
- }
- if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
- LOG_RTCERR0(RegisterVoiceEngineObserver);
- }
- // Clear the default agc state.
- memset(&default_agc_config_, 0, sizeof(default_agc_config_));
-
- // Load our audio codec list.
- ConstructCodecs();
-
- // Load our RTP Header extensions.
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
- kRtpAudioLevelHeaderExtensionDefaultId));
- rtp_header_extensions_.push_back(
- RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
- kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
- if (webrtc::field_trial::FindFullName("WebRTC-SendSideBwe") == "Enabled") {
- rtp_header_extensions_.push_back(RtpHeaderExtension(
- kRtpTransportSequenceNumberHeaderExtension,
- kRtpTransportSequenceNumberHeaderExtensionDefaultId));
- }
- options_ = GetDefaultEngineOptions();
+webrtc::AudioState::Config MakeAudioStateConfig(VoEWrapper* voe_wrapper) {
+ webrtc::AudioState::Config config;
+ config.voice_engine = voe_wrapper->engine();
+ return config;
}
-void WebRtcVoiceEngine::ConstructCodecs() {
- LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
- int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (GetVoeCodec(i, &voe_codec)) {
+class WebRtcVoiceCodecs final {
+ public:
+ // TODO(solenberg): Do this filtering once off-line, add a simple AudioCodec
+ // list and add a test which verifies VoE supports the listed codecs.
+ static std::vector<AudioCodec> SupportedCodecs() {
+ LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+ std::vector<AudioCodec> result;
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ // Change the sample rate of G722 to 8000 to match SDP.
+ MaybeFixupG722(&voe_codec, 8000);
// Skip uncompressed formats.
if (IsCodec(voe_codec, kL16CodecName)) {
continue;
}
const CodecPref* pref = NULL;
- for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
+ for (size_t j = 0; j < arraysize(kCodecPrefs); ++j) {
if (IsCodec(voe_codec, kCodecPrefs[j].name) &&
kCodecPrefs[j].clockrate == voe_codec.plfreq &&
kCodecPrefs[j].channels == voe_codec.channels) {
@@ -465,9 +301,10 @@ void WebRtcVoiceEngine::ConstructCodecs() {
if (pref) {
// Use the payload type that we've configured in our pref table;
// use the offset in our pref table to determine the sort order.
- AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
- voe_codec.rate, voe_codec.channels,
- ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
+ AudioCodec codec(
+ pref->payload_type, voe_codec.plname, voe_codec.plfreq,
+ voe_codec.rate, voe_codec.channels,
+ static_cast<int>(arraysize(kCodecPrefs)) - (pref - kCodecPrefs));
LOG(LS_INFO) << ToString(codec);
if (IsCodec(codec, kIsacCodecName)) {
// Indicate auto-bitrate in signaling.
@@ -488,40 +325,183 @@ void WebRtcVoiceEngine::ConstructCodecs() {
// TODO(hellner): Add ptime, sprop-stereo, and stereo
// when they can be set to values other than the default.
}
- codecs_.push_back(codec);
+ result.push_back(codec);
} else {
LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
}
}
+ // Make sure they are in local preference order.
+ std::sort(result.begin(), result.end(), &AudioCodec::Preferable);
+ return result;
+ }
+
+ static bool ToCodecInst(const AudioCodec& in,
+ webrtc::CodecInst* out) {
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ // Change the sample rate of G722 to 8000 to match SDP.
+ MaybeFixupG722(&voe_codec, 8000);
+ AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
+ voe_codec.rate, voe_codec.channels, 0);
+ bool multi_rate = IsCodecMultiRate(voe_codec);
+ // Allow arbitrary rates for ISAC to be specified.
+ if (multi_rate) {
+ // Set codec.bitrate to 0 so the check for codec.Matches() passes.
+ codec.bitrate = 0;
+ }
+ if (codec.Matches(in)) {
+ if (out) {
+ // Fixup the payload type.
+ voe_codec.pltype = in.id;
+
+ // Set bitrate if specified.
+ if (multi_rate && in.bitrate != 0) {
+ voe_codec.rate = in.bitrate;
+ }
+
+ // Reset G722 sample rate to 16000 to match WebRTC.
+ MaybeFixupG722(&voe_codec, 16000);
+
+ // Apply codec-specific settings.
+ if (IsCodec(codec, kIsacCodecName)) {
+ // If ISAC and an explicit bitrate is not specified,
+ // enable auto bitrate adjustment.
+ voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
+ }
+ *out = voe_codec;
+ }
+ return true;
+ }
+ }
+ return false;
}
- // Make sure they are in local preference order.
- std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
-}
-bool WebRtcVoiceEngine::GetVoeCodec(int index, webrtc::CodecInst* codec) {
- if (voe_wrapper_->codec()->GetCodec(index, *codec) == -1) {
+ static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
+ for (size_t i = 0; i < arraysize(kCodecPrefs); ++i) {
+ if (IsCodec(codec, kCodecPrefs[i].name) &&
+ kCodecPrefs[i].clockrate == codec.plfreq) {
+ return kCodecPrefs[i].is_multi_rate;
+ }
+ }
return false;
}
- // Change the sample rate of G722 to 8000 to match SDP.
- MaybeFixupG722(codec, 8000);
- return true;
+
+ // If the AudioCodec param kCodecParamPTime is set, use it as the codec
+ // pacsize if it is a supported packet time; otherwise pick the next
+ // smallest packet time we do support.
+ // TODO(Brave): Query supported packet sizes from ACM when the API is ready.
+ static bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) {
+ for (const CodecPref& codec_pref : kCodecPrefs) {
+ if ((IsCodec(*codec, codec_pref.name) &&
+ codec_pref.clockrate == codec->plfreq) ||
+ IsCodec(*codec, kG722CodecName)) {
+ int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms);
+ if (packet_size_ms) {
+ // Convert unit from milliseconds to samples.
+ codec->pacsize = (codec->plfreq / 1000) * packet_size_ms;
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private:
+ static const int kMaxNumPacketSize = 6;
+ struct CodecPref {
+ const char* name;
+ int clockrate;
+ size_t channels;
+ int payload_type;
+ bool is_multi_rate;
+ int packet_sizes_ms[kMaxNumPacketSize];
+ };
+ // Note: keep the supported packet sizes in ascending order.
+ static const CodecPref kCodecPrefs[12];
+
+ static int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) {
+ int selected_packet_size_ms = codec_pref.packet_sizes_ms[0];
+ for (int packet_size_ms : codec_pref.packet_sizes_ms) {
+ if (packet_size_ms && packet_size_ms <= ptime_ms) {
+ selected_packet_size_ms = packet_size_ms;
+ }
+ }
+ return selected_packet_size_ms;
+ }
+
+ // Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC
+ // which says that G722 should be advertised as 8 kHz although it is a 16 kHz
+ // codec.
+ static void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) {
+ if (IsCodec(*voe_codec, kG722CodecName)) {
+ // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine
+ // has changed, and this special case is no longer needed.
+ RTC_DCHECK(voe_codec->plfreq != new_plfreq);
+ voe_codec->plfreq = new_plfreq;
+ }
+ }
+};
+
+const WebRtcVoiceCodecs::CodecPref WebRtcVoiceCodecs::kCodecPrefs[12] = {
+ { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } },
+ { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } },
+ { kIsacCodecName, 32000, 1, 104, true, { 30 } },
+ // G722 should be advertised as 8000 Hz because of the RFC "bug".
+ { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } },
+ { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } },
+ { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } },
+ { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } },
+ { kCnCodecName, 32000, 1, 106, false, { } },
+ { kCnCodecName, 16000, 1, 105, false, { } },
+ { kCnCodecName, 8000, 1, 13, false, { } },
+ { kRedCodecName, 8000, 1, 127, false, { } },
+ { kDtmfCodecName, 8000, 1, 126, false, { } },
+};
+} // namespace {
+
+bool WebRtcVoiceEngine::ToCodecInst(const AudioCodec& in,
+ webrtc::CodecInst* out) {
+ return WebRtcVoiceCodecs::ToCodecInst(in, out);
+}
+
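
A usage sketch for the mapping above. WebRtcVoiceCodecs lives in this file's anonymous namespace, so external callers go through the WebRtcVoiceEngine::ToCodecInst wrapper; the codec values below mirror the kCodecPrefs entry for Opus:

  cricket::AudioCodec opus(111, "opus", 48000, 0, 2, 0);
  webrtc::CodecInst inst;
  if (WebRtcVoiceCodecs::ToCodecInst(opus, &inst)) {
    // inst.pltype == 111, inst.plfreq == 48000; inst can now be handed to
    // the VoECodec APIs.
  }
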
+WebRtcVoiceEngine::WebRtcVoiceEngine()
+ : voe_wrapper_(new VoEWrapper()),
+ audio_state_(webrtc::AudioState::Create(MakeAudioStateConfig(voe()))) {
+ Construct();
+}
+
+WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper)
+ : voe_wrapper_(voe_wrapper) {
+ Construct();
+}
+
+void WebRtcVoiceEngine::Construct() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
+
+ signal_thread_checker_.DetachFromThread();
+ std::memset(&default_agc_config_, 0, sizeof(default_agc_config_));
+ voe_config_.Set<webrtc::VoicePacing>(new webrtc::VoicePacing(true));
+
+ webrtc::Trace::set_level_filter(kDefaultTraceFilter);
+ webrtc::Trace::SetTraceCallback(this);
+
+ // Load our audio codec list.
+ codecs_ = WebRtcVoiceCodecs::SupportedCodecs();
}
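
Construct() also wires up the rtc::ThreadChecker members asserted throughout this file. A compact sketch of the pattern, assuming the usual checker semantics visible in these calls (a checker binds to its constructing thread unless detached, after which the next calling thread binds it):

  class Example {
   public:
    Example() {
      // Rebind lazily: the first thread to call CalledOnValidThread() after
      // DetachFromThread() becomes the valid thread.
      signal_thread_checker_.DetachFromThread();
    }
    void OnWorkerThread() {
      RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
    }
   private:
    rtc::ThreadChecker worker_thread_checker_;  // binds at construction
    rtc::ThreadChecker signal_thread_checker_;
  };
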
WebRtcVoiceEngine::~WebRtcVoiceEngine() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
- if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
- LOG_RTCERR0(DeRegisterVoiceEngineObserver);
- }
if (adm_) {
voe_wrapper_.reset();
adm_->Release();
adm_ = NULL;
}
-
- tracing_->SetTraceCallback(NULL);
+ webrtc::Trace::SetTraceCallback(nullptr);
}
bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(worker_thread == rtc::Thread::Current());
LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
bool res = InitInternal();
@@ -535,59 +515,37 @@ bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
}
bool WebRtcVoiceEngine::InitInternal() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Temporarily turn logging level up for the Init call
- int old_filter = log_filter_;
- int extended_filter = log_filter_ | SeverityToFilter(rtc::LS_INFO);
- SetTraceFilter(extended_filter);
- SetTraceOptions("");
-
- // Init WebRtc VoiceEngine.
+ webrtc::Trace::set_level_filter(kElevatedTraceFilter);
+ LOG(LS_INFO) << webrtc::VoiceEngine::GetVersionString();
if (voe_wrapper_->base()->Init(adm_) == -1) {
LOG_RTCERR0_EX(Init, voe_wrapper_->error());
- SetTraceFilter(old_filter);
return false;
}
-
- SetTraceFilter(old_filter);
- SetTraceOptions(log_options_);
-
- // Log the VoiceEngine version info
- char buffer[1024] = "";
- voe_wrapper_->base()->GetVersion(buffer);
- LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
- LogMultiline(rtc::LS_INFO, buffer);
+ webrtc::Trace::set_level_filter(kDefaultTraceFilter);
// Save the default AGC configuration settings. This must happen before
- // calling SetOptions or the default will be overwritten.
+ // calling ApplyOptions or the default will be overwritten.
if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
LOG_RTCERR0(GetAgcConfig);
return false;
}
- // Set defaults for options, so that ApplyOptions applies them explicitly
- // when we clear option (channel) overrides. External clients can still
- // modify the defaults via SetOptions (on the media engine).
- if (!SetOptions(GetDefaultEngineOptions())) {
- return false;
- }
-
// Print our codec list again for the call diagnostic log
LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
for (const AudioCodec& codec : codecs_) {
LOG(LS_INFO) << ToString(codec);
}
- // Disable the DTMF playout when a tone is sent.
- // PlayDtmfTone will be used if local playout is needed.
- if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
- LOG_RTCERR1(SetDtmfFeedbackStatus, false);
- }
+ SetDefaultDevices();
initialized_ = true;
return true;
}
void WebRtcVoiceEngine::Terminate() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
initialized_ = false;
@@ -596,62 +554,81 @@ void WebRtcVoiceEngine::Terminate() {
voe_wrapper_->base()->Terminate();
}
+rtc::scoped_refptr<webrtc::AudioState>
+ WebRtcVoiceEngine::GetAudioState() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return audio_state_;
+}
+
VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call,
const AudioOptions& options) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return new WebRtcVoiceMediaChannel(this, options, call);
}
-bool WebRtcVoiceEngine::SetOptions(const AudioOptions& options) {
- if (!ApplyOptions(options)) {
- return false;
- }
- options_ = options;
- return true;
-}
-
-// AudioOptions defaults are set in InitInternal (for options with corresponding
-// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "ApplyOptions: " << options_in.ToString();
- AudioOptions options = options_in; // The options are modified below.
+
+ // Default engine options.
+ AudioOptions options;
+ options.echo_cancellation = rtc::Optional<bool>(true);
+ options.auto_gain_control = rtc::Optional<bool>(true);
+ options.noise_suppression = rtc::Optional<bool>(true);
+ options.highpass_filter = rtc::Optional<bool>(true);
+ options.stereo_swapping = rtc::Optional<bool>(false);
+ options.audio_jitter_buffer_max_packets = rtc::Optional<int>(50);
+ options.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(false);
+ options.typing_detection = rtc::Optional<bool>(true);
+ options.adjust_agc_delta = rtc::Optional<int>(0);
+ options.experimental_agc = rtc::Optional<bool>(false);
+ options.extended_filter_aec = rtc::Optional<bool>(false);
+ options.delay_agnostic_aec = rtc::Optional<bool>(false);
+ options.experimental_ns = rtc::Optional<bool>(false);
+ options.aec_dump = rtc::Optional<bool>(false);
+
+ // Apply any given options on top.
+ options.SetAll(options_in);
+
// kEcConference is AEC with high suppression.
webrtc::EcModes ec_mode = webrtc::kEcConference;
webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
- bool aecm_comfort_noise = false;
- if (options.aecm_generate_comfort_noise.Get(&aecm_comfort_noise)) {
+ if (options.aecm_generate_comfort_noise) {
LOG(LS_VERBOSE) << "Comfort noise explicitly set to "
- << aecm_comfort_noise << " (default is false).";
+ << *options.aecm_generate_comfort_noise
+ << " (default is false).";
}
-#if defined(IOS)
+#if defined(WEBRTC_IOS)
// On iOS, VPIO provides built-in EC and AGC.
- options.echo_cancellation.Set(false);
- options.auto_gain_control.Set(false);
+ options.echo_cancellation = rtc::Optional<bool>(false);
+ options.auto_gain_control = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Always disable AEC and AGC on iOS. Use built-in instead.";
#elif defined(ANDROID)
ec_mode = webrtc::kEcAecm;
#endif
-#if defined(IOS) || defined(ANDROID)
+#if defined(WEBRTC_IOS) || defined(ANDROID)
// Set the AGC mode for iOS as well despite disabling it above, to avoid
// unsupported configuration errors from webrtc.
agc_mode = webrtc::kAgcFixedDigital;
- options.typing_detection.Set(false);
- options.experimental_agc.Set(false);
- options.extended_filter_aec.Set(false);
- options.experimental_ns.Set(false);
+ options.typing_detection = rtc::Optional<bool>(false);
+ options.experimental_agc = rtc::Optional<bool>(false);
+ options.extended_filter_aec = rtc::Optional<bool>(false);
+ options.experimental_ns = rtc::Optional<bool>(false);
#endif
  // Delay-agnostic AEC automatically turns on EC if it is not explicitly set,
  // except on iOS, where the feature is not supported.
bool use_delay_agnostic_aec = false;
-#if !defined(IOS)
- if (options.delay_agnostic_aec.Get(&use_delay_agnostic_aec)) {
+#if !defined(WEBRTC_IOS)
+ if (options.delay_agnostic_aec) {
+ use_delay_agnostic_aec = *options.delay_agnostic_aec;
if (use_delay_agnostic_aec) {
- options.echo_cancellation.Set(true);
- options.extended_filter_aec.Set(true);
+ options.echo_cancellation = rtc::Optional<bool>(true);
+ options.extended_filter_aec = rtc::Optional<bool>(true);
ec_mode = webrtc::kEcConference;
}
}
@@ -659,8 +636,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
- bool echo_cancellation = false;
- if (options.echo_cancellation.Get(&echo_cancellation)) {
+ if (options.echo_cancellation) {
// Check if platform supports built-in EC. Currently only supported on
    // Android and in combination with the Java-based audio layer.
// TODO(henrika): investigate possibility to support built-in EC also
@@ -671,63 +647,61 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
// overriding it. Enable/Disable it according to the echo_cancellation
// audio option.
const bool enable_built_in_aec =
- echo_cancellation && !use_delay_agnostic_aec;
+ *options.echo_cancellation && !use_delay_agnostic_aec;
if (voe_wrapper_->hw()->EnableBuiltInAEC(enable_built_in_aec) == 0 &&
enable_built_in_aec) {
// Disable internal software EC if built-in EC is enabled,
// i.e., replace the software EC with the built-in EC.
- options.echo_cancellation.Set(false);
- echo_cancellation = false;
+ options.echo_cancellation = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling EC since built-in EC will be used instead";
}
}
- if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
- LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
+ if (voep->SetEcStatus(*options.echo_cancellation, ec_mode) == -1) {
+ LOG_RTCERR2(SetEcStatus, *options.echo_cancellation, ec_mode);
return false;
} else {
- LOG(LS_INFO) << "Echo control set to " << echo_cancellation
+ LOG(LS_INFO) << "Echo control set to " << *options.echo_cancellation
<< " with mode " << ec_mode;
}
#if !defined(ANDROID)
// TODO(ajm): Remove the error return on Android from webrtc.
- if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
- LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
+ if (voep->SetEcMetricsStatus(*options.echo_cancellation) == -1) {
+ LOG_RTCERR1(SetEcMetricsStatus, *options.echo_cancellation);
return false;
}
#endif
if (ec_mode == webrtc::kEcAecm) {
- if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
- LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
+ bool cn = options.aecm_generate_comfort_noise.value_or(false);
+ if (voep->SetAecmMode(aecm_mode, cn) != 0) {
+ LOG_RTCERR2(SetAecmMode, aecm_mode, cn);
return false;
}
}
}
- bool auto_gain_control = false;
- if (options.auto_gain_control.Get(&auto_gain_control)) {
+ if (options.auto_gain_control) {
const bool built_in_agc = voe_wrapper_->hw()->BuiltInAGCIsAvailable();
if (built_in_agc) {
- if (voe_wrapper_->hw()->EnableBuiltInAGC(auto_gain_control) == 0 &&
- auto_gain_control) {
+ if (voe_wrapper_->hw()->EnableBuiltInAGC(*options.auto_gain_control) ==
+ 0 &&
+ *options.auto_gain_control) {
// Disable internal software AGC if built-in AGC is enabled,
// i.e., replace the software AGC with the built-in AGC.
- options.auto_gain_control.Set(false);
- auto_gain_control = false;
+ options.auto_gain_control = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling AGC since built-in AGC will be used instead";
}
}
- if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
- LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
+ if (voep->SetAgcStatus(*options.auto_gain_control, agc_mode) == -1) {
+ LOG_RTCERR2(SetAgcStatus, *options.auto_gain_control, agc_mode);
return false;
} else {
- LOG(LS_INFO) << "Auto gain set to " << auto_gain_control << " with mode "
- << agc_mode;
+ LOG(LS_INFO) << "Auto gain set to " << *options.auto_gain_control
+ << " with mode " << agc_mode;
}
}
- if (options.tx_agc_target_dbov.IsSet() ||
- options.tx_agc_digital_compression_gain.IsSet() ||
- options.tx_agc_limiter.IsSet()) {
+ if (options.tx_agc_target_dbov || options.tx_agc_digital_compression_gain ||
+ options.tx_agc_limiter) {
// Override default_agc_config_. Generally, an unset option means "leave
// the VoE bits alone" in this function, so we want whatever is set to be
// stored as the new "default". If we didn't, then setting e.g.
@@ -736,15 +710,13 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
// Also, if we don't update default_agc_config_, then adjust_agc_delta
// would be an offset from the original values, and not whatever was set
// explicitly.
- default_agc_config_.targetLeveldBOv =
- options.tx_agc_target_dbov.GetWithDefaultIfUnset(
- default_agc_config_.targetLeveldBOv);
+ default_agc_config_.targetLeveldBOv = options.tx_agc_target_dbov.value_or(
+ default_agc_config_.targetLeveldBOv);
default_agc_config_.digitalCompressionGaindB =
- options.tx_agc_digital_compression_gain.GetWithDefaultIfUnset(
+ options.tx_agc_digital_compression_gain.value_or(
default_agc_config_.digitalCompressionGaindB);
default_agc_config_.limiterEnable =
- options.tx_agc_limiter.GetWithDefaultIfUnset(
- default_agc_config_.limiterEnable);
+ options.tx_agc_limiter.value_or(default_agc_config_.limiterEnable);
if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) {
LOG_RTCERR3(SetAgcConfig,
default_agc_config_.targetLeveldBOv,
@@ -754,84 +726,79 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
}
}
- bool noise_suppression = false;
- if (options.noise_suppression.Get(&noise_suppression)) {
+ if (options.noise_suppression) {
const bool built_in_ns = voe_wrapper_->hw()->BuiltInNSIsAvailable();
if (built_in_ns) {
- if (voe_wrapper_->hw()->EnableBuiltInNS(noise_suppression) == 0 &&
- noise_suppression) {
+ if (voe_wrapper_->hw()->EnableBuiltInNS(*options.noise_suppression) ==
+ 0 &&
+ *options.noise_suppression) {
// Disable internal software NS if built-in NS is enabled,
// i.e., replace the software NS with the built-in NS.
- options.noise_suppression.Set(false);
- noise_suppression = false;
+ options.noise_suppression = rtc::Optional<bool>(false);
LOG(LS_INFO) << "Disabling NS since built-in NS will be used instead";
}
}
- if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
- LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
+ if (voep->SetNsStatus(*options.noise_suppression, ns_mode) == -1) {
+ LOG_RTCERR2(SetNsStatus, *options.noise_suppression, ns_mode);
return false;
} else {
- LOG(LS_INFO) << "Noise suppression set to " << noise_suppression
+ LOG(LS_INFO) << "Noise suppression set to " << *options.noise_suppression
<< " with mode " << ns_mode;
}
}
- bool highpass_filter;
- if (options.highpass_filter.Get(&highpass_filter)) {
- LOG(LS_INFO) << "High pass filter enabled? " << highpass_filter;
- if (voep->EnableHighPassFilter(highpass_filter) == -1) {
- LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
+ if (options.highpass_filter) {
+ LOG(LS_INFO) << "High pass filter enabled? " << *options.highpass_filter;
+ if (voep->EnableHighPassFilter(*options.highpass_filter) == -1) {
+ LOG_RTCERR1(SetHighpassFilterStatus, *options.highpass_filter);
return false;
}
}
- bool stereo_swapping;
- if (options.stereo_swapping.Get(&stereo_swapping)) {
- LOG(LS_INFO) << "Stereo swapping enabled? " << stereo_swapping;
- voep->EnableStereoChannelSwapping(stereo_swapping);
- if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
- LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
+ if (options.stereo_swapping) {
+ LOG(LS_INFO) << "Stereo swapping enabled? " << *options.stereo_swapping;
+ voep->EnableStereoChannelSwapping(*options.stereo_swapping);
+ if (voep->IsStereoChannelSwappingEnabled() != *options.stereo_swapping) {
+ LOG_RTCERR1(EnableStereoChannelSwapping, *options.stereo_swapping);
return false;
}
}
- int audio_jitter_buffer_max_packets;
- if (options.audio_jitter_buffer_max_packets.Get(
- &audio_jitter_buffer_max_packets)) {
- LOG(LS_INFO) << "NetEq capacity is " << audio_jitter_buffer_max_packets;
+ if (options.audio_jitter_buffer_max_packets) {
+ LOG(LS_INFO) << "NetEq capacity is "
+ << *options.audio_jitter_buffer_max_packets;
voe_config_.Set<webrtc::NetEqCapacityConfig>(
- new webrtc::NetEqCapacityConfig(audio_jitter_buffer_max_packets));
+ new webrtc::NetEqCapacityConfig(
+ *options.audio_jitter_buffer_max_packets));
}
- bool audio_jitter_buffer_fast_accelerate;
- if (options.audio_jitter_buffer_fast_accelerate.Get(
- &audio_jitter_buffer_fast_accelerate)) {
- LOG(LS_INFO) << "NetEq fast mode? " << audio_jitter_buffer_fast_accelerate;
+ if (options.audio_jitter_buffer_fast_accelerate) {
+ LOG(LS_INFO) << "NetEq fast mode? "
+ << *options.audio_jitter_buffer_fast_accelerate;
voe_config_.Set<webrtc::NetEqFastAccelerate>(
- new webrtc::NetEqFastAccelerate(audio_jitter_buffer_fast_accelerate));
+ new webrtc::NetEqFastAccelerate(
+ *options.audio_jitter_buffer_fast_accelerate));
}
- bool typing_detection;
- if (options.typing_detection.Get(&typing_detection)) {
- LOG(LS_INFO) << "Typing detection is enabled? " << typing_detection;
- if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
+ if (options.typing_detection) {
+ LOG(LS_INFO) << "Typing detection is enabled? "
+ << *options.typing_detection;
+ if (voep->SetTypingDetectionStatus(*options.typing_detection) == -1) {
// In case of error, log the info and continue
- LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
+ LOG_RTCERR1(SetTypingDetectionStatus, *options.typing_detection);
}
}
- int adjust_agc_delta;
- if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
- LOG(LS_INFO) << "Adjust agc delta is " << adjust_agc_delta;
- if (!AdjustAgcLevel(adjust_agc_delta)) {
+ if (options.adjust_agc_delta) {
+ LOG(LS_INFO) << "Adjust agc delta is " << *options.adjust_agc_delta;
+ if (!AdjustAgcLevel(*options.adjust_agc_delta)) {
return false;
}
}
- bool aec_dump;
- if (options.aec_dump.Get(&aec_dump)) {
- LOG(LS_INFO) << "Aec dump is enabled? " << aec_dump;
- if (aec_dump)
+ if (options.aec_dump) {
+ LOG(LS_INFO) << "Aec dump is enabled? " << *options.aec_dump;
+ if (*options.aec_dump)
StartAecDump(kAecDumpByAudioOptionFilename);
else
StopAecDump();
@@ -839,28 +806,30 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
webrtc::Config config;
- delay_agnostic_aec_.SetFrom(options.delay_agnostic_aec);
- bool delay_agnostic_aec;
- if (delay_agnostic_aec_.Get(&delay_agnostic_aec)) {
- LOG(LS_INFO) << "Delay agnostic aec is enabled? " << delay_agnostic_aec;
+ if (options.delay_agnostic_aec)
+ delay_agnostic_aec_ = options.delay_agnostic_aec;
+ if (delay_agnostic_aec_) {
+ LOG(LS_INFO) << "Delay agnostic aec is enabled? " << *delay_agnostic_aec_;
config.Set<webrtc::DelayAgnostic>(
- new webrtc::DelayAgnostic(delay_agnostic_aec));
+ new webrtc::DelayAgnostic(*delay_agnostic_aec_));
}
- extended_filter_aec_.SetFrom(options.extended_filter_aec);
- bool extended_filter;
- if (extended_filter_aec_.Get(&extended_filter)) {
- LOG(LS_INFO) << "Extended filter aec is enabled? " << extended_filter;
+ if (options.extended_filter_aec) {
+ extended_filter_aec_ = options.extended_filter_aec;
+ }
+ if (extended_filter_aec_) {
+ LOG(LS_INFO) << "Extended filter aec is enabled? " << *extended_filter_aec_;
config.Set<webrtc::ExtendedFilter>(
- new webrtc::ExtendedFilter(extended_filter));
+ new webrtc::ExtendedFilter(*extended_filter_aec_));
}
- experimental_ns_.SetFrom(options.experimental_ns);
- bool experimental_ns;
- if (experimental_ns_.Get(&experimental_ns)) {
- LOG(LS_INFO) << "Experimental ns is enabled? " << experimental_ns;
+ if (options.experimental_ns) {
+ experimental_ns_ = options.experimental_ns;
+ }
+ if (experimental_ns_) {
+ LOG(LS_INFO) << "Experimental ns is enabled? " << *experimental_ns_;
config.Set<webrtc::ExperimentalNs>(
- new webrtc::ExperimentalNs(experimental_ns));
+ new webrtc::ExperimentalNs(*experimental_ns_));
}
// We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
@@ -870,167 +839,58 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
audioproc->SetExtraOptions(config);
}
- uint32_t recording_sample_rate;
- if (options.recording_sample_rate.Get(&recording_sample_rate)) {
- LOG(LS_INFO) << "Recording sample rate is " << recording_sample_rate;
- if (voe_wrapper_->hw()->SetRecordingSampleRate(recording_sample_rate)) {
- LOG_RTCERR1(SetRecordingSampleRate, recording_sample_rate);
+ if (options.recording_sample_rate) {
+ LOG(LS_INFO) << "Recording sample rate is "
+ << *options.recording_sample_rate;
+ if (voe_wrapper_->hw()->SetRecordingSampleRate(
+ *options.recording_sample_rate)) {
+ LOG_RTCERR1(SetRecordingSampleRate, *options.recording_sample_rate);
}
}
- uint32_t playout_sample_rate;
- if (options.playout_sample_rate.Get(&playout_sample_rate)) {
- LOG(LS_INFO) << "Playout sample rate is " << playout_sample_rate;
- if (voe_wrapper_->hw()->SetPlayoutSampleRate(playout_sample_rate)) {
- LOG_RTCERR1(SetPlayoutSampleRate, playout_sample_rate);
+ if (options.playout_sample_rate) {
+ LOG(LS_INFO) << "Playout sample rate is " << *options.playout_sample_rate;
+ if (voe_wrapper_->hw()->SetPlayoutSampleRate(
+ *options.playout_sample_rate)) {
+ LOG_RTCERR1(SetPlayoutSampleRate, *options.playout_sample_rate);
}
}
return true;
}
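
ApplyOptions() now builds a fully populated set of engine defaults and overlays the caller's options with SetAll(), which is why the rest of the function can dereference options with * and value_or() instead of the old Get()/GetWithDefaultIfUnset() pairs. A standalone sketch of that overlay pattern, using std::optional as a stand-in for rtc::Optional and a reduced AudioOptions struct:

    #include <iostream>
    #include <optional>

    // Reduced AudioOptions with std::optional standing in for rtc::Optional.
    struct AudioOptions {
      std::optional<bool> echo_cancellation;
      std::optional<bool> typing_detection;
      std::optional<int> audio_jitter_buffer_max_packets;

      // Mirrors AudioOptions::SetAll(): every option set in |other| replaces
      // the corresponding option here; unset ones are left alone.
      void SetAll(const AudioOptions& other) {
        if (other.echo_cancellation) echo_cancellation = other.echo_cancellation;
        if (other.typing_detection) typing_detection = other.typing_detection;
        if (other.audio_jitter_buffer_max_packets) {
          audio_jitter_buffer_max_packets = other.audio_jitter_buffer_max_packets;
        }
      }
    };

    void ApplyOptionsSketch(const AudioOptions& options_in) {
      // Engine defaults: every field is populated, as in the diff.
      AudioOptions options;
      options.echo_cancellation = true;
      options.typing_detection = true;
      options.audio_jitter_buffer_max_packets = 50;

      // Caller-supplied options win over the defaults.
      options.SetAll(options_in);

      // After the overlay, unconditional dereference is safe.
      std::cout << "EC: " << *options.echo_cancellation
                << ", jitter buffer: " << *options.audio_jitter_buffer_max_packets
                << " packets\n";
    }

    int main() {
      AudioOptions in;
      in.audio_jitter_buffer_max_packets = 100;  // Override one default.
      ApplyOptionsSketch(in);
      return 0;
    }
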
-// TODO(juberti): Refactor this so that the core logic can be used to set the
-// soundclip device. At that time, reinstate the soundclip pause/resume code.
-bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
- const Device* out_device) {
-#if !defined(IOS)
- int in_id = in_device ? rtc::FromString<int>(in_device->id) :
- kDefaultAudioDeviceId;
- int out_id = out_device ? rtc::FromString<int>(out_device->id) :
- kDefaultAudioDeviceId;
- // The device manager uses -1 as the default device, which was the case for
- // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
-#ifndef WIN32
- if (-1 == in_id) {
- in_id = kDefaultAudioDeviceId;
- }
- if (-1 == out_id) {
- out_id = kDefaultAudioDeviceId;
- }
-#endif
-
- std::string in_name = (in_id != kDefaultAudioDeviceId) ?
- in_device->name : "Default device";
- std::string out_name = (out_id != kDefaultAudioDeviceId) ?
- out_device->name : "Default device";
- LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
- << ") and speaker to (id=" << out_id << ", name=" << out_name
- << ")";
+void WebRtcVoiceEngine::SetDefaultDevices() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+#if !defined(WEBRTC_IOS)
+ int in_id = kDefaultAudioDeviceId;
+ int out_id = kDefaultAudioDeviceId;
+ LOG(LS_INFO) << "Setting microphone to (id=" << in_id
+ << ") and speaker to (id=" << out_id << ")";
- // Must also pause all audio playback and capture.
bool ret = true;
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- if (!channel->PausePlayout()) {
- LOG(LS_WARNING) << "Failed to pause playout";
- ret = false;
- }
- if (!channel->PauseSend()) {
- LOG(LS_WARNING) << "Failed to pause send";
- ret = false;
- }
- }
-
- // Find the recording device id in VoiceEngine and set recording device.
- if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
+ if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
+ LOG_RTCERR1(SetRecordingDevice, in_id);
ret = false;
}
- if (ret) {
- if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
- LOG_RTCERR2(SetRecordingDevice, in_name, in_id);
- ret = false;
- }
- webrtc::AudioProcessing* ap = voe()->base()->audio_processing();
- if (ap)
- ap->Initialize();
+ webrtc::AudioProcessing* ap = voe()->base()->audio_processing();
+ if (ap) {
+ ap->Initialize();
}
- // Find the playout device id in VoiceEngine and set playout device.
- if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
- LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
+ if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
+ LOG_RTCERR1(SetPlayoutDevice, out_id);
ret = false;
}
- if (ret) {
- if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
- LOG_RTCERR2(SetPlayoutDevice, out_name, out_id);
- ret = false;
- }
- }
-
- // Resume all audio playback and capture.
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- if (!channel->ResumePlayout()) {
- LOG(LS_WARNING) << "Failed to resume playout";
- ret = false;
- }
- if (!channel->ResumeSend()) {
- LOG(LS_WARNING) << "Failed to resume send";
- ret = false;
- }
- }
if (ret) {
- LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
- << ") and speaker to (id="<< out_id << " name=" << out_name
- << ")";
+ LOG(LS_INFO) << "Set microphone to (id=" << in_id
+ << ") and speaker to (id=" << out_id << ")";
}
-
- return ret;
-#else
- return true;
-#endif // !IOS
-}
-
-bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
- bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
- // In Linux, VoiceEngine uses the same device dev_id as the device manager.
-#if defined(LINUX) || defined(ANDROID)
- *rtc_id = dev_id;
- return true;
-#else
- // In Windows and Mac, we need to find the VoiceEngine device id by name
- // unless the input dev_id is the default device id.
- if (kDefaultAudioDeviceId == dev_id) {
- *rtc_id = dev_id;
- return true;
- }
-
- // Get the number of VoiceEngine audio devices.
- int count = 0;
- if (is_input) {
- if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
- LOG_RTCERR0(GetNumOfRecordingDevices);
- return false;
- }
- } else {
- if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
- LOG_RTCERR0(GetNumOfPlayoutDevices);
- return false;
- }
- }
-
- for (int i = 0; i < count; ++i) {
- char name[128];
- char guid[128];
- if (is_input) {
- voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
- LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
- } else {
- voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
- LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
- }
-
- std::string webrtc_name(name);
- if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
- *rtc_id = i;
- return true;
- }
- }
- LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
- return false;
-#endif
+#endif // !WEBRTC_IOS
}
bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
unsigned int ulevel;
if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
LOG_RTCERR1(GetSpeakerVolume, level);
@@ -1041,6 +901,7 @@ bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
}
bool WebRtcVoiceEngine::SetOutputVolume(int level) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(level >= 0 && level <= 255);
if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
LOG_RTCERR1(SetSpeakerVolume, level);
@@ -1050,136 +911,36 @@ bool WebRtcVoiceEngine::SetOutputVolume(int level) {
}
int WebRtcVoiceEngine::GetInputLevel() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
unsigned int ulevel;
return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
static_cast<int>(ulevel) : -1;
}
const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
return codecs_;
}
-bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
- return FindWebRtcCodec(in, NULL);
-}
-
-// Get the VoiceEngine codec that matches |in|, with the supplied settings.
-bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
- webrtc::CodecInst* out) {
- int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (GetVoeCodec(i, &voe_codec)) {
- AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
- voe_codec.rate, voe_codec.channels, 0);
- bool multi_rate = IsCodecMultiRate(voe_codec);
- // Allow arbitrary rates for ISAC to be specified.
- if (multi_rate) {
- // Set codec.bitrate to 0 so the check for codec.Matches() passes.
- codec.bitrate = 0;
- }
- if (codec.Matches(in)) {
- if (out) {
- // Fixup the payload type.
- voe_codec.pltype = in.id;
-
- // Set bitrate if specified.
- if (multi_rate && in.bitrate != 0) {
- voe_codec.rate = in.bitrate;
- }
-
- // Reset G722 sample rate to 16000 to match WebRTC.
- MaybeFixupG722(&voe_codec, 16000);
-
- // Apply codec-specific settings.
- if (IsCodec(codec, kIsacCodecName)) {
- // If ISAC and an explicit bitrate is not specified,
- // enable auto bitrate adjustment.
- voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
- }
- *out = voe_codec;
- }
- return true;
- }
- }
- }
- return false;
-}
-const std::vector<RtpHeaderExtension>&
-WebRtcVoiceEngine::rtp_header_extensions() const {
- return rtp_header_extensions_;
-}
-
-void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
- // if min_sev == -1, we keep the current log level.
- if (min_sev >= 0) {
- SetTraceFilter(SeverityToFilter(min_sev));
- }
- log_options_ = filter;
- SetTraceOptions(initialized_ ? log_options_ : "");
+RtpCapabilities WebRtcVoiceEngine::GetCapabilities() const {
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RtpCapabilities capabilities;
+ capabilities.header_extensions.push_back(RtpHeaderExtension(
+ kRtpAudioLevelHeaderExtension, kRtpAudioLevelHeaderExtensionDefaultId));
+ capabilities.header_extensions.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
+ kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
+ return capabilities;
}
int WebRtcVoiceEngine::GetLastEngineError() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->error();
}
-void WebRtcVoiceEngine::SetTraceFilter(int filter) {
- log_filter_ = filter;
- tracing_->SetTraceFilter(filter);
-}
-
-// We support three different logging settings for VoiceEngine:
-// 1. Observer callback that goes into talk diagnostic logfile.
-// Use --logfile and --loglevel
-//
-// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
-// Use --voice_loglevel --voice_logfilter "tracefile file_name"
-//
-// 3. EC log and dump for debugging QualityEngine.
-// Use --voice_loglevel --voice_logfilter "recordEC file_name"
-//
-// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
-// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
-void WebRtcVoiceEngine::SetTraceOptions(const std::string& options) {
- // Set encrypted trace file.
- std::vector<std::string> opts;
- rtc::tokenize(options, ' ', '"', '"', &opts);
- std::vector<std::string>::iterator tracefile =
- std::find(opts.begin(), opts.end(), "tracefile");
- if (tracefile != opts.end() && ++tracefile != opts.end()) {
- // Write encrypted debug output (at same loglevel) to file
- // EncryptedTraceFile no longer supported.
- if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
- LOG_RTCERR1(SetTraceFile, *tracefile);
- }
- }
-
- // Allow trace options to override the trace filter. We default
- // it to log_filter_ (as a translation of libjingle log levels)
- // elsewhere, but this allows clients to explicitly set webrtc
- // log levels.
- std::vector<std::string>::iterator tracefilter =
- std::find(opts.begin(), opts.end(), "tracefilter");
- if (tracefilter != opts.end() && ++tracefilter != opts.end()) {
- if (!tracing_->SetTraceFilter(rtc::FromString<int>(*tracefilter))) {
- LOG_RTCERR1(SetTraceFilter, *tracefilter);
- }
- }
-
- // Set AEC dump file
- std::vector<std::string>::iterator recordEC =
- std::find(opts.begin(), opts.end(), "recordEC");
- if (recordEC != opts.end()) {
- ++recordEC;
- if (recordEC != opts.end())
- StartAecDump(recordEC->c_str());
- else
- StopAecDump();
- }
-}
-
void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
int length) {
+ // Note: This callback can happen on any thread!
rtc::LoggingSeverity sev = rtc::LS_VERBOSE;
if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
sev = rtc::LS_ERROR;
@@ -1201,34 +962,24 @@ void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
}
}
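
Print() is the trace callback handed to webrtc::Trace, and as the new comment notes it can fire on any thread, so it only maps the trace level onto a logging severity and forwards the message. A reduced sketch of the visible part of that mapping (the hunk elides the branches for the remaining trace levels; the enums below are stand-ins, not the real webrtc/rtc types):

    #include <cstdio>

    // Illustrative stand-ins for webrtc::TraceLevel and rtc::LoggingSeverity.
    enum TraceLevel { kTraceStateInfo, kTraceWarning, kTraceError, kTraceCritical };
    enum LoggingSeverity { LS_VERBOSE, LS_INFO, LS_WARNING, LS_ERROR };

    // Mirrors the visible part of WebRtcVoiceEngine::Print(): errors and
    // critical traces become LS_ERROR, everything else defaults to LS_VERBOSE.
    LoggingSeverity SeverityFor(TraceLevel level) {
      return (level == kTraceError || level == kTraceCritical) ? LS_ERROR
                                                               : LS_VERBOSE;
    }

    int main() {
      std::printf("warning -> %d, error -> %d\n",
                  SeverityFor(kTraceWarning), SeverityFor(kTraceError));
      return 0;
    }
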
-void WebRtcVoiceEngine::CallbackOnError(int channel_id, int err_code) {
- RTC_DCHECK(channel_id == -1);
- LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
- << channel_id << ".";
- rtc::CritScope lock(&channels_cs_);
- for (WebRtcVoiceMediaChannel* channel : channels_) {
- channel->OnError(err_code);
- }
-}
-
void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel* channel) {
- RTC_DCHECK(channel != NULL);
- rtc::CritScope lock(&channels_cs_);
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(channel);
channels_.push_back(channel);
}
void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) {
- rtc::CritScope lock(&channels_cs_);
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
auto it = std::find(channels_.begin(), channels_.end(), channel);
- if (it != channels_.end()) {
- channels_.erase(it);
- }
+ RTC_DCHECK(it != channels_.end());
+ channels_.erase(it);
}
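
RegisterChannel()/UnregisterChannel() illustrate the broader change in this diff: the channels_cs_ critical section is gone, replaced by RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()) — the channel list is no longer locked, it is simply required to be touched from one thread only. A standalone sketch of that pattern, with a minimal checker built on std::thread::id instead of rtc::ThreadChecker (and assert() in place of RTC_DCHECK, which likewise compiles out of release builds):

    #include <algorithm>
    #include <cassert>
    #include <thread>
    #include <vector>

    // Minimal stand-in for rtc::ThreadChecker: remembers the thread it was
    // created on and reports whether later calls come from that same thread.
    class ThreadChecker {
     public:
      bool CalledOnValidThread() const {
        return owner_ == std::this_thread::get_id();
      }

     private:
      const std::thread::id owner_ = std::this_thread::get_id();
    };

    class Engine {
     public:
      void RegisterChannel(int channel) {
        assert(worker_thread_checker_.CalledOnValidThread());
        channels_.push_back(channel);
      }
      void UnregisterChannel(int channel) {
        assert(worker_thread_checker_.CalledOnValidThread());
        auto it = std::find(channels_.begin(), channels_.end(), channel);
        assert(it != channels_.end());
        channels_.erase(it);
      }

     private:
      ThreadChecker worker_thread_checker_;
      std::vector<int> channels_;  // Single-threaded access; no lock needed.
    };

    int main() {
      Engine engine;
      engine.RegisterChannel(1);
      engine.UnregisterChannel(1);
      return 0;
    }
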
// Adjusts the default AGC target level by the specified delta.
// NB: If we start messing with other config fields, we'll want
// to save the current webrtc::AgcConfig as well.
bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
webrtc::AgcConfig config = default_agc_config_;
config.targetLeveldBOv -= delta;
@@ -1244,6 +995,7 @@ bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
}
bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (initialized_) {
LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
return false;
@@ -1260,6 +1012,7 @@ bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) {
}
bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
FILE* aec_dump_file_stream = rtc::FdopenPlatformFileForWriting(file);
if (!aec_dump_file_stream) {
LOG(LS_ERROR) << "Could not open AEC dump file stream.";
@@ -1279,6 +1032,7 @@ bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file) {
}
void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (!is_dumping_aec_) {
// Start dumping AEC when we are not dumping.
if (voe_wrapper_->processing()->StartDebugRecording(
@@ -1291,6 +1045,7 @@ void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
}
void WebRtcVoiceEngine::StopAecDump() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (is_dumping_aec_) {
// Stop dumping AEC when we are dumping.
if (voe_wrapper_->processing()->StopDebugRecording() !=
@@ -1302,14 +1057,17 @@ void WebRtcVoiceEngine::StopAecDump() {
}
bool WebRtcVoiceEngine::StartRtcEventLog(rtc::PlatformFile file) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->codec()->GetEventLog()->StartLogging(file);
}
void WebRtcVoiceEngine::StopRtcEventLog() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
voe_wrapper_->codec()->GetEventLog()->StopLogging();
}
int WebRtcVoiceEngine::CreateVoEChannel() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
return voe_wrapper_->base()->CreateChannel(voe_config_);
}
@@ -1317,33 +1075,61 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
: public AudioRenderer::Sink {
public:
WebRtcAudioSendStream(int ch, webrtc::AudioTransport* voe_audio_transport,
- uint32_t ssrc, webrtc::Call* call)
- : channel_(ch),
- voe_audio_transport_(voe_audio_transport),
- call_(call) {
+ uint32_t ssrc, const std::string& c_name,
+ const std::vector<webrtc::RtpExtension>& extensions,
+ webrtc::Call* call)
+ : voe_audio_transport_(voe_audio_transport),
+ call_(call),
+ config_(nullptr) {
RTC_DCHECK_GE(ch, 0);
// TODO(solenberg): Once we're not using FakeWebRtcVoiceEngine anymore:
// RTC_DCHECK(voe_audio_transport);
RTC_DCHECK(call);
audio_capture_thread_checker_.DetachFromThread();
- webrtc::AudioSendStream::Config config(nullptr);
- config.voe_channel_id = channel_;
- config.rtp.ssrc = ssrc;
- stream_ = call_->CreateAudioSendStream(config);
- RTC_DCHECK(stream_);
+ config_.rtp.ssrc = ssrc;
+ config_.rtp.c_name = c_name;
+ config_.voe_channel_id = ch;
+ RecreateAudioSendStream(extensions);
}
+
~WebRtcAudioSendStream() override {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
Stop();
call_->DestroyAudioSendStream(stream_);
}
+ void RecreateAudioSendStream(
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ if (stream_) {
+ call_->DestroyAudioSendStream(stream_);
+ stream_ = nullptr;
+ }
+ config_.rtp.extensions = extensions;
+ RTC_DCHECK(!stream_);
+ stream_ = call_->CreateAudioSendStream(config_);
+ RTC_CHECK(stream_);
+ }
+
+ bool SendTelephoneEvent(int payload_type, uint8_t event,
+ uint32_t duration_ms) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->SendTelephoneEvent(payload_type, event, duration_ms);
+ }
+
+ webrtc::AudioSendStream::Stats GetStats() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->GetStats();
+ }
+
  // Starts rendering by setting a sink on the renderer to receive data
  // callbacks.
// This method is called on the libjingle worker thread.
// TODO(xians): Make sure Start() is called only once.
void Start(AudioRenderer* renderer) {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(renderer);
if (renderer_) {
RTC_DCHECK(renderer_ == renderer);
@@ -1353,16 +1139,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
renderer_ = renderer;
}
- webrtc::AudioSendStream::Stats GetStats() const {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
- return stream_->GetStats();
- }
-
  // Stops rendering by setting the renderer's sink to nullptr. No data
  // callbacks will be received after this method returns.
// This method is called on the libjingle worker thread.
void Stop() {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (renderer_) {
renderer_->SetSink(nullptr);
renderer_ = nullptr;
@@ -1374,11 +1155,12 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) override {
+ RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread());
RTC_DCHECK(voe_audio_transport_);
- voe_audio_transport_->OnData(channel_,
+ voe_audio_transport_->OnData(config_.voe_channel_id,
audio_data,
bits_per_sample,
sample_rate,
@@ -1389,7 +1171,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
  // Callback from the |renderer_| when it is going away. If Start() has
  // never been called, this callback won't be triggered.
void OnClose() override {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Set |renderer_| to nullptr to make sure no more callback will get into
// the renderer.
renderer_ = nullptr;
@@ -1397,16 +1179,18 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
// Accessor to the VoE channel ID.
int channel() const {
- RTC_DCHECK(signal_thread_checker_.CalledOnValidThread());
- return channel_;
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return config_.voe_channel_id;
}
private:
- rtc::ThreadChecker signal_thread_checker_;
+ rtc::ThreadChecker worker_thread_checker_;
rtc::ThreadChecker audio_capture_thread_checker_;
- const int channel_ = -1;
webrtc::AudioTransport* const voe_audio_transport_ = nullptr;
webrtc::Call* call_ = nullptr;
+ webrtc::AudioSendStream::Config config_;
+ // The stream is owned by WebRtcAudioSendStream and may be reallocated if
+ // configuration changes.
webrtc::AudioSendStream* stream_ = nullptr;
// Raw pointer to AudioRenderer owned by LocalAudioTrackHandler.
@@ -1419,80 +1203,163 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
public:
- explicit WebRtcAudioReceiveStream(int voe_channel_id)
- : channel_(voe_channel_id) {}
+ WebRtcAudioReceiveStream(int ch, uint32_t remote_ssrc, uint32_t local_ssrc,
+ bool use_combined_bwe, const std::string& sync_group,
+ const std::vector<webrtc::RtpExtension>& extensions,
+ webrtc::Call* call)
+ : call_(call),
+ config_() {
+ RTC_DCHECK_GE(ch, 0);
+ RTC_DCHECK(call);
+ config_.rtp.remote_ssrc = remote_ssrc;
+ config_.rtp.local_ssrc = local_ssrc;
+ config_.voe_channel_id = ch;
+ config_.sync_group = sync_group;
+ RecreateAudioReceiveStream(use_combined_bwe, extensions);
+ }
- int channel() { return channel_; }
+ ~WebRtcAudioReceiveStream() {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ call_->DestroyAudioReceiveStream(stream_);
+ }
+
+ void RecreateAudioReceiveStream(
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RecreateAudioReceiveStream(config_.combined_audio_video_bwe, extensions);
+ }
+ void RecreateAudioReceiveStream(bool use_combined_bwe) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RecreateAudioReceiveStream(use_combined_bwe, config_.rtp.extensions);
+ }
+
+ webrtc::AudioReceiveStream::Stats GetStats() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(stream_);
+ return stream_->GetStats();
+ }
+
+ int channel() const {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ return config_.voe_channel_id;
+ }
+
+ void SetRawAudioSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ stream_->SetSink(std::move(sink));
+ }
private:
- int channel_;
+ void RecreateAudioReceiveStream(bool use_combined_bwe,
+ const std::vector<webrtc::RtpExtension>& extensions) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ if (stream_) {
+ call_->DestroyAudioReceiveStream(stream_);
+ stream_ = nullptr;
+ }
+ config_.rtp.extensions = extensions;
+ config_.combined_audio_video_bwe = use_combined_bwe;
+ RTC_DCHECK(!stream_);
+ stream_ = call_->CreateAudioReceiveStream(config_);
+ RTC_CHECK(stream_);
+ }
+
+ rtc::ThreadChecker worker_thread_checker_;
+ webrtc::Call* call_ = nullptr;
+ webrtc::AudioReceiveStream::Config config_;
+ // The stream is owned by WebRtcAudioReceiveStream and may be reallocated if
+ // configuration changes.
+ webrtc::AudioReceiveStream* stream_ = nullptr;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioReceiveStream);
};
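
Both stream wrappers keep their webrtc config as a member and implement every reconfiguration as destroy-then-create, since the underlying send/receive streams take their config at construction and are immutable afterwards. A standalone sketch of that ownership pattern — Call, Stream, and Config below are simplified stand-ins for webrtc::Call, the audio stream types, and their Config structs, not the real API:

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    struct Config {
      std::vector<std::string> extensions;  // Stand-in for rtp.extensions.
    };

    // Immutable once built, like the webrtc audio streams.
    struct Stream {
      explicit Stream(Config c) : config(std::move(c)) {}
      const Config config;
    };

    struct Call {
      Stream* CreateStream(const Config& c) { return new Stream(c); }
      void DestroyStream(Stream* s) { delete s; }
    };

    class AudioStreamWrapper {
     public:
      AudioStreamWrapper(Call* call, Config config)
          : call_(call), config_(std::move(config)) {
        Recreate(config_.extensions);
      }
      ~AudioStreamWrapper() { call_->DestroyStream(stream_); }

      // Reconfiguration is destroy-then-create from the stored config, as in
      // RecreateAudioSendStream()/RecreateAudioReceiveStream() above.
      void Recreate(std::vector<std::string> extensions) {
        if (stream_) {
          call_->DestroyStream(stream_);
          stream_ = nullptr;
        }
        config_.extensions = std::move(extensions);
        stream_ = call_->CreateStream(config_);
        assert(stream_);
      }

     private:
      Call* const call_;
      Config config_;
      Stream* stream_ = nullptr;  // Owned; reallocated on config changes.
    };

    int main() {
      Call call;
      AudioStreamWrapper wrapper(&call, Config{{"abs-send-time"}});
      wrapper.Recreate({"audio-level", "abs-send-time"});
      return 0;
    }
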
-// WebRtcVoiceMediaChannel
WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
const AudioOptions& options,
webrtc::Call* call)
- : engine_(engine),
- send_bitrate_setting_(false),
- send_bitrate_bps_(0),
- options_(),
- dtmf_allowed_(false),
- desired_playout_(false),
- nack_enabled_(false),
- playout_(false),
- typing_noise_detected_(false),
- desired_send_(SEND_NOTHING),
- send_(SEND_NOTHING),
- call_(call) {
+ : engine_(engine), call_(call) {
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel";
- RTC_DCHECK(nullptr != call);
+ RTC_DCHECK(call);
engine->RegisterChannel(this);
SetOptions(options);
}
WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel";
-
- // Remove any remaining send streams.
+ // TODO(solenberg): Should be able to delete the streams directly, without
+ // going through Remove{Send,Recv}Stream(), once stream objects handle
+ // all (de)configuration.
while (!send_streams_.empty()) {
RemoveSendStream(send_streams_.begin()->first);
}
-
- // Remove any remaining receive streams.
- while (!receive_channels_.empty()) {
- RemoveRecvStream(receive_channels_.begin()->first);
+ while (!recv_streams_.empty()) {
+ RemoveRecvStream(recv_streams_.begin()->first);
}
- RTC_DCHECK(receive_streams_.empty());
-
- // Unregister ourselves from the engine.
engine()->UnregisterChannel(this);
}
bool WebRtcVoiceMediaChannel::SetSendParameters(
const AudioSendParameters& params) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendParameters: "
+ << params.ToString();
// TODO(pthatcher): Refactor this to be more clean now that we have
// all the information at once.
- return (SetSendCodecs(params.codecs) &&
- SetSendRtpHeaderExtensions(params.extensions) &&
- SetMaxSendBandwidth(params.max_bandwidth_bps) &&
- SetOptions(params.options));
+
+ if (!SetSendCodecs(params.codecs)) {
+ return false;
+ }
+
+ if (!ValidateRtpExtensions(params.extensions)) {
+ return false;
+ }
+ std::vector<webrtc::RtpExtension> filtered_extensions =
+ FilterRtpExtensions(params.extensions,
+ webrtc::RtpExtension::IsSupportedForAudio, true);
+ if (send_rtp_extensions_ != filtered_extensions) {
+ send_rtp_extensions_.swap(filtered_extensions);
+ for (auto& it : send_streams_) {
+ it.second->RecreateAudioSendStream(send_rtp_extensions_);
+ }
+ }
+
+ if (!SetMaxSendBandwidth(params.max_bandwidth_bps)) {
+ return false;
+ }
+ return SetOptions(params.options);
}
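
SetSendParameters() now handles header extensions in three steps: validate the input, filter it down to the audio-supported subset, and recreate the send streams only when the filtered list actually differs from the current one. A standalone sketch of that change-detection step; FilterForAudio() below is a simplified stand-in for the tree's FilterRtpExtensions() helper, with the extensions reduced to plain URI strings:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    using Extensions = std::vector<std::string>;

    // Simplified stand-in for FilterRtpExtensions(): keep only the
    // extensions the audio path supports.
    Extensions FilterForAudio(const Extensions& in) {
      static const Extensions kSupported = {"audio-level", "abs-send-time"};
      Extensions out;
      for (const std::string& uri : in) {
        if (std::find(kSupported.begin(), kSupported.end(), uri) !=
            kSupported.end()) {
          out.push_back(uri);
        }
      }
      return out;
    }

    int main() {
      Extensions current = {"audio-level"};
      Extensions incoming = {"audio-level", "video-orientation",
                             "abs-send-time"};

      Extensions filtered = FilterForAudio(incoming);
      if (current != filtered) {
        current.swap(filtered);
        // This is the point where the diff walks send_streams_ and calls
        // RecreateAudioSendStream() with the new list.
        std::cout << "extensions changed; streams would be recreated\n";
      } else {
        std::cout << "no change; streams left untouched\n";
      }
      return 0;
    }
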
bool WebRtcVoiceMediaChannel::SetRecvParameters(
const AudioRecvParameters& params) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetRecvParameters: "
+ << params.ToString();
// TODO(pthatcher): Refactor this to be more clean now that we have
// all the information at once.
- return (SetRecvCodecs(params.codecs) &&
- SetRecvRtpHeaderExtensions(params.extensions));
+
+ if (!SetRecvCodecs(params.codecs)) {
+ return false;
+ }
+
+ if (!ValidateRtpExtensions(params.extensions)) {
+ return false;
+ }
+ std::vector<webrtc::RtpExtension> filtered_extensions =
+ FilterRtpExtensions(params.extensions,
+ webrtc::RtpExtension::IsSupportedForAudio, false);
+ if (recv_rtp_extensions_ != filtered_extensions) {
+ recv_rtp_extensions_.swap(filtered_extensions);
+ for (auto& it : recv_streams_) {
+ it.second->RecreateAudioReceiveStream(recv_rtp_extensions_);
+ }
+ }
+
+ return true;
}
bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "Setting voice channel options: "
<< options.ToString();
@@ -1503,26 +1370,27 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
// on top. This means there is no way to "clear" options such that
// they go back to the engine default.
options_.SetAll(options);
-
- if (send_ != SEND_NOTHING) {
- if (!engine()->ApplyOptions(options_)) {
- LOG(LS_WARNING) <<
- "Failed to apply engine options during channel SetOptions.";
- return false;
- }
+ if (!engine()->ApplyOptions(options_)) {
+ LOG(LS_WARNING) <<
+ "Failed to apply engine options during channel SetOptions.";
+ return false;
}
if (dscp_option_changed) {
rtc::DiffServCodePoint dscp = rtc::DSCP_DEFAULT;
- if (options_.dscp.GetWithDefaultIfUnset(false))
+ if (options_.dscp.value_or(false)) {
dscp = kAudioDscpValue;
+ }
if (MediaChannel::SetDscp(dscp) != 0) {
LOG(LS_WARNING) << "Failed to set DSCP settings for audio channel";
}
}
// TODO(solenberg): Don't recreate unless options changed.
- RecreateAudioReceiveStreams();
+ for (auto& it : recv_streams_) {
+ it.second->RecreateAudioReceiveStream(
+ options_.combined_audio_video_bwe.value_or(false));
+ }
LOG(LS_INFO) << "Set voice channel options. Current options: "
<< options_.ToString();
@@ -1531,7 +1399,7 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
bool WebRtcVoiceMediaChannel::SetRecvCodecs(
const std::vector<AudioCodec>& codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Set the payload types to be used for incoming media.
LOG(LS_INFO) << "Setting receive voice codecs.";
@@ -1568,7 +1436,26 @@ bool WebRtcVoiceMediaChannel::SetRecvCodecs(
PausePlayout();
}
- bool result = SetRecvCodecsInternal(new_codecs);
+ bool result = true;
+ for (const AudioCodec& codec : new_codecs) {
+ webrtc::CodecInst voe_codec;
+ if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
+ LOG(LS_INFO) << ToString(codec);
+ voe_codec.pltype = codec.id;
+ for (const auto& ch : recv_streams_) {
+ if (engine()->voe()->codec()->SetRecPayloadType(
+ ch.second->channel(), voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, ch.second->channel(),
+ ToString(voe_codec));
+ result = false;
+ }
+ }
+ } else {
+ LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
+ result = false;
+ break;
+ }
+ }
if (result) {
recv_codecs_ = codecs;
}
@@ -1588,7 +1475,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
engine()->voe()->codec()->SetFECStatus(channel, false);
// Scan through the list to figure out the codec to use for sending, along
- // with the proper configuration for VAD and DTMF.
+ // with the proper configuration for VAD.
bool found_send_codec = false;
webrtc::CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
@@ -1603,7 +1490,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst voe_codec;
- if (!engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
continue;
}
@@ -1644,7 +1531,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Set packet size if the AudioCodec param kCodecParamPTime is set.
int ptime_ms = 0;
if (codec.GetParam(kCodecParamPTime, &ptime_ms)) {
- if (!SetPTimeAsPacketSize(&send_codec, ptime_ms)) {
+ if (!WebRtcVoiceCodecs::SetPTimeAsPacketSize(&send_codec, ptime_ms)) {
LOG(LS_WARNING) << "Failed to set packet size for codec "
<< send_codec.plname;
return false;
@@ -1687,7 +1574,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Set Opus internal DTX.
LOG(LS_INFO) << "Attempt to "
- << GetEnableString(enable_opus_dtx)
+ << (enable_opus_dtx ? "enable" : "disable")
<< " Opus DTX on channel "
<< channel;
if (engine()->voe()->codec()->SetOpusDtx(channel, enable_opus_dtx)) {
@@ -1717,25 +1604,17 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
SetSendBitrateInternal(send_bitrate_bps_);
}
- // Loop through the codecs list again to config the telephone-event/CN codec.
+ // Loop through the codecs list again to config the CN codec.
for (const AudioCodec& codec : codecs) {
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst voe_codec;
- if (!engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
continue;
}
- // Find the DTMF telephone event "codec" and tell VoiceEngine channels
- // about it.
- if (IsCodec(codec, kDtmfCodecName)) {
- if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
- channel, codec.id) == -1) {
- LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, codec.id);
- return false;
- }
- } else if (IsCodec(codec, kCnCodecName)) {
+ if (IsCodec(codec, kCnCodecName)) {
// Turn voice activity detection/comfort noise on if supported.
// Set the wideband CN payload type appropriately.
// (narrowband always uses the static payload type 13).
@@ -1789,13 +1668,17 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
bool WebRtcVoiceMediaChannel::SetSendCodecs(
const std::vector<AudioCodec>& codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ // TODO(solenberg): Validate input - that payload types don't overlap and are
+ // within range; filter out codecs we don't support and redundant codecs,
+ // etc.
- dtmf_allowed_ = false;
+ // Find the DTMF telephone event "codec" payload type.
+ dtmf_payload_type_ = rtc::Optional<int>();
for (const AudioCodec& codec : codecs) {
- // Find the DTMF telephone event "codec".
if (IsCodec(codec, kDtmfCodecName)) {
- dtmf_allowed_ = true;
+ dtmf_payload_type_ = rtc::Optional<int>(codec.id);
+ break;
}
}
@@ -1808,7 +1691,7 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
// Set nack status on receive channels and update |nack_enabled_|.
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
SetNack(ch.second->channel(), nack_enabled_);
}
@@ -1844,106 +1727,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodec(
return true;
}
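
The dtmf_allowed_ flag becomes dtmf_payload_type_, an optional int, so the channel records not only that the telephone-event codec was negotiated but which payload type it was given; the loop takes the first match and stops. A standalone sketch of that scan, again with std::optional standing in for rtc::Optional and a reduced AudioCodec struct:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct AudioCodec {
      int id;  // RTP payload type.
      std::string name;
    };

    // Mirrors the scan in SetSendCodecs(): record the payload type of the
    // first telephone-event codec, or leave the optional empty.
    std::optional<int> FindDtmfPayloadType(
        const std::vector<AudioCodec>& codecs) {
      for (const AudioCodec& codec : codecs) {
        if (codec.name == "telephone-event") {  // kDtmfCodecName in the tree.
          return codec.id;
        }
      }
      return std::nullopt;
    }

    int main() {
      const std::vector<AudioCodec> codecs = {{111, "opus"},
                                              {126, "telephone-event"}};
      if (auto pt = FindDtmfPayloadType(codecs)) {
        std::cout << "DTMF payload type: " << *pt << '\n';
      }
      return 0;
    }
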
-bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (receive_extensions_ == extensions) {
- return true;
- }
-
- for (const auto& ch : receive_channels_) {
- if (!SetChannelRecvRtpHeaderExtensions(ch.second->channel(), extensions)) {
- return false;
- }
- }
-
- receive_extensions_ = extensions;
-
- // Recreate AudioReceiveStream:s.
- {
- std::vector<webrtc::RtpExtension> exts;
-
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
- if (audio_level_extension) {
- exts.push_back({
- kRtpAudioLevelHeaderExtension, audio_level_extension->id});
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (send_time_extension) {
- exts.push_back({
- kRtpAbsoluteSenderTimeHeaderExtension, send_time_extension->id});
- }
-
- recv_rtp_extensions_.swap(exts);
- RecreateAudioReceiveStreams();
- }
-
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetChannelRecvRtpHeaderExtensions(
- int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetReceiveAudioLevelIndicationStatus, channel_id,
- audio_level_extension)) {
- return false;
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetReceiveAbsoluteSenderTimeStatus, channel_id,
- send_time_extension)) {
- return false;
- }
-
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (send_extensions_ == extensions) {
- return true;
- }
-
- for (const auto& ch : send_streams_) {
- if (!SetChannelSendRtpHeaderExtensions(ch.second->channel(), extensions)) {
- return false;
- }
- }
-
- send_extensions_ = extensions;
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::SetChannelSendRtpHeaderExtensions(
- int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
- const RtpHeaderExtension* audio_level_extension =
- FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
-
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetSendAudioLevelIndicationStatus, channel_id,
- audio_level_extension)) {
- return false;
- }
-
- const RtpHeaderExtension* send_time_extension =
- FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
- if (!SetHeaderExtension(
- &webrtc::VoERTP_RTCP::SetSendAbsoluteSenderTimeStatus, channel_id,
- send_time_extension)) {
- return false;
- }
-
- return true;
-}
-
bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
desired_playout_ = playout;
return ChangePlayout(desired_playout_);
@@ -1958,12 +1741,12 @@ bool WebRtcVoiceMediaChannel::ResumePlayout() {
}
bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (playout_ == playout) {
return true;
}
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
if (!SetPlayout(ch.second->channel(), playout)) {
LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
<< ch.second->channel() << " failed";
@@ -1995,7 +1778,7 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
return true;
}
- // Apply channel specific options.
+ // Apply channel specific options when channel is enabled for sending.
if (send == SEND_MICROPHONE) {
engine()->ApplyOptions(options_);
}
@@ -2007,13 +1790,6 @@ bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
}
}
- // Clear up the options after stopping sending. Since we may previously have
- // applied the channel specific options, now apply the original options stored
- // in WebRtcVoiceEngine.
- if (send == SEND_NOTHING) {
- engine()->ApplyOptions(engine()->GetOptions());
- }
-
send_ = send;
return true;
}
@@ -2039,7 +1815,7 @@ bool WebRtcVoiceMediaChannel::SetAudioSend(uint32_t ssrc,
bool enable,
const AudioOptions* options,
AudioRenderer* renderer) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// TODO(solenberg): The state change should be fully rolled back if any one of
// these calls fail.
if (!SetLocalRenderer(ssrc, renderer)) {
@@ -2068,7 +1844,7 @@ int WebRtcVoiceMediaChannel::CreateVoEChannel() {
return id;
}
-bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
+bool WebRtcVoiceMediaChannel::DeleteVoEChannel(int channel) {
if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
LOG_RTCERR1(DeRegisterExternalTransport, channel);
}
@@ -2080,7 +1856,7 @@ bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
}
bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "AddSendStream: " << sp.ToString();
uint32_t ssrc = sp.first_ssrc();
@@ -2097,33 +1873,12 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
return false;
}
- // Enable RTCP (for quality stats and feedback messages).
- if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
- LOG_RTCERR2(SetRTCPStatus, channel, 1);
- }
-
- SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
-
- // Set the local (send) SSRC.
- if (engine()->voe()->rtp()->SetLocalSSRC(channel, ssrc) == -1) {
- LOG_RTCERR2(SetLocalSSRC, channel, ssrc);
- DeleteChannel(channel);
- return false;
- }
-
- if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
- LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
- DeleteChannel(channel);
- return false;
- }
-
// Save the channel to send_streams_, so that RemoveSendStream() can still
// delete the channel in case failure happens below.
webrtc::AudioTransport* audio_transport =
engine()->voe()->base()->audio_transport();
- send_streams_.insert(
- std::make_pair(ssrc,
- new WebRtcAudioSendStream(channel, audio_transport, ssrc, call_)));
+ send_streams_.insert(std::make_pair(ssrc, new WebRtcAudioSendStream(
+ channel, audio_transport, ssrc, sp.cname, send_rtp_extensions_, call_)));
// Set the current codecs to be used for the new channel. We need to do this
// after adding the channel to send_channels_, because of how max bitrate is
@@ -2138,10 +1893,10 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
// with the same SSRC in order to send receiver reports.
if (send_streams_.size() == 1) {
receiver_reports_ssrc_ = ssrc;
- for (const auto& ch : receive_channels_) {
- int recv_channel = ch.second->channel();
+ for (const auto& stream : recv_streams_) {
+ int recv_channel = stream.second->channel();
if (engine()->voe()->rtp()->SetLocalSSRC(recv_channel, ssrc) != 0) {
- LOG_RTCERR2(SetLocalSSRC, ch.second->channel(), ssrc);
+ LOG_RTCERR2(SetLocalSSRC, recv_channel, ssrc);
return false;
}
engine()->voe()->base()->AssociateSendChannel(recv_channel, channel);
@@ -2154,7 +1909,9 @@ bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
}
bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "RemoveSendStream: " << ssrc;
+
auto it = send_streams_.find(ssrc);
if (it == send_streams_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
@@ -2165,15 +1922,12 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
int channel = it->second->channel();
ChangeSend(channel, SEND_NOTHING);
-  // Delete the WebRtcVoiceChannelRenderer object connected to the channel;
-  // this will disconnect the audio renderer from the send channel.
- delete it->second;
- send_streams_.erase(it);
-
- // Clean up and delete the send channel.
+ // Clean up and delete the send stream+channel.
LOG(LS_INFO) << "Removing audio send stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
- if (!DeleteChannel(channel)) {
+ delete it->second;
+ send_streams_.erase(it);
+ if (!DeleteVoEChannel(channel)) {
return false;
}
if (send_streams_.empty()) {
@@ -2183,14 +1937,14 @@ bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) {
}
bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "AddRecvStream: " << sp.ToString();
if (!ValidateStreamParams(sp)) {
return false;
}
- uint32_t ssrc = sp.first_ssrc();
+ const uint32_t ssrc = sp.first_ssrc();
if (ssrc == 0) {
LOG(LS_WARNING) << "AddRecvStream with ssrc==0 is not supported.";
return false;
@@ -2202,114 +1956,87 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
RemoveRecvStream(ssrc);
}
- if (receive_channels_.find(ssrc) != receive_channels_.end()) {
+ if (GetReceiveChannelId(ssrc) != -1) {
LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
return false;
}
- RTC_DCHECK(receive_stream_params_.find(ssrc) == receive_stream_params_.end());
// Create a new channel for receiving audio data.
- int channel = CreateVoEChannel();
+ const int channel = CreateVoEChannel();
if (channel == -1) {
return false;
}
- if (!ConfigureRecvChannel(channel)) {
- DeleteChannel(channel);
- return false;
- }
-
- WebRtcAudioReceiveStream* stream = new WebRtcAudioReceiveStream(channel);
- receive_channels_.insert(std::make_pair(ssrc, stream));
- receive_stream_params_[ssrc] = sp;
- AddAudioReceiveStream(ssrc);
-
- LOG(LS_INFO) << "New audio stream " << ssrc
- << " registered to VoiceEngine channel #"
- << channel << ".";
- return true;
-}
-
-bool WebRtcVoiceMediaChannel::ConfigureRecvChannel(int channel) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
-
- int send_channel = GetSendChannelId(receiver_reports_ssrc_);
- if (send_channel != -1) {
- // Associate receive channel with first send channel (so the receive channel
- // can obtain RTT from the send channel)
- engine()->voe()->base()->AssociateSendChannel(channel, send_channel);
- LOG(LS_INFO) << "VoiceEngine channel #" << channel
- << " is associated with channel #" << send_channel << ".";
- }
- if (engine()->voe()->rtp()->SetLocalSSRC(channel,
- receiver_reports_ssrc_) == -1) {
- LOG_RTCERR1(SetLocalSSRC, channel);
- return false;
- }
// Turn off all supported codecs.
- int ncodecs = engine()->voe()->codec()->NumOfCodecs();
- for (int i = 0; i < ncodecs; ++i) {
- webrtc::CodecInst voe_codec;
- if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
- voe_codec.pltype = -1;
- if (engine()->voe()->codec()->SetRecPayloadType(
- channel, voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
- return false;
- }
+ // TODO(solenberg): Remove once "no codecs" is the default state of a stream.
+ for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) {
+ voe_codec.pltype = -1;
+ if (engine()->voe()->codec()->SetRecPayloadType(channel, voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ DeleteVoEChannel(channel);
+ return false;
}
}
// Only enable those configured for this channel.
for (const auto& codec : recv_codecs_) {
webrtc::CodecInst voe_codec;
- if (engine()->FindWebRtcCodec(codec, &voe_codec)) {
+ if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
voe_codec.pltype = codec.id;
if (engine()->voe()->codec()->SetRecPayloadType(
channel, voe_codec) == -1) {
LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ DeleteVoEChannel(channel);
return false;
}
}
}
- SetNack(channel, nack_enabled_);
-
- // Set RTP header extension for the new channel.
- if (!SetChannelRecvRtpHeaderExtensions(channel, receive_extensions_)) {
- return false;
+ const int send_channel = GetSendChannelId(receiver_reports_ssrc_);
+ if (send_channel != -1) {
+    // Associate the receive channel with the first send channel (so the
+    // receive channel can obtain RTT from the send channel).
+ engine()->voe()->base()->AssociateSendChannel(channel, send_channel);
+ LOG(LS_INFO) << "VoiceEngine channel #" << channel
+ << " is associated with channel #" << send_channel << ".";
}
+ recv_streams_.insert(std::make_pair(ssrc, new WebRtcAudioReceiveStream(
+ channel, ssrc, receiver_reports_ssrc_,
+ options_.combined_audio_video_bwe.value_or(false), sp.sync_label,
+ recv_rtp_extensions_, call_)));
+
+ SetNack(channel, nack_enabled_);
SetPlayout(channel, playout_);
+
return true;
}
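
AddRecvStream() above configures receive payload types in two passes: first every codec in the database is disabled (pltype == -1), then only the negotiated codecs are re-enabled with their negotiated ids. A self-contained sketch of that pattern; the CodecInst struct and database contents here are stand-ins, not the real RentACodec interface:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct CodecInst {
  std::string plname;
  int pltype;
};

int main() {
  std::vector<CodecInst> database = {
      {"PCMU", 0}, {"ISAC", 103}, {"opus", 111}};
  std::vector<CodecInst> negotiated = {{"opus", 120}};  // remapped id

  std::map<std::string, int> channel_payload_types;
  // Pass 1: mark every known codec as disabled (pltype == -1).
  for (const auto& c : database) {
    channel_payload_types[c.plname] = -1;
  }
  // Pass 2: enable only the negotiated codecs, with their negotiated ids.
  for (const auto& c : negotiated) {
    channel_payload_types[c.plname] = c.pltype;
  }
  for (const auto& kv : channel_payload_types) {
    std::cout << kv.first << " -> " << kv.second << "\n";
  }
}
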
bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
LOG(LS_INFO) << "RemoveRecvStream: " << ssrc;
- auto it = receive_channels_.find(ssrc);
- if (it == receive_channels_.end()) {
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
<< " which doesn't exist.";
return false;
}
- RemoveAudioReceiveStream(ssrc);
- receive_stream_params_.erase(ssrc);
-
- const int channel = it->second->channel();
- delete it->second;
- receive_channels_.erase(it);
-
// Deregister default channel, if that's the one being destroyed.
if (IsDefaultRecvStream(ssrc)) {
default_recv_ssrc_ = -1;
}
- LOG(LS_INFO) << "Removing audio stream " << ssrc
+ const int channel = it->second->channel();
+
+ // Clean up and delete the receive stream+channel.
+ LOG(LS_INFO) << "Removing audio receive stream " << ssrc
<< " with VoiceEngine channel #" << channel << ".";
- return DeleteChannel(channel);
+ it->second->SetRawAudioSink(nullptr);
+ delete it->second;
+ recv_streams_.erase(it);
+ return DeleteVoEChannel(channel);
}
bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc,
@@ -2337,9 +2064,9 @@ bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc,
bool WebRtcVoiceMediaChannel::GetActiveStreams(
AudioInfo::StreamList* actives) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
actives->clear();
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
int level = GetOutputLevel(ch.second->channel());
if (level > 0) {
actives->push_back(std::make_pair(ch.first, level));
@@ -2349,9 +2076,9 @@ bool WebRtcVoiceMediaChannel::GetActiveStreams(
}
int WebRtcVoiceMediaChannel::GetOutputLevel() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
int highest = 0;
- for (const auto& ch : receive_channels_) {
+ for (const auto& ch : recv_streams_) {
highest = std::max(GetOutputLevel(ch.second->channel()), highest);
}
return highest;
@@ -2383,7 +2110,7 @@ void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
}
bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (ssrc == 0) {
default_recv_volume_ = volume;
if (default_recv_ssrc_ == -1) {
@@ -2408,64 +2135,48 @@ bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) {
}
bool WebRtcVoiceMediaChannel::CanInsertDtmf() {
- return dtmf_allowed_;
+ return dtmf_payload_type_ ? true : false;
}
-bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc,
- int event,
- int duration,
- int flags) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- if (!dtmf_allowed_) {
+bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc, int event,
+ int duration) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf";
+ if (!dtmf_payload_type_) {
return false;
}
- // Send the event.
- if (flags & cricket::DF_SEND) {
- int channel = -1;
- if (ssrc == 0) {
- if (send_streams_.size() > 0) {
- channel = send_streams_.begin()->second->channel();
- }
- } else {
- channel = GetSendChannelId(ssrc);
- }
- if (channel == -1) {
- LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
- << ssrc << " is not in use.";
- return false;
- }
- // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
- if (engine()->voe()->dtmf()->SendTelephoneEvent(
- channel, event, true, duration) == -1) {
- LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
- return false;
- }
+ // Figure out which WebRtcAudioSendStream to send the event on.
+ auto it = ssrc != 0 ? send_streams_.find(ssrc) : send_streams_.begin();
+ if (it == send_streams_.end()) {
+ LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
+ return false;
}
-
- // Play the event.
- if (flags & cricket::DF_PLAY) {
- // Play DTMF tone locally.
- if (engine()->voe()->dtmf()->PlayDtmfTone(event, duration) == -1) {
- LOG_RTCERR2(PlayDtmfTone, event, duration);
- return false;
- }
+ if (event < kMinTelephoneEventCode ||
+ event > kMaxTelephoneEventCode) {
+ LOG(LS_WARNING) << "DTMF event code " << event << " out of range.";
+ return false;
}
-
- return true;
+ if (duration < kMinTelephoneEventDuration ||
+ duration > kMaxTelephoneEventDuration) {
+ LOG(LS_WARNING) << "DTMF event duration " << duration << " out of range.";
+ return false;
+ }
+ return it->second->SendTelephoneEvent(*dtmf_payload_type_, event, duration);
}
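
The new InsertDtmf() validates the telephone-event code and duration before handing the event to the send stream. A standalone sketch of that validation; the kMin*/kMax* bounds are defined elsewhere in the file, so the values below (events 0-255, durations 100-6000 ms) are assumptions for illustration only:

#include <iostream>

namespace {
const int kMinTelephoneEventCode = 0;       // assumed bound
const int kMaxTelephoneEventCode = 255;     // assumed bound
const int kMinTelephoneEventDuration = 100;   // assumed bound, ms
const int kMaxTelephoneEventDuration = 6000;  // assumed bound, ms
}  // namespace

bool ValidateDtmf(int event, int duration_ms) {
  if (event < kMinTelephoneEventCode || event > kMaxTelephoneEventCode)
    return false;
  if (duration_ms < kMinTelephoneEventDuration ||
      duration_ms > kMaxTelephoneEventDuration)
    return false;
  return true;
}

int main() {
  std::cout << ValidateDtmf(2, 123) << "\n";    // 1: in range
  std::cout << ValidateDtmf(2, 10000) << "\n";  // 0: duration too long
}
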
void WebRtcVoiceMediaChannel::OnPacketReceived(
rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
uint32_t ssrc = 0;
if (!GetRtpSsrc(packet->data(), packet->size(), &ssrc)) {
return;
}
- if (receive_channels_.empty()) {
- // Create new channel, which will be the default receive channel.
+ // If we don't have a default channel, and the SSRC is unknown, create a
+ // default channel.
+ if (default_recv_ssrc_ == -1 && GetReceiveChannelId(ssrc) == -1) {
StreamParams sp;
sp.ssrcs.push_back(ssrc);
LOG(LS_INFO) << "Creating default receive stream for SSRC=" << ssrc << ".";
@@ -2485,7 +2196,13 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(
reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
webrtc_packet_time);
if (webrtc::PacketReceiver::DELIVERY_OK != delivery_result) {
- return;
+ // If the SSRC is unknown here, route it to the default channel, if we have
+ // one. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208
+ if (default_recv_ssrc_ == -1) {
+ return;
+ } else {
+ ssrc = default_recv_ssrc_;
+ }
}
// Find the channel to send this packet to. It must exist since webrtc::Call
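
The unsignaled-SSRC handling above first needs the SSRC out of the raw RTP packet (bytes 8-11 of the fixed header, network byte order), then falls back to the default receive stream when webrtc::Call does not claim the packet. A sketch of both steps; GetRtpSsrc() here is a hand-rolled stand-in for the helper used in the file:

#include <cstddef>
#include <cstdint>
#include <iostream>

bool GetRtpSsrc(const uint8_t* data, size_t len, uint32_t* ssrc) {
  if (len < 12) return false;  // minimum fixed RTP header size
  *ssrc = (static_cast<uint32_t>(data[8]) << 24) |
          (static_cast<uint32_t>(data[9]) << 16) |
          (static_cast<uint32_t>(data[10]) << 8) |
          static_cast<uint32_t>(data[11]);
  return true;
}

int main() {
  const uint8_t packet[12] = {0x80, 0, 0, 0, 0, 0,
                              0,    0, 0xDE, 0xAD, 0xBE, 0xEF};
  uint32_t ssrc = 0;
  int64_t default_recv_ssrc = -1;  // -1 means "no default stream yet"
  if (GetRtpSsrc(packet, sizeof(packet), &ssrc)) {
    // If delivery fails and a default stream exists, re-route to it.
    if (default_recv_ssrc != -1)
      ssrc = static_cast<uint32_t>(default_recv_ssrc);
    std::cout << std::hex << ssrc << "\n";  // deadbeef
  }
}
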
@@ -2500,7 +2217,7 @@ void WebRtcVoiceMediaChannel::OnPacketReceived(
void WebRtcVoiceMediaChannel::OnRtcpReceived(
rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
// Forward packet to Call as well.
const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
@@ -2542,7 +2259,7 @@ void WebRtcVoiceMediaChannel::OnRtcpReceived(
}
bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
int channel = GetSendChannelId(ssrc);
if (channel == -1) {
LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
@@ -2601,7 +2318,7 @@ bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) {
return true;
webrtc::CodecInst codec = *send_codec_;
- bool is_multi_rate = IsCodecMultiRate(codec);
+ bool is_multi_rate = WebRtcVoiceCodecs::IsCodecMultiRate(codec);
if (is_multi_rate) {
// If codec is multi-rate then just set the bitrate.
@@ -2629,7 +2346,7 @@ bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) {
}
bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RTC_DCHECK(info);
// Get SSRC and stats for each sender.
@@ -2652,15 +2369,14 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
sinfo.echo_delay_std_ms = stats.echo_delay_std_ms;
sinfo.echo_return_loss = stats.echo_return_loss;
sinfo.echo_return_loss_enhancement = stats.echo_return_loss_enhancement;
- sinfo.typing_noise_detected = typing_noise_detected_;
- // TODO(solenberg): Move to AudioSendStream.
- // sinfo.typing_noise_detected = stats.typing_noise_detected;
+ sinfo.typing_noise_detected =
+ (send_ == SEND_NOTHING ? false : stats.typing_noise_detected);
info->senders.push_back(sinfo);
}
// Get SSRC and stats for each receiver.
RTC_DCHECK(info->receivers.size() == 0);
- for (const auto& stream : receive_streams_) {
+ for (const auto& stream : recv_streams_) {
webrtc::AudioReceiveStream::Stats stats = stream.second->GetStats();
VoiceReceiverInfo rinfo;
rinfo.add_ssrc(stats.remote_ssrc);
@@ -2694,15 +2410,17 @@ bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
return true;
}
-void WebRtcVoiceMediaChannel::OnError(int error) {
- if (send_ == SEND_NOTHING) {
+void WebRtcVoiceMediaChannel::SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink";
+ const auto it = recv_streams_.find(ssrc);
+ if (it == recv_streams_.end()) {
+ LOG(LS_WARNING) << "SetRawAudioSink: no recv stream" << ssrc;
return;
}
- if (error == VE_TYPING_NOISE_WARNING) {
- typing_noise_detected_ = true;
- } else if (error == VE_TYPING_NOISE_OFF_WARNING) {
- typing_noise_detected_ = false;
- }
+ it->second->SetRawAudioSink(std::move(sink));
}
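
SetRawAudioSink() transfers ownership of the sink into the receive stream; passing a null pointer clears it, which is why RemoveRecvStream() above calls SetRawAudioSink(nullptr) before deleting the stream. A minimal sketch of that hand-off, using std::unique_ptr as a stand-in for rtc::scoped_ptr and a stub AudioSinkInterface rather than the real webrtc one:

#include <cstddef>
#include <cstdint>
#include <memory>

struct AudioSinkInterface {
  virtual ~AudioSinkInterface() = default;
  virtual void OnData(const int16_t* audio, size_t samples) = 0;
};

struct NullSink : AudioSinkInterface {
  void OnData(const int16_t*, size_t) override {}
};

class RecvStream {
 public:
  void SetRawAudioSink(std::unique_ptr<AudioSinkInterface> sink) {
    sink_ = std::move(sink);  // any previously installed sink dies here
  }

 private:
  std::unique_ptr<AudioSinkInterface> sink_;
};

int main() {
  RecvStream stream;
  stream.SetRawAudioSink(std::make_unique<NullSink>());  // install
  stream.SetRawAudioSink(nullptr);  // clear, as in RemoveRecvStream()
}
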
int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
@@ -2712,16 +2430,16 @@ int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
}
int WebRtcVoiceMediaChannel::GetReceiveChannelId(uint32_t ssrc) const {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- const auto it = receive_channels_.find(ssrc);
- if (it != receive_channels_.end()) {
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+ const auto it = recv_streams_.find(ssrc);
+ if (it != recv_streams_.end()) {
return it->second->channel();
}
return -1;
}
int WebRtcVoiceMediaChannel::GetSendChannelId(uint32_t ssrc) const {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
const auto it = send_streams_.find(ssrc);
if (it != send_streams_.end()) {
return it->second->channel();
@@ -2762,7 +2480,7 @@ bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
if (codec.id == red_pt) {
// If we find the right codec, that will be the codec we pass to
// SetSendCodec, with the desired payload type.
- if (engine()->FindWebRtcCodec(codec, send_codec)) {
+ if (WebRtcVoiceEngine::ToCodecInst(codec, send_codec)) {
return true;
} else {
break;
@@ -2786,117 +2504,6 @@ bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
}
return true;
}
-
-// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
-VoiceMediaChannel::Error
- WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
- switch (err_code) {
- case 0:
- return ERROR_NONE;
- case VE_CANNOT_START_RECORDING:
- case VE_MIC_VOL_ERROR:
- case VE_GET_MIC_VOL_ERROR:
- case VE_CANNOT_ACCESS_MIC_VOL:
- return ERROR_REC_DEVICE_OPEN_FAILED;
- case VE_SATURATION_WARNING:
- return ERROR_REC_DEVICE_SATURATION;
- case VE_REC_DEVICE_REMOVED:
- return ERROR_REC_DEVICE_REMOVED;
- case VE_RUNTIME_REC_WARNING:
- case VE_RUNTIME_REC_ERROR:
- return ERROR_REC_RUNTIME_ERROR;
- case VE_CANNOT_START_PLAYOUT:
- case VE_SPEAKER_VOL_ERROR:
- case VE_GET_SPEAKER_VOL_ERROR:
- case VE_CANNOT_ACCESS_SPEAKER_VOL:
- return ERROR_PLAY_DEVICE_OPEN_FAILED;
- case VE_RUNTIME_PLAY_WARNING:
- case VE_RUNTIME_PLAY_ERROR:
- return ERROR_PLAY_RUNTIME_ERROR;
- case VE_TYPING_NOISE_WARNING:
- return ERROR_REC_TYPING_NOISE_DETECTED;
- default:
- return VoiceMediaChannel::ERROR_OTHER;
- }
-}
-
-bool WebRtcVoiceMediaChannel::SetHeaderExtension(ExtensionSetterFunction setter,
- int channel_id, const RtpHeaderExtension* extension) {
- bool enable = false;
- int id = 0;
- std::string uri;
- if (extension) {
- enable = true;
- id = extension->id;
- uri = extension->uri;
- }
- if ((engine()->voe()->rtp()->*setter)(channel_id, enable, id) != 0) {
- LOG_RTCERR4(*setter, uri, channel_id, enable, id);
- return false;
- }
- return true;
-}
-
-void WebRtcVoiceMediaChannel::RecreateAudioReceiveStreams() {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- for (const auto& it : receive_channels_) {
- RemoveAudioReceiveStream(it.first);
- }
- for (const auto& it : receive_channels_) {
- AddAudioReceiveStream(it.first);
- }
-}
-
-void WebRtcVoiceMediaChannel::AddAudioReceiveStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- WebRtcAudioReceiveStream* stream = receive_channels_[ssrc];
- RTC_DCHECK(stream != nullptr);
- RTC_DCHECK(receive_streams_.find(ssrc) == receive_streams_.end());
- webrtc::AudioReceiveStream::Config config;
- config.rtp.remote_ssrc = ssrc;
- // Only add RTP extensions if we support combined A/V BWE.
- config.rtp.extensions = recv_rtp_extensions_;
- config.combined_audio_video_bwe =
- options_.combined_audio_video_bwe.GetWithDefaultIfUnset(false);
- config.voe_channel_id = stream->channel();
- config.sync_group = receive_stream_params_[ssrc].sync_label;
- webrtc::AudioReceiveStream* s = call_->CreateAudioReceiveStream(config);
- receive_streams_.insert(std::make_pair(ssrc, s));
-}
-
-void WebRtcVoiceMediaChannel::RemoveAudioReceiveStream(uint32_t ssrc) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- auto stream_it = receive_streams_.find(ssrc);
- if (stream_it != receive_streams_.end()) {
- call_->DestroyAudioReceiveStream(stream_it->second);
- receive_streams_.erase(stream_it);
- }
-}
-
-bool WebRtcVoiceMediaChannel::SetRecvCodecsInternal(
- const std::vector<AudioCodec>& new_codecs) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- for (const AudioCodec& codec : new_codecs) {
- webrtc::CodecInst voe_codec;
- if (engine()->FindWebRtcCodec(codec, &voe_codec)) {
- LOG(LS_INFO) << ToString(codec);
- voe_codec.pltype = codec.id;
- for (const auto& ch : receive_channels_) {
- if (engine()->voe()->codec()->SetRecPayloadType(
- ch.second->channel(), voe_codec) == -1) {
- LOG_RTCERR2(SetRecPayloadType, ch.second->channel(),
- ToString(voe_codec));
- return false;
- }
- }
- } else {
- LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
- return false;
- }
- }
- return true;
-}
-
} // namespace cricket
#endif // HAVE_WEBRTC_VOICE
diff --git a/talk/media/webrtc/webrtcvoiceengine.h b/talk/media/webrtc/webrtcvoiceengine.h
index 1cf05e71a2..0f2f59e492 100644
--- a/talk/media/webrtc/webrtcvoiceengine.h
+++ b/talk/media/webrtc/webrtcvoiceengine.h
@@ -29,7 +29,6 @@
#define TALK_MEDIA_WEBRTCVOICEENGINE_H_
#include <map>
-#include <set>
#include <string>
#include <vector>
@@ -37,9 +36,8 @@
#include "talk/media/webrtc/webrtccommon.h"
#include "talk/media/webrtc/webrtcvoe.h"
#include "talk/session/media/channel.h"
+#include "webrtc/audio_state.h"
#include "webrtc/base/buffer.h"
-#include "webrtc/base/byteorder.h"
-#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/stream.h"
#include "webrtc/base/thread_checker.h"
@@ -51,43 +49,34 @@ namespace cricket {
class AudioDeviceModule;
class AudioRenderer;
-class VoETraceWrapper;
class VoEWrapper;
class WebRtcVoiceMediaChannel;
// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
// It uses the WebRtc VoiceEngine library for audio handling.
-class WebRtcVoiceEngine
- : public webrtc::VoiceEngineObserver,
- public webrtc::TraceCallback {
+class WebRtcVoiceEngine final : public webrtc::TraceCallback {
friend class WebRtcVoiceMediaChannel;
-
public:
+ // Exposed for the WVoE/MC unit test.
+ static bool ToCodecInst(const AudioCodec& in, webrtc::CodecInst* out);
+
WebRtcVoiceEngine();
// Dependency injection for testing.
- WebRtcVoiceEngine(VoEWrapper* voe_wrapper, VoETraceWrapper* tracing);
+ explicit WebRtcVoiceEngine(VoEWrapper* voe_wrapper);
~WebRtcVoiceEngine();
bool Init(rtc::Thread* worker_thread);
void Terminate();
- webrtc::VoiceEngine* GetVoE() { return voe()->engine(); }
+ rtc::scoped_refptr<webrtc::AudioState> GetAudioState() const;
VoiceMediaChannel* CreateChannel(webrtc::Call* call,
const AudioOptions& options);
- AudioOptions GetOptions() const { return options_; }
- bool SetOptions(const AudioOptions& options);
- bool SetDevices(const Device* in_device, const Device* out_device);
bool GetOutputVolume(int* level);
bool SetOutputVolume(int level);
int GetInputLevel();
const std::vector<AudioCodec>& codecs();
- bool FindCodec(const AudioCodec& codec);
- bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);
-
- const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
-
- void SetLogging(int min_sev, const char* filter);
+ RtpCapabilities GetCapabilities() const;
// For tracking WebRtc channels. Needed because we have to pause them
// all when switching devices.
@@ -120,68 +109,49 @@ class WebRtcVoiceEngine
private:
void Construct();
- void ConstructCodecs();
- bool GetVoeCodec(int index, webrtc::CodecInst* codec);
bool InitInternal();
- void SetTraceFilter(int filter);
- void SetTraceOptions(const std::string& options);
// Every option that is "set" will be applied. Every option not "set" will be
// ignored. This allows us to selectively turn on and off different options
// easily at any time.
bool ApplyOptions(const AudioOptions& options);
+ void SetDefaultDevices();
// webrtc::TraceCallback:
void Print(webrtc::TraceLevel level, const char* trace, int length) override;
- // webrtc::VoiceEngineObserver:
- void CallbackOnError(int channel_id, int errCode) override;
-
- // Given the device type, name, and id, find device id. Return true and
- // set the output parameter rtc_id if successful.
- bool FindWebRtcAudioDeviceId(
- bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
-
void StartAecDump(const std::string& filename);
int CreateVoEChannel();
- static const int kDefaultLogSeverity = rtc::LS_WARNING;
+ rtc::ThreadChecker signal_thread_checker_;
+ rtc::ThreadChecker worker_thread_checker_;
// The primary instance of WebRtc VoiceEngine.
rtc::scoped_ptr<VoEWrapper> voe_wrapper_;
- rtc::scoped_ptr<VoETraceWrapper> tracing_;
+ rtc::scoped_refptr<webrtc::AudioState> audio_state_;
// The external audio device manager
- webrtc::AudioDeviceModule* adm_;
- int log_filter_;
- std::string log_options_;
- bool is_dumping_aec_;
+ webrtc::AudioDeviceModule* adm_ = nullptr;
std::vector<AudioCodec> codecs_;
- std::vector<RtpHeaderExtension> rtp_header_extensions_;
std::vector<WebRtcVoiceMediaChannel*> channels_;
- // channels_ can be read from WebRtc callback thread. We need a lock on that
- // callback as well as the RegisterChannel/UnregisterChannel.
- rtc::CriticalSection channels_cs_;
- webrtc::AgcConfig default_agc_config_;
-
webrtc::Config voe_config_;
+ bool initialized_ = false;
+ bool is_dumping_aec_ = false;
- bool initialized_;
- AudioOptions options_;
-
+ webrtc::AgcConfig default_agc_config_;
// Cache received extended_filter_aec, delay_agnostic_aec and experimental_ns
// values, and apply them in case they are missing in the audio options. We
// need to do this because SetExtraOptions() will revert to defaults for
// options which are not provided.
- Settable<bool> extended_filter_aec_;
- Settable<bool> delay_agnostic_aec_;
- Settable<bool> experimental_ns_;
+ rtc::Optional<bool> extended_filter_aec_;
+ rtc::Optional<bool> delay_agnostic_aec_;
+ rtc::Optional<bool> experimental_ns_;
RTC_DISALLOW_COPY_AND_ASSIGN(WebRtcVoiceEngine);
};
// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
// WebRtc Voice Engine.
-class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
- public webrtc::Transport {
+class WebRtcVoiceMediaChannel final : public VoiceMediaChannel,
+ public webrtc::Transport {
public:
WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
const AudioOptions& options,
@@ -217,7 +187,7 @@ class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
bool SetOutputVolume(uint32_t ssrc, double volume) override;
bool CanInsertDtmf() override;
- bool InsertDtmf(uint32_t ssrc, int event, int duration, int flags) override;
+ bool InsertDtmf(uint32_t ssrc, int event, int duration) override;
void OnPacketReceived(rtc::Buffer* packet,
const rtc::PacketTime& packet_time) override;
@@ -226,6 +196,10 @@ class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
void OnReadyToSend(bool ready) override {}
bool GetStats(VoiceMediaInfo* info) override;
+ void SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) override;
+
// implements Transport interface
bool SendRtp(const uint8_t* data,
size_t len,
@@ -243,20 +217,14 @@ class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
return VoiceMediaChannel::SendRtcp(&packet, rtc::PacketOptions());
}
- void OnError(int error);
-
int GetReceiveChannelId(uint32_t ssrc) const;
int GetSendChannelId(uint32_t ssrc) const;
private:
bool SetSendCodecs(const std::vector<AudioCodec>& codecs);
- bool SetSendRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions);
bool SetOptions(const AudioOptions& options);
bool SetMaxSendBandwidth(int bps);
bool SetRecvCodecs(const std::vector<AudioCodec>& codecs);
- bool SetRecvRtpHeaderExtensions(
- const std::vector<RtpHeaderExtension>& extensions);
bool SetLocalRenderer(uint32_t ssrc, AudioRenderer* renderer);
bool MuteStream(uint32_t ssrc, bool mute);
@@ -267,82 +235,55 @@ class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
const std::vector<AudioCodec>& all_codecs,
webrtc::CodecInst* send_codec);
bool SetPlayout(int channel, bool playout);
- static Error WebRtcErrorToChannelError(int err_code);
-
- typedef int (webrtc::VoERTP_RTCP::* ExtensionSetterFunction)(int, bool,
- unsigned char);
-
void SetNack(int channel, bool nack_enabled);
bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
bool ChangePlayout(bool playout);
bool ChangeSend(SendFlags send);
bool ChangeSend(int channel, SendFlags send);
- bool ConfigureRecvChannel(int channel);
int CreateVoEChannel();
- bool DeleteChannel(int channel);
+ bool DeleteVoEChannel(int channel);
bool IsDefaultRecvStream(uint32_t ssrc) {
return default_recv_ssrc_ == static_cast<int64_t>(ssrc);
}
bool SetSendCodecs(int channel, const std::vector<AudioCodec>& codecs);
bool SetSendBitrateInternal(int bps);
- bool SetHeaderExtension(ExtensionSetterFunction setter, int channel_id,
- const RtpHeaderExtension* extension);
- void RecreateAudioReceiveStreams();
- void AddAudioReceiveStream(uint32_t ssrc);
- void RemoveAudioReceiveStream(uint32_t ssrc);
- bool SetRecvCodecsInternal(const std::vector<AudioCodec>& new_codecs);
-
- bool SetChannelRecvRtpHeaderExtensions(
- int channel_id,
- const std::vector<RtpHeaderExtension>& extensions);
- bool SetChannelSendRtpHeaderExtensions(
- int channel_id,
- const std::vector<RtpHeaderExtension>& extensions);
+ rtc::ThreadChecker worker_thread_checker_;
- rtc::ThreadChecker thread_checker_;
-
- WebRtcVoiceEngine* const engine_;
+ WebRtcVoiceEngine* const engine_ = nullptr;
std::vector<AudioCodec> recv_codecs_;
std::vector<AudioCodec> send_codecs_;
rtc::scoped_ptr<webrtc::CodecInst> send_codec_;
- bool send_bitrate_setting_;
- int send_bitrate_bps_;
+ bool send_bitrate_setting_ = false;
+ int send_bitrate_bps_ = 0;
AudioOptions options_;
- bool dtmf_allowed_;
- bool desired_playout_;
- bool nack_enabled_;
- bool playout_;
- bool typing_noise_detected_;
- SendFlags desired_send_;
- SendFlags send_;
- webrtc::Call* const call_;
+ rtc::Optional<int> dtmf_payload_type_;
+ bool desired_playout_ = false;
+ bool nack_enabled_ = false;
+ bool playout_ = false;
+ SendFlags desired_send_ = SEND_NOTHING;
+ SendFlags send_ = SEND_NOTHING;
+ webrtc::Call* const call_ = nullptr;
// SSRC of unsignalled receive stream, or -1 if there isn't one.
int64_t default_recv_ssrc_ = -1;
// Volume for unsignalled stream, which may be set before the stream exists.
double default_recv_volume_ = 1.0;
- // SSRC to use for RTCP receiver reports; default to 1 in case of no signaled
+ // Default SSRC to use for RTCP receiver reports in case of no signaled
// send streams. See: https://code.google.com/p/webrtc/issues/detail?id=4740
- uint32_t receiver_reports_ssrc_ = 1;
+ // and https://code.google.com/p/chromium/issues/detail?id=547661
+ uint32_t receiver_reports_ssrc_ = 0xFA17FA17u;
class WebRtcAudioSendStream;
std::map<uint32_t, WebRtcAudioSendStream*> send_streams_;
- std::vector<RtpHeaderExtension> send_extensions_;
+ std::vector<webrtc::RtpExtension> send_rtp_extensions_;
class WebRtcAudioReceiveStream;
- std::map<uint32_t, WebRtcAudioReceiveStream*> receive_channels_;
- std::map<uint32_t, webrtc::AudioReceiveStream*> receive_streams_;
- std::map<uint32_t, StreamParams> receive_stream_params_;
- // receive_channels_ can be read from WebRtc callback thread. Access from
- // the WebRtc thread must be synchronized with edits on the worker thread.
- // Reads on the worker thread are ok.
- std::vector<RtpHeaderExtension> receive_extensions_;
+ std::map<uint32_t, WebRtcAudioReceiveStream*> recv_streams_;
std::vector<webrtc::RtpExtension> recv_rtp_extensions_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcVoiceMediaChannel);
};
-
} // namespace cricket
#endif // TALK_MEDIA_WEBRTCVOICEENGINE_H_
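
The header diff above replaces Settable<T> members with rtc::Optional<T> plus in-class initializers. The point of the Optional cache is that ApplyOptions()-style code only touches options that were explicitly set. A sketch of the same pattern using std::optional, the later standard-library analogue of rtc::Optional; AudioOptions here is a stand-in struct:

#include <iostream>
#include <optional>

struct AudioOptions {
  std::optional<bool> extended_filter_aec;  // unset unless explicitly given
};

void Apply(const AudioOptions& options, bool* cached) {
  // Only explicitly set options are applied; unset ones keep the value
  // cached from an earlier call, which is exactly why the cache exists.
  if (options.extended_filter_aec) {
    *cached = *options.extended_filter_aec;
  }
}

int main() {
  bool cached = false;
  AudioOptions a;     // nothing set: cache untouched
  Apply(a, &cached);
  a.extended_filter_aec = true;
  Apply(a, &cached);  // now cached == true
  std::cout << cached << "\n";
}
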
diff --git a/talk/media/webrtc/webrtcvoiceengine_unittest.cc b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
index ce5115cb10..a62bcb225f 100644
--- a/talk/media/webrtc/webrtcvoiceengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -25,6 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/byteorder.h"
#include "webrtc/base/gunit.h"
#include "webrtc/call.h"
@@ -53,10 +54,6 @@ const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
const cricket::AudioCodec kTelephoneEventCodec(106, "telephone-event", 8000, 0,
1, 0);
-const cricket::AudioCodec* const kAudioCodecs[] = {
- &kPcmuCodec, &kIsacCodec, &kOpusCodec, &kG722CodecVoE, &kRedCodec,
- &kCn8000Codec, &kCn16000Codec, &kTelephoneEventCodec,
-};
const uint32_t kSsrc1 = 0x99;
const uint32_t kSsrc2 = 0x98;
const uint32_t kSsrcs4[] = { 1, 2, 3, 4 };
@@ -67,37 +64,22 @@ class FakeVoEWrapper : public cricket::VoEWrapper {
: cricket::VoEWrapper(engine, // processing
engine, // base
engine, // codec
- engine, // dtmf
engine, // hw
engine, // network
engine, // rtp
engine) { // volume
}
};
-
-class FakeVoETraceWrapper : public cricket::VoETraceWrapper {
- public:
- int SetTraceFilter(const unsigned int filter) override {
- filter_ = filter;
- return 0;
- }
- int SetTraceFile(const char* fileNameUTF8) override { return 0; }
- int SetTraceCallback(webrtc::TraceCallback* callback) override { return 0; }
- unsigned int filter_;
-};
} // namespace
class WebRtcVoiceEngineTestFake : public testing::Test {
public:
WebRtcVoiceEngineTestFake()
: call_(webrtc::Call::Config()),
- voe_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
- trace_wrapper_(new FakeVoETraceWrapper()),
- engine_(new FakeVoEWrapper(&voe_), trace_wrapper_),
+ engine_(new FakeVoEWrapper(&voe_)),
channel_(nullptr) {
send_parameters_.codecs.push_back(kPcmuCodec);
recv_parameters_.codecs.push_back(kPcmuCodec);
- options_adjust_agc_.adjust_agc_delta.Set(-10);
}
bool SetupEngine() {
if (!engine_.Init(rtc::Thread::Current())) {
@@ -123,12 +105,10 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
void SetupForMultiSendStream() {
EXPECT_TRUE(SetupEngineWithSendStream());
// Remove stream added in Setup.
- int default_channel_num = voe_.GetLastChannel();
- EXPECT_EQ(kSsrc1, voe_.GetLocalSSRC(default_channel_num));
+ EXPECT_TRUE(call_.GetAudioSendStream(kSsrc1));
EXPECT_TRUE(channel_->RemoveSendStream(kSsrc1));
-
// Verify the channel does not exist.
- EXPECT_EQ(-1, voe_.GetChannelFromLocalSsrc(kSsrc1));
+ EXPECT_FALSE(call_.GetAudioSendStream(kSsrc1));
}
void DeliverPacket(const void* data, int len) {
rtc::Buffer packet(reinterpret_cast<const uint8_t*>(data), len);
@@ -139,6 +119,24 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
engine_.Terminate();
}
+ const cricket::FakeAudioSendStream& GetSendStream(uint32_t ssrc) {
+ const auto* send_stream = call_.GetAudioSendStream(ssrc);
+ EXPECT_TRUE(send_stream);
+ return *send_stream;
+ }
+
+ const webrtc::AudioSendStream::Config& GetSendStreamConfig(uint32_t ssrc) {
+ const auto* send_stream = call_.GetAudioSendStream(ssrc);
+ EXPECT_TRUE(send_stream);
+ return send_stream->GetConfig();
+ }
+
+ const webrtc::AudioReceiveStream::Config& GetRecvStreamConfig(uint32_t ssrc) {
+ const auto* recv_stream = call_.GetAudioReceiveStream(ssrc);
+ EXPECT_TRUE(recv_stream);
+ return recv_stream->GetConfig();
+ }
+
void TestInsertDtmf(uint32_t ssrc, bool caller) {
EXPECT_TRUE(engine_.Init(rtc::Thread::Current()));
channel_ = engine_.CreateChannel(&call_, cricket::AudioOptions());
@@ -154,39 +152,30 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
EXPECT_FALSE(channel_->CanInsertDtmf());
- EXPECT_FALSE(channel_->InsertDtmf(ssrc, 1, 111, cricket::DF_SEND));
+ EXPECT_FALSE(channel_->InsertDtmf(ssrc, 1, 111));
send_parameters_.codecs.push_back(kTelephoneEventCodec);
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_TRUE(channel_->CanInsertDtmf());
if (!caller) {
// If this is callee, there's no active send channel yet.
- EXPECT_FALSE(channel_->InsertDtmf(ssrc, 2, 123, cricket::DF_SEND));
+ EXPECT_FALSE(channel_->InsertDtmf(ssrc, 2, 123));
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrc1)));
}
// Check we fail if the ssrc is invalid.
- EXPECT_FALSE(channel_->InsertDtmf(-1, 1, 111, cricket::DF_SEND));
-
- // Test send
- int channel_id = voe_.GetLastChannel();
- EXPECT_FALSE(voe_.WasSendTelephoneEventCalled(channel_id, 2, 123));
- EXPECT_TRUE(channel_->InsertDtmf(ssrc, 2, 123, cricket::DF_SEND));
- EXPECT_TRUE(voe_.WasSendTelephoneEventCalled(channel_id, 2, 123));
-
- // Test play
- EXPECT_FALSE(voe_.WasPlayDtmfToneCalled(3, 134));
- EXPECT_TRUE(channel_->InsertDtmf(ssrc, 3, 134, cricket::DF_PLAY));
- EXPECT_TRUE(voe_.WasPlayDtmfToneCalled(3, 134));
-
- // Test send and play
- EXPECT_FALSE(voe_.WasSendTelephoneEventCalled(channel_id, 4, 145));
- EXPECT_FALSE(voe_.WasPlayDtmfToneCalled(4, 145));
- EXPECT_TRUE(channel_->InsertDtmf(ssrc, 4, 145,
- cricket::DF_PLAY | cricket::DF_SEND));
- EXPECT_TRUE(voe_.WasSendTelephoneEventCalled(channel_id, 4, 145));
- EXPECT_TRUE(voe_.WasPlayDtmfToneCalled(4, 145));
+ EXPECT_FALSE(channel_->InsertDtmf(-1, 1, 111));
+
+ // Test send.
+ cricket::FakeAudioSendStream::TelephoneEvent telephone_event =
+ GetSendStream(kSsrc1).GetLatestTelephoneEvent();
+ EXPECT_EQ(-1, telephone_event.payload_type);
+ EXPECT_TRUE(channel_->InsertDtmf(ssrc, 2, 123));
+ telephone_event = GetSendStream(kSsrc1).GetLatestTelephoneEvent();
+ EXPECT_EQ(kTelephoneEventCodec.id, telephone_event.payload_type);
+ EXPECT_EQ(2, telephone_event.event_code);
+ EXPECT_EQ(123, telephone_event.duration_ms);
}
// Test that send bandwidth is set correctly.
@@ -211,81 +200,85 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
void TestSetSendRtpHeaderExtensions(const std::string& ext) {
EXPECT_TRUE(SetupEngineWithSendStream());
- int channel_num = voe_.GetLastChannel();
// Ensure extensions are off by default.
- EXPECT_EQ(-1, voe_.GetSendRtpExtensionId(channel_num, ext));
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrc1).rtp.extensions.size());
// Ensure unknown extensions won't cause an error.
send_parameters_.extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:unknownextention", 1));
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_EQ(-1, voe_.GetSendRtpExtensionId(channel_num, ext));
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrc1).rtp.extensions.size());
// Ensure extensions stay off with an empty list of headers.
send_parameters_.extensions.clear();
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_EQ(-1, voe_.GetSendRtpExtensionId(channel_num, ext));
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrc1).rtp.extensions.size());
// Ensure extension is set properly.
const int id = 1;
send_parameters_.extensions.push_back(cricket::RtpHeaderExtension(ext, id));
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_EQ(id, voe_.GetSendRtpExtensionId(channel_num, ext));
+ EXPECT_EQ(1u, GetSendStreamConfig(kSsrc1).rtp.extensions.size());
+ EXPECT_EQ(ext, GetSendStreamConfig(kSsrc1).rtp.extensions[0].name);
+ EXPECT_EQ(id, GetSendStreamConfig(kSsrc1).rtp.extensions[0].id);
- // Ensure extension is set properly on new channels.
+ // Ensure extension is set properly on new stream.
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(kSsrc2)));
- int new_channel_num = voe_.GetLastChannel();
- EXPECT_NE(channel_num, new_channel_num);
- EXPECT_EQ(id, voe_.GetSendRtpExtensionId(new_channel_num, ext));
+ EXPECT_NE(call_.GetAudioSendStream(kSsrc1),
+ call_.GetAudioSendStream(kSsrc2));
+ EXPECT_EQ(1u, GetSendStreamConfig(kSsrc2).rtp.extensions.size());
+ EXPECT_EQ(ext, GetSendStreamConfig(kSsrc2).rtp.extensions[0].name);
+ EXPECT_EQ(id, GetSendStreamConfig(kSsrc2).rtp.extensions[0].id);
// Ensure all extensions go back off with an empty list.
send_parameters_.codecs.push_back(kPcmuCodec);
send_parameters_.extensions.clear();
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
- EXPECT_EQ(-1, voe_.GetSendRtpExtensionId(channel_num, ext));
- EXPECT_EQ(-1, voe_.GetSendRtpExtensionId(new_channel_num, ext));
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrc1).rtp.extensions.size());
+ EXPECT_EQ(0u, GetSendStreamConfig(kSsrc2).rtp.extensions.size());
}
void TestSetRecvRtpHeaderExtensions(const std::string& ext) {
EXPECT_TRUE(SetupEngineWithRecvStream());
- int channel_num = voe_.GetLastChannel();
// Ensure extensions are off by default.
- EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(channel_num, ext));
+ EXPECT_EQ(0u, GetRecvStreamConfig(kSsrc1).rtp.extensions.size());
- cricket::AudioRecvParameters parameters;
// Ensure unknown extensions won't cause an error.
- parameters.extensions.push_back(cricket::RtpHeaderExtension(
+ recv_parameters_.extensions.push_back(cricket::RtpHeaderExtension(
"urn:ietf:params:unknownextention", 1));
- EXPECT_TRUE(channel_->SetRecvParameters(parameters));
- EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(channel_num, ext));
+ EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
+ EXPECT_EQ(0u, GetRecvStreamConfig(kSsrc1).rtp.extensions.size());
// Ensure extensions stay off with an empty list of headers.
- parameters.extensions.clear();
- EXPECT_TRUE(channel_->SetRecvParameters(parameters));
- EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(channel_num, ext));
+ recv_parameters_.extensions.clear();
+ EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
+ EXPECT_EQ(0u, GetRecvStreamConfig(kSsrc1).rtp.extensions.size());
// Ensure extension is set properly.
const int id = 2;
- parameters.extensions.push_back(cricket::RtpHeaderExtension(ext, id));
- EXPECT_TRUE(channel_->SetRecvParameters(parameters));
- EXPECT_EQ(id, voe_.GetReceiveRtpExtensionId(channel_num, ext));
+ recv_parameters_.extensions.push_back(cricket::RtpHeaderExtension(ext, id));
+ EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
+ EXPECT_EQ(1u, GetRecvStreamConfig(kSsrc1).rtp.extensions.size());
+ EXPECT_EQ(ext, GetRecvStreamConfig(kSsrc1).rtp.extensions[0].name);
+ EXPECT_EQ(id, GetRecvStreamConfig(kSsrc1).rtp.extensions[0].id);
- // Ensure extension is set properly on new channel.
- // The first stream to occupy the default channel.
+ // Ensure extension is set properly on new stream.
EXPECT_TRUE(channel_->AddRecvStream(
cricket::StreamParams::CreateLegacy(kSsrc2)));
- int new_channel_num = voe_.GetLastChannel();
- EXPECT_NE(channel_num, new_channel_num);
- EXPECT_EQ(id, voe_.GetReceiveRtpExtensionId(new_channel_num, ext));
+ EXPECT_NE(call_.GetAudioReceiveStream(kSsrc1),
+ call_.GetAudioReceiveStream(kSsrc2));
+ EXPECT_EQ(1u, GetRecvStreamConfig(kSsrc2).rtp.extensions.size());
+ EXPECT_EQ(ext, GetRecvStreamConfig(kSsrc2).rtp.extensions[0].name);
+ EXPECT_EQ(id, GetRecvStreamConfig(kSsrc2).rtp.extensions[0].id);
// Ensure all extensions go back off with an empty list.
- parameters.extensions.clear();
- EXPECT_TRUE(channel_->SetRecvParameters(parameters));
- EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(channel_num, ext));
- EXPECT_EQ(-1, voe_.GetReceiveRtpExtensionId(new_channel_num, ext));
+ recv_parameters_.extensions.clear();
+ EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
+ EXPECT_EQ(0u, GetRecvStreamConfig(kSsrc1).rtp.extensions.size());
+ EXPECT_EQ(0u, GetRecvStreamConfig(kSsrc2).rtp.extensions.size());
}
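
Both rewritten tests above assert on the same model: each stream config carries a list of (name, id) RTP header extensions that is replaced wholesale on every SetSendParameters()/SetRecvParameters() call, unknown URIs are silently filtered, and an empty list turns everything off. A sketch of that state, with a stand-in RtpExtension rather than the real webrtc::RtpExtension:

#include <iostream>
#include <string>
#include <vector>

struct RtpExtension {
  std::string name;  // e.g. an audio-level header extension URI
  int id;
};

int main() {
  std::vector<RtpExtension> extensions;
  // Unknown URIs would be dropped before reaching this list rather than
  // causing SetSendParameters()/SetRecvParameters() to fail.
  extensions.push_back({"urn:ietf:params:rtp-hdrext:ssrc-audio-level", 1});
  std::cout << extensions.size() << " extension(s), id=" << extensions[0].id
            << "\n";
  extensions.clear();  // "extensions go back off with an empty list"
  std::cout << extensions.size() << "\n";
}
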
webrtc::AudioSendStream::Stats GetAudioSendStreamStats() const {
@@ -313,7 +306,8 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
s->SetStats(GetAudioSendStreamStats());
}
}
- void VerifyVoiceSenderInfo(const cricket::VoiceSenderInfo& info) {
+ void VerifyVoiceSenderInfo(const cricket::VoiceSenderInfo& info,
+ bool is_sending) {
const auto stats = GetAudioSendStreamStats();
EXPECT_EQ(info.ssrc(), stats.local_ssrc);
EXPECT_EQ(info.bytes_sent, stats.bytes_sent);
@@ -331,8 +325,8 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
EXPECT_EQ(info.echo_return_loss, stats.echo_return_loss);
EXPECT_EQ(info.echo_return_loss_enhancement,
stats.echo_return_loss_enhancement);
- // TODO(solenberg): Move typing noise detection into AudioSendStream.
- // EXPECT_EQ(info.typing_noise_detected, stats.typing_noise_detected);
+ EXPECT_EQ(info.typing_noise_detected,
+ stats.typing_noise_detected && is_sending);
}
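
The typing-noise change visible in both the .cc and the test above gates the stat on the send state: detection is only reported while the channel is actually sending, otherwise it is forced to false. A tiny sketch of that gating; the enum values mirror the cricket::SendFlags names but are declared locally here:

#include <iostream>

enum SendFlags { SEND_NOTHING, SEND_MICROPHONE };

bool ReportedTypingNoise(SendFlags send, bool detected_by_apm) {
  return send == SEND_NOTHING ? false : detected_by_apm;
}

int main() {
  std::cout << ReportedTypingNoise(SEND_NOTHING, true) << "\n";     // 0
  std::cout << ReportedTypingNoise(SEND_MICROPHONE, true) << "\n";  // 1
}
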
webrtc::AudioReceiveStream::Stats GetAudioReceiveStreamStats() const {
@@ -401,13 +395,10 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
protected:
cricket::FakeCall call_;
cricket::FakeWebRtcVoiceEngine voe_;
- FakeVoETraceWrapper* trace_wrapper_;
cricket::WebRtcVoiceEngine engine_;
cricket::VoiceMediaChannel* channel_;
-
cricket::AudioSendParameters send_parameters_;
cricket::AudioRecvParameters recv_parameters_;
- cricket::AudioOptions options_adjust_agc_;
};
// Tests that our stub library "works".
@@ -448,32 +439,33 @@ TEST_F(WebRtcVoiceEngineTestFake, FindCodec) {
cricket::AudioCodec codec;
webrtc::CodecInst codec_inst;
// Find PCMU with explicit clockrate and bitrate.
- EXPECT_TRUE(engine_.FindWebRtcCodec(kPcmuCodec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(kPcmuCodec, &codec_inst));
// Find ISAC with explicit clockrate and 0 bitrate.
- EXPECT_TRUE(engine_.FindWebRtcCodec(kIsacCodec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(kIsacCodec, &codec_inst));
// Find telephone-event with explicit clockrate and 0 bitrate.
- EXPECT_TRUE(engine_.FindWebRtcCodec(kTelephoneEventCodec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(kTelephoneEventCodec,
+ &codec_inst));
// Find ISAC with a different payload id.
codec = kIsacCodec;
codec.id = 127;
- EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(codec, &codec_inst));
EXPECT_EQ(codec.id, codec_inst.pltype);
// Find PCMU with a 0 clockrate.
codec = kPcmuCodec;
codec.clockrate = 0;
- EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(codec, &codec_inst));
EXPECT_EQ(codec.id, codec_inst.pltype);
EXPECT_EQ(8000, codec_inst.plfreq);
// Find PCMU with a 0 bitrate.
codec = kPcmuCodec;
codec.bitrate = 0;
- EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(codec, &codec_inst));
EXPECT_EQ(codec.id, codec_inst.pltype);
EXPECT_EQ(64000, codec_inst.rate);
// Find ISAC with an explicit bitrate.
codec = kIsacCodec;
codec.bitrate = 32000;
- EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(codec, &codec_inst));
EXPECT_EQ(codec.id, codec_inst.pltype);
EXPECT_EQ(32000, codec_inst.rate);
}
@@ -492,14 +484,13 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num = voe_.GetLastChannel();
webrtc::CodecInst gcodec;
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "ISAC");
gcodec.plfreq = 16000;
gcodec.channels = 1;
EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num, gcodec));
EXPECT_EQ(106, gcodec.pltype);
EXPECT_STREQ("ISAC", gcodec.plname);
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname),
- "telephone-event");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "telephone-event");
gcodec.plfreq = 8000;
EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num, gcodec));
EXPECT_EQ(126, gcodec.pltype);
@@ -537,7 +528,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num = voe_.GetLastChannel();
webrtc::CodecInst opus;
- engine_.FindWebRtcCodec(kOpusCodec, &opus);
+ cricket::WebRtcVoiceEngine::ToCodecInst(kOpusCodec, &opus);
// Even without stereo parameters, recv codecs still specify channels = 2.
EXPECT_EQ(2, opus.channels);
EXPECT_EQ(111, opus.pltype);
@@ -560,7 +551,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num2 = voe_.GetLastChannel();
webrtc::CodecInst opus;
- engine_.FindWebRtcCodec(kOpusCodec, &opus);
+ cricket::WebRtcVoiceEngine::ToCodecInst(kOpusCodec, &opus);
// Even when stereo is off, recv codecs still specify channels = 2.
EXPECT_EQ(2, opus.channels);
EXPECT_EQ(111, opus.pltype);
@@ -583,7 +574,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num2 = voe_.GetLastChannel();
webrtc::CodecInst opus;
- engine_.FindWebRtcCodec(kOpusCodec, &opus);
+ cricket::WebRtcVoiceEngine::ToCodecInst(kOpusCodec, &opus);
EXPECT_EQ(2, opus.channels);
EXPECT_EQ(111, opus.pltype);
EXPECT_STREQ("opus", opus.plname);
@@ -606,14 +597,13 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num2 = voe_.GetLastChannel();
webrtc::CodecInst gcodec;
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "ISAC");
gcodec.plfreq = 16000;
gcodec.channels = 1;
EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
EXPECT_EQ(106, gcodec.pltype);
EXPECT_STREQ("ISAC", gcodec.plname);
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname),
- "telephone-event");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "telephone-event");
gcodec.plfreq = 8000;
gcodec.channels = 1;
EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
@@ -630,7 +620,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
int channel_num2 = voe_.GetLastChannel();
webrtc::CodecInst gcodec;
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "ISAC");
gcodec.plfreq = 16000;
gcodec.channels = 1;
EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
@@ -669,7 +659,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
int channel_num = voe_.GetLastChannel();
EXPECT_TRUE(voe_.GetPlayout(channel_num));
webrtc::CodecInst gcodec;
- EXPECT_TRUE(engine_.FindWebRtcCodec(kOpusCodec, &gcodec));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(kOpusCodec, &gcodec));
EXPECT_EQ(kOpusCodec.id, gcodec.pltype);
}
@@ -782,7 +772,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecs) {
EXPECT_FALSE(voe_.GetRED(channel_num));
EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
- EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+ EXPECT_FALSE(channel_->CanInsertDtmf());
}
// Test that VoE Channel doesn't call SetSendCodec again if same codec is tried
@@ -1623,7 +1613,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
EXPECT_EQ(96, gcodec.pltype);
EXPECT_STREQ("ISAC", gcodec.plname);
- EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+ EXPECT_TRUE(channel_->CanInsertDtmf());
}
// Test that we can set send codecs even with CN codec as the first
@@ -1669,7 +1659,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
EXPECT_FALSE(voe_.GetRED(channel_num));
EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
- EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+ EXPECT_TRUE(channel_->CanInsertDtmf());
}
// Test that we set VAD and DTMF types correctly as callee.
@@ -1702,7 +1692,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
EXPECT_FALSE(voe_.GetRED(channel_num));
EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
- EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+ EXPECT_TRUE(channel_->CanInsertDtmf());
}
// Test that we only apply VAD if we have a CN codec that matches the
@@ -1766,7 +1756,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
EXPECT_FALSE(voe_.GetRED(channel_num));
EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
- EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+ EXPECT_TRUE(channel_->CanInsertDtmf());
}
// Test that we set up RED correctly as caller.
@@ -1976,21 +1966,16 @@ TEST_F(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(ssrc)));
- EXPECT_NE(nullptr, call_.GetAudioSendStream(ssrc));
-
// Verify that we are in a sending state for all the created streams.
- int channel_num = voe_.GetChannelFromLocalSsrc(ssrc);
- EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(voe_.GetSend(GetSendStreamConfig(ssrc).voe_channel_id));
}
- EXPECT_EQ(ARRAY_SIZE(kSsrcs4), call_.GetAudioSendStreams().size());
+ EXPECT_EQ(arraysize(kSsrcs4), call_.GetAudioSendStreams().size());
// Delete the send streams.
for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(channel_->RemoveSendStream(ssrc));
- EXPECT_EQ(nullptr, call_.GetAudioSendStream(ssrc));
- // Stream should already be deleted.
+ EXPECT_FALSE(call_.GetAudioSendStream(ssrc));
EXPECT_FALSE(channel_->RemoveSendStream(ssrc));
- EXPECT_EQ(-1, voe_.GetChannelFromLocalSsrc(ssrc));
}
EXPECT_EQ(0u, call_.GetAudioSendStreams().size());
}
@@ -2015,7 +2000,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
  // Verify ISAC and VAD are correctly configured on all send channels.
webrtc::CodecInst gcodec;
for (uint32_t ssrc : kSsrcs4) {
- int channel_num = voe_.GetChannelFromLocalSsrc(ssrc);
+ int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
EXPECT_STREQ("ISAC", gcodec.plname);
EXPECT_TRUE(voe_.GetVAD(channel_num));
@@ -2026,7 +2011,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
parameters.codecs[0] = kPcmuCodec;
EXPECT_TRUE(channel_->SetSendParameters(parameters));
for (uint32_t ssrc : kSsrcs4) {
- int channel_num = voe_.GetChannelFromLocalSsrc(ssrc);
+ int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
EXPECT_STREQ("PCMU", gcodec.plname);
EXPECT_FALSE(voe_.GetVAD(channel_num));
@@ -2049,7 +2034,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
for (uint32_t ssrc : kSsrcs4) {
// Verify that we are in a sending state for all the send streams.
- int channel_num = voe_.GetChannelFromLocalSsrc(ssrc);
+ int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
EXPECT_TRUE(voe_.GetSend(channel_num));
}
@@ -2057,7 +2042,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
for (uint32_t ssrc : kSsrcs4) {
// Verify that we are in a stop state for all the send streams.
- int channel_num = voe_.GetChannelFromLocalSsrc(ssrc);
+ int channel_num = GetSendStreamConfig(ssrc).voe_channel_id;
EXPECT_FALSE(voe_.GetSend(channel_num));
}
}
@@ -2087,9 +2072,9 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
EXPECT_EQ(true, channel_->GetStats(&info));
// We have added 4 send streams. We should see empty stats for all.
- EXPECT_EQ(static_cast<size_t>(ARRAY_SIZE(kSsrcs4)), info.senders.size());
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)), info.senders.size());
for (const auto& sender : info.senders) {
- VerifyVoiceSenderInfo(sender);
+ VerifyVoiceSenderInfo(sender, false);
}
// We have added one receive stream. We should see empty stats.
@@ -2102,7 +2087,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
cricket::VoiceMediaInfo info;
EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
EXPECT_EQ(true, channel_->GetStats(&info));
- EXPECT_EQ(static_cast<size_t>(ARRAY_SIZE(kSsrcs4)), info.senders.size());
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)), info.senders.size());
EXPECT_EQ(0u, info.receivers.size());
}
@@ -2113,7 +2098,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
SetAudioReceiveStreamStats();
EXPECT_EQ(true, channel_->GetStats(&info));
- EXPECT_EQ(static_cast<size_t>(ARRAY_SIZE(kSsrcs4)), info.senders.size());
+ EXPECT_EQ(static_cast<size_t>(arraysize(kSsrcs4)), info.senders.size());
EXPECT_EQ(1u, info.receivers.size());
VerifyVoiceReceiverInfo(info.receivers[0]);
}
@@ -2173,96 +2158,17 @@ TEST_F(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
EXPECT_FALSE(voe_.GetPlayout(channel_num1));
}
-// Test that we can set the devices to use.
-TEST_F(WebRtcVoiceEngineTestFake, SetDevices) {
- EXPECT_TRUE(SetupEngineWithSendStream());
- int send_channel = voe_.GetLastChannel();
- EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
- int recv_channel = voe_.GetLastChannel();
- EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
-
- cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
- cricket::kFakeDefaultDeviceId);
- cricket::Device dev(cricket::kFakeDeviceName,
- cricket::kFakeDeviceId);
-
- // Test SetDevices() while not sending or playing.
- EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
-
- // Test SetDevices() while sending and playing.
- EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
- EXPECT_TRUE(channel_->SetPlayout(true));
- EXPECT_TRUE(voe_.GetSend(send_channel));
- EXPECT_TRUE(voe_.GetPlayout(recv_channel));
-
- EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
-
- EXPECT_TRUE(voe_.GetSend(send_channel));
- EXPECT_TRUE(voe_.GetPlayout(recv_channel));
-
- // Test that failure to open newly selected devices does not prevent opening
- // ones after that.
- voe_.set_playout_fail_channel(recv_channel);
- voe_.set_send_fail_channel(send_channel);
-
- EXPECT_FALSE(engine_.SetDevices(&default_dev, &default_dev));
-
- EXPECT_FALSE(voe_.GetSend(send_channel));
- EXPECT_FALSE(voe_.GetPlayout(recv_channel));
-
- voe_.set_playout_fail_channel(-1);
- voe_.set_send_fail_channel(-1);
-
- EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
-
- EXPECT_TRUE(voe_.GetSend(send_channel));
- EXPECT_TRUE(voe_.GetPlayout(recv_channel));
-}
-
-// Test that we can set the devices to use even if we failed to
-// open the initial ones.
-TEST_F(WebRtcVoiceEngineTestFake, SetDevicesWithInitiallyBadDevices) {
- EXPECT_TRUE(SetupEngineWithSendStream());
- int send_channel = voe_.GetLastChannel();
- EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
- int recv_channel = voe_.GetLastChannel();
- EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
-
- cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
- cricket::kFakeDefaultDeviceId);
- cricket::Device dev(cricket::kFakeDeviceName,
- cricket::kFakeDeviceId);
-
- // Test that failure to open devices selected before starting
- // send/play does not prevent opening newly selected ones after that.
- voe_.set_playout_fail_channel(recv_channel);
- voe_.set_send_fail_channel(send_channel);
-
- EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
-
- EXPECT_FALSE(channel_->SetSend(cricket::SEND_MICROPHONE));
- EXPECT_FALSE(channel_->SetPlayout(true));
- EXPECT_FALSE(voe_.GetSend(send_channel));
- EXPECT_FALSE(voe_.GetPlayout(recv_channel));
-
- voe_.set_playout_fail_channel(-1);
- voe_.set_send_fail_channel(-1);
-
- EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
-
- EXPECT_TRUE(voe_.GetSend(send_channel));
- EXPECT_TRUE(voe_.GetPlayout(recv_channel));
-}
-
// Test that we can create a channel configured for Codian bridges,
// and start sending on it.
TEST_F(WebRtcVoiceEngineTestFake, CodianSend) {
EXPECT_TRUE(SetupEngineWithSendStream());
+ cricket::AudioOptions options_adjust_agc;
+ options_adjust_agc.adjust_agc_delta = rtc::Optional<int>(-10);
int channel_num = voe_.GetLastChannel();
webrtc::AgcConfig agc_config;
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(0, agc_config.targetLeveldBOv);
- send_parameters_.options = options_adjust_agc_;
+ send_parameters_.options = options_adjust_agc;
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
EXPECT_TRUE(voe_.GetSend(channel_num));
@@ -2271,7 +2177,6 @@ TEST_F(WebRtcVoiceEngineTestFake, CodianSend) {
EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
EXPECT_FALSE(voe_.GetSend(channel_num));
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
- EXPECT_EQ(0, agc_config.targetLeveldBOv); // level was restored
}
TEST_F(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) {
@@ -2279,14 +2184,12 @@ TEST_F(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) {
webrtc::AgcConfig agc_config;
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(0, agc_config.targetLeveldBOv);
-
- cricket::AudioOptions options;
- options.tx_agc_target_dbov.Set(3);
- options.tx_agc_digital_compression_gain.Set(9);
- options.tx_agc_limiter.Set(true);
- options.auto_gain_control.Set(true);
- EXPECT_TRUE(engine_.SetOptions(options));
-
+ send_parameters_.options.tx_agc_target_dbov = rtc::Optional<uint16_t>(3);
+ send_parameters_.options.tx_agc_digital_compression_gain =
+ rtc::Optional<uint16_t>(9);
+ send_parameters_.options.tx_agc_limiter = rtc::Optional<bool>(true);
+ send_parameters_.options.auto_gain_control = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(3, agc_config.targetLeveldBOv);
EXPECT_EQ(9, agc_config.digitalCompressionGaindB);
@@ -2294,19 +2197,18 @@ TEST_F(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) {
// Check interaction with adjust_agc_delta. Both should be respected, for
// backwards compatibility.
- options.adjust_agc_delta.Set(-10);
- EXPECT_TRUE(engine_.SetOptions(options));
-
+ send_parameters_.options.adjust_agc_delta = rtc::Optional<int>(-10);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
EXPECT_EQ(13, agc_config.targetLeveldBOv);
}
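The expected value of 13 is just the two settings combined: targetLeveldBOv measures the target level in dB below full scale, and the engine applies adjust_agc_delta by subtraction, so 3 - (-10) = 13. A minimal sketch of that assumed interaction (illustrative only, not the engine's actual code):

  // Hypothetical helper showing the expected combination of the explicit
  // AGC target and the legacy adjust_agc_delta option.
  int CombinedAgcTargetDbov(int tx_agc_target_dbov, int adjust_agc_delta) {
    // A negative delta lowers the target level; in dBOv terms that is a
    // larger number (further below full scale): 3 - (-10) == 13.
    return tx_agc_target_dbov - adjust_agc_delta;
  }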
TEST_F(WebRtcVoiceEngineTestFake, SampleRatesViaOptions) {
EXPECT_TRUE(SetupEngineWithSendStream());
- cricket::AudioOptions options;
- options.recording_sample_rate.Set(48000u);
- options.playout_sample_rate.Set(44100u);
- EXPECT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.recording_sample_rate =
+ rtc::Optional<uint32_t>(48000);
+ send_parameters_.options.playout_sample_rate = rtc::Optional<uint32_t>(44100);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
unsigned int recording_sample_rate, playout_sample_rate;
EXPECT_EQ(0, voe_.RecordingSampleRate(&recording_sample_rate));
@@ -2315,30 +2217,11 @@ TEST_F(WebRtcVoiceEngineTestFake, SampleRatesViaOptions) {
EXPECT_EQ(44100u, playout_sample_rate);
}
-TEST_F(WebRtcVoiceEngineTestFake, TraceFilterViaTraceOptions) {
- EXPECT_TRUE(SetupEngineWithSendStream());
- engine_.SetLogging(rtc::LS_INFO, "");
- EXPECT_EQ(
- // Info:
- webrtc::kTraceStateInfo | webrtc::kTraceInfo |
- // Warning:
- webrtc::kTraceTerseInfo | webrtc::kTraceWarning |
- // Error:
- webrtc::kTraceError | webrtc::kTraceCritical,
- static_cast<int>(trace_wrapper_->filter_));
- // Now set it explicitly
- std::string filter =
- "tracefilter " + rtc::ToString(webrtc::kTraceDefault);
- engine_.SetLogging(rtc::LS_VERBOSE, filter.c_str());
- EXPECT_EQ(static_cast<unsigned int>(webrtc::kTraceDefault),
- trace_wrapper_->filter_);
-}
-
// Test that we can set the outgoing SSRC properly.
// SSRC is set in SetupEngine by calling AddSendStream.
TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrc) {
EXPECT_TRUE(SetupEngineWithSendStream());
- EXPECT_EQ(kSsrc1, voe_.GetLocalSSRC(voe_.GetLastChannel()));
+ EXPECT_TRUE(call_.GetAudioSendStream(kSsrc1));
}
TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
@@ -2359,12 +2242,20 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
// We have added one send stream. We should see the stats we've set.
EXPECT_EQ(1u, info.senders.size());
- VerifyVoiceSenderInfo(info.senders[0]);
+ VerifyVoiceSenderInfo(info.senders[0], false);
// We have added one receive stream. We should see empty stats.
EXPECT_EQ(info.receivers.size(), 1u);
EXPECT_EQ(info.receivers[0].ssrc(), 0);
}
+ // Start sending - this affects some reported stats.
+ {
+ cricket::VoiceMediaInfo info;
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+    EXPECT_TRUE(channel_->GetStats(&info));
+ VerifyVoiceSenderInfo(info.senders[0], true);
+ }
+
// Remove the kSsrc2 stream. No receiver stats.
{
cricket::VoiceMediaInfo info;
@@ -2391,9 +2282,10 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
// SSRC is set in SetupEngine by calling AddSendStream.
TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
EXPECT_TRUE(SetupEngineWithSendStream());
- EXPECT_EQ(kSsrc1, voe_.GetLocalSSRC(voe_.GetLastChannel()));
- EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
- EXPECT_EQ(kSsrc1, voe_.GetLocalSSRC(voe_.GetLastChannel()));
+ EXPECT_TRUE(call_.GetAudioSendStream(kSsrc1));
+ EXPECT_TRUE(channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(kSsrc2)));
+ EXPECT_EQ(kSsrc1, GetRecvStreamConfig(kSsrc2).rtp.local_ssrc);
}
// Test that the local SSRC is the same on sending and receiving channels if the
@@ -2406,25 +2298,23 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
int receive_channel_num = voe_.GetLastChannel();
EXPECT_TRUE(channel_->AddSendStream(
cricket::StreamParams::CreateLegacy(1234)));
- int send_channel_num = voe_.GetLastChannel();
- EXPECT_EQ(1234U, voe_.GetLocalSSRC(send_channel_num));
+ EXPECT_TRUE(call_.GetAudioSendStream(1234));
EXPECT_EQ(1234U, voe_.GetLocalSSRC(receive_channel_num));
}
// Test that we can properly receive packets.
TEST_F(WebRtcVoiceEngineTestFake, Recv) {
EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
int channel_num = voe_.GetLastChannel();
- EXPECT_TRUE(voe_.CheckPacket(channel_num, kPcmuFrame,
- sizeof(kPcmuFrame)));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, kPcmuFrame, sizeof(kPcmuFrame)));
}
// Test that we can properly receive packets on multiple streams.
TEST_F(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
- EXPECT_TRUE(SetupEngineWithSendStream());
- EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
+ EXPECT_TRUE(SetupEngine());
EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
int channel_num1 = voe_.GetLastChannel();
EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
@@ -2433,37 +2323,97 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
int channel_num3 = voe_.GetLastChannel();
// Create packets with the right SSRCs.
char packets[4][sizeof(kPcmuFrame)];
- for (size_t i = 0; i < ARRAY_SIZE(packets); ++i) {
+ for (size_t i = 0; i < arraysize(packets); ++i) {
memcpy(packets[i], kPcmuFrame, sizeof(kPcmuFrame));
rtc::SetBE32(packets[i] + 8, static_cast<uint32_t>(i));
}
EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+
DeliverPacket(packets[0], sizeof(packets[0]));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+
DeliverPacket(packets[1], sizeof(packets[1]));
- EXPECT_TRUE(voe_.CheckPacket(channel_num1, packets[1],
- sizeof(packets[1])));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num1, packets[1], sizeof(packets[1])));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+
DeliverPacket(packets[2], sizeof(packets[2]));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
- EXPECT_TRUE(voe_.CheckPacket(channel_num2, packets[2],
- sizeof(packets[2])));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num2, packets[2], sizeof(packets[2])));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+
DeliverPacket(packets[3], sizeof(packets[3]));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
- EXPECT_TRUE(voe_.CheckPacket(channel_num3, packets[3],
- sizeof(packets[3])));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num3, packets[3], sizeof(packets[3])));
+
EXPECT_TRUE(channel_->RemoveRecvStream(3));
EXPECT_TRUE(channel_->RemoveRecvStream(2));
EXPECT_TRUE(channel_->RemoveRecvStream(1));
}
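The loop above rewrites byte offset 8 because that is where the SSRC field sits in a fixed RTP header; rtc::SetBE32(packet + 8, ssrc) is therefore enough to retarget a canned packet at a different stream. A stand-alone sketch of the layout being assumed:

  // Fixed RTP header (RFC 3550), 12 bytes before any CSRC entries:
  //   byte 0     V|P|X|CC          byte 1     M|PT
  //   bytes 2-3  sequence number   bytes 4-7  timestamp
  //   bytes 8-11 SSRC, big-endian
  uint32_t ReadRtpSsrc(const uint8_t* packet) {
    return (static_cast<uint32_t>(packet[8]) << 24) |
           (static_cast<uint32_t>(packet[9]) << 16) |
           (static_cast<uint32_t>(packet[10]) << 8) |
           static_cast<uint32_t>(packet[11]);
  }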
+// Test that receiving on an unsignalled stream works (default channel will be
+// created).
+TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignalled) {
+ EXPECT_TRUE(SetupEngine());
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, kPcmuFrame, sizeof(kPcmuFrame)));
+}
+
+// Test that receiving on an unsignalled stream works (default channel will be
+// created), and that packets will be forwarded to the default channel
+// regardless of their SSRCs.
+TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignalledWithSsrcSwitch) {
+ EXPECT_TRUE(SetupEngine());
+ char packet[sizeof(kPcmuFrame)];
+ memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
+
+ // Note that the first unknown SSRC cannot be 0, because we only support
+ // creating receive streams for SSRC!=0.
+ DeliverPacket(packet, sizeof(packet));
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, packet, sizeof(packet)));
+ // Once we have the default channel, SSRC==0 will be ok.
+ for (uint32_t ssrc = 0; ssrc < 10; ++ssrc) {
+ rtc::SetBE32(&packet[8], ssrc);
+ DeliverPacket(packet, sizeof(packet));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, packet, sizeof(packet)));
+ }
+}
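A rough model of the behavior the unsignalled-stream tests pin down, as a self-contained sketch; the class and names here are illustrative, not the actual WebRtcVoiceMediaChannel code:

  #include <cstdint>
  #include <map>

  class DefaultChannelDemux {
   public:
    void AddSignalledStream(uint32_t ssrc) { channels_[ssrc] = next_id_++; }
    // Returns the channel id that handles |ssrc|. The first unknown,
    // nonzero SSRC lazily creates a default channel, which then absorbs
    // every later unknown SSRC, including 0.
    int Demux(uint32_t ssrc) {
      auto it = channels_.find(ssrc);
      if (it != channels_.end())
        return it->second;
      if (default_channel_ == -1 && ssrc != 0)
        default_channel_ = next_id_++;
      return default_channel_;  // -1: no default channel yet, drop packet.
    }

   private:
    std::map<uint32_t, int> channels_;
    int default_channel_ = -1;
    int next_id_ = 0;
  };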
+
+// Test that a default channel is created even after a signalled stream has been
+// added, and that the default channel will get any packets for unknown SSRCs.
+TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignalledAfterSignalled) {
+ EXPECT_TRUE(SetupEngine());
+ char packet[sizeof(kPcmuFrame)];
+ memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
+
+ // Add a known stream, send packet and verify we got it.
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ int signalled_channel_num = voe_.GetLastChannel();
+ DeliverPacket(packet, sizeof(packet));
+ EXPECT_TRUE(voe_.CheckPacket(signalled_channel_num, packet, sizeof(packet)));
+
+ // Note that the first unknown SSRC cannot be 0, because we only support
+ // creating receive streams for SSRC!=0.
+ rtc::SetBE32(&packet[8], 7011);
+ DeliverPacket(packet, sizeof(packet));
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_NE(channel_num, signalled_channel_num);
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, packet, sizeof(packet)));
+ // Once we have the default channel, SSRC==0 will be ok.
+ for (uint32_t ssrc = 0; ssrc < 20; ssrc += 2) {
+ rtc::SetBE32(&packet[8], ssrc);
+ DeliverPacket(packet, sizeof(packet));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, packet, sizeof(packet)));
+ }
+}
+
// Test that we properly handle failures to add a receive stream.
TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamFail) {
EXPECT_TRUE(SetupEngine());
@@ -2498,7 +2448,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
cricket::StreamParams::CreateLegacy(kSsrc1)));
int channel_num2 = voe_.GetLastChannel();
webrtc::CodecInst gcodec;
- rtc::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "opus");
+ rtc::strcpyn(gcodec.plname, arraysize(gcodec.plname), "opus");
gcodec.plfreq = 48000;
gcodec.channels = 2;
EXPECT_EQ(-1, voe_.GetRecPayloadType(channel_num2, gcodec));
@@ -2602,10 +2552,12 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_TRUE(typing_detection_enabled);
EXPECT_EQ(ec_mode, webrtc::kEcConference);
EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
+ EXPECT_EQ(50, voe_.GetNetEqCapacity());
+ EXPECT_FALSE(voe_.GetNetEqFastAccelerate());
- // Nothing set, so all ignored.
- cricket::AudioOptions options;
- ASSERT_TRUE(engine_.SetOptions(options));
+  // Nothing set in AudioOptions, so everything should be left at its default.
+ send_parameters_.options = cricket::AudioOptions();
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAecmMode(aecm_mode, cng_enabled);
voe_.GetAgcStatus(agc_enabled, agc_mode);
@@ -2625,20 +2577,19 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_TRUE(typing_detection_enabled);
EXPECT_EQ(ec_mode, webrtc::kEcConference);
EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
- EXPECT_EQ(50, voe_.GetNetEqCapacity()); // From GetDefaultEngineOptions().
- EXPECT_FALSE(
- voe_.GetNetEqFastAccelerate()); // From GetDefaultEngineOptions().
+ EXPECT_EQ(50, voe_.GetNetEqCapacity());
+ EXPECT_FALSE(voe_.GetNetEqFastAccelerate());
// Turn echo cancellation off
- options.echo_cancellation.Set(false);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.echo_cancellation = rtc::Optional<bool>(false);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
EXPECT_FALSE(ec_enabled);
// Turn echo cancellation back on, with settings, and make sure
// nothing else changed.
- options.echo_cancellation.Set(true);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.echo_cancellation = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAecmMode(aecm_mode, cng_enabled);
voe_.GetAgcStatus(agc_enabled, agc_mode);
@@ -2660,8 +2611,8 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
// Turn on delay agnostic aec and make sure nothing change w.r.t. echo
// control.
- options.delay_agnostic_aec.Set(true);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.delay_agnostic_aec = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAecmMode(aecm_mode, cng_enabled);
EXPECT_TRUE(ec_enabled);
@@ -2669,41 +2620,41 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_EQ(ec_mode, webrtc::kEcConference);
// Turn off echo cancellation and delay agnostic aec.
- options.delay_agnostic_aec.Set(false);
- options.extended_filter_aec.Set(false);
- options.echo_cancellation.Set(false);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.delay_agnostic_aec = rtc::Optional<bool>(false);
+ send_parameters_.options.extended_filter_aec = rtc::Optional<bool>(false);
+ send_parameters_.options.echo_cancellation = rtc::Optional<bool>(false);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
EXPECT_FALSE(ec_enabled);
// Turning delay agnostic aec back on should also turn on echo cancellation.
- options.delay_agnostic_aec.Set(true);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.delay_agnostic_aec = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
EXPECT_TRUE(ec_enabled);
EXPECT_TRUE(voe_.ec_metrics_enabled());
EXPECT_EQ(ec_mode, webrtc::kEcConference);
// Turn off AGC
- options.auto_gain_control.Set(false);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.auto_gain_control = rtc::Optional<bool>(false);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetAgcStatus(agc_enabled, agc_mode);
EXPECT_FALSE(agc_enabled);
// Turn AGC back on
- options.auto_gain_control.Set(true);
- options.adjust_agc_delta.Clear();
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.auto_gain_control = rtc::Optional<bool>(true);
+ send_parameters_.options.adjust_agc_delta = rtc::Optional<int>();
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetAgcStatus(agc_enabled, agc_mode);
EXPECT_TRUE(agc_enabled);
voe_.GetAgcConfig(agc_config);
EXPECT_EQ(0, agc_config.targetLeveldBOv);
// Turn off other options (and stereo swapping on).
- options.noise_suppression.Set(false);
- options.highpass_filter.Set(false);
- options.typing_detection.Set(false);
- options.stereo_swapping.Set(true);
- ASSERT_TRUE(engine_.SetOptions(options));
+ send_parameters_.options.noise_suppression = rtc::Optional<bool>(false);
+ send_parameters_.options.highpass_filter = rtc::Optional<bool>(false);
+ send_parameters_.options.typing_detection = rtc::Optional<bool>(false);
+ send_parameters_.options.stereo_swapping = rtc::Optional<bool>(true);
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetNsStatus(ns_enabled, ns_mode);
highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
@@ -2714,7 +2665,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_TRUE(stereo_swapping_enabled);
// Set options again to ensure it has no impact.
- ASSERT_TRUE(engine_.SetOptions(options));
+ EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetNsStatus(ns_enabled, ns_mode);
EXPECT_TRUE(ec_enabled);
@@ -2785,9 +2736,9 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
// AEC and AGC and NS
cricket::AudioSendParameters parameters_options_all = send_parameters_;
- parameters_options_all.options.echo_cancellation.Set(true);
- parameters_options_all.options.auto_gain_control.Set(true);
- parameters_options_all.options.noise_suppression.Set(true);
+ parameters_options_all.options.echo_cancellation = rtc::Optional<bool>(true);
+ parameters_options_all.options.auto_gain_control = rtc::Optional<bool>(true);
+ parameters_options_all.options.noise_suppression = rtc::Optional<bool>(true);
ASSERT_TRUE(channel1->SetSendParameters(parameters_options_all));
EXPECT_EQ(parameters_options_all.options, channel1->options());
ASSERT_TRUE(channel2->SetSendParameters(parameters_options_all));
@@ -2795,24 +2746,26 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
// unset NS
cricket::AudioSendParameters parameters_options_no_ns = send_parameters_;
- parameters_options_no_ns.options.noise_suppression.Set(false);
+ parameters_options_no_ns.options.noise_suppression =
+ rtc::Optional<bool>(false);
ASSERT_TRUE(channel1->SetSendParameters(parameters_options_no_ns));
cricket::AudioOptions expected_options = parameters_options_all.options;
- expected_options.echo_cancellation.Set(true);
- expected_options.auto_gain_control.Set(true);
- expected_options.noise_suppression.Set(false);
+ expected_options.echo_cancellation = rtc::Optional<bool>(true);
+ expected_options.auto_gain_control = rtc::Optional<bool>(true);
+ expected_options.noise_suppression = rtc::Optional<bool>(false);
EXPECT_EQ(expected_options, channel1->options());
// unset AGC
cricket::AudioSendParameters parameters_options_no_agc = send_parameters_;
- parameters_options_no_agc.options.auto_gain_control.Set(false);
+ parameters_options_no_agc.options.auto_gain_control =
+ rtc::Optional<bool>(false);
ASSERT_TRUE(channel2->SetSendParameters(parameters_options_no_agc));
- expected_options.echo_cancellation.Set(true);
- expected_options.auto_gain_control.Set(false);
- expected_options.noise_suppression.Set(true);
+ expected_options.echo_cancellation = rtc::Optional<bool>(true);
+ expected_options.auto_gain_control = rtc::Optional<bool>(false);
+ expected_options.noise_suppression = rtc::Optional<bool>(true);
EXPECT_EQ(expected_options, channel2->options());
- ASSERT_TRUE(engine_.SetOptions(parameters_options_all.options));
+ ASSERT_TRUE(channel_->SetSendParameters(parameters_options_all));
bool ec_enabled;
webrtc::EcModes ec_mode;
bool agc_enabled;
@@ -2834,14 +2787,6 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
EXPECT_TRUE(agc_enabled);
EXPECT_FALSE(ns_enabled);
- channel1->SetSend(cricket::SEND_NOTHING);
- voe_.GetEcStatus(ec_enabled, ec_mode);
- voe_.GetAgcStatus(agc_enabled, agc_mode);
- voe_.GetNsStatus(ns_enabled, ns_mode);
- EXPECT_TRUE(ec_enabled);
- EXPECT_TRUE(agc_enabled);
- EXPECT_TRUE(ns_enabled);
-
channel2->SetSend(cricket::SEND_MICROPHONE);
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAgcStatus(agc_enabled, agc_mode);
@@ -2850,25 +2795,19 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
EXPECT_FALSE(agc_enabled);
EXPECT_TRUE(ns_enabled);
- channel2->SetSend(cricket::SEND_NOTHING);
- voe_.GetEcStatus(ec_enabled, ec_mode);
- voe_.GetAgcStatus(agc_enabled, agc_mode);
- voe_.GetNsStatus(ns_enabled, ns_mode);
- EXPECT_TRUE(ec_enabled);
- EXPECT_TRUE(agc_enabled);
- EXPECT_TRUE(ns_enabled);
-
// Make sure settings take effect while we are sending.
- ASSERT_TRUE(engine_.SetOptions(parameters_options_all.options));
+ ASSERT_TRUE(channel_->SetSendParameters(parameters_options_all));
cricket::AudioSendParameters parameters_options_no_agc_nor_ns =
send_parameters_;
- parameters_options_no_agc_nor_ns.options.auto_gain_control.Set(false);
- parameters_options_no_agc_nor_ns.options.noise_suppression.Set(false);
+ parameters_options_no_agc_nor_ns.options.auto_gain_control =
+ rtc::Optional<bool>(false);
+ parameters_options_no_agc_nor_ns.options.noise_suppression =
+ rtc::Optional<bool>(false);
channel2->SetSend(cricket::SEND_MICROPHONE);
channel2->SetSendParameters(parameters_options_no_agc_nor_ns);
- expected_options.echo_cancellation.Set(true);
- expected_options.auto_gain_control.Set(false);
- expected_options.noise_suppression.Set(false);
+ expected_options.echo_cancellation = rtc::Optional<bool>(true);
+ expected_options.auto_gain_control = rtc::Optional<bool>(false);
+ expected_options.noise_suppression = rtc::Optional<bool>(false);
EXPECT_EQ(expected_options, channel2->options());
voe_.GetEcStatus(ec_enabled, ec_mode);
voe_.GetAgcStatus(agc_enabled, agc_mode);
@@ -2887,13 +2826,13 @@ TEST_F(WebRtcVoiceEngineTestFake, TestSetDscpOptions) {
new cricket::FakeNetworkInterface);
channel->SetInterface(network_interface.get());
cricket::AudioSendParameters parameters = send_parameters_;
- parameters.options.dscp.Set(true);
+ parameters.options.dscp = rtc::Optional<bool>(true);
EXPECT_TRUE(channel->SetSendParameters(parameters));
EXPECT_EQ(rtc::DSCP_EF, network_interface->dscp());
// Verify previous value is not modified if dscp option is not set.
EXPECT_TRUE(channel->SetSendParameters(send_parameters_));
EXPECT_EQ(rtc::DSCP_EF, network_interface->dscp());
- parameters.options.dscp.Set(false);
+ parameters.options.dscp = rtc::Optional<bool>(false);
EXPECT_TRUE(channel->SetSendParameters(parameters));
EXPECT_EQ(rtc::DSCP_DEFAULT, network_interface->dscp());
}
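For context: rtc::DSCP_EF is the Expedited Forwarding codepoint (46), conventionally used for latency-sensitive audio, and rtc::DSCP_DEFAULT is best-effort (0). As the test shows, the dscp option is a plain on/off switch between those two, and leaving the option unset keeps the previously applied value.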
@@ -3002,7 +2941,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CanChangeCombinedBweOption) {
}
// Enable combined BWE option - now it should be set up.
- send_parameters_.options.combined_audio_video_bwe.Set(true);
+ send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
@@ -3011,7 +2950,8 @@ TEST_F(WebRtcVoiceEngineTestFake, CanChangeCombinedBweOption) {
}
// Disable combined BWE option - should be disabled again.
- send_parameters_.options.combined_audio_video_bwe.Set(false);
+ send_parameters_.options.combined_audio_video_bwe =
+ rtc::Optional<bool>(false);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
@@ -3028,18 +2968,19 @@ TEST_F(WebRtcVoiceEngineTestFake, ConfigureCombinedBweForNewRecvStreams) {
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
- send_parameters_.options.combined_audio_video_bwe.Set(true);
+ send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
- static const uint32_t kSsrcs[] = {1, 2, 3, 4};
- for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs); ++i) {
+ for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(media_channel->AddRecvStream(
- cricket::StreamParams::CreateLegacy(kSsrcs[i])));
- EXPECT_NE(nullptr, call_.GetAudioReceiveStream(kSsrcs[i]));
+ cricket::StreamParams::CreateLegacy(ssrc)));
+ EXPECT_NE(nullptr, call_.GetAudioReceiveStream(ssrc));
}
- EXPECT_EQ(ARRAY_SIZE(kSsrcs), call_.GetAudioReceiveStreams().size());
+ EXPECT_EQ(arraysize(kSsrcs4), call_.GetAudioReceiveStreams().size());
}
+// TODO(solenberg): Remove once recv streams are configured through Call.
+// (This is then covered by TestSetRecvRtpHeaderExtensions.)
TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
// Test that setting the header extensions results in the expected state
// changes on an associated Call.
@@ -3050,7 +2991,7 @@ TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
- send_parameters_.options.combined_audio_video_bwe.Set(true);
+ send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
EXPECT_TRUE(media_channel->AddRecvStream(
@@ -3066,17 +3007,17 @@ TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
}
// Set up receive extensions.
- const auto& e_exts = engine_.rtp_header_extensions();
+ cricket::RtpCapabilities capabilities = engine_.GetCapabilities();
cricket::AudioRecvParameters recv_parameters;
- recv_parameters.extensions = e_exts;
+ recv_parameters.extensions = capabilities.header_extensions;
channel_->SetRecvParameters(recv_parameters);
EXPECT_EQ(2, call_.GetAudioReceiveStreams().size());
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
EXPECT_NE(nullptr, s);
const auto& s_exts = s->GetConfig().rtp.extensions;
- EXPECT_EQ(e_exts.size(), s_exts.size());
- for (const auto& e_ext : e_exts) {
+ EXPECT_EQ(capabilities.header_extensions.size(), s_exts.size());
+ for (const auto& e_ext : capabilities.header_extensions) {
for (const auto& s_ext : s_exts) {
if (e_ext.id == s_ext.id) {
EXPECT_EQ(e_ext.uri, s_ext.name);
@@ -3109,7 +3050,7 @@ TEST_F(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
- send_parameters_.options.combined_audio_video_bwe.Set(true);
+ send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_TRUE(media_channel->AddRecvStream(
cricket::StreamParams::CreateLegacy(kAudioSsrc)));
@@ -3164,18 +3105,6 @@ TEST_F(WebRtcVoiceEngineTestFake, AssociateChannelResetUponDeleteChannnel) {
EXPECT_EQ(voe_.GetAssociateSendChannel(recv_ch), -1);
}
-// Tests for the actual WebRtc VoE library.
-
-TEST(WebRtcVoiceEngineTest, TestDefaultOptionsBeforeInit) {
- cricket::WebRtcVoiceEngine engine;
- cricket::AudioOptions options = engine.GetOptions();
- // The default options should have at least a few things set. We purposefully
- // don't check the option values here, though.
- EXPECT_TRUE(options.echo_cancellation.IsSet());
- EXPECT_TRUE(options.auto_gain_control.IsSet());
- EXPECT_TRUE(options.noise_suppression.IsSet());
-}
-
// Tests that the library initializes and shuts down properly.
TEST(WebRtcVoiceEngineTest, StartupShutdown) {
cricket::WebRtcVoiceEngine engine;
@@ -3195,54 +3124,60 @@ TEST(WebRtcVoiceEngineTest, StartupShutdown) {
// Tests that the library is configured with the codecs we want.
TEST(WebRtcVoiceEngineTest, HasCorrectCodecs) {
- cricket::WebRtcVoiceEngine engine;
// Check codecs by name.
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "OPUS", 48000, 0, 2, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "ISAC", 16000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "ISAC", 32000, 0, 1, 0)));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "OPUS", 48000, 0, 2, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "ISAC", 16000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "ISAC", 32000, 0, 1, 0), nullptr));
// Check that name matching is case-insensitive.
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "ILBC", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "iLBC", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "PCMU", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "PCMA", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "G722", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "red", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "CN", 32000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "CN", 16000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "CN", 8000, 0, 1, 0)));
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(96, "telephone-event", 8000, 0, 1, 0)));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "ILBC", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "iLBC", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "PCMU", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "PCMA", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "G722", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "red", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "CN", 32000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "CN", 16000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "CN", 8000, 0, 1, 0), nullptr));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(96, "telephone-event", 8000, 0, 1, 0), nullptr));
// Check codecs with an id by id.
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(0, "", 8000, 0, 1, 0))); // PCMU
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(8, "", 8000, 0, 1, 0))); // PCMA
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(9, "", 8000, 0, 1, 0))); // G722
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(13, "", 8000, 0, 1, 0))); // CN
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(0, "", 8000, 0, 1, 0), nullptr)); // PCMU
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(8, "", 8000, 0, 1, 0), nullptr)); // PCMA
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(9, "", 8000, 0, 1, 0), nullptr)); // G722
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(13, "", 8000, 0, 1, 0), nullptr)); // CN
// Check sample/bitrate matching.
- EXPECT_TRUE(engine.FindCodec(
- cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0)));
+ EXPECT_TRUE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0), nullptr));
// Check that bad codecs fail.
- EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(99, "ABCD", 0, 0, 1, 0)));
- EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(88, "", 0, 0, 1, 0)));
- EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 0, 2, 0)));
- EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 5000, 0, 1, 0)));
- EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 5000, 1, 0)));
+ EXPECT_FALSE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(99, "ABCD", 0, 0, 1, 0), nullptr));
+ EXPECT_FALSE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(88, "", 0, 0, 1, 0), nullptr));
+ EXPECT_FALSE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(0, "", 0, 0, 2, 0), nullptr));
+ EXPECT_FALSE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(0, "", 5000, 0, 1, 0), nullptr));
+ EXPECT_FALSE(cricket::WebRtcVoiceEngine::ToCodecInst(
+ cricket::AudioCodec(0, "", 0, 5000, 1, 0), nullptr));
+
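Passing nullptr above only asks whether a match exists; a non-null out-parameter is presumably filled with the matched entry. A hedged usage sketch under that assumption:

  webrtc::CodecInst inst;
  if (cricket::WebRtcVoiceEngine::ToCodecInst(
          cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0), &inst)) {
    // inst now describes the matched codec (plname, plfreq, channels, ...).
  }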
// Verify the payload id of common audio codecs, including CN, ISAC, and G722.
+ cricket::WebRtcVoiceEngine engine;
for (std::vector<cricket::AudioCodec>::const_iterator it =
engine.codecs().begin(); it != engine.codecs().end(); ++it) {
if (it->name == "CN" && it->clockrate == 16000) {
@@ -3269,7 +3204,6 @@ TEST(WebRtcVoiceEngineTest, HasCorrectCodecs) {
EXPECT_EQ("1", it->params.find("useinbandfec")->second);
}
}
-
engine.Terminate();
}
@@ -3282,7 +3216,7 @@ TEST(WebRtcVoiceEngineTest, Has32Channels) {
cricket::VoiceMediaChannel* channels[32];
int num_channels = 0;
- while (num_channels < ARRAY_SIZE(channels)) {
+  while (num_channels < static_cast<int>(arraysize(channels))) {
cricket::VoiceMediaChannel* channel =
engine.CreateChannel(call.get(), cricket::AudioOptions());
if (!channel)
@@ -3290,7 +3224,7 @@ TEST(WebRtcVoiceEngineTest, Has32Channels) {
channels[num_channels++] = channel;
}
- int expected = ARRAY_SIZE(channels);
+  int expected = static_cast<int>(arraysize(channels));
EXPECT_EQ(expected, num_channels);
while (num_channels > 0) {
diff --git a/talk/session/media/bundlefilter.cc b/talk/session/media/bundlefilter.cc
index b47d47fb27..670befeb7d 100755
--- a/talk/session/media/bundlefilter.cc
+++ b/talk/session/media/bundlefilter.cc
@@ -32,78 +32,29 @@
namespace cricket {
-static const uint32_t kSsrc01 = 0x01;
-
BundleFilter::BundleFilter() {
}
BundleFilter::~BundleFilter() {
}
-bool BundleFilter::DemuxPacket(const char* data, size_t len, bool rtcp) {
- // For rtp packets, we check whether the payload type can be found.
- // For rtcp packets, we check whether the ssrc can be found or is the special
- // value 1 except for SDES packets which always pass through. Plus, if
- // |streams_| is empty, we will allow all rtcp packets pass through provided
- // that they are valid rtcp packets in case that they are for early media.
- if (!rtcp) {
- // It may not be a RTP packet (e.g. SCTP).
- if (!IsRtpPacket(data, len))
- return false;
-
- int payload_type = 0;
- if (!GetRtpPayloadType(data, len, &payload_type)) {
- return false;
- }
- return FindPayloadType(payload_type);
+bool BundleFilter::DemuxPacket(const uint8_t* data, size_t len) {
+ // For RTP packets, we check whether the payload type can be found.
+ if (!IsRtpPacket(data, len)) {
+ return false;
}
- // Rtcp packets using ssrc filter.
- int pl_type = 0;
- uint32_t ssrc = 0;
- if (!GetRtcpType(data, len, &pl_type)) return false;
- if (pl_type == kRtcpTypeSDES) {
- // SDES packet parsing not supported.
- LOG(LS_INFO) << "SDES packet received for demux.";
- return true;
- } else {
- if (!GetRtcpSsrc(data, len, &ssrc)) return false;
- if (ssrc == kSsrc01) {
- // SSRC 1 has a special meaning and indicates generic feedback on
- // some systems and should never be dropped. If it is forwarded
- // incorrectly it will be ignored by lower layers anyway.
- return true;
- }
+ int payload_type = 0;
+ if (!GetRtpPayloadType(data, len, &payload_type)) {
+ return false;
}
- // Pass through if |streams_| is empty to allow early rtcp packets in.
- return !HasStreams() || FindStream(ssrc);
+ return FindPayloadType(payload_type);
}
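For reference, the payload type consulted here occupies the low seven bits of the second RTP header byte. A minimal stand-alone version of that extraction (a sketch; the real IsRtpPacket/GetRtpPayloadType helpers may validate more than this):

  // Extracts the payload type from an RTP packet, per RFC 3550.
  bool ExtractRtpPayloadType(const uint8_t* data, size_t len, int* pt) {
    static const size_t kMinRtpHeaderLen = 12;
    if (len < kMinRtpHeaderLen || (data[0] >> 6) != 2)  // RTP version 2.
      return false;
    *pt = data[1] & 0x7F;  // Mask off the marker bit.
    return true;
  }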
void BundleFilter::AddPayloadType(int payload_type) {
payload_types_.insert(payload_type);
}
-bool BundleFilter::AddStream(const StreamParams& stream) {
- if (GetStreamBySsrc(streams_, stream.first_ssrc())) {
- LOG(LS_WARNING) << "Stream already added to filter";
- return false;
- }
- streams_.push_back(stream);
- return true;
-}
-
-bool BundleFilter::RemoveStream(uint32_t ssrc) {
- return RemoveStreamBySsrc(&streams_, ssrc);
-}
-
-bool BundleFilter::HasStreams() const {
- return !streams_.empty();
-}
-
-bool BundleFilter::FindStream(uint32_t ssrc) const {
- return ssrc == 0 ? false : GetStreamBySsrc(streams_, ssrc) != nullptr;
-}
-
bool BundleFilter::FindPayloadType(int pl_type) const {
return payload_types_.find(pl_type) != payload_types_.end();
}
diff --git a/talk/session/media/bundlefilter.h b/talk/session/media/bundlefilter.h
index 3717376668..d9d952f4ee 100755
--- a/talk/session/media/bundlefilter.h
+++ b/talk/session/media/bundlefilter.h
@@ -28,6 +28,8 @@
#ifndef TALK_SESSION_MEDIA_BUNDLEFILTER_H_
#define TALK_SESSION_MEDIA_BUNDLEFILTER_H_
+#include <stdint.h>
+
#include <set>
#include <vector>
@@ -37,42 +39,31 @@
namespace cricket {
// In case of single RTP session and single transport channel, all session
-// ( or media) channels share a common transport channel. Hence they all get
+// (or media) channels share a common transport channel. Hence they all get
// SignalReadPacket when packet received on transport channel. This requires
// cricket::BaseChannel to know all the valid sources, else media channel
// will decode invalid packets.
//
// This class determines whether a packet is destined for cricket::BaseChannel.
-// For rtp packets, this is decided based on the payload type. For rtcp packets,
-// this is decided based on the sender ssrc values.
+// This is only to be used for RTP packets, as RTCP packets are not filtered.
+// For RTP packets, this is decided based on the payload type.
class BundleFilter {
public:
BundleFilter();
~BundleFilter();
- // Determines packet belongs to valid cricket::BaseChannel.
- bool DemuxPacket(const char* data, size_t len, bool rtcp);
+  // Determines if an RTP packet belongs to a valid cricket::BaseChannel.
+ bool DemuxPacket(const uint8_t* data, size_t len);
// Adds the supported payload type.
void AddPayloadType(int payload_type);
- // Adding a valid source to the filter.
- bool AddStream(const StreamParams& stream);
-
- // Removes source from the filter.
- bool RemoveStream(uint32_t ssrc);
-
- // Utility methods added for unitest.
- // True if |streams_| is not empty.
- bool HasStreams() const;
- bool FindStream(uint32_t ssrc) const;
+ // Public for unittests.
bool FindPayloadType(int pl_type) const;
void ClearAllPayloadTypes();
-
private:
std::set<int> payload_types_;
- std::vector<StreamParams> streams_;
};
} // namespace cricket
diff --git a/talk/session/media/bundlefilter_unittest.cc b/talk/session/media/bundlefilter_unittest.cc
index 806d6bab09..f2c35fc1d8 100755
--- a/talk/session/media/bundlefilter_unittest.cc
+++ b/talk/session/media/bundlefilter_unittest.cc
@@ -30,9 +30,6 @@
using cricket::StreamParams;
-static const int kSsrc1 = 0x1111;
-static const int kSsrc2 = 0x2222;
-static const int kSsrc3 = 0x3333;
static const int kPayloadType1 = 0x11;
static const int kPayloadType2 = 0x22;
static const int kPayloadType3 = 0x33;
@@ -55,56 +52,6 @@ static const unsigned char kRtpPacketPt3Ssrc2[] = {
0x22,
};
-// PT = 200 = SR, len = 28, SSRC of sender = 0x0001
-// NTP TS = 0, RTP TS = 0, packet count = 0
-static const unsigned char kRtcpPacketSrSsrc01[] = {
- 0x80, 0xC8, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-};
-
-// PT = 200 = SR, len = 28, SSRC of sender = 0x2222
-// NTP TS = 0, RTP TS = 0, packet count = 0
-static const unsigned char kRtcpPacketSrSsrc2[] = {
- 0x80, 0xC8, 0x00, 0x1B, 0x00, 0x00, 0x22, 0x22,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-};
-
-// First packet - SR = PT = 200, len = 0, SSRC of sender = 0x1111
-// NTP TS = 0, RTP TS = 0, packet count = 0
-// second packet - SDES = PT = 202, count = 0, SSRC = 0x1111, cname len = 0
-static const unsigned char kRtcpPacketCompoundSrSdesSsrc1[] = {
- 0x80, 0xC8, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x81, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x01, 0x00,
-};
-
-// SDES = PT = 202, count = 0, SSRC = 0x2222, cname len = 0
-static const unsigned char kRtcpPacketSdesSsrc2[] = {
- 0x81, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x22, 0x22, 0x01, 0x00,
-};
-
-// Packet has only mandatory fixed RTCP header
-static const unsigned char kRtcpPacketFixedHeaderOnly[] = {
- 0x80, 0xC8, 0x00, 0x00,
-};
-
-// Small packet for SSRC demux.
-static const unsigned char kRtcpPacketTooSmall[] = {
- 0x80, 0xC8, 0x00, 0x00, 0x00, 0x00,
-};
-
-// PT = 206, FMT = 1, Sender SSRC = 0x1111, Media SSRC = 0x1111
-// No FCI information is needed for PLI.
-static const unsigned char kRtcpPacketNonCompoundRtcpPliFeedback[] = {
- 0x81, 0xCE, 0x00, 0x0C, 0x00, 0x00, 0x11, 0x11, 0x00, 0x00, 0x11, 0x11,
-};
-
// An SCTP packet.
static const unsigned char kSctpPacket[] = {
0x00, 0x01, 0x00, 0x01,
@@ -114,100 +61,29 @@ static const unsigned char kSctpPacket[] = {
0x00, 0x00, 0x00, 0x00,
};
-TEST(BundleFilterTest, AddRemoveStreamTest) {
- cricket::BundleFilter bundle_filter;
- EXPECT_FALSE(bundle_filter.HasStreams());
- EXPECT_TRUE(bundle_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
- StreamParams stream2;
- stream2.ssrcs.push_back(kSsrc2);
- stream2.ssrcs.push_back(kSsrc3);
- EXPECT_TRUE(bundle_filter.AddStream(stream2));
-
- EXPECT_TRUE(bundle_filter.HasStreams());
- EXPECT_TRUE(bundle_filter.FindStream(kSsrc1));
- EXPECT_TRUE(bundle_filter.FindStream(kSsrc2));
- EXPECT_TRUE(bundle_filter.FindStream(kSsrc3));
- EXPECT_TRUE(bundle_filter.RemoveStream(kSsrc1));
- EXPECT_FALSE(bundle_filter.FindStream(kSsrc1));
- EXPECT_TRUE(bundle_filter.RemoveStream(kSsrc3));
- EXPECT_FALSE(bundle_filter.RemoveStream(kSsrc2)); // Already removed.
- EXPECT_FALSE(bundle_filter.HasStreams());
-}
-
TEST(BundleFilterTest, RtpPacketTest) {
cricket::BundleFilter bundle_filter;
bundle_filter.AddPayloadType(kPayloadType1);
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt1Ssrc1),
- sizeof(kRtpPacketPt1Ssrc1), false));
+ EXPECT_TRUE(bundle_filter.DemuxPacket(kRtpPacketPt1Ssrc1,
+ sizeof(kRtpPacketPt1Ssrc1)));
bundle_filter.AddPayloadType(kPayloadType2);
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt2Ssrc2),
- sizeof(kRtpPacketPt2Ssrc2), false));
+ EXPECT_TRUE(bundle_filter.DemuxPacket(kRtpPacketPt2Ssrc2,
+ sizeof(kRtpPacketPt2Ssrc2)));
// Payload type 0x33 is not added.
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt3Ssrc2),
- sizeof(kRtpPacketPt3Ssrc2), false));
+ EXPECT_FALSE(bundle_filter.DemuxPacket(kRtpPacketPt3Ssrc2,
+ sizeof(kRtpPacketPt3Ssrc2)));
// Size is too small.
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt1Ssrc1), 11, false));
+ EXPECT_FALSE(bundle_filter.DemuxPacket(kRtpPacketPt1Ssrc1, 11));
bundle_filter.ClearAllPayloadTypes();
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt1Ssrc1),
- sizeof(kRtpPacketPt1Ssrc1), false));
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtpPacketPt2Ssrc2),
- sizeof(kRtpPacketPt2Ssrc2), false));
-}
-
-TEST(BundleFilterTest, RtcpPacketTest) {
- cricket::BundleFilter bundle_filter;
- EXPECT_TRUE(bundle_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketCompoundSrSdesSsrc1),
- sizeof(kRtcpPacketCompoundSrSdesSsrc1), true));
- EXPECT_TRUE(bundle_filter.AddStream(StreamParams::CreateLegacy(kSsrc2)));
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSrSsrc2),
- sizeof(kRtcpPacketSrSsrc2), true));
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSdesSsrc2),
- sizeof(kRtcpPacketSdesSsrc2), true));
- EXPECT_TRUE(bundle_filter.RemoveStream(kSsrc2));
- // RTCP Packets other than SR and RR are demuxed regardless of SSRC.
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSdesSsrc2),
- sizeof(kRtcpPacketSdesSsrc2), true));
- // RTCP Packets with 'special' SSRC 0x01 are demuxed also
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSrSsrc01),
- sizeof(kRtcpPacketSrSsrc01), true));
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSrSsrc2),
- sizeof(kRtcpPacketSrSsrc2), true));
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketFixedHeaderOnly),
- sizeof(kRtcpPacketFixedHeaderOnly), true));
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketTooSmall),
- sizeof(kRtcpPacketTooSmall), true));
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketNonCompoundRtcpPliFeedback),
- sizeof(kRtcpPacketNonCompoundRtcpPliFeedback), true));
- // If the streams_ is empty, rtcp packet passes through
- EXPECT_TRUE(bundle_filter.RemoveStream(kSsrc1));
- EXPECT_FALSE(bundle_filter.HasStreams());
- EXPECT_TRUE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kRtcpPacketSrSsrc2),
- sizeof(kRtcpPacketSrSsrc2), true));
+ EXPECT_FALSE(bundle_filter.DemuxPacket(kRtpPacketPt1Ssrc1,
+ sizeof(kRtpPacketPt1Ssrc1)));
+ EXPECT_FALSE(bundle_filter.DemuxPacket(kRtpPacketPt2Ssrc2,
+ sizeof(kRtpPacketPt2Ssrc2)));
}
TEST(BundleFilterTest, InvalidRtpPacket) {
cricket::BundleFilter bundle_filter;
- EXPECT_TRUE(bundle_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
- EXPECT_FALSE(bundle_filter.DemuxPacket(
- reinterpret_cast<const char*>(kSctpPacket),
- sizeof(kSctpPacket), false));
+ EXPECT_FALSE(bundle_filter.DemuxPacket(kSctpPacket, sizeof(kSctpPacket)));
}
diff --git a/talk/session/media/channel.cc b/talk/session/media/channel.cc
index 91a6d8cb5a..a59c3f82b7 100644
--- a/talk/session/media/channel.cc
+++ b/talk/session/media/channel.cc
@@ -25,23 +25,36 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <utility>
+
#include "talk/session/media/channel.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/rtputils.h"
-#include "webrtc/p2p/base/transportchannel.h"
#include "talk/session/media/channelmanager.h"
+#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/byteorder.h"
#include "webrtc/base/common.h"
#include "webrtc/base/dscp.h"
#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/p2p/base/transportchannel.h"
namespace cricket {
-
using rtc::Bind;
+namespace {
+// See comment below for why we need to use a pointer to a scoped_ptr.
+bool SetRawAudioSink_w(VoiceMediaChannel* channel,
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface>* sink) {
+ channel->SetRawAudioSink(ssrc, std::move(*sink));
+ return true;
+}
+} // namespace
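The pointer-to-scoped_ptr indirection is needed because Bind copies its arguments and scoped_ptr is move-only; passing a raw pointer to the scoped_ptr lets the bound function move the sink out on the worker thread. A hedged sketch of the intended call pattern (the actual call site is not in this hunk):

  // rtc::scoped_ptr<webrtc::AudioSinkInterface> sink(...);
  // InvokeOnWorker(Bind(&SetRawAudioSink_w, media_channel(), ssrc, &sink));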
+
enum {
MSG_EARLYMEDIATIMEOUT = 1,
MSG_SCREENCASTWINDOWEVENT,
@@ -101,15 +114,6 @@ struct DataChannelErrorMessageData : public rtc::MessageData {
DataMediaChannel::Error error;
};
-
-struct VideoChannel::ScreencastDetailsData {
- explicit ScreencastDetailsData(uint32_t s)
- : ssrc(s), fps(0), screencast_max_pixels(0) {}
- uint32_t ssrc;
- int fps;
- int screencast_max_pixels;
-};
-
static const char* PacketType(bool rtcp) {
return (!rtcp) ? "RTP" : "RTCP";
}
@@ -151,6 +155,7 @@ void RtpParametersFromMediaDescription(
if (desc->rtp_header_extensions_set()) {
params->extensions = desc->rtp_header_extensions();
}
+ params->rtcp.reduced_size = desc->rtcp_reduced_size();
}
template <class Codec, class Options>
@@ -218,11 +223,11 @@ bool BaseChannel::Init() {
return false;
}
- if (!SetDtlsSrtpCiphers(transport_channel(), false)) {
+ if (!SetDtlsSrtpCryptoSuites(transport_channel(), false)) {
return false;
}
if (rtcp_transport_enabled() &&
- !SetDtlsSrtpCiphers(rtcp_transport_channel(), true)) {
+ !SetDtlsSrtpCryptoSuites(rtcp_transport_channel(), true)) {
return false;
}
@@ -249,21 +254,43 @@ bool BaseChannel::SetTransport_w(const std::string& transport_name) {
return true;
}
- set_transport_channel(transport_controller_->CreateTransportChannel_w(
- transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP));
- if (!transport_channel()) {
- return false;
+ // When using DTLS-SRTP, we must reset the SrtpFilter every time the transport
+ // changes and wait until the DTLS handshake is complete to set the newly
+ // negotiated parameters.
+ if (ShouldSetupDtlsSrtp()) {
+ // Set |writable_| to false such that UpdateWritableState_w can set up
+    // DTLS-SRTP when writable_ becomes true again.
+ writable_ = false;
+ srtp_filter_.ResetParams();
}
+
+ // TODO(guoweis): Remove this grossness when we remove non-muxed RTCP.
if (rtcp_transport_enabled()) {
LOG(LS_INFO) << "Create RTCP TransportChannel for " << content_name()
<< " on " << transport_name << " transport ";
- set_rtcp_transport_channel(transport_controller_->CreateTransportChannel_w(
- transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTCP));
+ set_rtcp_transport_channel(
+ transport_controller_->CreateTransportChannel_w(
+ transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTCP),
+        false /* update_writability */);
if (!rtcp_transport_channel()) {
return false;
}
}
+  // We're not updating writability during the transition state.
+ set_transport_channel(transport_controller_->CreateTransportChannel_w(
+ transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP));
+ if (!transport_channel()) {
+ return false;
+ }
+
+ // TODO(guoweis): Remove this grossness when we remove non-muxed RTCP.
+ if (rtcp_transport_enabled()) {
+    // We can only update the RTCP ready-to-send state after
+    // set_transport_channel has handled channel writability.
+ SetReadyToSend(
+ true, rtcp_transport_channel() && rtcp_transport_channel()->writable());
+ }
transport_name_ = transport_name;
return true;
}
@@ -299,7 +326,8 @@ void BaseChannel::set_transport_channel(TransportChannel* new_tc) {
SetReadyToSend(false, new_tc && new_tc->writable());
}
-void BaseChannel::set_rtcp_transport_channel(TransportChannel* new_tc) {
+void BaseChannel::set_rtcp_transport_channel(TransportChannel* new_tc,
+                                             bool update_writability) {
ASSERT(worker_thread_ == rtc::Thread::Current());
TransportChannel* old_tc = rtcp_transport_channel_;
@@ -318,16 +346,21 @@ void BaseChannel::set_rtcp_transport_channel(TransportChannel* new_tc) {
rtcp_transport_channel_ = new_tc;
if (new_tc) {
+ RTC_CHECK(!(ShouldSetupDtlsSrtp() && srtp_filter_.IsActive()))
+ << "Setting RTCP for DTLS/SRTP after SrtpFilter is active "
+ << "should never happen.";
ConnectToTransportChannel(new_tc);
for (const auto& pair : rtcp_socket_options_) {
new_tc->SetOption(pair.first, pair.second);
}
}
- // Update aggregate writable/ready-to-send state between RTP and RTCP upon
- // setting new channel
- UpdateWritableState_w();
- SetReadyToSend(true, new_tc && new_tc->writable());
+  if (update_writability) {
+ // Update aggregate writable/ready-to-send state between RTP and RTCP upon
+ // setting new channel
+ UpdateWritableState_w();
+ SetReadyToSend(true, new_tc && new_tc->writable());
+ }
}
void BaseChannel::ConnectToTransportChannel(TransportChannel* tc) {
@@ -336,6 +369,7 @@ void BaseChannel::ConnectToTransportChannel(TransportChannel* tc) {
tc->SignalWritableState.connect(this, &BaseChannel::OnWritableState);
tc->SignalReadPacket.connect(this, &BaseChannel::OnChannelRead);
tc->SignalReadyToSend.connect(this, &BaseChannel::OnReadyToSend);
+ tc->SignalDtlsState.connect(this, &BaseChannel::OnDtlsState);
}
void BaseChannel::DisconnectFromTransportChannel(TransportChannel* tc) {
@@ -344,6 +378,7 @@ void BaseChannel::DisconnectFromTransportChannel(TransportChannel* tc) {
tc->SignalWritableState.disconnect(this);
tc->SignalReadPacket.disconnect(this);
tc->SignalReadyToSend.disconnect(this);
+ tc->SignalDtlsState.disconnect(this);
}
bool BaseChannel::Enable(bool enable) {
@@ -374,6 +409,7 @@ bool BaseChannel::RemoveSendStream(uint32_t ssrc) {
bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "BaseChannel::SetLocalContent");
return InvokeOnWorker(Bind(&BaseChannel::SetLocalContent_w,
this, content, action, error_desc));
}
@@ -381,6 +417,7 @@ bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent");
return InvokeOnWorker(Bind(&BaseChannel::SetRemoteContent_w,
this, content, action, error_desc));
}
@@ -416,10 +453,10 @@ bool BaseChannel::IsReadyToReceive() const {
bool BaseChannel::IsReadyToSend() const {
// Send outgoing data if we are enabled, have local and remote content,
// and we have had some form of connectivity.
- return enabled() &&
- IsReceiveContentDirection(remote_content_direction_) &&
+ return enabled() && IsReceiveContentDirection(remote_content_direction_) &&
IsSendContentDirection(local_content_direction_) &&
- was_ever_writable();
+ was_ever_writable() &&
+ (srtp_filter_.IsActive() || !ShouldSetupDtlsSrtp());
}
bool BaseChannel::SendPacket(rtc::Buffer* packet,
@@ -459,6 +496,7 @@ void BaseChannel::OnChannelRead(TransportChannel* channel,
const char* data, size_t len,
const rtc::PacketTime& packet_time,
int flags) {
+ TRACE_EVENT0("webrtc", "BaseChannel::OnChannelRead");
// OnChannelRead gets called from P2PSocket; now pass data to MediaEngine
ASSERT(worker_thread_ == rtc::Thread::Current());
@@ -474,6 +512,22 @@ void BaseChannel::OnReadyToSend(TransportChannel* channel) {
SetReadyToSend(channel == rtcp_transport_channel_, true);
}
+void BaseChannel::OnDtlsState(TransportChannel* channel,
+ DtlsTransportState state) {
+ if (!ShouldSetupDtlsSrtp()) {
+ return;
+ }
+
+  // Reset the SRTP filter if the state is not CONNECTED. For the CONNECTED
+  // state, setting up the DTLS-SRTP context is deferred to ChannelWritable_w,
+  // to cover other scenarios such as the whole channel becoming writable (not
+  // just this TransportChannel) or the TransportChannel being attached after
+  // DTLS is negotiated.
+ if (state != DTLS_TRANSPORT_CONNECTED) {
+ srtp_filter_.ResetParams();
+ }
+}
+
void BaseChannel::SetReadyToSend(bool rtcp, bool ready) {
if (rtcp) {
rtcp_ready_to_send_ = ready;
@@ -512,7 +566,7 @@ bool BaseChannel::SendPacket(bool rtcp,
// Avoid a copy by transferring the ownership of the packet data.
int message_id = (!rtcp) ? MSG_RTPPACKET : MSG_RTCPPACKET;
PacketMessageData* data = new PacketMessageData;
- data->packet = packet->Pass();
+ data->packet = std::move(*packet);
data->options = options;
worker_thread_->Post(this, message_id, data);
return true;
@@ -628,9 +682,12 @@ bool BaseChannel::WantsPacket(bool rtcp, rtc::Buffer* packet) {
<< " packet: wrong size=" << packet->size();
return false;
}
-
- // Bundle filter handles both rtp and rtcp packets.
- return bundle_filter_.DemuxPacket(packet->data<char>(), packet->size(), rtcp);
+ if (rtcp) {
+ // Permit all (seemingly valid) RTCP packets.
+ return true;
+ }
+ // Check whether we handle this payload.
+ return bundle_filter_.DemuxPacket(packet->data<uint8_t>(), packet->size());
}
void BaseChannel::HandlePacket(bool rtcp, rtc::Buffer* packet,
@@ -758,8 +815,9 @@ void BaseChannel::UpdateWritableState_w() {
void BaseChannel::ChannelWritable_w() {
ASSERT(worker_thread_ == rtc::Thread::Current());
- if (writable_)
+ if (writable_) {
return;
+ }
LOG(LS_INFO) << "Channel writable (" << content_name_ << ")"
<< (was_ever_writable_ ? "" : " for the first time");
@@ -775,22 +833,8 @@ void BaseChannel::ChannelWritable_w() {
}
}
- // If we're doing DTLS-SRTP, now is the time.
- if (!was_ever_writable_ && ShouldSetupDtlsSrtp()) {
- if (!SetupDtlsSrtp(false)) {
- SignalDtlsSetupFailure_w(false);
- return;
- }
-
- if (rtcp_transport_channel_) {
- if (!SetupDtlsSrtp(true)) {
- SignalDtlsSetupFailure_w(true);
- return;
- }
- }
- }
-
was_ever_writable_ = true;
+ MaybeSetupDtlsSrtp_w();
writable_ = true;
ChangeState();
}
@@ -806,20 +850,21 @@ void BaseChannel::SignalDtlsSetupFailure_s(bool rtcp) {
SignalDtlsSetupFailure(this, rtcp);
}
-bool BaseChannel::SetDtlsSrtpCiphers(TransportChannel *tc, bool rtcp) {
- std::vector<std::string> ciphers;
- // We always use the default SRTP ciphers for RTCP, but we may use different
- // ciphers for RTP depending on the media type.
+bool BaseChannel::SetDtlsSrtpCryptoSuites(TransportChannel* tc, bool rtcp) {
+ std::vector<int> crypto_suites;
+ // We always use the default SRTP crypto suites for RTCP, but we may use
+ // different crypto suites for RTP depending on the media type.
if (!rtcp) {
- GetSrtpCryptoSuiteNames(&ciphers);
+ GetSrtpCryptoSuites(&crypto_suites);
} else {
- GetDefaultSrtpCryptoSuiteNames(&ciphers);
+ GetDefaultSrtpCryptoSuites(&crypto_suites);
}
- return tc->SetSrtpCiphers(ciphers);
+ return tc->SetSrtpCryptoSuites(crypto_suites);
}
bool BaseChannel::ShouldSetupDtlsSrtp() const {
- return true;
+ // Since DTLS is applied to all channels, checking RTP should be enough.
+ return transport_channel_ && transport_channel_->IsDtlsActive();
}
// This function returns true if either DTLS-SRTP is not in use
@@ -830,14 +875,12 @@ bool BaseChannel::SetupDtlsSrtp(bool rtcp_channel) {
TransportChannel* channel =
rtcp_channel ? rtcp_transport_channel_ : transport_channel_;
- // No DTLS
- if (!channel->IsDtlsActive())
- return true;
+ RTC_DCHECK(channel->IsDtlsActive());
- std::string selected_cipher;
+ int selected_crypto_suite;
- if (!channel->GetSrtpCryptoSuite(&selected_cipher)) {
- LOG(LS_ERROR) << "No DTLS-SRTP selected cipher";
+ if (!channel->GetSrtpCryptoSuite(&selected_crypto_suite)) {
+ LOG(LS_ERROR) << "No DTLS-SRTP selected crypto suite";
return false;
}
@@ -893,21 +936,15 @@ bool BaseChannel::SetupDtlsSrtp(bool rtcp_channel) {
}
if (rtcp_channel) {
- ret = srtp_filter_.SetRtcpParams(
- selected_cipher,
- &(*send_key)[0],
- static_cast<int>(send_key->size()),
- selected_cipher,
- &(*recv_key)[0],
- static_cast<int>(recv_key->size()));
+ ret = srtp_filter_.SetRtcpParams(selected_crypto_suite, &(*send_key)[0],
+ static_cast<int>(send_key->size()),
+ selected_crypto_suite, &(*recv_key)[0],
+ static_cast<int>(recv_key->size()));
} else {
- ret = srtp_filter_.SetRtpParams(
- selected_cipher,
- &(*send_key)[0],
- static_cast<int>(send_key->size()),
- selected_cipher,
- &(*recv_key)[0],
- static_cast<int>(recv_key->size()));
+ ret = srtp_filter_.SetRtpParams(selected_crypto_suite, &(*send_key)[0],
+ static_cast<int>(send_key->size()),
+ selected_crypto_suite, &(*recv_key)[0],
+ static_cast<int>(recv_key->size()));
}
if (!ret)
@@ -918,6 +955,28 @@ bool BaseChannel::SetupDtlsSrtp(bool rtcp_channel) {
return ret;
}
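The hunks above elide the key derivation that sits between the crypto-suite lookup and the SetRtpParams/SetRtcpParams calls. A hedged sketch of that step as RFC 5764 describes it; the ExportKeyingMaterial signature and the SRTP_MASTER_KEY_* constants are assumptions based on this era of the tree:
// Sketch: derive SRTP keys from the DTLS connection (RFC 5764). The exported
// block is laid out as client_key | server_key | client_salt | server_salt;
// which half is "send" depends on whether we acted as DTLS client or server.
std::vector<unsigned char> dtls_buffer(SRTP_MASTER_KEY_KEY_LEN * 2 +
                                       SRTP_MASTER_KEY_SALT_LEN * 2);
if (!channel->ExportKeyingMaterial("EXTRACTOR-dtls_srtp", NULL, 0, false,
                                   &dtls_buffer[0], dtls_buffer.size())) {
  LOG(LS_WARNING) << "DTLS-SRTP key export failed";
  return false;
}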
+void BaseChannel::MaybeSetupDtlsSrtp_w() {
+ if (srtp_filter_.IsActive()) {
+ return;
+ }
+
+ if (!ShouldSetupDtlsSrtp()) {
+ return;
+ }
+
+ if (!SetupDtlsSrtp(false)) {
+ SignalDtlsSetupFailure_w(false);
+ return;
+ }
+
+ if (rtcp_transport_channel_) {
+ if (!SetupDtlsSrtp(true)) {
+ SignalDtlsSetupFailure_w(true);
+ return;
+ }
+ }
+}
+
void BaseChannel::ChannelNotWritable_w() {
ASSERT(worker_thread_ == rtc::Thread::Current());
if (!writable_)
@@ -1022,7 +1081,7 @@ void BaseChannel::ActivateRtcpMux() {
void BaseChannel::ActivateRtcpMux_w() {
if (!rtcp_mux_filter_.IsActive()) {
rtcp_mux_filter_.SetActive();
- set_rtcp_transport_channel(nullptr);
+ set_rtcp_transport_channel(nullptr, true);
rtcp_transport_enabled_ = false;
}
}
@@ -1045,7 +1104,7 @@ bool BaseChannel::SetRtcpMux_w(bool enable, ContentAction action,
LOG(LS_INFO) << "Enabling rtcp-mux for " << content_name()
<< " by destroying RTCP transport channel for "
<< transport_name();
- set_rtcp_transport_channel(nullptr);
+ set_rtcp_transport_channel(nullptr, true);
rtcp_transport_enabled_ = false;
}
break;
@@ -1075,15 +1134,11 @@ bool BaseChannel::SetRtcpMux_w(bool enable, ContentAction action,
bool BaseChannel::AddRecvStream_w(const StreamParams& sp) {
ASSERT(worker_thread() == rtc::Thread::Current());
- if (!media_channel()->AddRecvStream(sp))
- return false;
-
- return bundle_filter_.AddStream(sp);
+ return media_channel()->AddRecvStream(sp);
}
bool BaseChannel::RemoveRecvStream_w(uint32_t ssrc) {
ASSERT(worker_thread() == rtc::Thread::Current());
- bundle_filter_.RemoveStream(ssrc);
return media_channel()->RemoveRecvStream(ssrc);
}
@@ -1243,6 +1298,7 @@ void BaseChannel::MaybeCacheRtpAbsSendTimeHeaderExtension(
}
void BaseChannel::OnMessage(rtc::Message *pmsg) {
+ TRACE_EVENT0("webrtc", "BaseChannel::OnMessage");
switch (pmsg->message_id) {
case MSG_RTPPACKET:
case MSG_RTCPPACKET: {
@@ -1324,15 +1380,6 @@ void VoiceChannel::SetEarlyMedia(bool enable) {
}
}
-bool VoiceChannel::PressDTMF(int digit, bool playout) {
- int flags = DF_SEND;
- if (playout) {
- flags |= DF_PLAY;
- }
- int duration_ms = 160;
- return InsertDtmf(0, digit, duration_ms, flags);
-}
-
bool VoiceChannel::CanInsertDtmf() {
return InvokeOnWorker(Bind(&VoiceMediaChannel::CanInsertDtmf,
media_channel()));
@@ -1340,10 +1387,9 @@ bool VoiceChannel::CanInsertDtmf() {
bool VoiceChannel::InsertDtmf(uint32_t ssrc,
int event_code,
- int duration,
- int flags) {
+ int duration) {
return InvokeOnWorker(Bind(&VoiceChannel::InsertDtmf_w, this,
- ssrc, event_code, duration, flags));
+ ssrc, event_code, duration));
}
bool VoiceChannel::SetOutputVolume(uint32_t ssrc, double volume) {
@@ -1351,6 +1397,15 @@ bool VoiceChannel::SetOutputVolume(uint32_t ssrc, double volume) {
media_channel(), ssrc, volume));
}
+void VoiceChannel::SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) {
+  // We need to work around Bind's lack of support for scoped_ptr and
+  // ownership passing, so we invoke our own helper that takes a pointer to
+  // our local variable. This is safe because the invocation is synchronous.
+ InvokeOnWorker(Bind(&SetRawAudioSink_w, media_channel(), ssrc, &sink));
+}
+
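SetRawAudioSink_w above is invoked by name but not shown in these hunks; a plausible sketch of such a helper (an assumption for illustration, not the committed code):
// Sketch: receives a pointer to the caller's scoped_ptr and moves the sink
// out of it. Safe only because InvokeOnWorker blocks until this has run.
static void SetRawAudioSink_w(
    VoiceMediaChannel* media_channel,
    uint32_t ssrc,
    rtc::scoped_ptr<webrtc::AudioSinkInterface>* sink) {
  media_channel->SetRawAudioSink(ssrc, std::move(*sink));
}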
bool VoiceChannel::GetStats(VoiceMediaInfo* stats) {
return InvokeOnWorker(Bind(&VoiceMediaChannel::GetStats,
media_channel(), stats));
@@ -1440,6 +1495,7 @@ const ContentInfo* VoiceChannel::GetFirstContent(
bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "VoiceChannel::SetLocalContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
LOG(LS_INFO) << "Setting local voice description";
@@ -1484,6 +1540,7 @@ bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content,
bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "VoiceChannel::SetRemoteContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
LOG(LS_INFO) << "Setting remote voice description";
@@ -1502,7 +1559,7 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content,
AudioSendParameters send_params = last_send_params_;
RtpSendParametersFromMediaDescription(audio, &send_params);
if (audio->agc_minus_10db()) {
- send_params.options.adjust_agc_delta.Set(kAgcMinus10db);
+ send_params.options.adjust_agc_delta = rtc::Optional<int>(kAgcMinus10db);
}
if (!media_channel()->SetSendParameters(send_params)) {
SafeSetError("Failed to set remote audio description send parameters.",
@@ -1539,13 +1596,11 @@ void VoiceChannel::HandleEarlyMediaTimeout() {
bool VoiceChannel::InsertDtmf_w(uint32_t ssrc,
int event,
- int duration,
- int flags) {
+ int duration) {
if (!enabled()) {
return false;
}
-
- return media_channel()->InsertDtmf(ssrc, event, duration, flags);
+ return media_channel()->InsertDtmf(ssrc, event, duration);
}
void VoiceChannel::OnMessage(rtc::Message *pmsg) {
@@ -1581,9 +1636,8 @@ void VoiceChannel::OnAudioMonitorUpdate(AudioMonitor* monitor,
SignalAudioMonitor(this, info);
}
-void VoiceChannel::GetSrtpCryptoSuiteNames(
- std::vector<std::string>* ciphers) const {
- GetSupportedAudioCryptoSuites(ciphers);
+void VoiceChannel::GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const {
+ GetSupportedAudioCryptoSuites(crypto_suites);
}
VideoChannel::VideoChannel(rtc::Thread* thread,
@@ -1653,20 +1707,6 @@ bool VideoChannel::IsScreencasting() {
return InvokeOnWorker(Bind(&VideoChannel::IsScreencasting_w, this));
}
-int VideoChannel::GetScreencastFps(uint32_t ssrc) {
- ScreencastDetailsData data(ssrc);
- worker_thread()->Invoke<void>(Bind(
- &VideoChannel::GetScreencastDetails_w, this, &data));
- return data.fps;
-}
-
-int VideoChannel::GetScreencastMaxPixels(uint32_t ssrc) {
- ScreencastDetailsData data(ssrc);
- worker_thread()->Invoke<void>(Bind(
- &VideoChannel::GetScreencastDetails_w, this, &data));
- return data.screencast_max_pixels;
-}
-
bool VideoChannel::SendIntraFrame() {
worker_thread()->Invoke<void>(Bind(
&VideoMediaChannel::SendIntraFrame, media_channel()));
@@ -1726,6 +1766,7 @@ const ContentInfo* VideoChannel::GetFirstContent(
bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "VideoChannel::SetLocalContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
LOG(LS_INFO) << "Setting local video description";
@@ -1770,6 +1811,7 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "VideoChannel::SetRemoteContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
LOG(LS_INFO) << "Setting remote video description";
@@ -1789,7 +1831,7 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
VideoSendParameters send_params = last_send_params_;
RtpSendParametersFromMediaDescription(video, &send_params);
if (video->conference_mode()) {
- send_params.options.conference_mode.Set(true);
+ send_params.options.conference_mode = rtc::Optional<bool>(true);
}
if (!media_channel()->SetSendParameters(send_params)) {
SafeSetError("Failed to set remote video description send parameters.",
@@ -1877,18 +1919,6 @@ bool VideoChannel::IsScreencasting_w() const {
return !screencast_capturers_.empty();
}
-void VideoChannel::GetScreencastDetails_w(
- ScreencastDetailsData* data) const {
- ScreencastMap::const_iterator iter = screencast_capturers_.find(data->ssrc);
- if (iter == screencast_capturers_.end()) {
- return;
- }
- VideoCapturer* capturer = iter->second;
- const VideoFormat* video_format = capturer->GetCaptureFormat();
- data->fps = VideoFormat::IntervalToFps(video_format->interval);
- data->screencast_max_pixels = capturer->screencast_max_pixels();
-}
-
void VideoChannel::OnScreencastWindowEvent_s(uint32_t ssrc,
rtc::WindowEvent we) {
ASSERT(signaling_thread() == rtc::Thread::Current());
@@ -1971,9 +2001,8 @@ bool VideoChannel::GetLocalSsrc(const VideoCapturer* capturer, uint32_t* ssrc) {
return false;
}
-void VideoChannel::GetSrtpCryptoSuiteNames(
- std::vector<std::string>* ciphers) const {
- GetSupportedVideoCryptoSuites(ciphers);
+void VideoChannel::GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const {
+ GetSupportedVideoCryptoSuites(crypto_suites);
}
DataChannel::DataChannel(rtc::Thread* thread,
@@ -2067,6 +2096,7 @@ bool DataChannel::SetDataChannelTypeFromContent(
bool DataChannel::SetLocalContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "DataChannel::SetLocalContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
LOG(LS_INFO) << "Setting local data description";
@@ -2122,6 +2152,7 @@ bool DataChannel::SetLocalContent_w(const MediaContentDescription* content,
bool DataChannel::SetRemoteContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
+ TRACE_EVENT0("webrtc", "DataChannel::SetRemoteContent_w");
ASSERT(worker_thread() == rtc::Thread::Current());
const DataContentDescription* data =
@@ -2279,13 +2310,12 @@ void DataChannel::OnDataChannelReadyToSend(bool writable) {
new DataChannelReadyToSendMessageData(writable));
}
-void DataChannel::GetSrtpCryptoSuiteNames(
- std::vector<std::string>* ciphers) const {
- GetSupportedDataCryptoSuites(ciphers);
+void DataChannel::GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const {
+ GetSupportedDataCryptoSuites(crypto_suites);
}
bool DataChannel::ShouldSetupDtlsSrtp() const {
- return (data_channel_type_ == DCT_RTP);
+ return (data_channel_type_ == DCT_RTP) && BaseChannel::ShouldSetupDtlsSrtp();
}
void DataChannel::OnStreamClosedRemotely(uint32_t sid) {
diff --git a/talk/session/media/channel.h b/talk/session/media/channel.h
index 603115cee7..d8fde670a0 100644
--- a/talk/session/media/channel.h
+++ b/talk/session/media/channel.h
@@ -38,19 +38,24 @@
#include "talk/media/base/mediaengine.h"
#include "talk/media/base/streamparams.h"
#include "talk/media/base/videocapturer.h"
-#include "webrtc/p2p/base/transportcontroller.h"
-#include "webrtc/p2p/client/socketmonitor.h"
#include "talk/session/media/audiomonitor.h"
#include "talk/session/media/bundlefilter.h"
#include "talk/session/media/mediamonitor.h"
#include "talk/session/media/mediasession.h"
#include "talk/session/media/rtcpmuxfilter.h"
#include "talk/session/media/srtpfilter.h"
+#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/asyncudpsocket.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/network.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/window.h"
+#include "webrtc/p2p/base/transportcontroller.h"
+#include "webrtc/p2p/client/socketmonitor.h"
+
+namespace webrtc {
+class AudioSinkInterface;
+} // namespace webrtc
namespace cricket {
@@ -174,8 +179,11 @@ class BaseChannel
// Sets the |transport_channel_| (and |rtcp_transport_channel_|, if |rtcp_| is
// true). Gets the transport channels from |transport_controller_|.
bool SetTransport_w(const std::string& transport_name);
+
void set_transport_channel(TransportChannel* transport);
- void set_rtcp_transport_channel(TransportChannel* transport);
+ void set_rtcp_transport_channel(TransportChannel* transport,
+                                  bool update_writability);
+
bool was_ever_writable() const { return was_ever_writable_; }
void set_local_content_direction(MediaContentDirection direction) {
local_content_direction_ = direction;
@@ -213,6 +221,8 @@ class BaseChannel
int flags);
void OnReadyToSend(TransportChannel* channel);
+ void OnDtlsState(TransportChannel* channel, DtlsTransportState state);
+
bool PacketIsRtcp(const TransportChannel* channel, const char* data,
size_t len);
bool SendPacket(bool rtcp,
@@ -235,8 +245,9 @@ class BaseChannel
// Do the DTLS key expansion and impose it on the SRTP/SRTCP filters.
// |rtcp_channel| indicates whether to set up the RTP or RTCP filter.
bool SetupDtlsSrtp(bool rtcp_channel);
+ void MaybeSetupDtlsSrtp_w();
// Set the DTLS-SRTP cipher policy on this channel as appropriate.
- bool SetDtlsSrtpCiphers(TransportChannel *tc, bool rtcp);
+ bool SetDtlsSrtpCryptoSuites(TransportChannel* tc, bool rtcp);
virtual void ChangeState() = 0;
@@ -282,9 +293,8 @@ class BaseChannel
void OnMessage(rtc::Message* pmsg) override;
// Handled in derived classes
- // Get the SRTP ciphers to use for RTP media
- virtual void GetSrtpCryptoSuiteNames(
- std::vector<std::string>* ciphers) const = 0;
+ // Get the SRTP crypto suites to use for RTP media
+ virtual void GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const = 0;
virtual void OnConnectionMonitorUpdate(ConnectionMonitor* monitor,
const std::vector<ConnectionInfo>& infos) = 0;
@@ -356,8 +366,6 @@ class VoiceChannel : public BaseChannel {
// own ringing sound
sigslot::signal1<VoiceChannel*> SignalEarlyMediaTimeout;
- // TODO(ronghuawu): Replace PressDTMF with InsertDtmf.
- bool PressDTMF(int digit, bool playout);
// Returns if the telephone-event has been negotiated.
bool CanInsertDtmf();
// Send a DTMF |event|.
@@ -365,8 +373,11 @@ class VoiceChannel : public BaseChannel {
// The |ssrc| should be either 0 or a valid send stream ssrc.
// Valid values for |event| are 0 to 15, corresponding to DTMF events
// 0-9, *, #, A-D.
- bool InsertDtmf(uint32_t ssrc, int event_code, int duration, int flags);
+ bool InsertDtmf(uint32_t ssrc, int event_code, int duration);
bool SetOutputVolume(uint32_t ssrc, double volume);
+ void SetRawAudioSink(uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink);
+
// Get statistics about the current media session.
bool GetStats(VoiceMediaInfo* stats);
@@ -402,12 +413,12 @@ class VoiceChannel : public BaseChannel {
ContentAction action,
std::string* error_desc);
void HandleEarlyMediaTimeout();
- bool InsertDtmf_w(uint32_t ssrc, int event, int duration, int flags);
+ bool InsertDtmf_w(uint32_t ssrc, int event, int duration);
bool SetOutputVolume_w(uint32_t ssrc, double volume);
bool GetStats_w(VoiceMediaInfo* stats);
virtual void OnMessage(rtc::Message* pmsg);
- virtual void GetSrtpCryptoSuiteNames(std::vector<std::string>* ciphers) const;
+ virtual void GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const;
virtual void OnConnectionMonitorUpdate(
ConnectionMonitor* monitor, const std::vector<ConnectionInfo>& infos);
virtual void OnMediaMonitorUpdate(
@@ -456,8 +467,6 @@ class VideoChannel : public BaseChannel {
// True if we've added a screencast. Doesn't matter if the capturer
// has been started or not.
bool IsScreencasting();
- int GetScreencastFps(uint32_t ssrc);
- int GetScreencastMaxPixels(uint32_t ssrc);
// Get statistics about the current media session.
bool GetStats(VideoMediaInfo* stats);
@@ -476,7 +485,6 @@ class VideoChannel : public BaseChannel {
private:
typedef std::map<uint32_t, VideoCapturer*> ScreencastMap;
- struct ScreencastDetailsData;
// overrides from BaseChannel
virtual void ChangeState();
@@ -493,11 +501,10 @@ class VideoChannel : public BaseChannel {
bool RemoveScreencast_w(uint32_t ssrc);
void OnScreencastWindowEvent_s(uint32_t ssrc, rtc::WindowEvent we);
bool IsScreencasting_w() const;
- void GetScreencastDetails_w(ScreencastDetailsData* d) const;
bool GetStats_w(VideoMediaInfo* stats);
virtual void OnMessage(rtc::Message* pmsg);
- virtual void GetSrtpCryptoSuiteNames(std::vector<std::string>* ciphers) const;
+ virtual void GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const;
virtual void OnConnectionMonitorUpdate(
ConnectionMonitor* monitor, const std::vector<ConnectionInfo>& infos);
virtual void OnMediaMonitorUpdate(
@@ -614,7 +621,7 @@ class DataChannel : public BaseChannel {
virtual bool WantsPacket(bool rtcp, rtc::Buffer* packet);
virtual void OnMessage(rtc::Message* pmsg);
- virtual void GetSrtpCryptoSuiteNames(std::vector<std::string>* ciphers) const;
+ virtual void GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const;
virtual void OnConnectionMonitorUpdate(
ConnectionMonitor* monitor, const std::vector<ConnectionInfo>& infos);
virtual void OnMediaMonitorUpdate(
diff --git a/talk/session/media/channel_unittest.cc b/talk/session/media/channel_unittest.cc
index 18233202b6..6b1d66fe39 100644
--- a/talk/session/media/channel_unittest.cc
+++ b/talk/session/media/channel_unittest.cc
@@ -33,8 +33,8 @@
#include "talk/media/base/rtpdump.h"
#include "talk/media/base/screencastid.h"
#include "talk/media/base/testutils.h"
-#include "webrtc/p2p/base/faketransportcontroller.h"
#include "talk/session/media/channel.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/fileutils.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/helpers.h"
@@ -44,6 +44,7 @@
#include "webrtc/base/ssladapter.h"
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/window.h"
+#include "webrtc/p2p/base/faketransportcontroller.h"
#define MAYBE_SKIP_TEST(feature) \
if (!(rtc::SSLStreamAdapter::feature())) { \
@@ -174,17 +175,15 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
if (flags1 & DTLS) {
// Confirmed to work with KT_RSA and KT_ECDSA.
- transport_controller1_.SetLocalCertificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(
- rtc::SSLIdentity::Generate("session1", rtc::KT_DEFAULT))
- .Pass()));
+ transport_controller1_.SetLocalCertificate(
+ rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::SSLIdentity>(
+ rtc::SSLIdentity::Generate("session1", rtc::KT_DEFAULT))));
}
if (flags2 & DTLS) {
// Confirmed to work with KT_RSA and KT_ECDSA.
- transport_controller2_.SetLocalCertificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(
- rtc::SSLIdentity::Generate("session2", rtc::KT_DEFAULT))
- .Pass()));
+ transport_controller2_.SetLocalCertificate(
+ rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::SSLIdentity>(
+ rtc::SSLIdentity::Generate("session2", rtc::KT_DEFAULT))));
}
// Add stream information (SSRC) to the local content but not to the remote
@@ -1473,12 +1472,6 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
EXPECT_TRUE(channel2_->bundle_filter()->FindPayloadType(pl_type1));
EXPECT_FALSE(channel1_->bundle_filter()->FindPayloadType(pl_type2));
EXPECT_FALSE(channel2_->bundle_filter()->FindPayloadType(pl_type2));
- // channel1 - should only have media_content2 as remote. i.e. kSsrc2
- EXPECT_TRUE(channel1_->bundle_filter()->FindStream(kSsrc2));
- EXPECT_FALSE(channel1_->bundle_filter()->FindStream(kSsrc1));
- // channel2 - should only have media_content1 as remote. i.e. kSsrc1
- EXPECT_TRUE(channel2_->bundle_filter()->FindStream(kSsrc1));
- EXPECT_FALSE(channel2_->bundle_filter()->FindStream(kSsrc2));
// Both channels can receive pl_type1 only.
EXPECT_TRUE(SendCustomRtp1(kSsrc1, ++sequence_number1_1, pl_type1));
@@ -1503,8 +1496,9 @@ class ChannelTest : public testing::Test, public sigslot::has_slots<> {
EXPECT_TRUE(SendCustomRtcp1(kSsrc2));
EXPECT_TRUE(SendCustomRtcp2(kSsrc1));
- EXPECT_FALSE(CheckCustomRtcp1(kSsrc1));
- EXPECT_FALSE(CheckCustomRtcp2(kSsrc2));
+ // Bundle filter shouldn't filter out any RTCP.
+ EXPECT_TRUE(CheckCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(CheckCustomRtcp2(kSsrc2));
}
// Test that the media monitor can be run and gives timely callbacks.
@@ -2116,23 +2110,6 @@ TEST_F(VoiceChannelTest, TestMediaMonitor) {
Base::TestMediaMonitor();
}
-// Test that PressDTMF properly forwards to the media channel.
-TEST_F(VoiceChannelTest, TestDtmf) {
- CreateChannels(0, 0);
- EXPECT_TRUE(SendInitiate());
- EXPECT_TRUE(SendAccept());
- EXPECT_EQ(0U, media_channel1_->dtmf_info_queue().size());
-
- EXPECT_TRUE(channel1_->PressDTMF(1, true));
- EXPECT_TRUE(channel1_->PressDTMF(8, false));
-
- ASSERT_EQ(2U, media_channel1_->dtmf_info_queue().size());
- EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[0],
- 0, 1, 160, cricket::DF_PLAY | cricket::DF_SEND));
- EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[1],
- 0, 8, 160, cricket::DF_SEND));
-}
-
// Test that InsertDtmf properly forwards to the media channel.
TEST_F(VoiceChannelTest, TestInsertDtmf) {
CreateChannels(0, 0);
@@ -2140,18 +2117,17 @@ TEST_F(VoiceChannelTest, TestInsertDtmf) {
EXPECT_TRUE(SendAccept());
EXPECT_EQ(0U, media_channel1_->dtmf_info_queue().size());
- EXPECT_TRUE(channel1_->InsertDtmf(1, 3, 100, cricket::DF_SEND));
- EXPECT_TRUE(channel1_->InsertDtmf(2, 5, 110, cricket::DF_PLAY));
- EXPECT_TRUE(channel1_->InsertDtmf(3, 7, 120,
- cricket::DF_PLAY | cricket::DF_SEND));
+ EXPECT_TRUE(channel1_->InsertDtmf(1, 3, 100));
+ EXPECT_TRUE(channel1_->InsertDtmf(2, 5, 110));
+ EXPECT_TRUE(channel1_->InsertDtmf(3, 7, 120));
ASSERT_EQ(3U, media_channel1_->dtmf_info_queue().size());
EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[0],
- 1, 3, 100, cricket::DF_SEND));
+ 1, 3, 100));
EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[1],
- 2, 5, 110, cricket::DF_PLAY));
+ 2, 5, 110));
EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[2],
- 3, 7, 120, cricket::DF_PLAY | cricket::DF_SEND));
+ 3, 7, 120));
}
TEST_F(VoiceChannelTest, TestSetContentFailure) {
@@ -2253,21 +2229,19 @@ TEST_F(VoiceChannelTest, TestScaleVolumeMultiwayCall) {
}
TEST_F(VoiceChannelTest, SendBundleToBundle) {
- Base::SendBundleToBundle(kAudioPts, ARRAY_SIZE(kAudioPts), false, false);
+ Base::SendBundleToBundle(kAudioPts, arraysize(kAudioPts), false, false);
}
TEST_F(VoiceChannelTest, SendBundleToBundleSecure) {
- Base::SendBundleToBundle(kAudioPts, ARRAY_SIZE(kAudioPts), false, true);
+ Base::SendBundleToBundle(kAudioPts, arraysize(kAudioPts), false, true);
}
TEST_F(VoiceChannelTest, SendBundleToBundleWithRtcpMux) {
- Base::SendBundleToBundle(
- kAudioPts, ARRAY_SIZE(kAudioPts), true, false);
+ Base::SendBundleToBundle(kAudioPts, arraysize(kAudioPts), true, false);
}
TEST_F(VoiceChannelTest, SendBundleToBundleWithRtcpMuxSecure) {
- Base::SendBundleToBundle(
- kAudioPts, ARRAY_SIZE(kAudioPts), true, true);
+ Base::SendBundleToBundle(kAudioPts, arraysize(kAudioPts), true, true);
}
// VideoChannelTest
@@ -2501,21 +2475,19 @@ TEST_F(VideoChannelTest, TestFlushRtcp) {
}
TEST_F(VideoChannelTest, SendBundleToBundle) {
- Base::SendBundleToBundle(kVideoPts, ARRAY_SIZE(kVideoPts), false, false);
+ Base::SendBundleToBundle(kVideoPts, arraysize(kVideoPts), false, false);
}
TEST_F(VideoChannelTest, SendBundleToBundleSecure) {
- Base::SendBundleToBundle(kVideoPts, ARRAY_SIZE(kVideoPts), false, true);
+ Base::SendBundleToBundle(kVideoPts, arraysize(kVideoPts), false, true);
}
TEST_F(VideoChannelTest, SendBundleToBundleWithRtcpMux) {
- Base::SendBundleToBundle(
- kVideoPts, ARRAY_SIZE(kVideoPts), true, false);
+ Base::SendBundleToBundle(kVideoPts, arraysize(kVideoPts), true, false);
}
TEST_F(VideoChannelTest, SendBundleToBundleWithRtcpMuxSecure) {
- Base::SendBundleToBundle(
- kVideoPts, ARRAY_SIZE(kVideoPts), true, true);
+ Base::SendBundleToBundle(kVideoPts, arraysize(kVideoPts), true, true);
}
TEST_F(VideoChannelTest, TestSrtpError) {
diff --git a/talk/session/media/channelmanager.cc b/talk/session/media/channelmanager.cc
index e7e1cd44a2..e7a4b8bddb 100644
--- a/talk/session/media/channelmanager.cc
+++ b/talk/session/media/channelmanager.cc
@@ -49,6 +49,7 @@
#include "webrtc/base/sigslotrepeater.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/base/stringutils.h"
+#include "webrtc/base/trace_event.h"
namespace cricket {
@@ -101,8 +102,6 @@ void ChannelManager::Construct(MediaEngineInterface* me,
initialized_ = false;
main_thread_ = rtc::Thread::Current();
worker_thread_ = worker_thread;
- // Get the default audio options from the media engine.
- audio_options_ = media_engine_->GetAudioOptions();
audio_output_volume_ = kNotSetOutputVolume;
local_renderer_ = NULL;
capturing_ = false;
@@ -156,7 +155,7 @@ void ChannelManager::GetSupportedAudioCodecs(
void ChannelManager::GetSupportedAudioRtpHeaderExtensions(
RtpHeaderExtensions* ext) const {
- *ext = media_engine_->audio_rtp_header_extensions();
+ *ext = media_engine_->GetAudioCapabilities().header_extensions;
}
void ChannelManager::GetSupportedVideoCodecs(
@@ -175,7 +174,7 @@ void ChannelManager::GetSupportedVideoCodecs(
void ChannelManager::GetSupportedVideoRtpHeaderExtensions(
RtpHeaderExtensions* ext) const {
- *ext = media_engine_->video_rtp_header_extensions();
+ *ext = media_engine_->GetVideoCapabilities().header_extensions;
}
void ChannelManager::GetSupportedDataCodecs(
@@ -205,11 +204,6 @@ bool ChannelManager::Init() {
return false;
}
- if (!SetAudioOptions(audio_options_)) {
- LOG(LS_WARNING) << "Failed to SetAudioOptions with options: "
- << audio_options_.ToString();
- }
-
// If audio_output_volume_ has been set via SetOutputVolume(), set the
// audio output volume of the engine.
if (kNotSetOutputVolume != audio_output_volume_ &&
@@ -218,11 +212,6 @@ bool ChannelManager::Init() {
<< audio_output_volume_;
}
- // Now apply the default video codec that has been set earlier.
- if (default_video_encoder_config_.max_codec.id != 0) {
- SetDefaultVideoEncoderConfig(default_video_encoder_config_);
- }
-
return initialized_;
}
@@ -295,6 +284,7 @@ VoiceChannel* ChannelManager::CreateVoiceChannel_w(
}
void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyVoiceChannel");
if (voice_channel) {
worker_thread_->Invoke<void>(
Bind(&ChannelManager::DestroyVoiceChannel_w, this, voice_channel));
@@ -302,6 +292,7 @@ void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) {
}
void ChannelManager::DestroyVoiceChannel_w(VoiceChannel* voice_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyVoiceChannel_w");
// Destroy voice channel.
ASSERT(initialized_);
ASSERT(worker_thread_ == rtc::Thread::Current());
@@ -351,6 +342,7 @@ VideoChannel* ChannelManager::CreateVideoChannel_w(
}
void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyVideoChannel");
if (video_channel) {
worker_thread_->Invoke<void>(
Bind(&ChannelManager::DestroyVideoChannel_w, this, video_channel));
@@ -358,6 +350,7 @@ void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) {
}
void ChannelManager::DestroyVideoChannel_w(VideoChannel* video_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyVideoChannel_w");
// Destroy video channel.
ASSERT(initialized_);
ASSERT(worker_thread_ == rtc::Thread::Current());
@@ -408,6 +401,7 @@ DataChannel* ChannelManager::CreateDataChannel_w(
}
void ChannelManager::DestroyDataChannel(DataChannel* data_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyDataChannel");
if (data_channel) {
worker_thread_->Invoke<void>(
Bind(&ChannelManager::DestroyDataChannel_w, this, data_channel));
@@ -415,6 +409,7 @@ void ChannelManager::DestroyDataChannel(DataChannel* data_channel) {
}
void ChannelManager::DestroyDataChannel_w(DataChannel* data_channel) {
+ TRACE_EVENT0("webrtc", "ChannelManager::DestroyDataChannel_w");
// Destroy data channel.
ASSERT(initialized_);
DataChannels::iterator it = std::find(data_channels_.begin(),
@@ -427,43 +422,6 @@ void ChannelManager::DestroyDataChannel_w(DataChannel* data_channel) {
delete data_channel;
}
-bool ChannelManager::SetAudioOptions(const AudioOptions& options) {
- // "Get device ids from DeviceManager" - these are the defaults returned.
- Device in_dev("", -1);
- Device out_dev("", -1);
-
- // If we're initialized, pass the settings to the media engine.
- bool ret = true;
- if (initialized_) {
- ret = worker_thread_->Invoke<bool>(
- Bind(&ChannelManager::SetAudioOptions_w, this,
- options, &in_dev, &out_dev));
- }
-
- // If all worked well, save the values for use in GetAudioOptions.
- if (ret) {
- audio_options_ = options;
- }
- return ret;
-}
-
-bool ChannelManager::SetAudioOptions_w(
- const AudioOptions& options,
- const Device* in_dev, const Device* out_dev) {
- ASSERT(worker_thread_ == rtc::Thread::Current());
- ASSERT(initialized_);
-
- // Set audio options
- bool ret = media_engine_->SetAudioOptions(options);
-
- // Set the audio devices
- if (ret) {
- ret = media_engine_->SetSoundDevices(in_dev, out_dev);
- }
-
- return ret;
-}
-
bool ChannelManager::GetOutputVolume(int* level) {
if (!initialized_) {
return false;
@@ -487,39 +445,6 @@ bool ChannelManager::SetOutputVolume(int level) {
return ret;
}
-bool ChannelManager::SetDefaultVideoEncoderConfig(const VideoEncoderConfig& c) {
- bool ret = true;
- if (initialized_) {
- ret = worker_thread_->Invoke<bool>(
- Bind(&MediaEngineInterface::SetDefaultVideoEncoderConfig,
- media_engine_.get(), c));
- }
- if (ret) {
- default_video_encoder_config_ = c;
- }
- return ret;
-}
-
-void ChannelManager::SetVoiceLogging(int level, const char* filter) {
- if (initialized_) {
- worker_thread_->Invoke<void>(
- Bind(&MediaEngineInterface::SetVoiceLogging,
- media_engine_.get(), level, filter));
- } else {
- media_engine_->SetVoiceLogging(level, filter);
- }
-}
-
-void ChannelManager::SetVideoLogging(int level, const char* filter) {
- if (initialized_) {
- worker_thread_->Invoke<void>(
- Bind(&MediaEngineInterface::SetVideoLogging,
- media_engine_.get(), level, filter));
- } else {
- media_engine_->SetVideoLogging(level, filter);
- }
-}
-
std::vector<cricket::VideoFormat> ChannelManager::GetSupportedFormats(
VideoCapturer* capturer) const {
ASSERT(capturer != NULL);
diff --git a/talk/session/media/channelmanager.h b/talk/session/media/channelmanager.h
index 6312e61e06..2bc516bfaa 100644
--- a/talk/session/media/channelmanager.h
+++ b/talk/session/media/channelmanager.h
@@ -129,7 +129,6 @@ class ChannelManager : public rtc::MessageHandler,
bool GetOutputVolume(int* level);
bool SetOutputVolume(int level);
- bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config);
// RTX will be enabled/disabled in engines that support it. The supporting
// engines will start offering an RTX codec. Must be called before Init().
bool SetVideoRtxEnabled(bool enable);
@@ -137,10 +136,6 @@ class ChannelManager : public rtc::MessageHandler,
// Starts/stops the local microphone and enables polling of the input level.
bool capturing() const { return capturing_; }
- // Configures the logging output of the mediaengine(s).
- void SetVoiceLogging(int level, const char* filter);
- void SetVideoLogging(int level, const char* filter);
-
// Gets capturer's supported formats in a thread safe manner
std::vector<cricket::VideoFormat> GetSupportedFormats(
VideoCapturer* capturer) const;
@@ -181,11 +176,6 @@ class ChannelManager : public rtc::MessageHandler,
sigslot::signal2<VideoCapturer*, CaptureState> SignalVideoCaptureStateChange;
- protected:
- // Adds non-transient parameters which can only be changed through the
- // options store.
- bool SetAudioOptions(const AudioOptions& options);
-
private:
typedef std::vector<VoiceChannel*> VoiceChannels;
typedef std::vector<VideoChannel*> VideoChannels;
@@ -217,8 +207,6 @@ class ChannelManager : public rtc::MessageHandler,
bool rtcp,
DataChannelType data_channel_type);
void DestroyDataChannel_w(DataChannel* data_channel);
- bool SetAudioOptions_w(const AudioOptions& options,
- const Device* in_dev, const Device* out_dev);
void OnVideoCaptureStateChange(VideoCapturer* capturer,
CaptureState result);
void GetSupportedFormats_w(
@@ -238,9 +226,7 @@ class ChannelManager : public rtc::MessageHandler,
VideoChannels video_channels_;
DataChannels data_channels_;
- AudioOptions audio_options_;
int audio_output_volume_;
- VideoEncoderConfig default_video_encoder_config_;
VideoRenderer* local_renderer_;
bool enable_rtx_;
diff --git a/talk/session/media/channelmanager_unittest.cc b/talk/session/media/channelmanager_unittest.cc
index fa6aa2cab6..4740f0f37d 100644
--- a/talk/session/media/channelmanager_unittest.cc
+++ b/talk/session/media/channelmanager_unittest.cc
@@ -183,38 +183,6 @@ TEST_F(ChannelManagerTest, NoTransportChannelTest) {
cm_->Terminate();
}
-// Test that SetDefaultVideoCodec passes through the right values.
-TEST_F(ChannelManagerTest, SetDefaultVideoEncoderConfig) {
- cricket::VideoCodec codec(96, "G264", 1280, 720, 60, 0);
- cricket::VideoEncoderConfig config(codec, 1, 2);
- EXPECT_TRUE(cm_->Init());
- EXPECT_TRUE(cm_->SetDefaultVideoEncoderConfig(config));
- EXPECT_EQ(config, fme_->default_video_encoder_config());
-}
-
-struct GetCapturerFrameSize : public sigslot::has_slots<> {
- void OnVideoFrame(VideoCapturer* capturer, const VideoFrame* frame) {
- width = frame->GetWidth();
- height = frame->GetHeight();
- }
- GetCapturerFrameSize(VideoCapturer* capturer) : width(0), height(0) {
- capturer->SignalVideoFrame.connect(this,
- &GetCapturerFrameSize::OnVideoFrame);
- static_cast<FakeVideoCapturer*>(capturer)->CaptureFrame();
- }
- size_t width;
- size_t height;
-};
-
-// Test that SetDefaultVideoCodec passes through the right values.
-TEST_F(ChannelManagerTest, SetDefaultVideoCodecBeforeInit) {
- cricket::VideoCodec codec(96, "G264", 1280, 720, 60, 0);
- cricket::VideoEncoderConfig config(codec, 1, 2);
- EXPECT_TRUE(cm_->SetDefaultVideoEncoderConfig(config));
- EXPECT_TRUE(cm_->Init());
- EXPECT_EQ(config, fme_->default_video_encoder_config());
-}
-
TEST_F(ChannelManagerTest, GetSetOutputVolumeBeforeInit) {
int level;
// Before init, SetOutputVolume() remembers the volume but does not change the
@@ -250,33 +218,6 @@ TEST_F(ChannelManagerTest, GetSetOutputVolume) {
EXPECT_EQ(60, level);
}
-// Test that logging options set before Init are applied properly,
-// and retained even after Init.
-TEST_F(ChannelManagerTest, SetLoggingBeforeInit) {
- cm_->SetVoiceLogging(rtc::LS_INFO, "test-voice");
- cm_->SetVideoLogging(rtc::LS_VERBOSE, "test-video");
- EXPECT_EQ(rtc::LS_INFO, fme_->voice_loglevel());
- EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
- EXPECT_EQ(rtc::LS_VERBOSE, fme_->video_loglevel());
- EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
- EXPECT_TRUE(cm_->Init());
- EXPECT_EQ(rtc::LS_INFO, fme_->voice_loglevel());
- EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
- EXPECT_EQ(rtc::LS_VERBOSE, fme_->video_loglevel());
- EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
-}
-
-// Test that logging options set after Init are applied properly.
-TEST_F(ChannelManagerTest, SetLogging) {
- EXPECT_TRUE(cm_->Init());
- cm_->SetVoiceLogging(rtc::LS_INFO, "test-voice");
- cm_->SetVideoLogging(rtc::LS_VERBOSE, "test-video");
- EXPECT_EQ(rtc::LS_INFO, fme_->voice_loglevel());
- EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
- EXPECT_EQ(rtc::LS_VERBOSE, fme_->video_loglevel());
- EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
-}
-
TEST_F(ChannelManagerTest, SetVideoRtxEnabled) {
std::vector<VideoCodec> codecs;
const VideoCodec rtx_codec(96, "rtx", 0, 0, 0, 0);
diff --git a/talk/session/media/mediasession.cc b/talk/session/media/mediasession.cc
index 7413026092..24f01b4463 100644
--- a/talk/session/media/mediasession.cc
+++ b/talk/session/media/mediasession.cc
@@ -50,6 +50,17 @@ static const uint32_t kMaxSctpSid = 1023;
namespace {
const char kInline[] = "inline:";
+
+void GetSupportedCryptoSuiteNames(void (*func)(std::vector<int>*),
+ std::vector<std::string>* names) {
+#ifdef HAVE_SRTP
+ std::vector<int> crypto_suites;
+ func(&crypto_suites);
+ for (const auto crypto : crypto_suites) {
+ names->push_back(rtc::SrtpCryptoSuiteToName(crypto));
+ }
+#endif
+}
}
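The adapter above lets each *Names wrapper reuse its integer-suite counterpart: the caller passes the suite-filling function and gets names back via rtc::SrtpCryptoSuiteToName. A usage sketch (the resulting strings depend on SrtpCryptoSuiteToName and HAVE_SRTP):
// Sketch: translate the audio suite list into its string names.
std::vector<std::string> names;
GetSupportedCryptoSuiteNames(GetSupportedAudioCryptoSuites, &names);
// With HAVE_SRTP defined, |names| holds the string forms of
// SRTP_AES128_CM_SHA1_32 and SRTP_AES128_CM_SHA1_80.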
namespace cricket {
@@ -152,30 +163,50 @@ bool FindMatchingCrypto(const CryptoParamsVec& cryptos,
}
// For audio, HMAC 32 is preferred because of its low overhead.
-void GetSupportedAudioCryptoSuites(
- std::vector<std::string>* crypto_suites) {
+void GetSupportedAudioCryptoSuites(std::vector<int>* crypto_suites) {
#ifdef HAVE_SRTP
- crypto_suites->push_back(rtc::CS_AES_CM_128_HMAC_SHA1_32);
- crypto_suites->push_back(rtc::CS_AES_CM_128_HMAC_SHA1_80);
+ crypto_suites->push_back(rtc::SRTP_AES128_CM_SHA1_32);
+ crypto_suites->push_back(rtc::SRTP_AES128_CM_SHA1_80);
#endif
}
-void GetSupportedVideoCryptoSuites(
- std::vector<std::string>* crypto_suites) {
- GetDefaultSrtpCryptoSuiteNames(crypto_suites);
+void GetSupportedAudioCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names) {
+ GetSupportedCryptoSuiteNames(GetSupportedAudioCryptoSuites,
+ crypto_suite_names);
+}
+
+void GetSupportedVideoCryptoSuites(std::vector<int>* crypto_suites) {
+ GetDefaultSrtpCryptoSuites(crypto_suites);
+}
+
+void GetSupportedVideoCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names) {
+ GetSupportedCryptoSuiteNames(GetSupportedVideoCryptoSuites,
+ crypto_suite_names);
}
-void GetSupportedDataCryptoSuites(
- std::vector<std::string>* crypto_suites) {
- GetDefaultSrtpCryptoSuiteNames(crypto_suites);
+void GetSupportedDataCryptoSuites(std::vector<int>* crypto_suites) {
+ GetDefaultSrtpCryptoSuites(crypto_suites);
}
-void GetDefaultSrtpCryptoSuiteNames(std::vector<std::string>* crypto_suites) {
+void GetSupportedDataCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names) {
+ GetSupportedCryptoSuiteNames(GetSupportedDataCryptoSuites,
+ crypto_suite_names);
+}
+
+void GetDefaultSrtpCryptoSuites(std::vector<int>* crypto_suites) {
#ifdef HAVE_SRTP
- crypto_suites->push_back(rtc::CS_AES_CM_128_HMAC_SHA1_80);
+ crypto_suites->push_back(rtc::SRTP_AES128_CM_SHA1_80);
#endif
}
+void GetDefaultSrtpCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names) {
+ GetSupportedCryptoSuiteNames(GetDefaultSrtpCryptoSuites, crypto_suite_names);
+}
+
// For video, support only 80-bit SHA1 HMAC. For audio, 32-bit HMAC is
// tolerated because of its low overhead, unless bundle is enabled. Pick the
// crypto in the list that is supported.
@@ -518,8 +549,8 @@ static bool AddStreamParams(
// Updates the transport infos of the |sdesc| according to the given
// |bundle_group|. The transport infos of the content names within the
-// |bundle_group| should be updated to use the ufrag and pwd of the first
-// content within the |bundle_group|.
+// |bundle_group| should be updated to use the ufrag, pwd and DTLS role of the
+// first content within the |bundle_group|.
static bool UpdateTransportInfoForBundle(const ContentGroup& bundle_group,
SessionDescription* sdesc) {
// The bundle should not be empty.
@@ -540,6 +571,8 @@ static bool UpdateTransportInfoForBundle(const ContentGroup& bundle_group,
selected_transport_info->description.ice_ufrag;
const std::string& selected_pwd =
selected_transport_info->description.ice_pwd;
+ ConnectionRole selected_connection_role =
+ selected_transport_info->description.connection_role;
for (TransportInfos::iterator it =
sdesc->transport_infos().begin();
it != sdesc->transport_infos().end(); ++it) {
@@ -547,6 +580,7 @@ static bool UpdateTransportInfoForBundle(const ContentGroup& bundle_group,
it->content_name != selected_content_name) {
it->description.ice_ufrag = selected_ufrag;
it->description.ice_pwd = selected_pwd;
+ it->description.connection_role = selected_connection_role;
}
}
return true;
@@ -602,6 +636,11 @@ static void PruneCryptos(const CryptoParamsVec& filter,
target_cryptos->end());
}
+static bool IsRtpProtocol(const std::string& protocol) {
+ return protocol.empty() ||
+ (protocol.find(cricket::kMediaProtocolRtpPrefix) != std::string::npos);
+}
+
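For concreteness, the expected behavior of IsRtpProtocol on typical protocol strings, assuming kMediaProtocolRtpPrefix is "RTP/":
// IsRtpProtocol("")                  -> true  (empty defaults to RTP)
// IsRtpProtocol("RTP/SAVPF")         -> true
// IsRtpProtocol("UDP/TLS/RTP/SAVPF") -> true
// IsRtpProtocol("DTLS/SCTP")         -> false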
static bool IsRtpContent(SessionDescription* sdesc,
const std::string& content_name) {
bool is_rtp = false;
@@ -612,9 +651,7 @@ static bool IsRtpContent(SessionDescription* sdesc,
if (!media_desc) {
return false;
}
- is_rtp = media_desc->protocol().empty() ||
- (media_desc->protocol().find(cricket::kMediaProtocolRtpPrefix) !=
- std::string::npos);
+ is_rtp = IsRtpProtocol(media_desc->protocol());
}
return is_rtp;
}
@@ -726,6 +763,11 @@ static bool CreateMediaContentOffer(
offer->set_crypto_required(CT_SDES);
}
offer->set_rtcp_mux(options.rtcp_mux_enabled);
+ // TODO(deadbeef): Once we're sure this works correctly, enable it in
+ // CreateOffer.
+ // if (offer->type() == cricket::MEDIA_TYPE_VIDEO) {
+ // offer->set_rtcp_reduced_size(true);
+ // }
offer->set_multistream(options.is_muc);
offer->set_rtp_header_extensions(rtp_extensions);
@@ -1004,6 +1046,11 @@ static bool CreateMediaContentAnswer(
answer->set_rtp_header_extensions(negotiated_rtp_extensions);
answer->set_rtcp_mux(options.rtcp_mux_enabled && offer->rtcp_mux());
+ // TODO(deadbeef): Once we're sure this works correctly, enable it in
+ // CreateAnswer.
+ // if (answer->type() == cricket::MEDIA_TYPE_VIDEO) {
+ // answer->set_rtcp_reduced_size(offer->rtcp_reduced_size());
+ // }
if (sdes_policy != SEC_DISABLED) {
CryptoParams crypto;
@@ -1036,12 +1083,16 @@ static bool CreateMediaContentAnswer(
answer->set_direction(MD_RECVONLY);
break;
case MD_RECVONLY:
- answer->set_direction(answer->streams().empty() ? MD_INACTIVE
- : MD_SENDONLY);
+ answer->set_direction(IsRtpProtocol(answer->protocol()) &&
+ answer->streams().empty()
+ ? MD_INACTIVE
+ : MD_SENDONLY);
break;
case MD_SENDRECV:
- answer->set_direction(answer->streams().empty() ? MD_RECVONLY
- : MD_SENDRECV);
+ answer->set_direction(IsRtpProtocol(answer->protocol()) &&
+ answer->streams().empty()
+ ? MD_RECVONLY
+ : MD_SENDRECV);
break;
default:
RTC_DCHECK(false && "MediaContentDescription has unexpected direction.");
@@ -1508,13 +1559,18 @@ bool MediaSessionDescriptionFactory::AddAudioContentForOffer(
const AudioCodecs& audio_codecs,
StreamParamsVec* current_streams,
SessionDescription* desc) const {
+ const ContentInfo* current_audio_content =
+ GetFirstAudioContent(current_description);
+ std::string content_name =
+ current_audio_content ? current_audio_content->name : CN_AUDIO;
+
cricket::SecurePolicy sdes_policy =
- IsDtlsActive(CN_AUDIO, current_description) ?
- cricket::SEC_DISABLED : secure();
+ IsDtlsActive(content_name, current_description) ? cricket::SEC_DISABLED
+ : secure();
scoped_ptr<AudioContentDescription> audio(new AudioContentDescription());
std::vector<std::string> crypto_suites;
- GetSupportedAudioCryptoSuites(&crypto_suites);
+ GetSupportedAudioCryptoSuiteNames(&crypto_suites);
if (!CreateMediaContentOffer(
options,
audio_codecs,
@@ -1546,8 +1602,8 @@ bool MediaSessionDescriptionFactory::AddAudioContentForOffer(
}
}
- desc->AddContent(CN_AUDIO, NS_JINGLE_RTP, audio.release());
- if (!AddTransportOffer(CN_AUDIO, options.transport_options,
+ desc->AddContent(content_name, NS_JINGLE_RTP, audio.release());
+ if (!AddTransportOffer(content_name, options.audio_transport_options,
current_description, desc)) {
return false;
}
@@ -1562,13 +1618,18 @@ bool MediaSessionDescriptionFactory::AddVideoContentForOffer(
const VideoCodecs& video_codecs,
StreamParamsVec* current_streams,
SessionDescription* desc) const {
+ const ContentInfo* current_video_content =
+ GetFirstVideoContent(current_description);
+ std::string content_name =
+ current_video_content ? current_video_content->name : CN_VIDEO;
+
cricket::SecurePolicy sdes_policy =
- IsDtlsActive(CN_VIDEO, current_description) ?
- cricket::SEC_DISABLED : secure();
+ IsDtlsActive(content_name, current_description) ? cricket::SEC_DISABLED
+ : secure();
scoped_ptr<VideoContentDescription> video(new VideoContentDescription());
std::vector<std::string> crypto_suites;
- GetSupportedVideoCryptoSuites(&crypto_suites);
+ GetSupportedVideoCryptoSuiteNames(&crypto_suites);
if (!CreateMediaContentOffer(
options,
video_codecs,
@@ -1601,8 +1662,8 @@ bool MediaSessionDescriptionFactory::AddVideoContentForOffer(
}
}
- desc->AddContent(CN_VIDEO, NS_JINGLE_RTP, video.release());
- if (!AddTransportOffer(CN_VIDEO, options.transport_options,
+ desc->AddContent(content_name, NS_JINGLE_RTP, video.release());
+ if (!AddTransportOffer(content_name, options.video_transport_options,
current_description, desc)) {
return false;
}
@@ -1623,9 +1684,14 @@ bool MediaSessionDescriptionFactory::AddDataContentForOffer(
FilterDataCodecs(data_codecs, is_sctp);
+ const ContentInfo* current_data_content =
+ GetFirstDataContent(current_description);
+ std::string content_name =
+ current_data_content ? current_data_content->name : CN_DATA;
+
cricket::SecurePolicy sdes_policy =
- IsDtlsActive(CN_DATA, current_description) ?
- cricket::SEC_DISABLED : secure();
+ IsDtlsActive(content_name, current_description) ? cricket::SEC_DISABLED
+ : secure();
std::vector<std::string> crypto_suites;
if (is_sctp) {
// SDES doesn't make sense for SCTP, so we disable it, and we only
@@ -1638,7 +1704,7 @@ bool MediaSessionDescriptionFactory::AddDataContentForOffer(
data->set_protocol(
secure_transport ? kMediaProtocolDtlsSctp : kMediaProtocolSctp);
} else {
- GetSupportedDataCryptoSuites(&crypto_suites);
+ GetSupportedDataCryptoSuiteNames(&crypto_suites);
}
if (!CreateMediaContentOffer(
@@ -1655,13 +1721,13 @@ bool MediaSessionDescriptionFactory::AddDataContentForOffer(
}
if (is_sctp) {
- desc->AddContent(CN_DATA, NS_JINGLE_DRAFT_SCTP, data.release());
+ desc->AddContent(content_name, NS_JINGLE_DRAFT_SCTP, data.release());
} else {
data->set_bandwidth(options.data_bandwidth);
SetMediaProtocol(secure_transport, data.get());
- desc->AddContent(CN_DATA, NS_JINGLE_RTP, data.release());
+ desc->AddContent(content_name, NS_JINGLE_RTP, data.release());
}
- if (!AddTransportOffer(CN_DATA, options.transport_options,
+ if (!AddTransportOffer(content_name, options.data_transport_options,
current_description, desc)) {
return false;
}
@@ -1676,10 +1742,9 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer(
SessionDescription* answer) const {
const ContentInfo* audio_content = GetFirstAudioContent(offer);
- scoped_ptr<TransportDescription> audio_transport(
- CreateTransportAnswer(audio_content->name, offer,
- options.transport_options,
- current_description));
+ scoped_ptr<TransportDescription> audio_transport(CreateTransportAnswer(
+ audio_content->name, offer, options.audio_transport_options,
+ current_description));
if (!audio_transport) {
return false;
}
@@ -1735,10 +1800,9 @@ bool MediaSessionDescriptionFactory::AddVideoContentForAnswer(
StreamParamsVec* current_streams,
SessionDescription* answer) const {
const ContentInfo* video_content = GetFirstVideoContent(offer);
- scoped_ptr<TransportDescription> video_transport(
- CreateTransportAnswer(video_content->name, offer,
- options.transport_options,
- current_description));
+ scoped_ptr<TransportDescription> video_transport(CreateTransportAnswer(
+ video_content->name, offer, options.video_transport_options,
+ current_description));
if (!video_transport) {
return false;
}
@@ -1791,10 +1855,9 @@ bool MediaSessionDescriptionFactory::AddDataContentForAnswer(
StreamParamsVec* current_streams,
SessionDescription* answer) const {
const ContentInfo* data_content = GetFirstDataContent(offer);
- scoped_ptr<TransportDescription> data_transport(
- CreateTransportAnswer(data_content->name, offer,
- options.transport_options,
- current_description));
+ scoped_ptr<TransportDescription> data_transport(CreateTransportAnswer(
+ data_content->name, offer, options.data_transport_options,
+ current_description));
if (!data_transport) {
return false;
}
diff --git a/talk/session/media/mediasession.h b/talk/session/media/mediasession.h
index e92628e711..1540274665 100644
--- a/talk/session/media/mediasession.h
+++ b/talk/session/media/mediasession.h
@@ -134,6 +134,10 @@ struct MediaSessionOptions {
bool HasSendMediaStream(MediaType type) const;
+ // TODO(deadbeef): Put all the audio/video/data-specific options into a map
+ // structure (content name -> options).
+ // MediaSessionDescriptionFactory assumes there will never be more than one
+ // audio/video/data content, but this will change with unified plan.
bool recv_audio;
bool recv_video;
DataChannelType data_channel_type;
@@ -144,7 +148,9 @@ struct MediaSessionOptions {
// bps. -1 == auto.
int video_bandwidth;
int data_bandwidth;
- TransportOptions transport_options;
+ TransportOptions audio_transport_options;
+ TransportOptions video_transport_options;
+ TransportOptions data_transport_options;
struct Stream {
Stream(MediaType type,
@@ -167,17 +173,7 @@ struct MediaSessionOptions {
// "content" (as used in XEP-0166) descriptions for voice and video.
class MediaContentDescription : public ContentDescription {
public:
- MediaContentDescription()
- : rtcp_mux_(false),
- bandwidth_(kAutoBandwidth),
- crypto_required_(CT_NONE),
- rtp_header_extensions_set_(false),
- multistream_(false),
- conference_mode_(false),
- partial_(false),
- buffered_mode_latency_(kBufferedModeDisabled),
- direction_(MD_SENDRECV) {
- }
+ MediaContentDescription() {}
virtual MediaType type() const = 0;
virtual bool has_codecs() const = 0;
@@ -195,6 +191,11 @@ class MediaContentDescription : public ContentDescription {
bool rtcp_mux() const { return rtcp_mux_; }
void set_rtcp_mux(bool mux) { rtcp_mux_ = mux; }
+ bool rtcp_reduced_size() const { return rtcp_reduced_size_; }
+ void set_rtcp_reduced_size(bool reduced_size) {
+ rtcp_reduced_size_ = reduced_size;
+ }
+
int bandwidth() const { return bandwidth_; }
void set_bandwidth(int bandwidth) { bandwidth_ = bandwidth; }
@@ -291,19 +292,20 @@ class MediaContentDescription : public ContentDescription {
int buffered_mode_latency() const { return buffered_mode_latency_; }
protected:
- bool rtcp_mux_;
- int bandwidth_;
+ bool rtcp_mux_ = false;
+ bool rtcp_reduced_size_ = false;
+ int bandwidth_ = kAutoBandwidth;
std::string protocol_;
std::vector<CryptoParams> cryptos_;
- CryptoType crypto_required_;
+ CryptoType crypto_required_ = CT_NONE;
std::vector<RtpHeaderExtension> rtp_header_extensions_;
- bool rtp_header_extensions_set_;
- bool multistream_;
+ bool rtp_header_extensions_set_ = false;
+ bool multistream_ = false;
StreamParamsVec streams_;
- bool conference_mode_;
- bool partial_;
- int buffered_mode_latency_;
- MediaContentDirection direction_;
+ bool conference_mode_ = false;
+ bool partial_ = false;
+ int buffered_mode_latency_ = kBufferedModeDisabled;
+ MediaContentDirection direction_ = MD_SENDRECV;
};
template <class C>
@@ -547,10 +549,19 @@ const VideoContentDescription* GetFirstVideoContentDescription(
const DataContentDescription* GetFirstDataContentDescription(
const SessionDescription* sdesc);
-void GetSupportedAudioCryptoSuites(std::vector<std::string>* crypto_suites);
-void GetSupportedVideoCryptoSuites(std::vector<std::string>* crypto_suites);
-void GetSupportedDataCryptoSuites(std::vector<std::string>* crypto_suites);
-void GetDefaultSrtpCryptoSuiteNames(std::vector<std::string>* crypto_suites);
+void GetSupportedAudioCryptoSuites(std::vector<int>* crypto_suites);
+void GetSupportedVideoCryptoSuites(std::vector<int>* crypto_suites);
+void GetSupportedDataCryptoSuites(std::vector<int>* crypto_suites);
+void GetDefaultSrtpCryptoSuites(std::vector<int>* crypto_suites);
+void GetSupportedAudioCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names);
+void GetSupportedVideoCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names);
+void GetSupportedDataCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names);
+void GetDefaultSrtpCryptoSuiteNames(
+ std::vector<std::string>* crypto_suite_names);
+
} // namespace cricket
#endif // TALK_SESSION_MEDIA_MEDIASESSION_H_
diff --git a/talk/session/media/mediasession_unittest.cc b/talk/session/media/mediasession_unittest.cc
index 72aefc884c..20b72e9394 100644
--- a/talk/session/media/mediasession_unittest.cc
+++ b/talk/session/media/mediasession_unittest.cc
@@ -69,6 +69,9 @@ using cricket::CryptoParamsVec;
using cricket::AudioContentDescription;
using cricket::VideoContentDescription;
using cricket::DataContentDescription;
+using cricket::GetFirstAudioContent;
+using cricket::GetFirstVideoContent;
+using cricket::GetFirstDataContent;
using cricket::GetFirstAudioContentDescription;
using cricket::GetFirstVideoContentDescription;
using cricket::GetFirstDataContentDescription;
@@ -235,11 +238,9 @@ class MediaSessionDescriptionFactoryTest : public testing::Test {
f2_.set_video_codecs(MAKE_VECTOR(kVideoCodecs2));
f2_.set_data_codecs(MAKE_VECTOR(kDataCodecs2));
tdf1_.set_certificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(
- new rtc::FakeSSLIdentity("id1")).Pass()));
+ rtc::scoped_ptr<rtc::SSLIdentity>(new rtc::FakeSSLIdentity("id1"))));
tdf2_.set_certificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(
- new rtc::FakeSSLIdentity("id2")).Pass()));
+ rtc::scoped_ptr<rtc::SSLIdentity>(new rtc::FakeSSLIdentity("id2"))));
}
// Create a video StreamParamsVec object with:
@@ -607,6 +608,7 @@ TEST_F(MediaSessionDescriptionFactoryTest,
ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), dcd->protocol());
}
+
// Create a RTP data offer, and ensure it matches what we expect.
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateRtpDataOffer) {
MediaSessionOptions opts;
@@ -2313,3 +2315,30 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestVADEnableOption) {
audio_content = answer->GetContentByName("audio");
EXPECT_TRUE(VerifyNoCNCodecs(audio_content));
}
+
+// Test that the content name ("mid" in SDP) is unchanged when creating a
+// new offer.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ TestContentNameNotChangedInSubsequentOffers) {
+ MediaSessionOptions opts;
+ opts.recv_audio = true;
+ opts.recv_video = true;
+ opts.data_channel_type = cricket::DCT_SCTP;
+ // Create offer and modify the default content names.
+ rtc::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, nullptr));
+ for (ContentInfo& content : offer->contents()) {
+ content.name.append("_modified");
+ }
+
+ rtc::scoped_ptr<SessionDescription> updated_offer(
+ f1_.CreateOffer(opts, offer.get()));
+ const ContentInfo* audio_content = GetFirstAudioContent(updated_offer.get());
+ const ContentInfo* video_content = GetFirstVideoContent(updated_offer.get());
+ const ContentInfo* data_content = GetFirstDataContent(updated_offer.get());
+ ASSERT_TRUE(audio_content != nullptr);
+ ASSERT_TRUE(video_content != nullptr);
+ ASSERT_TRUE(data_content != nullptr);
+ EXPECT_EQ("audio_modified", audio_content->name);
+ EXPECT_EQ("video_modified", video_content->name);
+ EXPECT_EQ("data_modified", data_content->name);
+}
diff --git a/talk/session/media/srtpfilter.cc b/talk/session/media/srtpfilter.cc
index 079ddfb57e..a200a3c4c2 100644
--- a/talk/session/media/srtpfilter.cc
+++ b/talk/session/media/srtpfilter.cc
@@ -146,10 +146,10 @@ bool SrtpFilter::SetProvisionalAnswer(
return DoSetAnswer(answer_params, source, false);
}
-bool SrtpFilter::SetRtpParams(const std::string& send_cs,
+bool SrtpFilter::SetRtpParams(int send_cs,
const uint8_t* send_key,
int send_key_len,
- const std::string& recv_cs,
+ int recv_cs,
const uint8_t* recv_key,
int recv_key_len) {
if (IsActive()) {
@@ -179,10 +179,10 @@ bool SrtpFilter::SetRtpParams(const std::string& send_cs,
// SrtpSession.
// - In the muxed case, they are keyed with the same keys, so
// this function is not needed
-bool SrtpFilter::SetRtcpParams(const std::string& send_cs,
+bool SrtpFilter::SetRtcpParams(int send_cs,
const uint8_t* send_key,
int send_key_len,
- const std::string& recv_cs,
+ int recv_cs,
const uint8_t* recv_key,
int recv_key_len) {
// This can only be called once, but can be safely called after
@@ -428,10 +428,12 @@ bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
ParseKeyParams(recv_params.key_params, recv_key, sizeof(recv_key)));
if (ret) {
CreateSrtpSessions();
- ret = (send_session_->SetSend(send_params.cipher_suite,
- send_key, sizeof(send_key)) &&
- recv_session_->SetRecv(recv_params.cipher_suite,
- recv_key, sizeof(recv_key)));
+ ret = (send_session_->SetSend(
+ rtc::SrtpCryptoSuiteFromName(send_params.cipher_suite), send_key,
+ sizeof(send_key)) &&
+ recv_session_->SetRecv(
+ rtc::SrtpCryptoSuiteFromName(recv_params.cipher_suite), recv_key,
+ sizeof(recv_key)));
}
if (ret) {
LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
@@ -448,6 +450,10 @@ bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
bool SrtpFilter::ResetParams() {
offer_params_.clear();
state_ = ST_INIT;
+ send_session_ = nullptr;
+ recv_session_ = nullptr;
+ send_rtcp_session_ = nullptr;
+ recv_rtcp_session_ = nullptr;
LOG(LS_INFO) << "SRTP reset to init state";
return true;
}
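
Nulling the four sessions is what makes ResetParams() safe for reuse: SetKey() below refuses to key a session that has already been created, so the old SrtpSession objects must be destroyed before new parameters can be applied. A hedged usage fragment, assuming a previously activated filter:

    // Fragment only; `filter` is a cricket::SrtpFilter that was activated
    // earlier via offer/answer negotiation or SetRtpParams().
    filter.ResetParams();  // Sessions destroyed, state back to ST_INIT.
    // A fresh offer/answer exchange (or SetRtpParams() call) can now key
    // new sessions where it would previously have failed with
    // "SRTP session already created".
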
@@ -507,11 +513,11 @@ SrtpSession::~SrtpSession() {
}
}
-bool SrtpSession::SetSend(const std::string& cs, const uint8_t* key, int len) {
+bool SrtpSession::SetSend(int cs, const uint8_t* key, int len) {
return SetKey(ssrc_any_outbound, cs, key, len);
}
-bool SrtpSession::SetRecv(const std::string& cs, const uint8_t* key, int len) {
+bool SrtpSession::SetRecv(int cs, const uint8_t* key, int len) {
return SetKey(ssrc_any_inbound, cs, key, len);
}
@@ -658,10 +664,7 @@ void SrtpSession::set_signal_silent_time(uint32_t signal_silent_time_in_ms) {
srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
}
-bool SrtpSession::SetKey(int type,
- const std::string& cs,
- const uint8_t* key,
- int len) {
+bool SrtpSession::SetKey(int type, int cs, const uint8_t* key, int len) {
if (session_) {
LOG(LS_ERROR) << "Failed to create SRTP session: "
<< "SRTP session already created";
@@ -675,15 +678,15 @@ bool SrtpSession::SetKey(int type,
srtp_policy_t policy;
memset(&policy, 0, sizeof(policy));
- if (cs == rtc::CS_AES_CM_128_HMAC_SHA1_80) {
+ if (cs == rtc::SRTP_AES128_CM_SHA1_80) {
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
- } else if (cs == rtc::CS_AES_CM_128_HMAC_SHA1_32) {
+ } else if (cs == rtc::SRTP_AES128_CM_SHA1_32) {
crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp); // rtp is 32,
crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); // rtcp still 80
} else {
LOG(LS_WARNING) << "Failed to create SRTP session: unsupported"
- << " cipher_suite " << cs.c_str();
+ << " cipher_suite " << cs;
return false;
}
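
For reference, the name-to-suite mapping that ApplyParams() now routes through. The real rtc::SrtpCryptoSuiteFromName() lives in rtc_base; this sketch covers only the two suites handled above, and the sentinel returned for unknown names is an assumption:

    #include <string>
    #include "webrtc/base/sslstreamadapter.h"

    // Hedged sketch; not the actual rtc_base implementation.
    int SrtpCryptoSuiteFromNameSketch(const std::string& cs) {
      if (cs == rtc::CS_AES_CM_128_HMAC_SHA1_80)
        return rtc::SRTP_AES128_CM_SHA1_80;
      if (cs == rtc::CS_AES_CM_128_HMAC_SHA1_32)
        return rtc::SRTP_AES128_CM_SHA1_32;
      return rtc::SRTP_INVALID_CRYPTO_SUITE;  // Assumed "unknown" sentinel.
    }
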
diff --git a/talk/session/media/srtpfilter.h b/talk/session/media/srtpfilter.h
index 3c3a8e848b..6b941f32fd 100644
--- a/talk/session/media/srtpfilter.h
+++ b/talk/session/media/srtpfilter.h
@@ -104,16 +104,16 @@ class SrtpFilter {
// Just set up both sets of keys directly.
// Used with DTLS-SRTP.
- bool SetRtpParams(const std::string& send_cs,
+ bool SetRtpParams(int send_cs,
const uint8_t* send_key,
int send_key_len,
- const std::string& recv_cs,
+ int recv_cs,
const uint8_t* recv_key,
int recv_key_len);
- bool SetRtcpParams(const std::string& send_cs,
+ bool SetRtcpParams(int send_cs,
const uint8_t* send_key,
int send_key_len,
- const std::string& recv_cs,
+ int recv_cs,
const uint8_t* recv_key,
int recv_key_len);
@@ -138,6 +138,8 @@ class SrtpFilter {
// Update the silent threshold (in ms) for signaling errors.
void set_signal_silent_time(uint32_t signal_silent_time_in_ms);
+ bool ResetParams();
+
sigslot::repeater3<uint32_t, Mode, Error> SignalSrtpError;
protected:
@@ -153,7 +155,6 @@ class SrtpFilter {
CryptoParams* selected_params);
bool ApplyParams(const CryptoParams& send_params,
const CryptoParams& recv_params);
- bool ResetParams();
static bool ParseKeyParams(const std::string& params, uint8_t* key, int len);
private:
@@ -199,10 +200,10 @@ class SrtpSession {
// Configures the session for sending data using the specified
// cipher-suite and key. Receiving must be done by a separate session.
- bool SetSend(const std::string& cs, const uint8_t* key, int len);
+ bool SetSend(int cs, const uint8_t* key, int len);
// Configures the session for receiving data using the specified
// cipher-suite and key. Sending must be done by a separate session.
- bool SetRecv(const std::string& cs, const uint8_t* key, int len);
+ bool SetRecv(int cs, const uint8_t* key, int len);
// Encrypts/signs an individual RTP/RTCP packet, in-place.
// If an HMAC is used, this will increase the packet size.
@@ -232,7 +233,7 @@ class SrtpSession {
SignalSrtpError;
private:
- bool SetKey(int type, const std::string& cs, const uint8_t* key, int len);
+ bool SetKey(int type, int cs, const uint8_t* key, int len);
// Returns send stream current packet index from srtp db.
bool GetSendStreamPacketIndex(void* data, int in_len, int64_t* index);
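
With these signatures, a DTLS-SRTP caller passes the negotiated suite straight through as an int instead of round-tripping via a cipher-suite name. A minimal usage sketch follows; the 30-byte master key length (16-byte key plus 14-byte salt) and the zeroed key buffers are placeholders, not real keying material:

    #include <stdint.h>
    #include "talk/session/media/srtpfilter.h"

    // Sketch only: direct keying with the new int-based API.
    bool KeyFilterDirectly(cricket::SrtpFilter* filter) {
      static const int kMasterKeyLen = 30;  // Assumed: 16-byte key + 14 salt.
      uint8_t send_key[kMasterKeyLen] = {0};  // Placeholder; real code uses
      uint8_t recv_key[kMasterKeyLen] = {0};  // DTLS-exported key material.
      return filter->SetRtpParams(rtc::SRTP_AES128_CM_SHA1_80, send_key,
                                  sizeof(send_key), rtc::SRTP_AES128_CM_SHA1_80,
                                  recv_key, sizeof(recv_key));
    }
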
diff --git a/talk/session/media/srtpfilter_unittest.cc b/talk/session/media/srtpfilter_unittest.cc
index 8122455205..11874380e2 100644
--- a/talk/session/media/srtpfilter_unittest.cc
+++ b/talk/session/media/srtpfilter_unittest.cc
@@ -508,21 +508,17 @@ TEST_F(SrtpFilterTest, TestDisableEncryption) {
// Test directly setting the params with AES_CM_128_HMAC_SHA1_80
TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_80) {
- EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_TRUE(f1_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey2, kTestKeyLen));
- EXPECT_TRUE(f2_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey2, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_TRUE(f2_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey2,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey1, kTestKeyLen));
- EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_TRUE(f1_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey2, kTestKeyLen));
- EXPECT_TRUE(f2_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey2, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_TRUE(f2_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey2,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey1, kTestKeyLen));
EXPECT_TRUE(f1_.IsActive());
EXPECT_TRUE(f2_.IsActive());
@@ -531,21 +527,17 @@ TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_80) {
// Test directly setting the params with AES_CM_128_HMAC_SHA1_32
TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_32) {
- EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f1_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey2, kTestKeyLen));
- EXPECT_TRUE(f2_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey2, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f2_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey2,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey1, kTestKeyLen));
- EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f1_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey2, kTestKeyLen));
- EXPECT_TRUE(f2_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey2, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f2_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey2,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey1, kTestKeyLen));
EXPECT_TRUE(f1_.IsActive());
EXPECT_TRUE(f2_.IsActive());
@@ -554,25 +546,21 @@ TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_32) {
// Test directly setting the params with too-short keys.
TEST_F(SrtpFilterTest, TestSetParamsKeyTooShort) {
- EXPECT_FALSE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey1, kTestKeyLen - 1,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_FALSE(f1_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1,
+ kTestKeyLen - 1, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey1, kTestKeyLen - 1));
- EXPECT_FALSE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
- kTestKey1, kTestKeyLen - 1,
- CS_AES_CM_128_HMAC_SHA1_80,
+ EXPECT_FALSE(f1_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1,
+ kTestKeyLen - 1, rtc::SRTP_AES128_CM_SHA1_80,
kTestKey1, kTestKeyLen - 1));
}
#if defined(ENABLE_EXTERNAL_AUTH)
TEST_F(SrtpFilterTest, TestGetSendAuthParams) {
- EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f1_.SetRtpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey2, kTestKeyLen));
- EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
- kTestKey1, kTestKeyLen,
- CS_AES_CM_128_HMAC_SHA1_32,
+ EXPECT_TRUE(f1_.SetRtcpParams(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1,
+ kTestKeyLen, rtc::SRTP_AES128_CM_SHA1_32,
kTestKey2, kTestKeyLen));
uint8_t* auth_key = NULL;
int auth_key_len = 0, auth_tag_len = 0;
@@ -629,28 +617,30 @@ class SrtpSessionTest : public testing::Test {
// Test that we can set up the session and keys properly.
TEST_F(SrtpSessionTest, TestGoodSetup) {
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
}
// Test that we can't change the keys once set.
TEST_F(SrtpSessionTest, TestBadSetup) {
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_FALSE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey2, kTestKeyLen));
- EXPECT_FALSE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey2, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_FALSE(
+ s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey2, kTestKeyLen));
+ EXPECT_FALSE(
+ s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey2, kTestKeyLen));
}
// Test that we reject keys of the wrong length.
TEST_F(SrtpSessionTest, TestKeysTooShort) {
- EXPECT_FALSE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, 1));
- EXPECT_FALSE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, 1));
+ EXPECT_FALSE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, 1));
+ EXPECT_FALSE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, 1));
}
// Test that we can encrypt and decrypt RTP/RTCP using AES_CM_128_HMAC_SHA1_80.
TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_80) {
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_80);
TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_80);
TestUnprotectRtp(CS_AES_CM_128_HMAC_SHA1_80);
@@ -659,8 +649,8 @@ TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_80) {
// Test that we can encrypt and decrypt RTP/RTCP using AES_CM_128_HMAC_SHA1_32.
TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1, kTestKeyLen));
TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_32);
TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_32);
TestUnprotectRtp(CS_AES_CM_128_HMAC_SHA1_32);
@@ -668,7 +658,7 @@ TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
}
TEST_F(SrtpSessionTest, TestGetSendStreamPacketIndex) {
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_32, kTestKey1, kTestKeyLen));
int64_t index;
int out_len = 0;
EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
@@ -681,8 +671,8 @@ TEST_F(SrtpSessionTest, TestGetSendStreamPacketIndex) {
// Test that we fail to unprotect if someone tampers with the RTP/RTCP payloads.
TEST_F(SrtpSessionTest, TestTamperReject) {
int out_len;
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_80);
TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_80);
rtp_packet_[0] = 0x12;
@@ -694,8 +684,8 @@ TEST_F(SrtpSessionTest, TestTamperReject) {
// Test that we fail to unprotect if the payloads are not authenticated.
TEST_F(SrtpSessionTest, TestUnencryptReject) {
int out_len;
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
EXPECT_FALSE(s2_.UnprotectRtp(rtp_packet_, rtp_len_, &out_len));
EXPECT_FALSE(s2_.UnprotectRtcp(rtcp_packet_, rtcp_len_, &out_len));
}
@@ -703,7 +693,7 @@ TEST_F(SrtpSessionTest, TestUnencryptReject) {
// Test that we fail when using buffers that are too small.
TEST_F(SrtpSessionTest, TestBuffersTooSmall) {
int out_len;
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
EXPECT_FALSE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
sizeof(rtp_packet_) - 10, &out_len));
EXPECT_FALSE(s1_.ProtectRtcp(rtcp_packet_, rtcp_len_,
@@ -717,8 +707,8 @@ TEST_F(SrtpSessionTest, TestReplay) {
static const uint16_t replay_window = 1024;
int out_len;
- EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
- EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s1_.SetSend(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
+ EXPECT_TRUE(s2_.SetRecv(rtc::SRTP_AES128_CM_SHA1_80, kTestKey1, kTestKeyLen));
// Initial sequence number.
rtc::SetBE16(reinterpret_cast<uint8_t*>(rtp_packet_) + 2, seqnum_big);