Diffstat (limited to 'talk/app/webrtc')
-rw-r--r-- talk/app/webrtc/OWNERS | 6
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java | 29
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java | 180
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java | 152
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java | 6
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java | 117
-rw-r--r-- talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java | 214
-rw-r--r-- talk/app/webrtc/androidvideocapturer.cc | 22
-rw-r--r-- talk/app/webrtc/androidvideocapturer.h | 2
-rw-r--r-- talk/app/webrtc/audiotrack.cc | 77
-rw-r--r-- talk/app/webrtc/audiotrack.h | 43
-rw-r--r-- talk/app/webrtc/dtlsidentitystore.cc | 16
-rw-r--r-- talk/app/webrtc/dtlsidentitystore.h | 3
-rw-r--r-- talk/app/webrtc/fakeportallocatorfactory.h | 76
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java | 3
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java | 75
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java | 13
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/EglBase.java | 288
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/EglBase10.java | 299
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/EglBase14.java | 254
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java | 146
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java | 22
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/RendererCommon.java | 74
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java | 283
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java | 281
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java | 51
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java | 395
-rw-r--r-- talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java | 81
-rw-r--r-- talk/app/webrtc/java/jni/androidmediacodeccommon.h | 2
-rw-r--r-- talk/app/webrtc/java/jni/androidmediadecoder_jni.cc | 303
-rw-r--r-- talk/app/webrtc/java/jni/androidmediaencoder_jni.cc | 473
-rw-r--r-- talk/app/webrtc/java/jni/androidmediaencoder_jni.h | 3
-rw-r--r-- talk/app/webrtc/java/jni/androidvideocapturer_jni.cc | 100
-rw-r--r-- talk/app/webrtc/java/jni/androidvideocapturer_jni.h | 18
-rw-r--r-- talk/app/webrtc/java/jni/classreferenceholder.cc | 5
-rw-r--r-- talk/app/webrtc/java/jni/jni_helpers.cc | 25
-rw-r--r-- talk/app/webrtc/java/jni/jni_onload.cc | 55
-rw-r--r-- talk/app/webrtc/java/jni/native_handle_impl.cc | 163
-rw-r--r-- talk/app/webrtc/java/jni/native_handle_impl.h | 52
-rw-r--r-- talk/app/webrtc/java/jni/peerconnection_jni.cc | 169
-rw-r--r-- talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc | 31
-rw-r--r-- talk/app/webrtc/java/jni/surfacetexturehelper_jni.h | 18
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java | 368
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java | 221
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/PeerConnection.java | 13
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java | 61
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/RtpSender.java | 25
-rw-r--r-- talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java | 23
-rw-r--r-- talk/app/webrtc/jsepsessiondescription.cc | 3
-rw-r--r-- talk/app/webrtc/localaudiosource.cc | 4
-rw-r--r-- talk/app/webrtc/localaudiosource.h | 15
-rw-r--r-- talk/app/webrtc/localaudiosource_unittest.cc | 40
-rw-r--r-- talk/app/webrtc/mediacontroller.cc | 14
-rw-r--r-- talk/app/webrtc/mediastream_unittest.cc | 22
-rw-r--r-- talk/app/webrtc/mediastreaminterface.h | 43
-rw-r--r-- talk/app/webrtc/mediastreamobserver.cc | 101
-rw-r--r-- talk/app/webrtc/mediastreamobserver.h | 65
-rw-r--r-- talk/app/webrtc/mediastreamprovider.h | 19
-rw-r--r-- talk/app/webrtc/objc/README | 54
-rw-r--r-- talk/app/webrtc/objc/RTCFileLogger.mm | 41
-rw-r--r-- talk/app/webrtc/objc/RTCPeerConnection.mm | 6
-rw-r--r-- talk/app/webrtc/objc/RTCPeerConnectionInterface.mm | 10
-rw-r--r-- talk/app/webrtc/objc/avfoundationvideocapturer.h | 1
-rw-r--r-- talk/app/webrtc/objc/avfoundationvideocapturer.mm | 26
-rw-r--r-- talk/app/webrtc/objc/public/RTCFileLogger.h | 23
-rw-r--r-- talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h | 4
-rw-r--r-- talk/app/webrtc/peerconnection.cc | 645
-rw-r--r-- talk/app/webrtc/peerconnection.h | 83
-rw-r--r-- talk/app/webrtc/peerconnection_unittest.cc | 714
-rw-r--r-- talk/app/webrtc/peerconnectionendtoend_unittest.cc | 92
-rw-r--r-- talk/app/webrtc/peerconnectionfactory.cc | 50
-rw-r--r-- talk/app/webrtc/peerconnectionfactory.h | 22
-rw-r--r-- talk/app/webrtc/peerconnectionfactory_unittest.cc | 300
-rw-r--r-- talk/app/webrtc/peerconnectionfactoryproxy.h | 13
-rw-r--r-- talk/app/webrtc/peerconnectioninterface.h | 116
-rw-r--r-- talk/app/webrtc/peerconnectioninterface_unittest.cc | 175
-rw-r--r-- talk/app/webrtc/peerconnectionproxy.h | 4
-rw-r--r-- talk/app/webrtc/portallocatorfactory.cc | 68
-rw-r--r-- talk/app/webrtc/portallocatorfactory.h | 43
-rw-r--r-- talk/app/webrtc/remoteaudiosource.cc | 132
-rw-r--r-- talk/app/webrtc/remoteaudiosource.h | 42
-rw-r--r-- talk/app/webrtc/remoteaudiotrack.cc (renamed from talk/app/webrtc/mediastreamsignaling.h) | 4
-rw-r--r-- talk/app/webrtc/remoteaudiotrack.h (renamed from talk/app/webrtc/mediastreamsignaling.cc) | 6
-rw-r--r-- talk/app/webrtc/rtpreceiver.cc | 2
-rw-r--r-- talk/app/webrtc/rtpreceiver.h | 8
-rw-r--r-- talk/app/webrtc/rtpsender.cc | 211
-rw-r--r-- talk/app/webrtc/rtpsender.h | 71
-rw-r--r-- talk/app/webrtc/rtpsenderinterface.h | 20
-rw-r--r-- talk/app/webrtc/rtpsenderreceiver_unittest.cc | 251
-rw-r--r-- talk/app/webrtc/statscollector.cc | 71
-rw-r--r-- talk/app/webrtc/statscollector.h | 1
-rw-r--r-- talk/app/webrtc/statscollector_unittest.cc | 47
-rw-r--r-- talk/app/webrtc/statstypes.cc | 5
-rw-r--r-- talk/app/webrtc/statstypes.h | 1
-rw-r--r-- talk/app/webrtc/test/DEPS | 5
-rw-r--r-- talk/app/webrtc/test/androidtestinitializer.cc | 74
-rw-r--r-- talk/app/webrtc/test/androidtestinitializer.h | 37
-rw-r--r-- talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc | 4
-rw-r--r-- talk/app/webrtc/test/fakedtlsidentitystore.h | 113
-rw-r--r-- talk/app/webrtc/test/fakemediastreamsignaling.h | 140
-rw-r--r-- talk/app/webrtc/test/peerconnectiontestwrapper.cc | 20
-rw-r--r-- talk/app/webrtc/test/peerconnectiontestwrapper.h | 7
-rw-r--r-- talk/app/webrtc/videosource.cc | 28
-rw-r--r-- talk/app/webrtc/videosource.h | 11
-rw-r--r-- talk/app/webrtc/videosource_unittest.cc | 41
-rw-r--r-- talk/app/webrtc/videosourceproxy.h | 1
-rw-r--r-- talk/app/webrtc/videotrack.cc | 4
-rw-r--r-- talk/app/webrtc/videotrack_unittest.cc | 2
-rw-r--r-- talk/app/webrtc/videotrackrenderers.cc | 4
-rw-r--r-- talk/app/webrtc/videotrackrenderers.h | 1
-rw-r--r-- talk/app/webrtc/webrtcsdp.cc | 69
-rw-r--r-- talk/app/webrtc/webrtcsdp_unittest.cc | 89
-rw-r--r-- talk/app/webrtc/webrtcsession.cc | 119
-rw-r--r-- talk/app/webrtc/webrtcsession.h | 10
-rw-r--r-- talk/app/webrtc/webrtcsession_unittest.cc | 198
-rw-r--r-- talk/app/webrtc/webrtcsessiondescriptionfactory.cc | 37
116 files changed, 6711 insertions(+), 3327 deletions(-)
diff --git a/talk/app/webrtc/OWNERS b/talk/app/webrtc/OWNERS
index ffd78e1777..20a1fdf80d 100644
--- a/talk/app/webrtc/OWNERS
+++ b/talk/app/webrtc/OWNERS
@@ -1,5 +1,5 @@
glaznev@webrtc.org
-juberti@google.com
-perkj@google.com
+juberti@webrtc.org
+perkj@webrtc.org
tkchin@webrtc.org
-tommi@google.com
+tommi@webrtc.org
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
index 1c01ffa0b8..63c05fb616 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/GlRectDrawerTest.java
@@ -28,7 +28,6 @@ package org.webrtc;
import android.graphics.SurfaceTexture;
import android.opengl.GLES20;
-import android.opengl.Matrix;
import android.test.ActivityTestCase;
import android.test.suitebuilder.annotation.MediumTest;
import android.test.suitebuilder.annotation.SmallTest;
@@ -36,9 +35,6 @@ import android.test.suitebuilder.annotation.SmallTest;
import java.nio.ByteBuffer;
import java.util.Random;
-import javax.microedition.khronos.egl.EGL10;
-import javax.microedition.khronos.egl.EGLContext;
-
public final class GlRectDrawerTest extends ActivityTestCase {
// Resolution of the test image.
private static final int WIDTH = 16;
@@ -46,7 +42,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Seed for random pixel creation.
private static final int SEED = 42;
// When comparing pixels, allow some slack for float arithmetic and integer rounding.
- private static final float MAX_DIFF = 1.0f;
+ private static final float MAX_DIFF = 1.5f;
private static float normalizedByte(byte b) {
return (b & 0xFF) / 255.0f;
@@ -100,7 +96,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
@SmallTest
public void testRgbRendering() {
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
eglBase.makeCurrent();
@@ -119,7 +115,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the RGB frame onto the pixel buffer.
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix());
+ drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer rgbaData = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
@@ -137,7 +133,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
@SmallTest
public void testYuvRendering() {
// Create EGL base with a pixel buffer as display output.
- EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
eglBase.makeCurrent();
@@ -166,7 +162,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the YUV frame onto the pixel buffer.
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawYuv(yuvTextures, RendererCommon.identityMatrix());
+ drawer.drawYuv(yuvTextures, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer data = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
@@ -231,8 +227,9 @@ public final class GlRectDrawerTest extends ActivityTestCase {
private final int rgbTexture;
public StubOesTextureProducer(
- EGLContext sharedContext, SurfaceTexture surfaceTexture, int width, int height) {
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PLAIN);
+ EglBase.Context sharedContext, SurfaceTexture surfaceTexture, int width,
+ int height) {
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PLAIN);
surfaceTexture.setDefaultBufferSize(width, height);
eglBase.createSurface(surfaceTexture);
assertEquals(eglBase.surfaceWidth(), width);
@@ -253,7 +250,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGB, WIDTH,
HEIGHT, 0, GLES20.GL_RGB, GLES20.GL_UNSIGNED_BYTE, rgbPlane);
// Draw the RGB data onto the SurfaceTexture.
- drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix());
+ drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, WIDTH, HEIGHT);
eglBase.swapBuffers();
}
@@ -266,14 +263,14 @@ public final class GlRectDrawerTest extends ActivityTestCase {
}
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(WIDTH, HEIGHT);
// Create resources for generating OES textures.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final StubOesTextureProducer oesProducer = new StubOesTextureProducer(
- eglBase.getContext(), surfaceTextureHelper.getSurfaceTexture(), WIDTH, HEIGHT);
+ eglBase.getEglBaseContext(), surfaceTextureHelper.getSurfaceTexture(), WIDTH, HEIGHT);
final SurfaceTextureHelperTest.MockTextureListener listener =
new SurfaceTextureHelperTest.MockTextureListener();
surfaceTextureHelper.setListener(listener);
@@ -291,7 +288,7 @@ public final class GlRectDrawerTest extends ActivityTestCase {
// Draw the OES texture on the pixel buffer.
eglBase.makeCurrent();
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, WIDTH, HEIGHT);
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
final ByteBuffer rgbaData = ByteBuffer.allocateDirect(WIDTH * HEIGHT * 4);
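
The hunks above all follow one migration: the public EglBase constructor taking an EGL10 context and a ConfigType enum is replaced by an EglBase.create() factory plus CONFIG_* constants, getContext() becomes getEglBaseContext(), and the GlRectDrawer draw calls gain an explicit viewport rectangle. A minimal sketch of the new-style usage, assuming only the API visible in these hunks (the texture id and sizes are placeholders):

    // Pixel-buffer-backed EGL surface, no shared context.
    EglBase eglBase = EglBase.create(null /* sharedContext */, EglBase.CONFIG_PIXEL_BUFFER);
    eglBase.createPbufferSurface(16, 16);
    eglBase.makeCurrent();

    GlRectDrawer drawer = new GlRectDrawer();
    // Draw calls now name the target viewport explicitly: (x, y, width, height).
    drawer.drawRgb(rgbTexture, RendererCommon.identityMatrix(), 0, 0, 16, 16);

    drawer.release();
    eglBase.release();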
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java
new file mode 100644
index 0000000000..b1ec5dda0e
--- /dev/null
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/MediaCodecVideoEncoderTest.java
@@ -0,0 +1,180 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.webrtc;
+
+import android.annotation.TargetApi;
+import android.opengl.GLES11Ext;
+import android.opengl.GLES20;
+import android.os.Build;
+import android.test.ActivityTestCase;
+import android.test.suitebuilder.annotation.SmallTest;
+import android.util.Log;
+
+import org.webrtc.MediaCodecVideoEncoder.OutputBufferInfo;
+
+import java.nio.ByteBuffer;
+
+@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
+public final class MediaCodecVideoEncoderTest extends ActivityTestCase {
+ final static String TAG = "MediaCodecVideoEncoderTest";
+
+ @SmallTest
+ public static void testInitializeUsingByteBuffer() {
+ if (!MediaCodecVideoEncoder.isVp8HwSupported()) {
+ Log.i(TAG,
+          "Hardware does not support VP8 encoding, skipping testInitializeUsingByteBuffer");
+ return;
+ }
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30, null));
+ encoder.release();
+ }
+
+ @SmallTest
+  public static void testInitializeUsingTextures() {
+    if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+      Log.i(TAG, "Hardware does not support VP8 encoding, skipping testInitializeUsingTextures");
+ return;
+ }
+ EglBase14 eglBase = new EglBase14(null, EglBase.CONFIG_PLAIN);
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ eglBase.getEglBaseContext()));
+ encoder.release();
+ eglBase.release();
+ }
+
+ @SmallTest
+  public static void testInitializeUsingByteBufferReInitializeUsingTextures() {
+    if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+      Log.i(TAG, "Hardware does not support VP8 encoding, skipping "
+          + "testInitializeUsingByteBufferReInitializeUsingTextures");
+ return;
+ }
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ null));
+ encoder.release();
+ EglBase14 eglBase = new EglBase14(null, EglBase.CONFIG_PLAIN);
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, 640, 480, 300, 30,
+ eglBase.getEglBaseContext()));
+ encoder.release();
+ eglBase.release();
+ }
+
+ @SmallTest
+ public static void testEncoderUsingByteBuffer() throws InterruptedException {
+ if (!MediaCodecVideoEncoder.isVp8HwSupported()) {
+ Log.i(TAG, "Hardware does not support VP8 encoding, skipping testEncoderUsingByteBuffer");
+ return;
+ }
+
+ final int width = 640;
+ final int height = 480;
+ final int min_size = width * height * 3 / 2;
+ final long presentationTimestampUs = 2;
+
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, width, height, 300, 30, null));
+ ByteBuffer[] inputBuffers = encoder.getInputBuffers();
+ assertNotNull(inputBuffers);
+ assertTrue(min_size <= inputBuffers[0].capacity());
+
+ int bufferIndex;
+ do {
+ Thread.sleep(10);
+ bufferIndex = encoder.dequeueInputBuffer();
+ } while (bufferIndex == -1); // |-1| is returned when there is no buffer available yet.
+
+ assertTrue(bufferIndex >= 0);
+ assertTrue(bufferIndex < inputBuffers.length);
+ assertTrue(encoder.encodeBuffer(true, bufferIndex, min_size, presentationTimestampUs));
+
+ OutputBufferInfo info;
+ do {
+ info = encoder.dequeueOutputBuffer();
+ Thread.sleep(10);
+ } while (info == null);
+ assertTrue(info.index >= 0);
+ assertEquals(presentationTimestampUs, info.presentationTimestampUs);
+ assertTrue(info.buffer.capacity() > 0);
+ encoder.releaseOutputBuffer(info.index);
+
+ encoder.release();
+ }
+
+ @SmallTest
+ public static void testEncoderUsingTextures() throws InterruptedException {
+ if (!MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures()) {
+ Log.i(TAG, "Hardware does not support VP8 encoding, skipping testEncoderUsingTextures");
+ return;
+ }
+
+ final int width = 640;
+ final int height = 480;
+ final long presentationTs = 2;
+
+ final EglBase14 eglOesBase = new EglBase14(null, EglBase.CONFIG_PIXEL_BUFFER);
+ eglOesBase.createDummyPbufferSurface();
+ eglOesBase.makeCurrent();
+ int oesTextureId = GlUtil.generateTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
+
+  // TODO(perkj): This test is weak since we don't fill the texture with valid data with correct
+  // width and height and verify the encoded data. Fill the OES texture and figure out a way to
+  // verify that the output makes sense.
+
+ MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();
+
+ assertTrue(encoder.initEncode(
+ MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8, width, height, 300, 30,
+ eglOesBase.getEglBaseContext()));
+ assertTrue(encoder.encodeTexture(true, oesTextureId, RendererCommon.identityMatrix(),
+ presentationTs));
+ GlUtil.checkNoGLES2Error("encodeTexture");
+
+ // It should be Ok to delete the texture after calling encodeTexture.
+ GLES20.glDeleteTextures(1, new int[] {oesTextureId}, 0);
+
+ OutputBufferInfo info = encoder.dequeueOutputBuffer();
+ while (info == null) {
+ info = encoder.dequeueOutputBuffer();
+ Thread.sleep(20);
+ }
+ assertTrue(info.index != -1);
+ assertTrue(info.buffer.capacity() > 0);
+ assertEquals(presentationTs, info.presentationTimestampUs);
+ encoder.releaseOutputBuffer(info.index);
+
+ encoder.release();
+ eglOesBase.release();
+ }
+}
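
The tests above exercise the two input modes this patch gives MediaCodecVideoEncoder.initEncode(): a null last argument selects byte-buffer input, while an EglBase14 context selects texture (OES) input. A sketch of both, assuming the signature exactly as it appears in this file (the 300 and 30 arguments are presumably target bitrate in kbps and frame rate, matching the tests):

    MediaCodecVideoEncoder encoder = new MediaCodecVideoEncoder();

    // Byte-buffer mode: frames are copied into the codec's input buffers.
    encoder.initEncode(MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8,
        640, 480, 300, 30, null /* eglContext */);
    encoder.release();

    // Texture mode: the encoder draws OES textures into its input surface.
    EglBase14 eglBase = new EglBase14(null, EglBase.CONFIG_PLAIN);
    encoder.initEncode(MediaCodecVideoEncoder.VideoCodecType.VIDEO_CODEC_VP8,
        640, 480, 300, 30, eglBase.getEglBaseContext());
    encoder.release();
    eglBase.release();

As testInitializeUsingByteBufferReInitializeUsingTextures shows, the same encoder instance may be re-initialized in the other mode after release().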
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
index 882fde1875..9e0164d4b8 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceTextureHelperTest.java
@@ -37,8 +37,6 @@ import android.test.suitebuilder.annotation.SmallTest;
import java.nio.ByteBuffer;
-import javax.microedition.khronos.egl.EGL10;
-
public final class SurfaceTextureHelperTest extends ActivityTestCase {
/**
* Mock texture listener with blocking wait functionality.
@@ -99,6 +97,14 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
}
}
+ /** Assert that two integers are close, with difference at most
+ * {@code threshold}. */
+ public static void assertClose(int threshold, int expected, int actual) {
+ if (Math.abs(expected - actual) <= threshold)
+ return;
+ failNotEquals("Not close enough, threshold " + threshold, expected, actual);
+ }
+
/**
* Test normal use by receiving three uniform texture frames. Texture frames are returned as early
* as possible. The texture pixel values are inspected by drawing the texture frame to a pixel
@@ -109,20 +115,21 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
final int width = 16;
final int height = 16;
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(width, height);
final GlRectDrawer drawer = new GlRectDrawer();
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
// |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(eglBase.getContext(), EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase =
+ EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
assertEquals(eglOesBase.surfaceWidth(), width);
assertEquals(eglOesBase.surfaceHeight(), height);
@@ -142,7 +149,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Wait for an OES texture to arrive and draw it onto the pixel buffer.
listener.waitForNewFrame();
eglBase.makeCurrent();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, width, height);
surfaceTextureHelper.returnTextureFrame();
@@ -176,19 +183,20 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
final int width = 16;
final int height = 16;
// Create EGL base with a pixel buffer as display output.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PIXEL_BUFFER);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createPbufferSurface(width, height);
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(eglBase.getContext());
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
// |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(eglBase.getContext(), EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase =
+ EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
assertEquals(eglOesBase.surfaceWidth(), width);
assertEquals(eglOesBase.surfaceHeight(), height);
@@ -212,7 +220,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Draw the pending texture frame onto the pixel buffer.
eglBase.makeCurrent();
final GlRectDrawer drawer = new GlRectDrawer();
- drawer.drawOes(listener.oesTextureId, listener.transformMatrix);
+ drawer.drawOes(listener.oesTextureId, listener.transformMatrix, 0, 0, width, height);
drawer.release();
// Download the pixels in the pixel buffer as RGBA. Not all platforms support RGB, e.g. Nexus 9.
@@ -240,11 +248,11 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
public static void testDisconnect() throws InterruptedException {
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT);
+ SurfaceTextureHelper.create(null);
final MockTextureListener listener = new MockTextureListener();
surfaceTextureHelper.setListener(listener);
// Create EglBase with the SurfaceTexture as target EGLSurface.
- final EglBase eglBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PLAIN);
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
eglBase.makeCurrent();
// Assert no frame has been received yet.
@@ -276,7 +284,7 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
@SmallTest
public static void testDisconnectImmediately() {
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT);
+ SurfaceTextureHelper.create(null);
surfaceTextureHelper.disconnect();
}
@@ -292,14 +300,14 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Create SurfaceTextureHelper and listener.
final SurfaceTextureHelper surfaceTextureHelper =
- SurfaceTextureHelper.create(EGL10.EGL_NO_CONTEXT, handler);
+ SurfaceTextureHelper.create(null, handler);
// Create a mock listener and expect frames to be delivered on |thread|.
final MockTextureListener listener = new MockTextureListener(thread);
surfaceTextureHelper.setListener(listener);
// Create resources for stubbing an OES texture producer. |eglOesBase| has the
// SurfaceTexture in |surfaceTextureHelper| as the target EGLSurface.
- final EglBase eglOesBase = new EglBase(EGL10.EGL_NO_CONTEXT, EglBase.ConfigType.PLAIN);
+ final EglBase eglOesBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
eglOesBase.makeCurrent();
// Draw a frame onto the SurfaceTexture.
@@ -313,7 +321,119 @@ public final class SurfaceTextureHelperTest extends ActivityTestCase {
// Return the frame from this thread.
surfaceTextureHelper.returnTextureFrame();
+ surfaceTextureHelper.disconnect(handler);
+ }
+
+ /**
+   * Test using SurfaceTextureHelper on a separate thread. A uniform texture frame is created and
+ * received on a thread separate from the test thread and returned after disconnect.
+ */
+ @MediumTest
+ public static void testLateReturnFrameOnSeparateThread() throws InterruptedException {
+ final HandlerThread thread = new HandlerThread("SurfaceTextureHelperTestThread");
+ thread.start();
+ final Handler handler = new Handler(thread.getLooper());
+
+ // Create SurfaceTextureHelper and listener.
+ final SurfaceTextureHelper surfaceTextureHelper =
+ SurfaceTextureHelper.create(null, handler);
+ // Create a mock listener and expect frames to be delivered on |thread|.
+ final MockTextureListener listener = new MockTextureListener(thread);
+ surfaceTextureHelper.setListener(listener);
+
+ // Create resources for stubbing an OES texture producer. |eglOesBase| has the
+ // SurfaceTexture in |surfaceTextureHelper| as the target EGLSurface.
+ final EglBase eglOesBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
+ eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
+ eglOesBase.makeCurrent();
+ // Draw a frame onto the SurfaceTexture.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ // swapBuffers() will ultimately trigger onTextureFrameAvailable().
+ eglOesBase.swapBuffers();
+ eglOesBase.release();
+
+ // Wait for an OES texture to arrive.
+ listener.waitForNewFrame();
+
+ surfaceTextureHelper.disconnect(handler);
+
+ surfaceTextureHelper.returnTextureFrame();
+ }
+
+ @MediumTest
+  public static void testTextureToYUV() throws InterruptedException {
+ final int width = 16;
+ final int height = 16;
+
+ final EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PLAIN);
+
+ // Create SurfaceTextureHelper and listener.
+ final SurfaceTextureHelper surfaceTextureHelper =
+ SurfaceTextureHelper.create(eglBase.getEglBaseContext());
+ final MockTextureListener listener = new MockTextureListener();
+ surfaceTextureHelper.setListener(listener);
+ surfaceTextureHelper.getSurfaceTexture().setDefaultBufferSize(width, height);
+
+ // Create resources for stubbing an OES texture producer. |eglBase| has the SurfaceTexture in
+ // |surfaceTextureHelper| as the target EGLSurface.
+
+ eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
+ assertEquals(eglBase.surfaceWidth(), width);
+ assertEquals(eglBase.surfaceHeight(), height);
+
+ final int red[] = new int[] {79, 144, 185};
+ final int green[] = new int[] {66, 210, 162};
+ final int blue[] = new int[] {161, 117, 158};
+
+ final int ref_y[] = new int[] {81, 180, 168};
+ final int ref_u[] = new int[] {173, 93, 122};
+ final int ref_v[] = new int[] {127, 103, 140};
+
+ // Draw three frames.
+ for (int i = 0; i < 3; ++i) {
+ // Draw a constant color frame onto the SurfaceTexture.
+ eglBase.makeCurrent();
+ GLES20.glClearColor(red[i] / 255.0f, green[i] / 255.0f, blue[i] / 255.0f, 1.0f);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ // swapBuffers() will ultimately trigger onTextureFrameAvailable().
+ eglBase.swapBuffers();
+
+ // Wait for an OES texture to arrive.
+ listener.waitForNewFrame();
+
+ // Memory layout: Lines are 16 bytes. First 16 lines are
+ // the Y data. These are followed by 8 lines with 8 bytes of U
+ // data on the left and 8 bytes of V data on the right.
+ //
+ // Offset
+ // 0 YYYYYYYY YYYYYYYY
+ // 16 YYYYYYYY YYYYYYYY
+ // ...
+ // 240 YYYYYYYY YYYYYYYY
+ // 256 UUUUUUUU VVVVVVVV
+ // 272 UUUUUUUU VVVVVVVV
+ // ...
+ // 368 UUUUUUUU VVVVVVVV
+ // 384 buffer end
+ ByteBuffer buffer = ByteBuffer.allocateDirect(width * height * 3 / 2);
+ surfaceTextureHelper.textureToYUV(buffer, width, height, width,
+ listener.oesTextureId, listener.transformMatrix);
+
+ surfaceTextureHelper.returnTextureFrame();
+
+ // Allow off-by-one differences due to different rounding.
+ while (buffer.position() < width*height) {
+ assertClose(1, buffer.get() & 0xff, ref_y[i]);
+ }
+ while (buffer.hasRemaining()) {
+ if (buffer.position() % width < width/2)
+ assertClose(1, buffer.get() & 0xff, ref_u[i]);
+ else
+ assertClose(1, buffer.get() & 0xff, ref_v[i]);
+ }
+ }
+
surfaceTextureHelper.disconnect();
- thread.quitSafely();
+ eglBase.release();
}
}
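
The memory-layout comment in testTextureToYUV above describes the packed buffer that textureToYUV fills: a full-resolution Y plane followed by lines that carry U bytes in the left half and V bytes in the right half. A sketch of the resulting index arithmetic for the 16x16 case, assuming exactly that layout (row and col are an arbitrary pixel picked for illustration):

    final int width = 16;
    final int height = 16;
    final int row = 5;
    final int col = 10;
    // Y plane: one byte per pixel, stride == width, offsets 0..255.
    final int yIndex = row * width + col;
    // Chroma follows the Y plane, one sample per 2x2 pixel block. Each chroma
    // line is width bytes: U in the left width/2 bytes, V in the right width/2.
    final int uIndex = width * height + (row / 2) * width + (col / 2);
    final int vIndex = width * height + (row / 2) * width + width / 2 + (col / 2);

This matches the verification loop in the test: positions below width*height are compared against ref_y, and within each remaining line the left half is compared against ref_u and the right half against ref_v.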
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
index 47fe780124..341c632b58 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/SurfaceViewRendererOnMeasureTest.java
@@ -36,8 +36,6 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
-import javax.microedition.khronos.egl.EGL10;
-
public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
/**
* List with all possible scaling types.
@@ -111,7 +109,7 @@ public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
}
// Test behaviour after SurfaceViewRenderer.init() is called, but still no frame.
- surfaceViewRenderer.init(EGL10.EGL_NO_CONTEXT, null);
+ surfaceViewRenderer.init((EglBase.Context) null, null);
for (RendererCommon.ScalingType scalingType : scalingTypes) {
for (int measureSpecMode : measureSpecModes) {
final int zeroMeasureSize = MeasureSpec.makeMeasureSpec(0, measureSpecMode);
@@ -134,7 +132,7 @@ public final class SurfaceViewRendererOnMeasureTest extends ActivityTestCase {
public void testFrame1280x720() {
final SurfaceViewRenderer surfaceViewRenderer =
new SurfaceViewRenderer(getInstrumentation().getContext());
- surfaceViewRenderer.init(EGL10.EGL_NO_CONTEXT, null);
+ surfaceViewRenderer.init((EglBase.Context) null, null);
// Test different rotation degress, but same rotated size.
for (int rotationDegree : new int[] {0, 90, 180, 270}) {
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
index dbbe5963cd..1b97201a0a 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTest.java
@@ -29,7 +29,6 @@ package org.webrtc;
import android.test.ActivityTestCase;
import android.test.suitebuilder.annotation.MediumTest;
import android.test.suitebuilder.annotation.SmallTest;
-import android.util.Log;
import android.util.Size;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
@@ -37,8 +36,6 @@ import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
import java.util.HashSet;
import java.util.Set;
-import javax.microedition.khronos.egl.EGL10;
-
@SuppressWarnings("deprecation")
public class VideoCapturerAndroidTest extends ActivityTestCase {
static final String TAG = "VideoCapturerAndroidTest";
@@ -87,8 +84,10 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testCreateAndReleaseUsingTextures() {
+ EglBase eglBase = EglBase.create();
VideoCapturerAndroidTestFixtures.release(
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT));
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext()));
+ eglBase.release();
}
@SmallTest
@@ -108,12 +107,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.startCapturerAndRender(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testStartVideoCapturerUsingTextures() throws InterruptedException {
+ public void testStartVideoCapturerUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.startCapturerAndRender(capturer);
+ eglBase.release();
}
@SmallTest
@@ -151,11 +151,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.switchCamera(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testSwitchVideoCapturerUsingTextures() throws InterruptedException {
- VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ public void testSwitchVideoCapturerUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.switchCamera(capturer);
+ eglBase.release();
}
@MediumTest
@@ -179,12 +181,14 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@MediumTest
public void testCameraCallsAfterStopUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
final String deviceName = CameraEnumerationAndroid.getDeviceName(0);
final VideoCapturerAndroid capturer = VideoCapturerAndroid.create(deviceName, null,
- EGL10.EGL_NO_CONTEXT);
+ eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.cameraCallsAfterStop(capturer,
getInstrumentation().getContext());
+ eglBase.release();
}
@SmallTest
@@ -195,11 +199,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.stopRestartVideoSource(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@SmallTest
- public void DISABLED_testStopRestartVideoSourceUsingTextures() throws InterruptedException {
- VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ public void testStopRestartVideoSourceUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.stopRestartVideoSource(capturer);
+ eglBase.release();
}
@SmallTest
@@ -215,13 +221,50 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testStartStopWithDifferentResolutionsUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
String deviceName = CameraEnumerationAndroid.getDeviceName(0);
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create(deviceName, null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create(deviceName, null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.startStopWithDifferentResolutions(capturer,
getInstrumentation().getContext());
+ eglBase.release();
+ }
+
+ @SmallTest
+  // This tests that an error is reported if the camera is already opened
+ // when VideoCapturerAndroid is started.
+ public void testStartWhileCameraAlreadyOpened() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpen(
+ capturer, getInstrumentation().getContext());
+ }
+
+ @SmallTest
+  // This tests that VideoCapturerAndroid can be started even if the camera is already open,
+  // provided the camera is closed while VideoCapturerAndroid is re-trying to start.
+ public void testStartWhileCameraIsAlreadyOpenAndCloseCamera() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpenAndCloseCamera(
+ capturer, getInstrumentation().getContext());
+ }
+
+ @SmallTest
+  // This tests that VideoCapturerAndroid.stop can be called while VideoCapturerAndroid is
+  // re-trying to start.
+  public void testStartWhileCameraIsAlreadyOpenAndStop() throws InterruptedException {
+ String deviceName = CameraEnumerationAndroid.getDeviceName(0);
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create(deviceName, null);
+ VideoCapturerAndroidTestFixtures.startWhileCameraIsAlreadyOpenAndStop(
+ capturer, getInstrumentation().getContext());
}
+
@SmallTest
// This tests what happens if buffers are returned after the capturer has
// been stopped and restarted. It does not test or use the C++ layer.
@@ -235,11 +278,13 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
@SmallTest
public void testReturnBufferLateUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
String deviceName = CameraEnumerationAndroid.getDeviceName(0);
VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create(deviceName, null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create(deviceName, null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.returnBufferLate(capturer,
getInstrumentation().getContext());
+ eglBase.release();
}
@MediumTest
@@ -251,11 +296,45 @@ public class VideoCapturerAndroidTest extends ActivityTestCase {
VideoCapturerAndroidTestFixtures.returnBufferLateEndToEnd(capturer);
}
- // TODO(perkj): Enable once VideoCapture to texture support has landed in C++.
@MediumTest
- public void DISABLED_testReturnBufferLateEndToEndUsingTextures() throws InterruptedException {
+ public void testReturnBufferLateEndToEndUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
final VideoCapturerAndroid capturer =
- VideoCapturerAndroid.create("", null, EGL10.EGL_NO_CONTEXT);
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
VideoCapturerAndroidTestFixtures.returnBufferLateEndToEnd(capturer);
+ eglBase.release();
+ }
+
+ @MediumTest
+  // This tests that CameraEventsHandler.onCameraFreezed is triggered if video buffers are not
+  // returned to the capturer.
+ public void testCameraFreezedEventOnBufferStarvationUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroidTestFixtures.CameraEvents cameraEvents =
+ VideoCapturerAndroidTestFixtures.createCameraEvents();
+ VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", cameraEvents,
+ eglBase.getEglBaseContext());
+ VideoCapturerAndroidTestFixtures.cameraFreezedEventOnBufferStarvationUsingTextures(capturer,
+ cameraEvents, getInstrumentation().getContext());
+ eglBase.release();
+ }
+
+ @MediumTest
+  // This tests that frames forwarded to a renderer are scaled if onOutputFormatRequest is
+  // called. This tests both the Java and C++ parts of the stack.
+ public void testScaleCameraOutput() throws InterruptedException {
+ VideoCapturerAndroid capturer = VideoCapturerAndroid.create("", null);
+ VideoCapturerAndroidTestFixtures.scaleCameraOutput(capturer);
+ }
+
+ @MediumTest
+  // This tests that frames forwarded to a renderer are scaled if onOutputFormatRequest is
+  // called. This tests both the Java and C++ parts of the stack.
+ public void testScaleCameraOutputUsingTextures() throws InterruptedException {
+ EglBase eglBase = EglBase.create();
+ VideoCapturerAndroid capturer =
+ VideoCapturerAndroid.create("", null, eglBase.getEglBaseContext());
+ VideoCapturerAndroidTestFixtures.scaleCameraOutput(capturer);
+ eglBase.release();
}
}
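
A pattern repeated throughout the test changes above: each texture-path test now owns an EglBase, passes its context to VideoCapturerAndroid.create(), and releases it once the fixture finishes. A minimal sketch of that lifecycle, assuming the API as shown in these hunks (the empty device name follows the tests' convention for selecting a default camera):

    EglBase eglBase = EglBase.create();
    VideoCapturerAndroid capturer =
        VideoCapturerAndroid.create("", null /* eventsHandler */, eglBase.getEglBaseContext());
    // ... startCapture / render / stopCapture ...
    capturer.dispose();
    // Release the EGL context only after the capturer is disposed.
    eglBase.release();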
diff --git a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
index 11b3ce98a0..0b42e33785 100644
--- a/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
+++ b/talk/app/webrtc/androidtests/src/org/webrtc/VideoCapturerAndroidTestFixtures.java
@@ -29,6 +29,7 @@ package org.webrtc;
import android.content.Context;
import android.hardware.Camera;
+import org.webrtc.VideoCapturerAndroidTestFixtures;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
import org.webrtc.VideoRenderer.I420Frame;
@@ -42,16 +43,32 @@ public class VideoCapturerAndroidTestFixtures {
static class RendererCallbacks implements VideoRenderer.Callbacks {
private int framesRendered = 0;
private Object frameLock = 0;
+ private int width = 0;
+ private int height = 0;
@Override
public void renderFrame(I420Frame frame) {
synchronized (frameLock) {
++framesRendered;
+ width = frame.rotatedWidth();
+ height = frame.rotatedHeight();
frameLock.notify();
}
VideoRenderer.renderFrameDone(frame);
}
+ public int frameWidth() {
+ synchronized (frameLock) {
+ return width;
+ }
+ }
+
+ public int frameHeight() {
+ synchronized (frameLock) {
+ return height;
+ }
+ }
+
public int WaitForNextFrameToRender() throws InterruptedException {
synchronized (frameLock) {
frameLock.wait();
@@ -102,11 +119,11 @@ public class VideoCapturerAndroidTestFixtures {
}
@Override
- public void onByteBufferFrameCaptured(byte[] frame, int length, int width, int height,
- int rotation, long timeStamp) {
+ public void onByteBufferFrameCaptured(byte[] frame, int width, int height, int rotation,
+ long timeStamp) {
synchronized (frameLock) {
++framesCaptured;
- frameSize = length;
+ frameSize = frame.length;
frameWidth = width;
frameHeight = height;
timestamps.add(timeStamp);
@@ -115,7 +132,8 @@ public class VideoCapturerAndroidTestFixtures {
}
@Override
public void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timeStamp) {
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timeStamp) {
synchronized (frameLock) {
++framesCaptured;
frameWidth = width;
@@ -174,9 +192,20 @@ public class VideoCapturerAndroidTestFixtures {
VideoCapturerAndroid.CameraEventsHandler {
public boolean onCameraOpeningCalled;
public boolean onFirstFrameAvailableCalled;
+ public final Object onCameraFreezedLock = new Object();
+ private String onCameraFreezedDescription;
@Override
- public void onCameraError(String errorDescription) { }
+ public void onCameraError(String errorDescription) {
+ }
+
+ @Override
+ public void onCameraFreezed(String errorDescription) {
+ synchronized (onCameraFreezedLock) {
+ onCameraFreezedDescription = errorDescription;
+ onCameraFreezedLock.notifyAll();
+ }
+ }
@Override
public void onCameraOpening(int cameraId) {
@@ -190,6 +219,13 @@ public class VideoCapturerAndroidTestFixtures {
@Override
public void onCameraClosed() { }
+
+ public String WaitForCameraFreezed() throws InterruptedException {
+ synchronized (onCameraFreezedLock) {
+ onCameraFreezedLock.wait();
+ return onCameraFreezedDescription;
+ }
+ }
}
static public CameraEvents createCameraEvents() {
@@ -275,8 +311,8 @@ public class VideoCapturerAndroidTestFixtures {
assertTrue(observer.WaitForCapturerToStart());
observer.WaitForNextCapturedFrame();
capturer.stopCapture();
- for (long timeStamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
capturer.dispose();
@@ -296,9 +332,10 @@ public class VideoCapturerAndroidTestFixtures {
// Make sure camera is started and then stop it.
assertTrue(observer.WaitForCapturerToStart());
capturer.stopCapture();
- for (long timeStamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
+
// We can't change |capturer| at this point, but we should not crash.
capturer.switchCamera(null);
capturer.onOutputFormatRequest(640, 480, 15);
@@ -357,17 +394,90 @@ public class VideoCapturerAndroidTestFixtures {
if (capturer.isCapturingToTexture()) {
assertEquals(0, observer.frameSize());
} else {
- assertEquals(format.frameSize(), observer.frameSize());
+ assertTrue(format.frameSize() <= observer.frameSize());
}
capturer.stopCapture();
- for (long timestamp : observer.getCopyAndResetListOftimeStamps()) {
- capturer.returnBuffer(timestamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
}
capturer.dispose();
assertTrue(capturer.isReleased());
}
+ static void waitUntilIdle(VideoCapturerAndroid capturer) throws InterruptedException {
+ final CountDownLatch barrier = new CountDownLatch(1);
+ capturer.getCameraThreadHandler().post(new Runnable() {
+ @Override public void run() {
+ barrier.countDown();
+ }
+ });
+ barrier.await();
+ }
+
+ static public void startWhileCameraIsAlreadyOpen(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+
+ if (android.os.Build.VERSION.SDK_INT > android.os.Build.VERSION_CODES.LOLLIPOP_MR1) {
+ // The first opened camera client will be evicted.
+ assertTrue(observer.WaitForCapturerToStart());
+ capturer.stopCapture();
+ } else {
+ assertFalse(observer.WaitForCapturerToStart());
+ }
+
+ capturer.dispose();
+ camera.release();
+ }
+
+ static public void startWhileCameraIsAlreadyOpenAndCloseCamera(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ waitUntilIdle(capturer);
+
+ camera.release();
+
+ // Make sure camera is started and first frame is received and then stop it.
+ assertTrue(observer.WaitForCapturerToStart());
+ observer.WaitForNextCapturedFrame();
+ capturer.stopCapture();
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
+ }
+ capturer.dispose();
+ assertTrue(capturer.isReleased());
+ }
+
+ static public void startWhileCameraIsAlreadyOpenAndStop(
+ VideoCapturerAndroid capturer, Context appContext) throws InterruptedException {
+ Camera camera = Camera.open(capturer.getCurrentCameraId());
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ capturer.stopCapture();
+ capturer.dispose();
+ assertTrue(capturer.isReleased());
+ camera.release();
+ }
+
static public void returnBufferLate(VideoCapturerAndroid capturer,
Context appContext) throws InterruptedException {
FakeCapturerObserver observer = new FakeCapturerObserver();
@@ -387,9 +497,8 @@ public class VideoCapturerAndroidTestFixtures {
capturer.startCapture(format.width, format.height, format.maxFramerate,
appContext, observer);
observer.WaitForCapturerToStart();
-
- for (Long timeStamp : listOftimestamps) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
observer.WaitForNextCapturedFrame();
@@ -397,9 +506,10 @@ public class VideoCapturerAndroidTestFixtures {
listOftimestamps = observer.getCopyAndResetListOftimeStamps();
assertTrue(listOftimestamps.size() >= 1);
- for (Long timeStamp : listOftimestamps) {
- capturer.returnBuffer(timeStamp);
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
}
+
capturer.dispose();
assertTrue(capturer.isReleased());
}
@@ -410,6 +520,7 @@ public class VideoCapturerAndroidTestFixtures {
final VideoSource source = factory.createVideoSource(capturer, new MediaConstraints());
final VideoTrack track = factory.createVideoTrack("dummy", source);
final FakeAsyncRenderer renderer = new FakeAsyncRenderer();
+
track.addRenderer(new VideoRenderer(renderer));
// Wait for at least one frame that has not been returned.
assertFalse(renderer.waitForPendingFrames().isEmpty());
@@ -420,9 +531,7 @@ public class VideoCapturerAndroidTestFixtures {
track.dispose();
source.dispose();
factory.dispose();
-
- // The pending frames should keep the JNI parts and |capturer| alive.
- assertFalse(capturer.isReleased());
+ assertTrue(capturer.isReleased());
// Return the frame(s), on a different thread out of spite.
final List<I420Frame> pendingFrames = renderer.waitForPendingFrames();
@@ -436,8 +545,71 @@ public class VideoCapturerAndroidTestFixtures {
});
returnThread.start();
returnThread.join();
+ }
+
+ static public void cameraFreezedEventOnBufferStarvationUsingTextures(
+ VideoCapturerAndroid capturer,
+ CameraEvents events, Context appContext) throws InterruptedException {
+ assertTrue("Not capturing to textures.", capturer.isCapturingToTexture());
- // Check that frames have successfully returned. This will cause |capturer| to be released.
+ final List<CaptureFormat> formats = capturer.getSupportedFormats();
+ final CameraEnumerationAndroid.CaptureFormat format = formats.get(0);
+
+ final FakeCapturerObserver observer = new FakeCapturerObserver();
+ capturer.startCapture(format.width, format.height, format.maxFramerate,
+ appContext, observer);
+ // Make sure camera is started.
+ assertTrue(observer.WaitForCapturerToStart());
+ // Since we don't return the buffer, we should get a starvation message if we are
+ // capturing to a texture.
+ assertEquals("Camera failure. Client must return video buffers.",
+ events.WaitForCameraFreezed());
+
+ capturer.stopCapture();
+ if (capturer.isCapturingToTexture()) {
+ capturer.surfaceHelper.returnTextureFrame();
+ }
+
+ capturer.dispose();
assertTrue(capturer.isReleased());
}
+
+ static public void scaleCameraOutput(VideoCapturerAndroid capturer) throws InterruptedException {
+ PeerConnectionFactory factory = new PeerConnectionFactory();
+ VideoSource source =
+ factory.createVideoSource(capturer, new MediaConstraints());
+ VideoTrack track = factory.createVideoTrack("dummy", source);
+ RendererCallbacks renderer = new RendererCallbacks();
+ track.addRenderer(new VideoRenderer(renderer));
+ assertTrue(renderer.WaitForNextFrameToRender() > 0);
+
+ final int startWidth = renderer.frameWidth();
+ final int startHeight = renderer.frameHeight();
+ final int frameRate = 30;
+ final int scaledWidth = startWidth / 2;
+ final int scaledHeight = startHeight / 2;
+
+ // Request the captured frames to be scaled.
+ capturer.onOutputFormatRequest(scaledWidth, scaledHeight, frameRate);
+
+ boolean gotExpectedResolution = false;
+ int numberOfInspectedFrames = 0;
+
+ do {
+ renderer.WaitForNextFrameToRender();
+ ++numberOfInspectedFrames;
+
+ gotExpectedResolution = (renderer.frameWidth() == scaledWidth
+ && renderer.frameHeight() == scaledHeight);
+ } while (!gotExpectedResolution && numberOfInspectedFrames < 30);
+
+ source.stop();
+ track.dispose();
+ source.dispose();
+ factory.dispose();
+ assertTrue(capturer.isReleased());
+
+ assertTrue(gotExpectedResolution);
+ }
+
}
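
The fixture changes above replace the old per-timestamp returnBuffer(timeStamp) loop with a single call through the capturer's SurfaceTextureHelper, guarded by isCapturingToTexture(); in byte-buffer mode there is nothing to hand back. A sketch of the teardown contract, assuming the fields and methods as they appear in these hunks:

    capturer.stopCapture();
    if (capturer.isCapturingToTexture()) {
      // Texture frames are pooled; an unreturned frame stalls the capturer.
      capturer.surfaceHelper.returnTextureFrame();
    }
    capturer.dispose();

Withholding the frame instead is exactly how cameraFreezedEventOnBufferStarvationUsingTextures provokes the "Camera failure. Client must return video buffers." freeze event.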
diff --git a/talk/app/webrtc/androidvideocapturer.cc b/talk/app/webrtc/androidvideocapturer.cc
index afcfb5bb7c..d8f12174db 100644
--- a/talk/app/webrtc/androidvideocapturer.cc
+++ b/talk/app/webrtc/androidvideocapturer.cc
@@ -26,6 +26,7 @@
*/
#include "talk/app/webrtc/androidvideocapturer.h"
+#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "talk/media/webrtc/webrtcvideoframe.h"
#include "webrtc/base/common.h"
#include "webrtc/base/json.h"
@@ -57,11 +58,13 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& buffer,
int rotation,
int64_t time_stamp_in_ns) {
+ RTC_DCHECK(rotation == 0 || rotation == 90 || rotation == 180 ||
+ rotation == 270);
buffer_ = buffer;
captured_frame_.width = buffer->width();
captured_frame_.height = buffer->height();
captured_frame_.time_stamp = time_stamp_in_ns;
- captured_frame_.rotation = rotation;
+ captured_frame_.rotation = static_cast<webrtc::VideoRotation>(rotation);
}
void ClearCapturedFrame() {
@@ -85,7 +88,7 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
rtc::scoped_ptr<cricket::VideoFrame> frame(new cricket::WebRtcVideoFrame(
ShallowCenterCrop(buffer_, dst_width, dst_height),
- captured_frame->time_stamp, captured_frame->GetRotation()));
+ captured_frame->time_stamp, captured_frame->rotation));
// Caller takes ownership.
// TODO(magjed): Change CreateAliasedFrame() to return a rtc::scoped_ptr.
return apply_rotation_ ? frame->GetCopyWithRotationApplied()->Copy()
@@ -99,10 +102,17 @@ class AndroidVideoCapturer::FrameFactory : public cricket::VideoFrameFactory {
int output_width,
int output_height) const override {
if (buffer_->native_handle() != nullptr) {
- // TODO(perkj): Implement CreateAliasedFrame properly for textures.
- rtc::scoped_ptr<cricket::VideoFrame> frame(new cricket::WebRtcVideoFrame(
- buffer_, input_frame->time_stamp, input_frame->GetRotation()));
- return frame.release();
+ // TODO(perkj) Implement cropping.
+ RTC_CHECK_EQ(cropped_input_width, buffer_->width());
+ RTC_CHECK_EQ(cropped_input_height, buffer_->height());
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> scaled_buffer(
+ static_cast<webrtc_jni::AndroidTextureBuffer*>(buffer_.get())
+ ->ScaleAndRotate(output_width, output_height,
+ apply_rotation_ ? input_frame->rotation :
+ webrtc::kVideoRotation_0));
+ return new cricket::WebRtcVideoFrame(
+ scaled_buffer, input_frame->time_stamp,
+ apply_rotation_ ? webrtc::kVideoRotation_0 : input_frame->rotation);
}
return VideoFrameFactory::CreateAliasedFrame(input_frame,
cropped_input_width,
diff --git a/talk/app/webrtc/androidvideocapturer.h b/talk/app/webrtc/androidvideocapturer.h
index df783bdf6f..c665eabd91 100644
--- a/talk/app/webrtc/androidvideocapturer.h
+++ b/talk/app/webrtc/androidvideocapturer.h
@@ -32,7 +32,7 @@
#include "talk/media/base/videocapturer.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
namespace webrtc {
diff --git a/talk/app/webrtc/audiotrack.cc b/talk/app/webrtc/audiotrack.cc
index b0c91296f9..b3223cd29f 100644
--- a/talk/app/webrtc/audiotrack.cc
+++ b/talk/app/webrtc/audiotrack.cc
@@ -27,27 +27,82 @@
#include "talk/app/webrtc/audiotrack.h"
-#include <string>
+#include "webrtc/base/checks.h"
+
+using rtc::scoped_refptr;
namespace webrtc {
-static const char kAudioTrackKind[] = "audio";
+const char MediaStreamTrackInterface::kAudioKind[] = "audio";
+
+// static
+scoped_refptr<AudioTrack> AudioTrack::Create(
+ const std::string& id,
+ const scoped_refptr<AudioSourceInterface>& source) {
+ return new rtc::RefCountedObject<AudioTrack>(id, source);
+}
AudioTrack::AudioTrack(const std::string& label,
- AudioSourceInterface* audio_source)
- : MediaStreamTrack<AudioTrackInterface>(label),
- audio_source_(audio_source) {
+ const scoped_refptr<AudioSourceInterface>& source)
+ : MediaStreamTrack<AudioTrackInterface>(label), audio_source_(source) {
+ if (audio_source_) {
+ audio_source_->RegisterObserver(this);
+ OnChanged();
+ }
+}
+
+AudioTrack::~AudioTrack() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ set_state(MediaStreamTrackInterface::kEnded);
+ if (audio_source_)
+ audio_source_->UnregisterObserver(this);
}
std::string AudioTrack::kind() const {
- return kAudioTrackKind;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return kAudioKind;
+}
+
+AudioSourceInterface* AudioTrack::GetSource() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return audio_source_.get();
}
-rtc::scoped_refptr<AudioTrack> AudioTrack::Create(
- const std::string& id, AudioSourceInterface* source) {
- rtc::RefCountedObject<AudioTrack>* track =
- new rtc::RefCountedObject<AudioTrack>(id, source);
- return track;
+void AudioTrack::AddSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_source_)
+ audio_source_->AddSink(sink);
+}
+
+void AudioTrack::RemoveSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (audio_source_)
+ audio_source_->RemoveSink(sink);
+}
+
+void AudioTrack::OnChanged() {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (state() == kFailed)
+ return; // We can't recover from this state (do we ever set it?).
+
+ TrackState new_state = kInitializing;
+
+ // |audio_source_| must be non-null if we ever get here.
+ switch (audio_source_->state()) {
+ case MediaSourceInterface::kLive:
+ case MediaSourceInterface::kMuted:
+ new_state = kLive;
+ break;
+ case MediaSourceInterface::kEnded:
+ new_state = kEnded;
+ break;
+ case MediaSourceInterface::kInitializing:
+ default:
+      // Use kInitializing.
+ break;
+ }
+
+ set_state(new_state);
}
} // namespace webrtc
diff --git a/talk/app/webrtc/audiotrack.h b/talk/app/webrtc/audiotrack.h
index 750f272ba2..55f4837714 100644
--- a/talk/app/webrtc/audiotrack.h
+++ b/talk/app/webrtc/audiotrack.h
@@ -28,40 +28,47 @@
#ifndef TALK_APP_WEBRTC_AUDIOTRACK_H_
#define TALK_APP_WEBRTC_AUDIOTRACK_H_
+#include <string>
+
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/notifier.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/thread_checker.h"
namespace webrtc {
-class AudioTrack : public MediaStreamTrack<AudioTrackInterface> {
+class AudioTrack : public MediaStreamTrack<AudioTrackInterface>,
+ public ObserverInterface {
+ protected:
+ // Protected ctor to force use of factory method.
+ AudioTrack(const std::string& label,
+ const rtc::scoped_refptr<AudioSourceInterface>& source);
+ ~AudioTrack() override;
+
public:
static rtc::scoped_refptr<AudioTrack> Create(
- const std::string& id, AudioSourceInterface* source);
-
- // AudioTrackInterface implementation.
- AudioSourceInterface* GetSource() const override {
- return audio_source_.get();
- }
- // TODO(xians): Implement these methods.
- void AddSink(AudioTrackSinkInterface* sink) override {}
- void RemoveSink(AudioTrackSinkInterface* sink) override {}
- bool GetSignalLevel(int* level) override { return false; }
- rtc::scoped_refptr<AudioProcessorInterface> GetAudioProcessor() override {
- return NULL;
- }
- cricket::AudioRenderer* GetRenderer() override { return NULL; }
+ const std::string& id,
+ const rtc::scoped_refptr<AudioSourceInterface>& source);
+ private:
// MediaStreamTrack implementation.
std::string kind() const override;
- protected:
- AudioTrack(const std::string& label, AudioSourceInterface* audio_source);
+ // AudioTrackInterface implementation.
+ AudioSourceInterface* GetSource() const override;
+
+ void AddSink(AudioTrackSinkInterface* sink) override;
+ void RemoveSink(AudioTrackSinkInterface* sink) override;
+
+ // ObserverInterface implementation.
+ void OnChanged() override;
private:
- rtc::scoped_refptr<AudioSourceInterface> audio_source_;
+ const rtc::scoped_refptr<AudioSourceInterface> audio_source_;
+ rtc::ThreadChecker thread_checker_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTrack);
};
} // namespace webrtc
diff --git a/talk/app/webrtc/dtlsidentitystore.cc b/talk/app/webrtc/dtlsidentitystore.cc
index 27587796bc..390ec0d0b7 100644
--- a/talk/app/webrtc/dtlsidentitystore.cc
+++ b/talk/app/webrtc/dtlsidentitystore.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/dtlsidentitystore.h"
+#include <utility>
+
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
#include "webrtc/base/logging.h"
@@ -72,7 +74,7 @@ class DtlsIdentityStoreImpl::WorkerTask : public sigslot::has_slots<>,
// Posting to |this| avoids touching |store_| on threads other than
// |signaling_thread_| and thus avoids having to use locks.
IdentityResultMessageData* msg = new IdentityResultMessageData(
- new IdentityResult(key_type_, identity.Pass()));
+ new IdentityResult(key_type_, std::move(identity)));
signaling_thread_->Post(this, MSG_GENERATE_IDENTITY_RESULT, msg);
}
@@ -93,7 +95,7 @@ class DtlsIdentityStoreImpl::WorkerTask : public sigslot::has_slots<>,
static_cast<IdentityResultMessageData*>(msg->pdata));
if (store_) {
store_->OnIdentityGenerated(pdata->data()->key_type_,
- pdata->data()->identity_.Pass());
+ std::move(pdata->data()->identity_));
}
}
break;
@@ -152,7 +154,7 @@ void DtlsIdentityStoreImpl::OnMessage(rtc::Message* msg) {
rtc::scoped_ptr<IdentityResultMessageData> pdata(
static_cast<IdentityResultMessageData*>(msg->pdata));
OnIdentityGenerated(pdata->data()->key_type_,
- pdata->data()->identity_.Pass());
+ std::move(pdata->data()->identity_));
break;
}
}
@@ -178,9 +180,9 @@ void DtlsIdentityStoreImpl::GenerateIdentity(
// Return identity async - post even though we are on |signaling_thread_|.
LOG(LS_VERBOSE) << "Using a free DTLS identity.";
++request_info_[key_type].gen_in_progress_counts_;
- IdentityResultMessageData* msg = new IdentityResultMessageData(
- new IdentityResult(key_type,
- request_info_[key_type].free_identity_.Pass()));
+ IdentityResultMessageData* msg =
+ new IdentityResultMessageData(new IdentityResult(
+ key_type, std::move(request_info_[key_type].free_identity_)));
signaling_thread_->Post(this, MSG_GENERATE_IDENTITY_RESULT, msg);
return;
}
@@ -228,7 +230,7 @@ void DtlsIdentityStoreImpl::OnIdentityGenerated(
// Return the result to the observer.
if (identity.get()) {
LOG(LS_VERBOSE) << "A DTLS identity is returned to an observer.";
- observer->OnSuccess(identity.Pass());
+ observer->OnSuccess(std::move(identity));
} else {
LOG(LS_WARNING) << "Failed to generate DTLS identity.";
observer->OnFailure(0);
diff --git a/talk/app/webrtc/dtlsidentitystore.h b/talk/app/webrtc/dtlsidentitystore.h
index a0eef98e1b..2a5309d34b 100644
--- a/talk/app/webrtc/dtlsidentitystore.h
+++ b/talk/app/webrtc/dtlsidentitystore.h
@@ -30,6 +30,7 @@
#include <queue>
#include <string>
+#include <utility>
#include "webrtc/base/messagehandler.h"
#include "webrtc/base/messagequeue.h"
@@ -129,7 +130,7 @@ class DtlsIdentityStoreImpl : public DtlsIdentityStoreInterface,
struct IdentityResult {
IdentityResult(rtc::KeyType key_type,
rtc::scoped_ptr<rtc::SSLIdentity> identity)
- : key_type_(key_type), identity_(identity.Pass()) {}
+ : key_type_(key_type), identity_(std::move(identity)) {}
rtc::KeyType key_type_;
rtc::scoped_ptr<rtc::SSLIdentity> identity_;
diff --git a/talk/app/webrtc/fakeportallocatorfactory.h b/talk/app/webrtc/fakeportallocatorfactory.h
deleted file mode 100644
index f326b62043..0000000000
--- a/talk/app/webrtc/fakeportallocatorfactory.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * libjingle
- * Copyright 2011 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// This file defines a fake port allocator factory used for testing.
-// This implementation creates instances of cricket::FakePortAllocator.
-
-#ifndef TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
-#define TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
-
-#include "talk/app/webrtc/peerconnectioninterface.h"
-#include "webrtc/p2p/client/fakeportallocator.h"
-
-namespace webrtc {
-
-class FakePortAllocatorFactory : public PortAllocatorFactoryInterface {
- public:
- static FakePortAllocatorFactory* Create() {
- rtc::RefCountedObject<FakePortAllocatorFactory>* allocator =
- new rtc::RefCountedObject<FakePortAllocatorFactory>();
- return allocator;
- }
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun_configurations,
- const std::vector<TurnConfiguration>& turn_configurations) {
- stun_configs_ = stun_configurations;
- turn_configs_ = turn_configurations;
- return new cricket::FakePortAllocator(rtc::Thread::Current(), NULL);
- }
-
- const std::vector<StunConfiguration>& stun_configs() const {
- return stun_configs_;
- }
-
- const std::vector<TurnConfiguration>& turn_configs() const {
- return turn_configs_;
- }
-
- void SetNetworkIgnoreMask(int network_ignore_mask) {}
-
- protected:
- FakePortAllocatorFactory() {}
- ~FakePortAllocatorFactory() {}
-
- private:
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_configs_;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_configs_;
-};
-
-} // namespace webrtc
-
-#endif // TALK_APP_WEBRTC_FAKEPORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java b/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
index 097d1cd906..3444529596 100644
--- a/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
+++ b/talk/app/webrtc/java/android/org/webrtc/Camera2Enumerator.java
@@ -27,7 +27,9 @@
package org.webrtc;
+import android.annotation.TargetApi;
import android.content.Context;
+
import android.graphics.ImageFormat;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraManager;
@@ -45,6 +47,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+@TargetApi(21)
public class Camera2Enumerator implements CameraEnumerationAndroid.Enumerator {
private final static String TAG = "Camera2Enumerator";
private final static double NANO_SECONDS_PER_SECOND = 1.0e9;
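
The new @TargetApi(21) annotation only silences compile-time lint; it adds no runtime guard. A sketch of the SDK-level check callers are assumed to perform before touching the camera2 APIs (the Context-taking constructor is inferred from the imports above, not confirmed by this hunk):

    if (android.os.Build.VERSION.SDK_INT >= 21) {
      // Safe: android.hardware.camera2.* exists from API 21.
      CameraEnumerationAndroid.Enumerator enumerator = new Camera2Enumerator(context);
    }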
diff --git a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
index 3e37f6afdc..5f68c3759e 100644
--- a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
+++ b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerationAndroid.java
@@ -29,7 +29,6 @@ package org.webrtc;
import static java.lang.Math.abs;
import static java.lang.Math.ceil;
-import android.hardware.Camera;
import android.graphics.ImageFormat;
import org.json.JSONArray;
@@ -72,7 +71,7 @@ public class CameraEnumerationAndroid {
// other image formats then this needs to be updated and
 // VideoCapturerAndroid.getSupportedFormats needs to return CaptureFormats of
// all imageFormats.
- public final int imageFormat = ImageFormat.YV12;
+ public final int imageFormat = ImageFormat.NV21;
public CaptureFormat(int width, int height, int minFramerate,
int maxFramerate) {
@@ -88,25 +87,15 @@ public class CameraEnumerationAndroid {
}
// Calculates the frame size of the specified image format. Currently only
- // supporting ImageFormat.YV12. The YV12's stride is the closest rounded up
- // multiple of 16 of the width and width and height are always even.
- // Android guarantees this:
- // http://developer.android.com/reference/android/hardware/Camera.Parameters.html#setPreviewFormat%28int%29
+ // supporting ImageFormat.NV21.
+ // The size is width * height * number of bytes per pixel.
+ // http://developer.android.com/reference/android/hardware/Camera.html#addCallbackBuffer(byte[])
public static int frameSize(int width, int height, int imageFormat) {
- if (imageFormat != ImageFormat.YV12) {
+ if (imageFormat != ImageFormat.NV21) {
throw new UnsupportedOperationException("Don't know how to calculate "
- + "the frame size of non-YV12 image formats.");
+ + "the frame size of non-NV21 image formats.");
}
- int yStride = roundUp(width, 16);
- int uvStride = roundUp(yStride / 2, 16);
- int ySize = yStride * height;
- int uvSize = uvStride * height / 2;
- return ySize + uvSize * 2;
- }
-
- // Rounds up |x| to the closest value that is a multiple of |alignment|.
- private static int roundUp(int x, int alignment) {
- return (int)ceil(x / (double)alignment) * alignment;
+ return (width * height * ImageFormat.getBitsPerPixel(imageFormat)) / 8;
}
@Override
@@ -114,21 +103,19 @@ public class CameraEnumerationAndroid {
return width + "x" + height + "@[" + minFramerate + ":" + maxFramerate + "]";
}
- @Override
- public boolean equals(Object that) {
- if (!(that instanceof CaptureFormat)) {
+ public boolean isSameFormat(final CaptureFormat that) {
+ if (that == null) {
return false;
}
- final CaptureFormat c = (CaptureFormat) that;
- return width == c.width && height == c.height && maxFramerate == c.maxFramerate
- && minFramerate == c.minFramerate;
+ return width == that.width && height == that.height && maxFramerate == that.maxFramerate
+ && minFramerate == that.minFramerate;
}
}
// Returns device names that can be used to create a new VideoCapturerAndroid.
public static String[] getDeviceNames() {
- String[] names = new String[Camera.getNumberOfCameras()];
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ String[] names = new String[android.hardware.Camera.getNumberOfCameras()];
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
names[i] = getDeviceName(i);
}
return names;
@@ -136,22 +123,22 @@ public class CameraEnumerationAndroid {
// Returns number of cameras on device.
public static int getDeviceCount() {
- return Camera.getNumberOfCameras();
+ return android.hardware.Camera.getNumberOfCameras();
}
// Returns the name of the camera with camera index. Returns null if the
// camera can not be used.
public static String getDeviceName(int index) {
- Camera.CameraInfo info = new Camera.CameraInfo();
+ android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
try {
- Camera.getCameraInfo(index, info);
+ android.hardware.Camera.getCameraInfo(index, info);
} catch (Exception e) {
Logging.e(TAG, "getCameraInfo failed on index " + index,e);
return null;
}
String facing =
- (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) ? "front" : "back";
+ (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT) ? "front" : "back";
return "Camera " + index + ", Facing " + facing
+ ", Orientation " + info.orientation;
}
@@ -159,13 +146,13 @@ public class CameraEnumerationAndroid {
// Returns the name of the front facing camera. Returns null if the
// camera can not be used or does not exist.
public static String getNameOfFrontFacingDevice() {
- return getNameOfDevice(Camera.CameraInfo.CAMERA_FACING_FRONT);
+ return getNameOfDevice(android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT);
}
// Returns the name of the back facing camera. Returns null if the
// camera can not be used or does not exist.
public static String getNameOfBackFacingDevice() {
- return getNameOfDevice(Camera.CameraInfo.CAMERA_FACING_BACK);
+ return getNameOfDevice(android.hardware.Camera.CameraInfo.CAMERA_FACING_BACK);
}
public static String getSupportedFormatsAsJson(int id) throws JSONException {
@@ -194,7 +181,8 @@ public class CameraEnumerationAndroid {
}
}
- public static int[] getFramerateRange(Camera.Parameters parameters, final int framerate) {
+ public static int[] getFramerateRange(android.hardware.Camera.Parameters parameters,
+ final int framerate) {
List<int[]> listFpsRange = parameters.getSupportedPreviewFpsRange();
if (listFpsRange.isEmpty()) {
Logging.w(TAG, "No supported preview fps range");
@@ -203,27 +191,30 @@ public class CameraEnumerationAndroid {
return Collections.min(listFpsRange,
new ClosestComparator<int[]>() {
@Override int diff(int[] range) {
- return abs(framerate - range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX])
- + abs(framerate - range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
+ final int maxFpsWeight = 10;
+ return range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX]
+ + maxFpsWeight * abs(framerate
+ - range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
}
});
}
- public static Camera.Size getClosestSupportedSize(
- List<Camera.Size> supportedSizes, final int requestedWidth, final int requestedHeight) {
+ public static android.hardware.Camera.Size getClosestSupportedSize(
+ List<android.hardware.Camera.Size> supportedSizes, final int requestedWidth,
+ final int requestedHeight) {
return Collections.min(supportedSizes,
- new ClosestComparator<Camera.Size>() {
- @Override int diff(Camera.Size size) {
+ new ClosestComparator<android.hardware.Camera.Size>() {
+ @Override int diff(android.hardware.Camera.Size size) {
return abs(requestedWidth - size.width) + abs(requestedHeight - size.height);
}
});
}
private static String getNameOfDevice(int facing) {
- final Camera.CameraInfo info = new Camera.CameraInfo();
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ final android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
try {
- Camera.getCameraInfo(i, info);
+ android.hardware.Camera.getCameraInfo(i, info);
if (info.facing == facing) {
return getDeviceName(i);
}
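
A worked example of the new NV21 frameSize() formula: ImageFormat.getBitsPerPixel(ImageFormat.NV21) is 12, so a 640x480 frame needs 640 * 480 * 12 / 8 = 460800 bytes (versus the stride-padded YV12 computation this replaces):

    // 460800 bytes for VGA NV21; this is also the buffer size to hand to
    // Camera.addCallbackBuffer() for preview callbacks.
    int bytes = CameraEnumerationAndroid.CaptureFormat.frameSize(640, 480, ImageFormat.NV21);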
diff --git a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
index 2f35dc3493..54469cc341 100644
--- a/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
+++ b/talk/app/webrtc/java/android/org/webrtc/CameraEnumerator.java
@@ -27,7 +27,6 @@
package org.webrtc;
-import android.hardware.Camera;
import android.os.SystemClock;
import org.webrtc.CameraEnumerationAndroid.CaptureFormat;
@@ -60,11 +59,11 @@ public class CameraEnumerator implements CameraEnumerationAndroid.Enumerator {
private List<CaptureFormat> enumerateFormats(int cameraId) {
Logging.d(TAG, "Get supported formats for camera index " + cameraId + ".");
final long startTimeMs = SystemClock.elapsedRealtime();
- final Camera.Parameters parameters;
- Camera camera = null;
+ final android.hardware.Camera.Parameters parameters;
+ android.hardware.Camera camera = null;
try {
Logging.d(TAG, "Opening camera with index " + cameraId);
- camera = Camera.open(cameraId);
+ camera = android.hardware.Camera.open(cameraId);
parameters = camera.getParameters();
} catch (RuntimeException e) {
Logging.e(TAG, "Open camera failed on camera index " + cameraId, e);
@@ -84,10 +83,10 @@ public class CameraEnumerator implements CameraEnumerationAndroid.Enumerator {
// getSupportedPreviewFpsRange() returns a sorted list. Take the fps range
// corresponding to the highest fps.
final int[] range = listFpsRange.get(listFpsRange.size() - 1);
- minFps = range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX];
- maxFps = range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX];
+ minFps = range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX];
+ maxFps = range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX];
}
- for (Camera.Size size : parameters.getSupportedPreviewSizes()) {
+ for (android.hardware.Camera.Size size : parameters.getSupportedPreviewSizes()) {
formatList.add(new CaptureFormat(size.width, size.height, minFps, maxFps));
}
} catch (Exception e) {
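
A short sketch of how the enumerator is meant to be consumed; getSupportedFormats(int) is assumed to be the public Enumerator method that delegates to the private enumerateFormats() above (it is not shown in this hunk):

    CameraEnumerationAndroid.Enumerator enumerator = new CameraEnumerator();
    for (CameraEnumerationAndroid.CaptureFormat format : enumerator.getSupportedFormats(0)) {
      Logging.d("FormatDump", format.toString());  // e.g. "640x480@[15000:30000]"
    }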
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase.java b/talk/app/webrtc/java/android/org/webrtc/EglBase.java
index 2ee36882e8..035645bdd1 100644
--- a/talk/app/webrtc/java/android/org/webrtc/EglBase.java
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase.java
@@ -28,244 +28,108 @@
package org.webrtc;
import android.graphics.SurfaceTexture;
-import android.view.SurfaceHolder;
-
-import org.webrtc.Logging;
+import android.view.Surface;
import javax.microedition.khronos.egl.EGL10;
-import javax.microedition.khronos.egl.EGLConfig;
-import javax.microedition.khronos.egl.EGLContext;
-import javax.microedition.khronos.egl.EGLDisplay;
-import javax.microedition.khronos.egl.EGLSurface;
+
/**
- * Holds EGL state and utility methods for handling an EGLContext, an EGLDisplay, and an EGLSurface.
+ * Holds EGL state and utility methods for handling an EGLContext, an EGLDisplay,
+ * and an EGLSurface.
*/
-public final class EglBase {
- private static final String TAG = "EglBase";
+public abstract class EglBase {
+ // EGL wrapper for an actual EGLContext.
+ public static class Context {
+ }
+
// These constants are taken from EGL14.EGL_OPENGL_ES2_BIT and EGL14.EGL_CONTEXT_CLIENT_VERSION.
// https://android.googlesource.com/platform/frameworks/base/+/master/opengl/java/android/opengl/EGL14.java
 // This is similar to what GLSurfaceView does:
// http://grepcode.com/file/repository.grepcode.com/java/ext/com.google.android/android/5.1.1_r1/android/opengl/GLSurfaceView.java#760
private static final int EGL_OPENGL_ES2_BIT = 4;
- private static final int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
// Android-specific extension.
private static final int EGL_RECORDABLE_ANDROID = 0x3142;
- private final EGL10 egl;
- private EGLContext eglContext;
- private ConfigType configType;
- private EGLConfig eglConfig;
- private EGLDisplay eglDisplay;
- private EGLSurface eglSurface = EGL10.EGL_NO_SURFACE;
-
- // EGLConfig constructor type. Influences eglChooseConfig arguments.
- public static enum ConfigType {
- // No special parameters.
- PLAIN,
- // Configures with EGL_SURFACE_TYPE = EGL_PBUFFER_BIT.
- PIXEL_BUFFER,
- // Configures with EGL_RECORDABLE_ANDROID = 1.
- // Discourages EGL from using pixel formats that cannot efficiently be
- // converted to something usable by the video encoder.
- RECORDABLE
- }
-
- // Create root context without any EGLSurface or parent EGLContext. This can be used for branching
- // new contexts that share data.
- public EglBase() {
- this(EGL10.EGL_NO_CONTEXT, ConfigType.PLAIN);
- }
-
- // Create a new context with the specified config type, sharing data with sharedContext.
- public EglBase(EGLContext sharedContext, ConfigType configType) {
- this.egl = (EGL10) EGLContext.getEGL();
- this.configType = configType;
- eglDisplay = getEglDisplay();
- eglConfig = getEglConfig(eglDisplay, configType);
- eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
- }
-
- // Create EGLSurface from the Android SurfaceHolder.
- public void createSurface(SurfaceHolder surfaceHolder) {
- createSurfaceInternal(surfaceHolder);
- }
+ public static final int[] CONFIG_PLAIN = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_RGBA = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_ALPHA_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_PIXEL_BUFFER = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_SURFACE_TYPE, EGL10.EGL_PBUFFER_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_PIXEL_RGBA_BUFFER = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_ALPHA_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL10.EGL_SURFACE_TYPE, EGL10.EGL_PBUFFER_BIT,
+ EGL10.EGL_NONE
+ };
+ public static final int[] CONFIG_RECORDABLE = {
+ EGL10.EGL_RED_SIZE, 8,
+ EGL10.EGL_GREEN_SIZE, 8,
+ EGL10.EGL_BLUE_SIZE, 8,
+ EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL_RECORDABLE_ANDROID, 1,
+ EGL10.EGL_NONE
+ };
+
+ // Create a new context with the specified config attributes, sharing data with sharedContext.
+ // |sharedContext| can be null.
+ public static EglBase create(Context sharedContext, int[] configAttributes) {
+ return (EglBase14.isEGL14Supported()
+ && (sharedContext == null || sharedContext instanceof EglBase14.Context))
+ ? new EglBase14((EglBase14.Context) sharedContext, configAttributes)
+ : new EglBase10((EglBase10.Context) sharedContext, configAttributes);
+ }
+
+ public static EglBase create() {
+ return create(null, CONFIG_PLAIN);
+ }
+
+ public abstract void createSurface(Surface surface);
// Create EGLSurface from the Android SurfaceTexture.
- public void createSurface(SurfaceTexture surfaceTexture) {
- createSurfaceInternal(surfaceTexture);
- }
-
- // Create EGLSurface from either a SurfaceHolder or a SurfaceTexture.
- private void createSurfaceInternal(Object nativeWindow) {
- if (!(nativeWindow instanceof SurfaceHolder) && !(nativeWindow instanceof SurfaceTexture)) {
- throw new IllegalStateException("Input must be either a SurfaceHolder or SurfaceTexture");
- }
- checkIsNotReleased();
- if (configType == ConfigType.PIXEL_BUFFER) {
- Logging.w(TAG, "This EGL context is configured for PIXEL_BUFFER, but uses regular Surface");
- }
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Already has an EGLSurface");
- }
- int[] surfaceAttribs = {EGL10.EGL_NONE};
- eglSurface = egl.eglCreateWindowSurface(eglDisplay, eglConfig, nativeWindow, surfaceAttribs);
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Failed to create window surface");
- }
- }
+ public abstract void createSurface(SurfaceTexture surfaceTexture);
// Create dummy 1x1 pixel buffer surface so the context can be made current.
- public void createDummyPbufferSurface() {
- createPbufferSurface(1, 1);
- }
-
- public void createPbufferSurface(int width, int height) {
- checkIsNotReleased();
- if (configType != ConfigType.PIXEL_BUFFER) {
- throw new RuntimeException(
- "This EGL context is not configured to use a pixel buffer: " + configType);
- }
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Already has an EGLSurface");
- }
- int[] surfaceAttribs = {EGL10.EGL_WIDTH, width, EGL10.EGL_HEIGHT, height, EGL10.EGL_NONE};
- eglSurface = egl.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs);
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("Failed to create pixel buffer surface");
- }
- }
+ public abstract void createDummyPbufferSurface();
- public EGLContext getContext() {
- return eglContext;
- }
+ public abstract void createPbufferSurface(int width, int height);
- public boolean hasSurface() {
- return eglSurface != EGL10.EGL_NO_SURFACE;
- }
+ public abstract Context getEglBaseContext();
- public int surfaceWidth() {
- final int widthArray[] = new int[1];
- egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_WIDTH, widthArray);
- return widthArray[0];
- }
+ public abstract boolean hasSurface();
- public int surfaceHeight() {
- final int heightArray[] = new int[1];
- egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_HEIGHT, heightArray);
- return heightArray[0];
- }
+ public abstract int surfaceWidth();
- public void releaseSurface() {
- if (eglSurface != EGL10.EGL_NO_SURFACE) {
- egl.eglDestroySurface(eglDisplay, eglSurface);
- eglSurface = EGL10.EGL_NO_SURFACE;
- }
- }
+ public abstract int surfaceHeight();
- private void checkIsNotReleased() {
- if (eglDisplay == EGL10.EGL_NO_DISPLAY || eglContext == EGL10.EGL_NO_CONTEXT
- || eglConfig == null) {
- throw new RuntimeException("This object has been released");
- }
- }
+ public abstract void releaseSurface();
- public void release() {
- checkIsNotReleased();
- releaseSurface();
- detachCurrent();
- egl.eglDestroyContext(eglDisplay, eglContext);
- egl.eglTerminate(eglDisplay);
- eglContext = EGL10.EGL_NO_CONTEXT;
- eglDisplay = EGL10.EGL_NO_DISPLAY;
- eglConfig = null;
- }
+ public abstract void release();
- public void makeCurrent() {
- checkIsNotReleased();
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("No EGLSurface - can't make current");
- }
- if (!egl.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
- throw new RuntimeException("eglMakeCurrent failed");
- }
- }
+ public abstract void makeCurrent();
// Detach the current EGL context, so that it can be made current on another thread.
- public void detachCurrent() {
- if (!egl.eglMakeCurrent(
- eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT)) {
- throw new RuntimeException("eglMakeCurrent failed");
- }
- }
+ public abstract void detachCurrent();
- public void swapBuffers() {
- checkIsNotReleased();
- if (eglSurface == EGL10.EGL_NO_SURFACE) {
- throw new RuntimeException("No EGLSurface - can't swap buffers");
- }
- egl.eglSwapBuffers(eglDisplay, eglSurface);
- }
-
- // Return an EGLDisplay, or die trying.
- private EGLDisplay getEglDisplay() {
- EGLDisplay eglDisplay = egl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY);
- if (eglDisplay == EGL10.EGL_NO_DISPLAY) {
- throw new RuntimeException("Unable to get EGL10 display");
- }
- int[] version = new int[2];
- if (!egl.eglInitialize(eglDisplay, version)) {
- throw new RuntimeException("Unable to initialize EGL10");
- }
- return eglDisplay;
- }
-
- // Return an EGLConfig, or die trying.
- private EGLConfig getEglConfig(EGLDisplay eglDisplay, ConfigType configType) {
- // Always RGB888, GLES2.
- int[] configAttributes = {
- EGL10.EGL_RED_SIZE, 8,
- EGL10.EGL_GREEN_SIZE, 8,
- EGL10.EGL_BLUE_SIZE, 8,
- EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
- EGL10.EGL_NONE, 0, // Allocate dummy fields for specific options.
- EGL10.EGL_NONE
- };
-
- // Fill in dummy fields based on configType.
- switch (configType) {
- case PLAIN:
- break;
- case PIXEL_BUFFER:
- configAttributes[configAttributes.length - 3] = EGL10.EGL_SURFACE_TYPE;
- configAttributes[configAttributes.length - 2] = EGL10.EGL_PBUFFER_BIT;
- break;
- case RECORDABLE:
- configAttributes[configAttributes.length - 3] = EGL_RECORDABLE_ANDROID;
- configAttributes[configAttributes.length - 2] = 1;
- break;
- default:
- throw new IllegalArgumentException();
- }
-
- EGLConfig[] configs = new EGLConfig[1];
- int[] numConfigs = new int[1];
- if (!egl.eglChooseConfig(
- eglDisplay, configAttributes, configs, configs.length, numConfigs)) {
- throw new RuntimeException("Unable to find RGB888 " + configType + " EGL config");
- }
- return configs[0];
- }
-
- // Return an EGLConfig, or die trying.
- private EGLContext createEglContext(
- EGLContext sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
- int[] contextAttributes = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL10.EGL_NONE};
- EGLContext eglContext =
- egl.eglCreateContext(eglDisplay, eglConfig, sharedContext, contextAttributes);
- if (eglContext == EGL10.EGL_NO_CONTEXT) {
- throw new RuntimeException("Failed to create EGL context");
- }
- return eglContext;
- }
+ public abstract void swapBuffers();
}
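
The abstract class replaces direct construction with a factory that picks the EGL14 implementation when available and falls back to EGL10. A minimal lifecycle sketch using only methods declared above:

    EglBase eglBase = EglBase.create(null, EglBase.CONFIG_PIXEL_BUFFER);
    eglBase.createDummyPbufferSurface();  // 1x1 surface, just so the context can be made current
    eglBase.makeCurrent();
    // ... issue GLES20 calls ...
    eglBase.release();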
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase10.java b/talk/app/webrtc/java/android/org/webrtc/EglBase10.java
new file mode 100644
index 0000000000..f2aa9857fa
--- /dev/null
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase10.java
@@ -0,0 +1,299 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import android.graphics.Canvas;
+import android.graphics.SurfaceTexture;
+import android.graphics.Rect;
+import android.view.Surface;
+import android.view.SurfaceHolder;
+
+import javax.microedition.khronos.egl.EGL10;
+import javax.microedition.khronos.egl.EGLConfig;
+import javax.microedition.khronos.egl.EGLContext;
+import javax.microedition.khronos.egl.EGLDisplay;
+import javax.microedition.khronos.egl.EGLSurface;
+
+/**
+ * Holds EGL state and utility methods for handling an EGL 1.0 EGLContext, an EGLDisplay,
+ * and an EGLSurface.
+ */
+final class EglBase10 extends EglBase {
+ // This constant is taken from EGL14.EGL_CONTEXT_CLIENT_VERSION.
+ private static final int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
+
+ private final EGL10 egl;
+ private EGLContext eglContext;
+ private EGLConfig eglConfig;
+ private EGLDisplay eglDisplay;
+ private EGLSurface eglSurface = EGL10.EGL_NO_SURFACE;
+
+ // EGL wrapper for an actual EGLContext.
+ public static class Context extends EglBase.Context {
+ private final EGLContext eglContext;
+
+ public Context(EGLContext eglContext) {
+ this.eglContext = eglContext;
+ }
+ }
+
+ // Create a new context with the specified config type, sharing data with sharedContext.
+ EglBase10(Context sharedContext, int[] configAttributes) {
+ this.egl = (EGL10) EGLContext.getEGL();
+ eglDisplay = getEglDisplay();
+ eglConfig = getEglConfig(eglDisplay, configAttributes);
+ eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
+ }
+
+ @Override
+ public void createSurface(Surface surface) {
+ /**
+ * We have to wrap Surface in a SurfaceHolder because for some reason eglCreateWindowSurface
+ * couldn't actually take a Surface object until API 17. Older versions fortunately just call
+ * SurfaceHolder.getSurface(), so we'll do that. No other methods are relevant.
+ */
+ class FakeSurfaceHolder implements SurfaceHolder {
+ private final Surface surface;
+
+ FakeSurfaceHolder(Surface surface) {
+ this.surface = surface;
+ }
+
+ @Override
+ public void addCallback(Callback callback) {}
+
+ @Override
+ public void removeCallback(Callback callback) {}
+
+ @Override
+ public boolean isCreating() {
+ return false;
+ }
+
+ @Deprecated
+ @Override
+ public void setType(int i) {}
+
+ @Override
+ public void setFixedSize(int i, int i2) {}
+
+ @Override
+ public void setSizeFromLayout() {}
+
+ @Override
+ public void setFormat(int i) {}
+
+ @Override
+ public void setKeepScreenOn(boolean b) {}
+
+ @Override
+ public Canvas lockCanvas() {
+ return null;
+ }
+
+ @Override
+ public Canvas lockCanvas(Rect rect) {
+ return null;
+ }
+
+ @Override
+ public void unlockCanvasAndPost(Canvas canvas) {}
+
+ @Override
+ public Rect getSurfaceFrame() {
+ return null;
+ }
+
+ @Override
+ public Surface getSurface() {
+ return surface;
+ }
+ }
+
+ createSurfaceInternal(new FakeSurfaceHolder(surface));
+ }
+
+ // Create EGLSurface from the Android SurfaceTexture.
+ @Override
+ public void createSurface(SurfaceTexture surfaceTexture) {
+ createSurfaceInternal(surfaceTexture);
+ }
+
+ // Create EGLSurface from either a SurfaceHolder or a SurfaceTexture.
+ private void createSurfaceInternal(Object nativeWindow) {
+ if (!(nativeWindow instanceof SurfaceHolder) && !(nativeWindow instanceof SurfaceTexture)) {
+ throw new IllegalStateException("Input must be either a SurfaceHolder or SurfaceTexture");
+ }
+ checkIsNotReleased();
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL10.EGL_NONE};
+ eglSurface = egl.eglCreateWindowSurface(eglDisplay, eglConfig, nativeWindow, surfaceAttribs);
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create window surface");
+ }
+ }
+
+ // Create dummy 1x1 pixel buffer surface so the context can be made current.
+ @Override
+ public void createDummyPbufferSurface() {
+ createPbufferSurface(1, 1);
+ }
+
+ @Override
+ public void createPbufferSurface(int width, int height) {
+ checkIsNotReleased();
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL10.EGL_WIDTH, width, EGL10.EGL_HEIGHT, height, EGL10.EGL_NONE};
+ eglSurface = egl.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs);
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create pixel buffer surface");
+ }
+ }
+
+ @Override
+ public org.webrtc.EglBase.Context getEglBaseContext() {
+ return new EglBase10.Context(eglContext);
+ }
+
+ @Override
+ public boolean hasSurface() {
+ return eglSurface != EGL10.EGL_NO_SURFACE;
+ }
+
+ @Override
+ public int surfaceWidth() {
+ final int widthArray[] = new int[1];
+ egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_WIDTH, widthArray);
+ return widthArray[0];
+ }
+
+ @Override
+ public int surfaceHeight() {
+ final int heightArray[] = new int[1];
+ egl.eglQuerySurface(eglDisplay, eglSurface, EGL10.EGL_HEIGHT, heightArray);
+ return heightArray[0];
+ }
+
+ @Override
+ public void releaseSurface() {
+ if (eglSurface != EGL10.EGL_NO_SURFACE) {
+ egl.eglDestroySurface(eglDisplay, eglSurface);
+ eglSurface = EGL10.EGL_NO_SURFACE;
+ }
+ }
+
+ private void checkIsNotReleased() {
+ if (eglDisplay == EGL10.EGL_NO_DISPLAY || eglContext == EGL10.EGL_NO_CONTEXT
+ || eglConfig == null) {
+ throw new RuntimeException("This object has been released");
+ }
+ }
+
+ @Override
+ public void release() {
+ checkIsNotReleased();
+ releaseSurface();
+ detachCurrent();
+ egl.eglDestroyContext(eglDisplay, eglContext);
+ egl.eglTerminate(eglDisplay);
+ eglContext = EGL10.EGL_NO_CONTEXT;
+ eglDisplay = EGL10.EGL_NO_DISPLAY;
+ eglConfig = null;
+ }
+
+ @Override
+ public void makeCurrent() {
+ checkIsNotReleased();
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't make current");
+ }
+ if (!egl.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ // Detach the current EGL context, so that it can be made current on another thread.
+ @Override
+ public void detachCurrent() {
+ if (!egl.eglMakeCurrent(
+ eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ @Override
+ public void swapBuffers() {
+ checkIsNotReleased();
+ if (eglSurface == EGL10.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ egl.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ // Return an EGLDisplay, or die trying.
+ private EGLDisplay getEglDisplay() {
+ EGLDisplay eglDisplay = egl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY);
+ if (eglDisplay == EGL10.EGL_NO_DISPLAY) {
+ throw new RuntimeException("Unable to get EGL10 display");
+ }
+ int[] version = new int[2];
+ if (!egl.eglInitialize(eglDisplay, version)) {
+ throw new RuntimeException("Unable to initialize EGL10");
+ }
+ return eglDisplay;
+ }
+
+ // Return an EGLConfig, or die trying.
+ private EGLConfig getEglConfig(EGLDisplay eglDisplay, int[] configAttributes) {
+ EGLConfig[] configs = new EGLConfig[1];
+ int[] numConfigs = new int[1];
+ if (!egl.eglChooseConfig(
+ eglDisplay, configAttributes, configs, configs.length, numConfigs)) {
+ throw new RuntimeException("Unable to find any matching EGL config");
+ }
+ return configs[0];
+ }
+
+  // Return an EGLContext, or die trying.
+ private EGLContext createEglContext(
+ Context sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
+ int[] contextAttributes = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL10.EGL_NONE};
+ EGLContext rootContext =
+ sharedContext == null ? EGL10.EGL_NO_CONTEXT : sharedContext.eglContext;
+ EGLContext eglContext =
+ egl.eglCreateContext(eglDisplay, eglConfig, rootContext, contextAttributes);
+ if (eglContext == EGL10.EGL_NO_CONTEXT) {
+ throw new RuntimeException("Failed to create EGL context");
+ }
+ return eglContext;
+ }
+}
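
Context sharing works across both implementations through the opaque EglBase.Context wrapper; create() unwraps it to the matching concrete type. A sketch of branching a second context that shares textures with a root context:

    EglBase root = EglBase.create();
    EglBase shared = EglBase.create(root.getEglBaseContext(), EglBase.CONFIG_PLAIN);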
diff --git a/talk/app/webrtc/java/android/org/webrtc/EglBase14.java b/talk/app/webrtc/java/android/org/webrtc/EglBase14.java
new file mode 100644
index 0000000000..c6f98c3b31
--- /dev/null
+++ b/talk/app/webrtc/java/android/org/webrtc/EglBase14.java
@@ -0,0 +1,254 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.webrtc;
+
+import android.annotation.TargetApi;
+import android.graphics.SurfaceTexture;
+import android.opengl.EGL14;
+import android.opengl.EGLConfig;
+import android.opengl.EGLContext;
+import android.opengl.EGLDisplay;
+import android.opengl.EGLExt;
+import android.opengl.EGLSurface;
+import android.view.Surface;
+
+import org.webrtc.Logging;
+
+/**
+ * Holds EGL state and utility methods for handling an EGL14 EGLContext, an EGLDisplay,
+ * and an EGLSurface.
+ */
+@TargetApi(18)
+final class EglBase14 extends EglBase {
+ private static final String TAG = "EglBase14";
+ private static final int EGLExt_SDK_VERSION = android.os.Build.VERSION_CODES.JELLY_BEAN_MR2;
+ private static final int CURRENT_SDK_VERSION = android.os.Build.VERSION.SDK_INT;
+ private EGLContext eglContext;
+ private EGLConfig eglConfig;
+ private EGLDisplay eglDisplay;
+ private EGLSurface eglSurface = EGL14.EGL_NO_SURFACE;
+
+  // EGL 1.4 is supported from API 17, but EGLExt, which is used for setting the
+  // presentation time stamp on a surface, is supported from API 18, so we require 18.
+ public static boolean isEGL14Supported() {
+ Logging.d(TAG, "SDK version: " + CURRENT_SDK_VERSION
+ + ". isEGL14Supported: " + (CURRENT_SDK_VERSION >= EGLExt_SDK_VERSION));
+ return (CURRENT_SDK_VERSION >= EGLExt_SDK_VERSION);
+ }
+
+ public static class Context extends EglBase.Context {
+ private final android.opengl.EGLContext egl14Context;
+
+ Context(android.opengl.EGLContext eglContext) {
+ this.egl14Context = eglContext;
+ }
+ }
+
+ // Create a new context with the specified config type, sharing data with sharedContext.
+ // |sharedContext| may be null.
+ EglBase14(EglBase14.Context sharedContext, int[] configAttributes) {
+ eglDisplay = getEglDisplay();
+ eglConfig = getEglConfig(eglDisplay, configAttributes);
+ eglContext = createEglContext(sharedContext, eglDisplay, eglConfig);
+ }
+
+ // Create EGLSurface from the Android Surface.
+ @Override
+ public void createSurface(Surface surface) {
+ createSurfaceInternal(surface);
+ }
+
+ // Create EGLSurface from the Android SurfaceTexture.
+ @Override
+ public void createSurface(SurfaceTexture surfaceTexture) {
+ createSurfaceInternal(surfaceTexture);
+ }
+
+ // Create EGLSurface from either Surface or SurfaceTexture.
+ private void createSurfaceInternal(Object surface) {
+ if (!(surface instanceof Surface) && !(surface instanceof SurfaceTexture)) {
+ throw new IllegalStateException("Input must be either a Surface or SurfaceTexture");
+ }
+ checkIsNotReleased();
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL14.EGL_NONE};
+ eglSurface = EGL14.eglCreateWindowSurface(eglDisplay, eglConfig, surface, surfaceAttribs, 0);
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create window surface");
+ }
+ }
+
+ @Override
+ public void createDummyPbufferSurface() {
+ createPbufferSurface(1, 1);
+ }
+
+ @Override
+ public void createPbufferSurface(int width, int height) {
+ checkIsNotReleased();
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Already has an EGLSurface");
+ }
+ int[] surfaceAttribs = {EGL14.EGL_WIDTH, width, EGL14.EGL_HEIGHT, height, EGL14.EGL_NONE};
+ eglSurface = EGL14.eglCreatePbufferSurface(eglDisplay, eglConfig, surfaceAttribs, 0);
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("Failed to create pixel buffer surface");
+ }
+ }
+
+ @Override
+ public Context getEglBaseContext() {
+ return new EglBase14.Context(eglContext);
+ }
+
+ @Override
+ public boolean hasSurface() {
+ return eglSurface != EGL14.EGL_NO_SURFACE;
+ }
+
+ @Override
+ public int surfaceWidth() {
+ final int widthArray[] = new int[1];
+ EGL14.eglQuerySurface(eglDisplay, eglSurface, EGL14.EGL_WIDTH, widthArray, 0);
+ return widthArray[0];
+ }
+
+ @Override
+ public int surfaceHeight() {
+ final int heightArray[] = new int[1];
+ EGL14.eglQuerySurface(eglDisplay, eglSurface, EGL14.EGL_HEIGHT, heightArray, 0);
+ return heightArray[0];
+ }
+
+ @Override
+ public void releaseSurface() {
+ if (eglSurface != EGL14.EGL_NO_SURFACE) {
+ EGL14.eglDestroySurface(eglDisplay, eglSurface);
+ eglSurface = EGL14.EGL_NO_SURFACE;
+ }
+ }
+
+ private void checkIsNotReleased() {
+ if (eglDisplay == EGL14.EGL_NO_DISPLAY || eglContext == EGL14.EGL_NO_CONTEXT
+ || eglConfig == null) {
+ throw new RuntimeException("This object has been released");
+ }
+ }
+
+ @Override
+ public void release() {
+ checkIsNotReleased();
+ releaseSurface();
+ detachCurrent();
+ EGL14.eglDestroyContext(eglDisplay, eglContext);
+ EGL14.eglReleaseThread();
+ EGL14.eglTerminate(eglDisplay);
+ eglContext = EGL14.EGL_NO_CONTEXT;
+ eglDisplay = EGL14.EGL_NO_DISPLAY;
+ eglConfig = null;
+ }
+
+ @Override
+ public void makeCurrent() {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't make current");
+ }
+ if (!EGL14.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ // Detach the current EGL context, so that it can be made current on another thread.
+ @Override
+ public void detachCurrent() {
+ if (!EGL14.eglMakeCurrent(
+ eglDisplay, EGL14.EGL_NO_SURFACE, EGL14.EGL_NO_SURFACE, EGL14.EGL_NO_CONTEXT)) {
+ throw new RuntimeException("eglMakeCurrent failed");
+ }
+ }
+
+ @Override
+ public void swapBuffers() {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ EGL14.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ public void swapBuffers(long timeStampNs) {
+ checkIsNotReleased();
+ if (eglSurface == EGL14.EGL_NO_SURFACE) {
+ throw new RuntimeException("No EGLSurface - can't swap buffers");
+ }
+ // See https://android.googlesource.com/platform/frameworks/native/+/tools_r22.2/opengl/specs/EGL_ANDROID_presentation_time.txt
+ EGLExt.eglPresentationTimeANDROID(eglDisplay, eglSurface, timeStampNs);
+ EGL14.eglSwapBuffers(eglDisplay, eglSurface);
+ }
+
+ // Return an EGLDisplay, or die trying.
+ private static EGLDisplay getEglDisplay() {
+ EGLDisplay eglDisplay = EGL14.eglGetDisplay(EGL14.EGL_DEFAULT_DISPLAY);
+ if (eglDisplay == EGL14.EGL_NO_DISPLAY) {
+ throw new RuntimeException("Unable to get EGL14 display");
+ }
+ int[] version = new int[2];
+ if (!EGL14.eglInitialize(eglDisplay, version, 0, version, 1)) {
+ throw new RuntimeException("Unable to initialize EGL14");
+ }
+ return eglDisplay;
+ }
+
+ // Return an EGLConfig, or die trying.
+ private static EGLConfig getEglConfig(EGLDisplay eglDisplay, int[] configAttributes) {
+ EGLConfig[] configs = new EGLConfig[1];
+ int[] numConfigs = new int[1];
+ if (!EGL14.eglChooseConfig(
+ eglDisplay, configAttributes, 0, configs, 0, configs.length, numConfigs, 0)) {
+ throw new RuntimeException("Unable to find any matching EGL config");
+ }
+ return configs[0];
+ }
+
+  // Return an EGLContext, or die trying.
+ private static EGLContext createEglContext(
+ EglBase14.Context sharedContext, EGLDisplay eglDisplay, EGLConfig eglConfig) {
+ int[] contextAttributes = {EGL14.EGL_CONTEXT_CLIENT_VERSION, 2, EGL14.EGL_NONE};
+ EGLContext rootContext =
+ sharedContext == null ? EGL14.EGL_NO_CONTEXT : sharedContext.egl14Context;
+ EGLContext eglContext =
+ EGL14.eglCreateContext(eglDisplay, eglConfig, rootContext, contextAttributes, 0);
+ if (eglContext == EGL14.EGL_NO_CONTEXT) {
+ throw new RuntimeException("Failed to create EGL context");
+ }
+ return eglContext;
+ }
+}
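
The timestamped swapBuffers(long) overload is the reason EglBase14 requires API 18: it forwards the frame's timestamp to eglPresentationTimeANDROID before swapping, which MediaCodec encoder input surfaces can use for rate control. A hedged sketch (encoderSurface and frameTimestampNs are hypothetical names, and the package-private constructor is assumed to be called from within org.webrtc):

    EglBase14 egl = new EglBase14(null, EglBase.CONFIG_RECORDABLE);
    egl.createSurface(encoderSurface);  // e.g. a Surface from MediaCodec.createInputSurface()
    egl.makeCurrent();
    // ... draw the frame with GLES20 ...
    egl.swapBuffers(frameTimestampNs);  // sets the presentation time, then swaps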
diff --git a/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java b/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
index 2cb8af754d..6d3d5d2563 100644
--- a/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
+++ b/talk/app/webrtc/java/android/org/webrtc/GlRectDrawer.java
@@ -40,13 +40,13 @@ import java.util.IdentityHashMap;
import java.util.Map;
/**
- * Helper class to draw a quad that covers the entire viewport. Rotation, mirror, and cropping is
- * specified using a 4x4 texture coordinate transform matrix. The frame input can either be an OES
- * texture or YUV textures in I420 format. The GL state must be preserved between draw calls, this
- * is intentional to maximize performance. The function release() must be called manually to free
- * the resources held by this object.
+ * Helper class to draw an opaque quad on the target viewport location. Rotation, mirror, and
+ * cropping are specified using a 4x4 texture coordinate transform matrix. The frame input can
+ * either be an OES texture or YUV textures in I420 format. The GL state must be preserved between
+ * draw calls; this is intentional to maximize performance. The function release() must be called
+ * manually to free the resources held by this object.
*/
-public class GlRectDrawer {
+public class GlRectDrawer implements RendererCommon.GlDrawer {
// Simple vertex shader, used for both YUV and OES.
private static final String VERTEX_SHADER_STRING =
"varying vec2 interp_tc;\n"
@@ -118,67 +118,31 @@ public class GlRectDrawer {
1.0f, 1.0f // Top right.
});
- // The keys are one of the fragments shaders above.
- private final Map<String, GlShader> shaders = new IdentityHashMap<String, GlShader>();
- private GlShader currentShader;
- private float[] currentTexMatrix;
- private int texMatrixLocation;
- // Intermediate copy buffer for uploading yuv frames that are not packed, i.e. stride > width.
- // TODO(magjed): Investigate when GL_UNPACK_ROW_LENGTH is available, or make a custom shader that
- // handles stride and compare performance with intermediate copy.
- private ByteBuffer copyBuffer;
+ private static class Shader {
+ public final GlShader glShader;
+ public final int texMatrixLocation;
- /**
- * Upload |planes| into |outputYuvTextures|, taking stride into consideration. |outputYuvTextures|
- * must have been generated in advance.
- */
- public void uploadYuvData(
- int[] outputYuvTextures, int width, int height, int[] strides, ByteBuffer[] planes) {
- // Make a first pass to see if we need a temporary copy buffer.
- int copyCapacityNeeded = 0;
- for (int i = 0; i < 3; ++i) {
- final int planeWidth = (i == 0) ? width : width / 2;
- final int planeHeight = (i == 0) ? height : height / 2;
- if (strides[i] > planeWidth) {
- copyCapacityNeeded = Math.max(copyCapacityNeeded, planeWidth * planeHeight);
- }
- }
- // Allocate copy buffer if necessary.
- if (copyCapacityNeeded > 0
- && (copyBuffer == null || copyBuffer.capacity() < copyCapacityNeeded)) {
- copyBuffer = ByteBuffer.allocateDirect(copyCapacityNeeded);
- }
- // Upload each plane.
- for (int i = 0; i < 3; ++i) {
- GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
- GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, outputYuvTextures[i]);
- final int planeWidth = (i == 0) ? width : width / 2;
- final int planeHeight = (i == 0) ? height : height / 2;
- // GLES only accepts packed data, i.e. stride == planeWidth.
- final ByteBuffer packedByteBuffer;
- if (strides[i] == planeWidth) {
- // Input is packed already.
- packedByteBuffer = planes[i];
- } else {
- VideoRenderer.nativeCopyPlane(
- planes[i], planeWidth, planeHeight, strides[i], copyBuffer, planeWidth);
- packedByteBuffer = copyBuffer;
- }
- GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, planeWidth, planeHeight, 0,
- GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, packedByteBuffer);
+ public Shader(String fragmentShader) {
+ this.glShader = new GlShader(VERTEX_SHADER_STRING, fragmentShader);
+ this.texMatrixLocation = glShader.getUniformLocation("texMatrix");
}
}
+ // The keys are one of the fragments shaders above.
+ private final Map<String, Shader> shaders = new IdentityHashMap<String, Shader>();
+
/**
* Draw an OES texture frame with specified texture transformation matrix. Required resources are
* allocated at the first call to this function.
*/
- public void drawOes(int oesTextureId, float[] texMatrix) {
- prepareShader(OES_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawOes(int oesTextureId, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(OES_FRAGMENT_SHADER_STRING, texMatrix);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
// updateTexImage() may be called from another thread in another EGL context, so we need to
// bind/unbind the texture in each draw call so that GLES understands it's a new texture.
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, oesTextureId);
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, 0);
}
@@ -186,10 +150,12 @@ public class GlRectDrawer {
* Draw a RGB(A) texture frame with specified texture transformation matrix. Required resources
* are allocated at the first call to this function.
*/
- public void drawRgb(int textureId, float[] texMatrix) {
- prepareShader(RGB_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawRgb(int textureId, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(RGB_FRAGMENT_SHADER_STRING, texMatrix);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureId);
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
// Unbind the texture as a precaution.
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0);
}
@@ -198,14 +164,15 @@ public class GlRectDrawer {
* Draw a YUV frame with specified texture transformation matrix. Required resources are
* allocated at the first call to this function.
*/
- public void drawYuv(int[] yuvTextures, float[] texMatrix) {
- prepareShader(YUV_FRAGMENT_SHADER_STRING);
+ @Override
+ public void drawYuv(int[] yuvTextures, float[] texMatrix, int x, int y, int width, int height) {
+ prepareShader(YUV_FRAGMENT_SHADER_STRING, texMatrix);
// Bind the textures.
for (int i = 0; i < 3; ++i) {
GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, yuvTextures[i]);
}
- drawRectangle(texMatrix);
+ drawRectangle(x, y, width, height);
// Unbind the textures as a precaution.
for (int i = 0; i < 3; ++i) {
GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
@@ -213,60 +180,51 @@ public class GlRectDrawer {
}
}
- private void drawRectangle(float[] texMatrix) {
- // Try avoid uploading the texture if possible.
- if (!Arrays.equals(currentTexMatrix, texMatrix)) {
- currentTexMatrix = texMatrix.clone();
- // Copy the texture transformation matrix over.
- GLES20.glUniformMatrix4fv(texMatrixLocation, 1, false, texMatrix, 0);
- }
+ private void drawRectangle(int x, int y, int width, int height) {
// Draw quad.
+ GLES20.glViewport(x, y, width, height);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
}
- private void prepareShader(String fragmentShader) {
- // Lazy allocation.
- if (!shaders.containsKey(fragmentShader)) {
- final GlShader shader = new GlShader(VERTEX_SHADER_STRING, fragmentShader);
+ private void prepareShader(String fragmentShader, float[] texMatrix) {
+ final Shader shader;
+ if (shaders.containsKey(fragmentShader)) {
+ shader = shaders.get(fragmentShader);
+ } else {
+ // Lazy allocation.
+ shader = new Shader(fragmentShader);
shaders.put(fragmentShader, shader);
- shader.useProgram();
+ shader.glShader.useProgram();
// Initialize fragment shader uniform values.
if (fragmentShader == YUV_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("y_tex"), 0);
- GLES20.glUniform1i(shader.getUniformLocation("u_tex"), 1);
- GLES20.glUniform1i(shader.getUniformLocation("v_tex"), 2);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("y_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("u_tex"), 1);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("v_tex"), 2);
} else if (fragmentShader == RGB_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("rgb_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("rgb_tex"), 0);
} else if (fragmentShader == OES_FRAGMENT_SHADER_STRING) {
- GLES20.glUniform1i(shader.getUniformLocation("oes_tex"), 0);
+ GLES20.glUniform1i(shader.glShader.getUniformLocation("oes_tex"), 0);
} else {
throw new IllegalStateException("Unknown fragment shader: " + fragmentShader);
}
GlUtil.checkNoGLES2Error("Initialize fragment shader uniform values.");
// Initialize vertex shader attributes.
- shader.setVertexAttribArray("in_pos", 2, FULL_RECTANGLE_BUF);
- shader.setVertexAttribArray("in_tc", 2, FULL_RECTANGLE_TEX_BUF);
- }
-
- // Update GLES state if shader is not already current.
- final GlShader shader = shaders.get(fragmentShader);
- if (currentShader != shader) {
- currentShader = shader;
- shader.useProgram();
- GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
- currentTexMatrix = null;
- texMatrixLocation = shader.getUniformLocation("texMatrix");
+ shader.glShader.setVertexAttribArray("in_pos", 2, FULL_RECTANGLE_BUF);
+ shader.glShader.setVertexAttribArray("in_tc", 2, FULL_RECTANGLE_TEX_BUF);
}
+ shader.glShader.useProgram();
+ // Copy the texture transformation matrix over.
+ GLES20.glUniformMatrix4fv(shader.texMatrixLocation, 1, false, texMatrix, 0);
}
/**
* Release all GLES resources. This needs to be done manually, otherwise the resources are leaked.
*/
+ @Override
public void release() {
- for (GlShader shader : shaders.values()) {
- shader.release();
+ for (Shader shader : shaders.values()) {
+ shader.glShader.release();
}
shaders.clear();
- copyBuffer = null;
}
}
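For orientation, here is a minimal usage sketch of GlRectDrawer through the new RendererCommon.GlDrawer interface. The helper class and parameter names are illustrative and not part of the patch; it assumes an EGL context is already current on the calling thread and that the texture id and transform matrix come from a SurfaceTexture.

import android.graphics.SurfaceTexture;

class GlDrawerSketch {
  // Draws one OES frame over the full surface. The drawer caches its shaders
  // lazily on the first call, and GL state is kept between calls on purpose.
  static void drawOesFrame(RendererCommon.GlDrawer drawer, SurfaceTexture surfaceTexture,
      int oesTextureId, int surfaceWidth, int surfaceHeight) {
    final float[] texMatrix = new float[16];
    surfaceTexture.getTransformMatrix(texMatrix);
    // Viewport origin (0, 0) and full surface size.
    drawer.drawOes(oesTextureId, texMatrix, 0, 0, surfaceWidth, surfaceHeight);
  }
}

release() must still be called on the drawer when the EGL context is about to go away, since the GL resources are never freed automatically.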
diff --git a/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java b/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
index e3a7850db4..950dcdfa44 100644
--- a/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
+++ b/talk/app/webrtc/java/android/org/webrtc/NetworkMonitorAutoDetect.java
@@ -55,7 +55,7 @@ import android.util.Log;
* ACCESS_NETWORK_STATE permission.
*/
public class NetworkMonitorAutoDetect extends BroadcastReceiver {
- static enum ConnectionType {
+ public static enum ConnectionType {
CONNECTION_UNKNOWN,
CONNECTION_ETHERNET,
CONNECTION_WIFI,
@@ -96,6 +96,10 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
/** Queries the ConnectivityManager for information about the current connection. */
static class ConnectivityManagerDelegate {
+ /**
+      * Note: On some rare Android systems, connectivityManager is null. We handle that
+      * gracefully below.
+ */
private final ConnectivityManager connectivityManager;
ConnectivityManagerDelegate(Context context) {
@@ -114,6 +118,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
* default network.
*/
NetworkState getNetworkState() {
+ if (connectivityManager == null) {
+ return new NetworkState(false, -1, -1);
+ }
return getNetworkState(connectivityManager.getActiveNetworkInfo());
}
@@ -123,6 +130,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
NetworkState getNetworkState(Network network) {
+ if (connectivityManager == null) {
+ return new NetworkState(false, -1, -1);
+ }
return getNetworkState(connectivityManager.getNetworkInfo(network));
}
@@ -142,6 +152,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
Network[] getAllNetworks() {
+ if (connectivityManager == null) {
+ return new Network[0];
+ }
return connectivityManager.getAllNetworks();
}
@@ -152,6 +165,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
int getDefaultNetId() {
+ if (connectivityManager == null) {
+ return INVALID_NET_ID;
+ }
// Android Lollipop had no API to get the default network; only an
// API to return the NetworkInfo for the default network. To
// determine the default network one can find the network with
@@ -188,6 +204,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
*/
@SuppressLint("NewApi")
boolean hasInternetCapability(Network network) {
+ if (connectivityManager == null) {
+ return false;
+ }
final NetworkCapabilities capabilities =
connectivityManager.getNetworkCapabilities(network);
return capabilities != null && capabilities.hasCapability(NET_CAPABILITY_INTERNET);
@@ -240,7 +259,6 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver {
static final int INVALID_NET_ID = -1;
private static final String TAG = "NetworkMonitorAutoDetect";
- private static final int UNKNOWN_LINK_SPEED = -1;
private final IntentFilter intentFilter;
// Observer for the connection type change.
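The pattern added above, degrading to a neutral default whenever connectivityManager is null, can be mirrored by application code. A small illustrative sketch (not part of the patch), using only standard Android APIs:

import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;

class ConnectivitySketch {
  // Treats a missing ConnectivityManager exactly like "no connection", the
  // same fallback the delegate above uses on rare systems where the service
  // is unavailable.
  static boolean isConnected(Context context) {
    final ConnectivityManager connectivityManager =
        (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
    if (connectivityManager == null) {
      return false;
    }
    final NetworkInfo networkInfo = connectivityManager.getActiveNetworkInfo();
    return networkInfo != null && networkInfo.isConnected();
  }
}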
diff --git a/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java b/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
index 94d180da5a..5ada4cc416 100644
--- a/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
+++ b/talk/app/webrtc/java/android/org/webrtc/RendererCommon.java
@@ -28,8 +28,11 @@
package org.webrtc;
import android.graphics.Point;
+import android.opengl.GLES20;
import android.opengl.Matrix;
+import java.nio.ByteBuffer;
+
/**
* Static helper functions for renderer implementations.
*/
@@ -47,6 +50,73 @@ public class RendererCommon {
public void onFrameResolutionChanged(int videoWidth, int videoHeight, int rotation);
}
+ /** Interface for rendering frames on an EGLSurface. */
+ public static interface GlDrawer {
+ /**
+ * Functions for drawing frames with different sources. The rendering surface target is
+ * implied by the current EGL context of the calling thread and requires no explicit argument.
+ * The coordinates specify the viewport location on the surface target.
+ */
+ void drawOes(int oesTextureId, float[] texMatrix, int x, int y, int width, int height);
+ void drawRgb(int textureId, float[] texMatrix, int x, int y, int width, int height);
+ void drawYuv(int[] yuvTextures, float[] texMatrix, int x, int y, int width, int height);
+
+ /**
+ * Release all GL resources. This needs to be done manually, otherwise resources may leak.
+ */
+ void release();
+ }
+
+ /**
+   * Helper class for uploading YUV ByteBuffer frames to textures, handling stride > width. This
+ * class keeps an internal ByteBuffer to avoid unnecessary allocations for intermediate copies.
+ */
+ public static class YuvUploader {
+ // Intermediate copy buffer for uploading yuv frames that are not packed, i.e. stride > width.
+ // TODO(magjed): Investigate when GL_UNPACK_ROW_LENGTH is available, or make a custom shader
+ // that handles stride and compare performance with intermediate copy.
+ private ByteBuffer copyBuffer;
+
+ /**
+ * Upload |planes| into |outputYuvTextures|, taking stride into consideration.
+ * |outputYuvTextures| must have been generated in advance.
+ */
+ public void uploadYuvData(
+ int[] outputYuvTextures, int width, int height, int[] strides, ByteBuffer[] planes) {
+ final int[] planeWidths = new int[] {width, width / 2, width / 2};
+ final int[] planeHeights = new int[] {height, height / 2, height / 2};
+ // Make a first pass to see if we need a temporary copy buffer.
+ int copyCapacityNeeded = 0;
+ for (int i = 0; i < 3; ++i) {
+ if (strides[i] > planeWidths[i]) {
+ copyCapacityNeeded = Math.max(copyCapacityNeeded, planeWidths[i] * planeHeights[i]);
+ }
+ }
+ // Allocate copy buffer if necessary.
+ if (copyCapacityNeeded > 0
+ && (copyBuffer == null || copyBuffer.capacity() < copyCapacityNeeded)) {
+ copyBuffer = ByteBuffer.allocateDirect(copyCapacityNeeded);
+ }
+ // Upload each plane.
+ for (int i = 0; i < 3; ++i) {
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, outputYuvTextures[i]);
+ // GLES only accepts packed data, i.e. stride == planeWidth.
+ final ByteBuffer packedByteBuffer;
+ if (strides[i] == planeWidths[i]) {
+ // Input is packed already.
+ packedByteBuffer = planes[i];
+ } else {
+ VideoRenderer.nativeCopyPlane(
+ planes[i], planeWidths[i], planeHeights[i], strides[i], copyBuffer, planeWidths[i]);
+ packedByteBuffer = copyBuffer;
+ }
+ GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE, planeWidths[i],
+ planeHeights[i], 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE, packedByteBuffer);
+ }
+ }
+ }
+
// Types of video scaling:
// SCALE_ASPECT_FIT - video frame is scaled to fit the size of the view by
// maintaining the aspect ratio (black borders may be displayed).
@@ -182,9 +252,9 @@ public class RendererCommon {
}
// Each dimension is constrained on max display size and how much we are allowed to crop.
final int width = Math.min(maxDisplayWidth,
- (int) (maxDisplayHeight / minVisibleFraction * videoAspectRatio));
+ Math.round(maxDisplayHeight / minVisibleFraction * videoAspectRatio));
final int height = Math.min(maxDisplayHeight,
- (int) (maxDisplayWidth / minVisibleFraction / videoAspectRatio));
+ Math.round(maxDisplayWidth / minVisibleFraction / videoAspectRatio));
return new Point(width, height);
}
}
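A hypothetical caller of the extracted YuvUploader might look as follows. It assumes an EGL context is current on the calling thread; GlUtil.generateTexture() is the same helper SurfaceViewRenderer uses for its YUV textures.

import android.opengl.GLES20;
import java.nio.ByteBuffer;

class YuvUploadSketch {
  // Uploads one I420 frame; planes with stride > width are repacked through
  // the uploader's internal copy buffer before glTexImage2D().
  static int[] uploadFrame(RendererCommon.YuvUploader uploader,
      int width, int height, int[] strides, ByteBuffer[] planes) {
    final int[] yuvTextures = new int[3];
    for (int i = 0; i < 3; ++i) {
      yuvTextures[i] = GlUtil.generateTexture(GLES20.GL_TEXTURE_2D);
    }
    uploader.uploadYuvData(yuvTextures, width, height, strides, planes);
    return yuvTextures;
  }
}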
diff --git a/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java b/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
index b9c158f848..b001d2a101 100644
--- a/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
+++ b/talk/app/webrtc/java/android/org/webrtc/SurfaceTextureHelper.java
@@ -35,12 +35,12 @@ import android.os.Handler;
import android.os.HandlerThread;
import android.os.SystemClock;
+import java.nio.ByteBuffer;
+import java.nio.FloatBuffer;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import javax.microedition.khronos.egl.EGLContext;
-
/**
* Helper class to create and synchronize access to a SurfaceTexture. The caller will get notified
* of new frames in onTextureFrameAvailable(), and should call returnTextureFrame() when done with
@@ -51,7 +51,7 @@ import javax.microedition.khronos.egl.EGLContext;
* wrapping texture frames into webrtc::VideoFrames and also handles calling returnTextureFrame()
* when the webrtc::VideoFrame is no longer used.
*/
-final class SurfaceTextureHelper {
+class SurfaceTextureHelper {
private static final String TAG = "SurfaceTextureHelper";
/**
* Callback interface for being notified that a new texture frame is available. The calls will be
@@ -65,7 +65,7 @@ final class SurfaceTextureHelper {
int oesTextureId, float[] transformMatrix, long timestampNs);
}
- public static SurfaceTextureHelper create(EGLContext sharedContext) {
+ public static SurfaceTextureHelper create(EglBase.Context sharedContext) {
return create(sharedContext, null);
}
@@ -74,7 +74,8 @@ final class SurfaceTextureHelper {
* |handler| is non-null, the callback will be executed on that handler's thread. If |handler| is
* null, a dedicated private thread is created for the callbacks.
*/
- public static SurfaceTextureHelper create(final EGLContext sharedContext, final Handler handler) {
+ public static SurfaceTextureHelper create(final EglBase.Context sharedContext,
+ final Handler handler) {
final Handler finalHandler;
if (handler != null) {
finalHandler = handler;
@@ -94,25 +95,240 @@ final class SurfaceTextureHelper {
});
}
+ // State for YUV conversion, instantiated on demand.
+  private static class YuvConverter {
+ private final EglBase eglBase;
+ private final GlShader shader;
+ private boolean released = false;
+
+ // Vertex coordinates in Normalized Device Coordinates, i.e.
+ // (-1, -1) is bottom-left and (1, 1) is top-right.
+ private static final FloatBuffer DEVICE_RECTANGLE =
+ GlUtil.createFloatBuffer(new float[] {
+ -1.0f, -1.0f, // Bottom left.
+ 1.0f, -1.0f, // Bottom right.
+ -1.0f, 1.0f, // Top left.
+ 1.0f, 1.0f, // Top right.
+ });
+
+ // Texture coordinates - (0, 0) is bottom-left and (1, 1) is top-right.
+ private static final FloatBuffer TEXTURE_RECTANGLE =
+ GlUtil.createFloatBuffer(new float[] {
+ 0.0f, 0.0f, // Bottom left.
+ 1.0f, 0.0f, // Bottom right.
+ 0.0f, 1.0f, // Top left.
+ 1.0f, 1.0f // Top right.
+ });
+
+ private static final String VERTEX_SHADER =
+ "varying vec2 interp_tc;\n"
+ + "attribute vec4 in_pos;\n"
+ + "attribute vec4 in_tc;\n"
+ + "\n"
+ + "uniform mat4 texMatrix;\n"
+ + "\n"
+ + "void main() {\n"
+ + " gl_Position = in_pos;\n"
+ + " interp_tc = (texMatrix * in_tc).xy;\n"
+ + "}\n";
+
+ private static final String FRAGMENT_SHADER =
+ "#extension GL_OES_EGL_image_external : require\n"
+ + "precision mediump float;\n"
+ + "varying vec2 interp_tc;\n"
+ + "\n"
+ + "uniform samplerExternalOES oesTex;\n"
+ // Difference in texture coordinate corresponding to one
+ // sub-pixel in the x direction.
+ + "uniform vec2 xUnit;\n"
+        // Color conversion coefficients, including the constant term.
+ + "uniform vec4 coeffs;\n"
+ + "\n"
+ + "void main() {\n"
+ // Since the alpha read from the texture is always 1, this could
+ // be written as a mat4 x vec4 multiply. However, that seems to
+ // give a worse framerate, possibly because the additional
+ // multiplies by 1.0 consume resources. TODO(nisse): Could also
+ // try to do it as a vec3 x mat3x4, followed by an add in of a
+ // constant vector.
+ + " gl_FragColor.r = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc - 1.5 * xUnit).rgb);\n"
+ + " gl_FragColor.g = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc - 0.5 * xUnit).rgb);\n"
+ + " gl_FragColor.b = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc + 0.5 * xUnit).rgb);\n"
+ + " gl_FragColor.a = coeffs.a + dot(coeffs.rgb,\n"
+ + " texture2D(oesTex, interp_tc + 1.5 * xUnit).rgb);\n"
+ + "}\n";
+
+ private int texMatrixLoc;
+ private int xUnitLoc;
+    private int coeffsLoc;
+
+    YuvConverter(EglBase.Context sharedContext) {
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PIXEL_RGBA_BUFFER);
+ eglBase.createDummyPbufferSurface();
+ eglBase.makeCurrent();
+
+ shader = new GlShader(VERTEX_SHADER, FRAGMENT_SHADER);
+ shader.useProgram();
+ texMatrixLoc = shader.getUniformLocation("texMatrix");
+ xUnitLoc = shader.getUniformLocation("xUnit");
+ coeffsLoc = shader.getUniformLocation("coeffs");
+ GLES20.glUniform1i(shader.getUniformLocation("oesTex"), 0);
+ GlUtil.checkNoGLES2Error("Initialize fragment shader uniform values.");
+ // Initialize vertex shader attributes.
+ shader.setVertexAttribArray("in_pos", 2, DEVICE_RECTANGLE);
+ // If the width is not a multiple of 4 pixels, the texture
+ // will be scaled up slightly and clipped at the right border.
+ shader.setVertexAttribArray("in_tc", 2, TEXTURE_RECTANGLE);
+ eglBase.detachCurrent();
+ }
+
+ synchronized void convert(ByteBuffer buf,
+ int width, int height, int stride, int textureId, float [] transformMatrix) {
+ if (released) {
+ throw new IllegalStateException(
+ "YuvConverter.convert called on released object");
+ }
+
+ // We draw into a buffer laid out like
+ //
+ // +---------+
+ // | |
+ // | Y |
+ // | |
+ // | |
+ // +----+----+
+ // | U | V |
+ // | | |
+ // +----+----+
+ //
+ // In memory, we use the same stride for all of Y, U and V. The
+ // U data starts at offset |height| * |stride| from the Y data,
+      // and the V data starts at offset |stride/2| from the U
+ // data, with rows of U and V data alternating.
+ //
+ // Now, it would have made sense to allocate a pixel buffer with
+ // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
+      // EGL10.EGL_LUMINANCE_BUFFER), but that seems to be
+ // unsupported by devices. So do the following hack: Allocate an
+ // RGBA buffer, of width |stride|/4. To render each of these
+ // large pixels, sample the texture at 4 different x coordinates
+ // and store the results in the four components.
+ //
+ // Since the V data needs to start on a boundary of such a
+      // larger pixel, it is not sufficient that |stride| is even; it
+ // has to be a multiple of 8 pixels.
+
+ if (stride % 8 != 0) {
+ throw new IllegalArgumentException(
+ "Invalid stride, must be a multiple of 8");
+ }
+      if (stride < width) {
+        throw new IllegalArgumentException(
+            "Invalid stride, must be >= width");
+ }
+
+ int y_width = (width+3) / 4;
+ int uv_width = (width+7) / 8;
+ int uv_height = (height+1)/2;
+ int total_height = height + uv_height;
+ int size = stride * total_height;
+
+ if (buf.capacity() < size) {
+ throw new IllegalArgumentException("YuvConverter.convert called with too small buffer");
+ }
+ // Produce a frame buffer starting at top-left corner, not
+ // bottom-left.
+ transformMatrix =
+ RendererCommon.multiplyMatrices(transformMatrix,
+ RendererCommon.verticalFlipMatrix());
+
+      // Create a new pbuffer surface with the correct size if needed.
+ if (eglBase.hasSurface()) {
+ if (eglBase.surfaceWidth() != stride/4 ||
+            eglBase.surfaceHeight() != total_height) {
+ eglBase.releaseSurface();
+ eglBase.createPbufferSurface(stride/4, total_height);
+ }
+ } else {
+ eglBase.createPbufferSurface(stride/4, total_height);
+ }
+
+ eglBase.makeCurrent();
+
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, textureId);
+ GLES20.glUniformMatrix4fv(texMatrixLoc, 1, false, transformMatrix, 0);
+
+ // Draw Y
+ GLES20.glViewport(0, 0, y_width, height);
+      // Matrix * (1;0;0;0) / width. Note that OpenGL uses column-major order.
+ GLES20.glUniform2f(xUnitLoc,
+ transformMatrix[0] / width,
+ transformMatrix[1] / width);
+ // Y'UV444 to RGB888, see
+ // https://en.wikipedia.org/wiki/YUV#Y.27UV444_to_RGB888_conversion.
+      // We use the ITU-R coefficients for U and V.
+ GLES20.glUniform4f(coeffsLoc, 0.299f, 0.587f, 0.114f, 0.0f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ // Draw U
+ GLES20.glViewport(0, height, uv_width, uv_height);
+      // Matrix * (1;0;0;0) / (2*width). Note that OpenGL uses column-major order.
+ GLES20.glUniform2f(xUnitLoc,
+ transformMatrix[0] / (2.0f*width),
+ transformMatrix[1] / (2.0f*width));
+ GLES20.glUniform4f(coeffsLoc, -0.169f, -0.331f, 0.499f, 0.5f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ // Draw V
+ GLES20.glViewport(stride/8, height, uv_width, uv_height);
+ GLES20.glUniform4f(coeffsLoc, 0.499f, -0.418f, -0.0813f, 0.5f);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+
+ GLES20.glReadPixels(0, 0, stride/4, total_height, GLES20.GL_RGBA,
+ GLES20.GL_UNSIGNED_BYTE, buf);
+
+ GlUtil.checkNoGLES2Error("YuvConverter.convert");
+
+ // Unbind texture. Reportedly needed on some devices to get
+ // the texture updated from the camera.
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, 0);
+ eglBase.detachCurrent();
+ }
+
+ synchronized void release() {
+ released = true;
+ eglBase.makeCurrent();
+ shader.release();
+ eglBase.release();
+ }
+ }
+
private final Handler handler;
- private final boolean isOwningThread;
+ private boolean isOwningThread;
private final EglBase eglBase;
private final SurfaceTexture surfaceTexture;
private final int oesTextureId;
+ private YuvConverter yuvConverter;
+
private OnTextureFrameAvailableListener listener;
// The possible states of this class.
private boolean hasPendingTexture = false;
- private boolean isTextureInUse = false;
+ private volatile boolean isTextureInUse = false;
private boolean isQuitting = false;
- private SurfaceTextureHelper(EGLContext sharedContext, Handler handler, boolean isOwningThread) {
+ private SurfaceTextureHelper(EglBase.Context sharedContext,
+ Handler handler, boolean isOwningThread) {
if (handler.getLooper().getThread() != Thread.currentThread()) {
throw new IllegalStateException("SurfaceTextureHelper must be created on the handler thread");
}
this.handler = handler;
this.isOwningThread = isOwningThread;
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PIXEL_BUFFER);
+ eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PIXEL_BUFFER);
eglBase.createDummyPbufferSurface();
eglBase.makeCurrent();
@@ -120,6 +336,18 @@ final class SurfaceTextureHelper {
surfaceTexture = new SurfaceTexture(oesTextureId);
}
+ private YuvConverter getYuvConverter() {
+    // yuvConverter is assigned once; check without taking the lock first.
+ if (yuvConverter != null)
+ return yuvConverter;
+
+ synchronized(this) {
+ if (yuvConverter == null)
+ yuvConverter = new YuvConverter(eglBase.getEglBaseContext());
+ return yuvConverter;
+ }
+ }
+
/**
* Start to stream textures to the given |listener|.
* A Listener can only be set once.
@@ -164,12 +392,19 @@ final class SurfaceTextureHelper {
});
}
+ public boolean isTextureInUse() {
+ return isTextureInUse;
+ }
+
/**
* Call disconnect() to stop receiving frames. Resources are released when the texture frame has
* been returned by a call to returnTextureFrame(). You are guaranteed to not receive any more
* onTextureFrameAvailable() after this function returns.
*/
public void disconnect() {
+ if (!isOwningThread) {
+ throw new IllegalStateException("Must call disconnect(handler).");
+ }
if (handler.getLooper().getThread() == Thread.currentThread()) {
isQuitting = true;
if (!isTextureInUse) {
@@ -190,6 +425,28 @@ final class SurfaceTextureHelper {
ThreadUtils.awaitUninterruptibly(barrier);
}
+ /**
+   * Call disconnect(handler) to stop receiving frames and quit the looper used by |handler|.
+ * Resources are released when the texture frame has been returned by a call to
+ * returnTextureFrame(). You are guaranteed to not receive any more
+ * onTextureFrameAvailable() after this function returns.
+ */
+ public void disconnect(Handler handler) {
+ if (this.handler != handler) {
+ throw new IllegalStateException("Wrong handler.");
+ }
+ isOwningThread = true;
+ disconnect();
+ }
+
+ public void textureToYUV(ByteBuffer buf,
+ int width, int height, int stride, int textureId, float [] transformMatrix) {
+ if (textureId != oesTextureId)
+ throw new IllegalStateException("textureToByteBuffer called with unexpected textureId");
+
+ getYuvConverter().convert(buf, width, height, stride, textureId, transformMatrix);
+ }
+
private void tryDeliverTextureFrame() {
if (handler.getLooper().getThread() != Thread.currentThread()) {
throw new IllegalStateException("Wrong thread.");
@@ -218,12 +475,14 @@ final class SurfaceTextureHelper {
if (isTextureInUse || !isQuitting) {
throw new IllegalStateException("Unexpected release.");
}
+ synchronized (this) {
+ if (yuvConverter != null)
+ yuvConverter.release();
+ }
eglBase.makeCurrent();
GLES20.glDeleteTextures(1, new int[] {oesTextureId}, 0);
surfaceTexture.release();
eglBase.release();
- if (isOwningThread) {
- handler.getLooper().quit();
- }
+ handler.getLooper().quit();
}
}
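The layout documented in YuvConverter.convert() pins down the buffer capacity a textureToYUV() caller must provide, as well as the fixed plane offsets. A worked sketch with illustrative helper names: for width = 640, height = 480, stride = 640, the required capacity is 640 * 720 = 460800 bytes, the U data starts at offset 307200, and the V data at 307520.

import java.nio.ByteBuffer;

class YuvBufferSketch {
  // Mirrors the stride checks and size computation in YuvConverter.convert().
  static ByteBuffer allocateFor(int width, int height, int stride) {
    if (stride % 8 != 0 || stride < width) {
      throw new IllegalArgumentException("stride must be >= width and a multiple of 8");
    }
    final int uvHeight = (height + 1) / 2;
    // Y plane of |height| rows, then |uvHeight| rows of alternating U/V data,
    // all with the same |stride|.
    return ByteBuffer.allocateDirect(stride * (height + uvHeight));
  }

  static int uOffset(int height, int stride) {
    return height * stride;               // U data starts right after the Y plane.
  }

  static int vOffset(int height, int stride) {
    return height * stride + stride / 2;  // V data starts half a row after U.
  }
}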
diff --git a/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java b/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
index d7c9e2af0a..fa199b33c8 100644
--- a/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
+++ b/talk/app/webrtc/java/android/org/webrtc/SurfaceViewRenderer.java
@@ -28,10 +28,9 @@
package org.webrtc;
import android.content.Context;
+import android.content.res.Resources.NotFoundException;
import android.graphics.Point;
-import android.graphics.SurfaceTexture;
import android.opengl.GLES20;
-import android.opengl.Matrix;
import android.os.Handler;
import android.os.HandlerThread;
import android.util.AttributeSet;
@@ -67,7 +66,8 @@ public class SurfaceViewRenderer extends SurfaceView
// EGL and GL resources for drawing YUV/OES textures. After initialization, these are only accessed
// from the render thread.
private EglBase eglBase;
- private GlRectDrawer drawer;
+ private final RendererCommon.YuvUploader yuvUploader = new RendererCommon.YuvUploader();
+ private RendererCommon.GlDrawer drawer;
// Texture ids for YUV frames. Allocated on first arrival of a YUV frame.
private int[] yuvTextures = null;
@@ -77,23 +77,22 @@ public class SurfaceViewRenderer extends SurfaceView
// These variables are synchronized on |layoutLock|.
private final Object layoutLock = new Object();
- // These three different dimension values are used to keep track of the state in these functions:
- // requestLayout() -> onMeasure() -> onLayout() -> surfaceChanged().
- // requestLayout() is triggered internally by frame size changes, but can also be triggered
- // externally by layout update requests.
- // Most recent measurement specification from onMeasure().
- private int widthSpec;
- private int heightSpec;
- // Current size on screen in pixels. Updated in onLayout(), and should be consistent with
- // |widthSpec|/|heightSpec| after that.
- private int layoutWidth;
- private int layoutHeight;
- // Current surface size of the underlying Surface. Updated in surfaceChanged(), and should be
- // consistent with |layoutWidth|/|layoutHeight| after that.
+ // These dimension values are used to keep track of the state in these functions: onMeasure(),
+ // onLayout(), and surfaceChanged(). A new layout is triggered with requestLayout(). This happens
+ // internally when the incoming frame size changes. requestLayout() can also be triggered
+  // externally. The layout change is a two-pass process: first onMeasure() is called in a top-down
+ // traversal of the View tree, followed by an onLayout() pass that is also top-down. During the
+ // onLayout() pass, each parent is responsible for positioning its children using the sizes
+ // computed in the measure pass.
+  // |desiredLayoutSize| is the layout size we have requested in onMeasure() and are waiting to
+  // take effect.
+ private Point desiredLayoutSize = new Point();
+ // |layoutSize|/|surfaceSize| is the actual current layout/surface size. They are updated in
+ // onLayout() and surfaceChanged() respectively.
+ private final Point layoutSize = new Point();
// TODO(magjed): Enable hardware scaler with SurfaceHolder.setFixedSize(). This will decouple
// layout and surface size.
- private int surfaceWidth;
- private int surfaceHeight;
+ private final Point surfaceSize = new Point();
// |isSurfaceCreated| keeps track of the current status in surfaceCreated()/surfaceDestroyed().
private boolean isSurfaceCreated;
// Last rendered frame dimensions, or 0 if no frame has been rendered yet.
@@ -121,12 +120,18 @@ public class SurfaceViewRenderer extends SurfaceView
// Time in ns spent in renderFrameOnRenderThread() function.
private long renderTimeNs;
- // Runnable for posting frames to render thread..
+ // Runnable for posting frames to render thread.
private final Runnable renderFrameRunnable = new Runnable() {
@Override public void run() {
renderFrameOnRenderThread();
}
};
+ // Runnable for clearing Surface to black.
+ private final Runnable makeBlackRunnable = new Runnable() {
+ @Override public void run() {
+ makeBlack();
+ }
+ };
/**
* Standard View constructor. In order to render something, you must first call init().
@@ -149,17 +154,28 @@ public class SurfaceViewRenderer extends SurfaceView
* reinitialize the renderer after a previous init()/release() cycle.
*/
public void init(
- EGLContext sharedContext, RendererCommon.RendererEvents rendererEvents) {
+ EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents) {
+ init(sharedContext, rendererEvents, EglBase.CONFIG_PLAIN, new GlRectDrawer());
+ }
+
+ /**
+ * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+ * for drawing frames on the EGLSurface. This class is responsible for calling release() on
+ * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+ * init()/release() cycle.
+ */
+ public void init(EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents,
+ int[] configAttributes, RendererCommon.GlDrawer drawer) {
synchronized (handlerLock) {
if (renderThreadHandler != null) {
- throw new IllegalStateException("Already initialized");
+ throw new IllegalStateException(getResourceName() + "Already initialized");
}
- Logging.d(TAG, "Initializing");
+ Logging.d(TAG, getResourceName() + "Initializing.");
this.rendererEvents = rendererEvents;
+ this.drawer = drawer;
renderThread = new HandlerThread(TAG);
renderThread.start();
- drawer = new GlRectDrawer();
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PLAIN);
+ eglBase = EglBase.create(sharedContext, configAttributes);
renderThreadHandler = new Handler(renderThread.getLooper());
}
tryCreateEglSurface();
@@ -174,8 +190,8 @@ public class SurfaceViewRenderer extends SurfaceView
runOnRenderThread(new Runnable() {
@Override public void run() {
synchronized (layoutLock) {
- if (isSurfaceCreated) {
- eglBase.createSurface(getHolder());
+ if (isSurfaceCreated && !eglBase.hasSurface()) {
+ eglBase.createSurface(getHolder().getSurface());
eglBase.makeCurrent();
// Necessary for YUV frames with odd width.
GLES20.glPixelStorei(GLES20.GL_UNPACK_ALIGNMENT, 1);
@@ -195,7 +211,7 @@ public class SurfaceViewRenderer extends SurfaceView
final CountDownLatch eglCleanupBarrier = new CountDownLatch(1);
synchronized (handlerLock) {
if (renderThreadHandler == null) {
- Logging.d(TAG, "Already released");
+ Logging.d(TAG, getResourceName() + "Already released");
return;
}
// Release EGL and GL resources on render thread.
@@ -210,11 +226,8 @@ public class SurfaceViewRenderer extends SurfaceView
GLES20.glDeleteTextures(3, yuvTextures, 0);
yuvTextures = null;
}
- if (eglBase.hasSurface()) {
- // Clear last rendered image to black.
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
- }
+ // Clear last rendered image to black.
+ makeBlack();
eglBase.release();
eglBase = null;
eglCleanupBarrier.countDown();
@@ -242,6 +255,14 @@ public class SurfaceViewRenderer extends SurfaceView
frameRotation = 0;
rendererEvents = null;
}
+ resetStatistics();
+ }
+
+ /**
+ * Reset statistics. This will reset the logged statistics in logStatistics(), and
+ * RendererEvents.onFirstFrameRendered() will be called for the next frame.
+ */
+ public void resetStatistics() {
synchronized (statisticsLock) {
framesReceived = 0;
framesDropped = 0;
@@ -277,27 +298,28 @@ public class SurfaceViewRenderer extends SurfaceView
}
synchronized (handlerLock) {
if (renderThreadHandler == null) {
- Logging.d(TAG, "Dropping frame - SurfaceViewRenderer not initialized or already released.");
- } else {
- synchronized (frameLock) {
- if (pendingFrame == null) {
- updateFrameDimensionsAndReportEvents(frame);
- pendingFrame = frame;
- renderThreadHandler.post(renderFrameRunnable);
- return;
+ Logging.d(TAG, getResourceName()
+ + "Dropping frame - Not initialized or already released.");
+ VideoRenderer.renderFrameDone(frame);
+ return;
+ }
+ synchronized (frameLock) {
+ if (pendingFrame != null) {
+ // Drop old frame.
+ synchronized (statisticsLock) {
+ ++framesDropped;
}
+ VideoRenderer.renderFrameDone(pendingFrame);
}
+ pendingFrame = frame;
+ updateFrameDimensionsAndReportEvents(frame);
+ renderThreadHandler.post(renderFrameRunnable);
}
}
- // Drop frame.
- synchronized (statisticsLock) {
- ++framesDropped;
- }
- VideoRenderer.renderFrameDone(frame);
}
// Returns desired layout size given current measure specification and video aspect ratio.
- private Point getDesiredLayoutSize() {
+ private Point getDesiredLayoutSize(int widthSpec, int heightSpec) {
synchronized (layoutLock) {
final int maxWidth = getDefaultSize(Integer.MAX_VALUE, widthSpec);
final int maxHeight = getDefaultSize(Integer.MAX_VALUE, heightSpec);
@@ -317,18 +339,30 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
protected void onMeasure(int widthSpec, int heightSpec) {
synchronized (layoutLock) {
- this.widthSpec = widthSpec;
- this.heightSpec = heightSpec;
- final Point size = getDesiredLayoutSize();
- setMeasuredDimension(size.x, size.y);
+ if (frameWidth == 0 || frameHeight == 0) {
+ super.onMeasure(widthSpec, heightSpec);
+ return;
+ }
+ desiredLayoutSize = getDesiredLayoutSize(widthSpec, heightSpec);
+ if (desiredLayoutSize.x != getMeasuredWidth() || desiredLayoutSize.y != getMeasuredHeight()) {
+        // Clear the surface as soon as possible before the layout change to avoid stretched video
+        // and other render artifacts. Don't wait for it to finish because the IO thread should never be
+ // blocked, so it's a best-effort attempt.
+ synchronized (handlerLock) {
+ if (renderThreadHandler != null) {
+ renderThreadHandler.postAtFrontOfQueue(makeBlackRunnable);
+ }
+ }
+ }
+ setMeasuredDimension(desiredLayoutSize.x, desiredLayoutSize.y);
}
}
@Override
protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
synchronized (layoutLock) {
- layoutWidth = right - left;
- layoutHeight = bottom - top;
+ layoutSize.x = right - left;
+ layoutSize.y = bottom - top;
}
// Might have a pending frame waiting for a layout of correct size.
runOnRenderThread(renderFrameRunnable);
@@ -337,7 +371,7 @@ public class SurfaceViewRenderer extends SurfaceView
// SurfaceHolder.Callback interface.
@Override
public void surfaceCreated(final SurfaceHolder holder) {
- Logging.d(TAG, "Surface created");
+ Logging.d(TAG, getResourceName() + "Surface created.");
synchronized (layoutLock) {
isSurfaceCreated = true;
}
@@ -346,11 +380,11 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
- Logging.d(TAG, "Surface destroyed");
+ Logging.d(TAG, getResourceName() + "Surface destroyed.");
synchronized (layoutLock) {
isSurfaceCreated = false;
- surfaceWidth = 0;
- surfaceHeight = 0;
+ surfaceSize.x = 0;
+ surfaceSize.y = 0;
}
runOnRenderThread(new Runnable() {
@Override public void run() {
@@ -361,10 +395,10 @@ public class SurfaceViewRenderer extends SurfaceView
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
- Logging.d(TAG, "Surface changed: " + width + "x" + height);
+ Logging.d(TAG, getResourceName() + "Surface changed: " + width + "x" + height);
synchronized (layoutLock) {
- surfaceWidth = width;
- surfaceHeight = height;
+ surfaceSize.x = width;
+ surfaceSize.y = height;
}
// Might have a pending frame waiting for a surface of correct size.
runOnRenderThread(renderFrameRunnable);
@@ -381,26 +415,35 @@ public class SurfaceViewRenderer extends SurfaceView
}
}
+ private String getResourceName() {
+ try {
+ return getResources().getResourceEntryName(getId()) + ": ";
+ } catch (NotFoundException e) {
+ return "";
+ }
+ }
+
+ private void makeBlack() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
+ if (eglBase != null && eglBase.hasSurface()) {
+ GLES20.glClearColor(0, 0, 0, 0);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ eglBase.swapBuffers();
+ }
+ }
+
/**
* Requests new layout if necessary. Returns true if layout and surface size are consistent.
*/
private boolean checkConsistentLayout() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
synchronized (layoutLock) {
- final Point desiredLayoutSize = getDesiredLayoutSize();
- if (desiredLayoutSize.x != layoutWidth || desiredLayoutSize.y != layoutHeight) {
- Logging.d(TAG, "Requesting new layout with size: "
- + desiredLayoutSize.x + "x" + desiredLayoutSize.y);
- // Request layout update on UI thread.
- post(new Runnable() {
- @Override public void run() {
- requestLayout();
- }
- });
- return false;
- }
- // Wait for requestLayout() to propagate through this sequence before returning true:
- // requestLayout() -> onMeasure() -> onLayout() -> surfaceChanged().
- return surfaceWidth == layoutWidth && surfaceHeight == layoutHeight;
+ // Return false while we are in the middle of a layout change.
+ return layoutSize.equals(desiredLayoutSize) && surfaceSize.equals(layoutSize);
}
}
@@ -408,61 +451,51 @@ public class SurfaceViewRenderer extends SurfaceView
* Renders and releases |pendingFrame|.
*/
private void renderFrameOnRenderThread() {
+ if (Thread.currentThread() != renderThread) {
+ throw new IllegalStateException(getResourceName() + "Wrong thread.");
+ }
+ // Fetch and render |pendingFrame|.
+ final VideoRenderer.I420Frame frame;
+ synchronized (frameLock) {
+ if (pendingFrame == null) {
+ return;
+ }
+ frame = pendingFrame;
+ pendingFrame = null;
+ }
if (eglBase == null || !eglBase.hasSurface()) {
- Logging.d(TAG, "No surface to draw on");
+ Logging.d(TAG, getResourceName() + "No surface to draw on");
+ VideoRenderer.renderFrameDone(frame);
return;
}
if (!checkConsistentLayout()) {
// Output intermediate black frames while the layout is updated.
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
+ makeBlack();
+ VideoRenderer.renderFrameDone(frame);
return;
}
// After a surface size change, the EGLSurface might still have a buffer of the old size in the
// pipeline. Querying the EGLSurface will show if the underlying buffer dimensions haven't yet
// changed. Such a buffer will be rendered incorrectly, so flush it with a black frame.
synchronized (layoutLock) {
- if (eglBase.surfaceWidth() != surfaceWidth || eglBase.surfaceHeight() != surfaceHeight) {
- GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
- eglBase.swapBuffers();
+ if (eglBase.surfaceWidth() != surfaceSize.x || eglBase.surfaceHeight() != surfaceSize.y) {
+ makeBlack();
}
}
- // Fetch and render |pendingFrame|.
- final VideoRenderer.I420Frame frame;
- synchronized (frameLock) {
- if (pendingFrame == null) {
- return;
- }
- frame = pendingFrame;
- pendingFrame = null;
- }
final long startTimeNs = System.nanoTime();
- final float[] samplingMatrix;
- if (frame.yuvFrame) {
- // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
- // top-left corner of the image, but in glTexImage2D() the first element corresponds to the
- // bottom-left corner. We correct this discrepancy by setting a vertical flip as sampling
- // matrix.
- samplingMatrix = RendererCommon.verticalFlipMatrix();
- } else {
- // TODO(magjed): Move updateTexImage() to the video source instead.
- SurfaceTexture surfaceTexture = (SurfaceTexture) frame.textureObject;
- surfaceTexture.updateTexImage();
- samplingMatrix = new float[16];
- surfaceTexture.getTransformMatrix(samplingMatrix);
- }
-
final float[] texMatrix;
synchronized (layoutLock) {
final float[] rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, frame.rotationDegree);
+ RendererCommon.rotateTextureMatrix(frame.samplingMatrix, frame.rotationDegree);
final float[] layoutMatrix = RendererCommon.getLayoutMatrix(
- mirror, frameAspectRatio(), (float) layoutWidth / layoutHeight);
+ mirror, frameAspectRatio(), (float) layoutSize.x / layoutSize.y);
texMatrix = RendererCommon.multiplyMatrices(rotatedSamplingMatrix, layoutMatrix);
}
- GLES20.glViewport(0, 0, surfaceWidth, surfaceHeight);
+ // TODO(magjed): glClear() shouldn't be necessary since every pixel is covered anyway, but it's
+ // a workaround for bug 5147. Performance will be slightly worse.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
if (frame.yuvFrame) {
// Make sure YUV textures are allocated.
if (yuvTextures == null) {
@@ -471,11 +504,11 @@ public class SurfaceViewRenderer extends SurfaceView
yuvTextures[i] = GlUtil.generateTexture(GLES20.GL_TEXTURE_2D);
}
}
- drawer.uploadYuvData(
+ yuvUploader.uploadYuvData(
yuvTextures, frame.width, frame.height, frame.yuvStrides, frame.yuvPlanes);
- drawer.drawYuv(yuvTextures, texMatrix);
+ drawer.drawYuv(yuvTextures, texMatrix, 0, 0, surfaceSize.x, surfaceSize.y);
} else {
- drawer.drawOes(frame.textureId, texMatrix);
+ drawer.drawOes(frame.textureId, texMatrix, 0, 0, surfaceSize.x, surfaceSize.y);
}
eglBase.swapBuffers();
@@ -483,6 +516,12 @@ public class SurfaceViewRenderer extends SurfaceView
synchronized (statisticsLock) {
if (framesRendered == 0) {
firstFrameTimeNs = startTimeNs;
+ synchronized (layoutLock) {
+ Logging.d(TAG, getResourceName() + "Reporting first rendered frame.");
+ if (rendererEvents != null) {
+ rendererEvents.onFirstFrameRendered();
+ }
+ }
}
++framesRendered;
renderTimeNs += (System.nanoTime() - startTimeNs);
@@ -508,32 +547,32 @@ public class SurfaceViewRenderer extends SurfaceView
synchronized (layoutLock) {
if (frameWidth != frame.width || frameHeight != frame.height
|| frameRotation != frame.rotationDegree) {
+ Logging.d(TAG, getResourceName() + "Reporting frame resolution changed to "
+ + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
if (rendererEvents != null) {
- final String id = getResources().getResourceEntryName(getId());
- if (frameWidth == 0 || frameHeight == 0) {
- Logging.d(TAG, "ID: " + id + ". Reporting first rendered frame.");
- rendererEvents.onFirstFrameRendered();
- }
- Logging.d(TAG, "ID: " + id + ". Reporting frame resolution changed to "
- + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
rendererEvents.onFrameResolutionChanged(frame.width, frame.height, frame.rotationDegree);
}
frameWidth = frame.width;
frameHeight = frame.height;
frameRotation = frame.rotationDegree;
+ post(new Runnable() {
+ @Override public void run() {
+ requestLayout();
+ }
+ });
}
}
}
private void logStatistics() {
synchronized (statisticsLock) {
- Logging.d(TAG, "ID: " + getResources().getResourceEntryName(getId()) + ". Frames received: "
+ Logging.d(TAG, getResourceName() + "Frames received: "
+ framesReceived + ". Dropped: " + framesDropped + ". Rendered: " + framesRendered);
if (framesReceived > 0 && framesRendered > 0) {
final long timeSinceFirstFrameNs = System.nanoTime() - firstFrameTimeNs;
- Logging.d(TAG, "Duration: " + (int) (timeSinceFirstFrameNs / 1e6) +
- " ms. FPS: " + (float) framesRendered * 1e9 / timeSinceFirstFrameNs);
- Logging.d(TAG, "Average render time: "
+ Logging.d(TAG, getResourceName() + "Duration: " + (int) (timeSinceFirstFrameNs / 1e6) +
+ " ms. FPS: " + framesRendered * 1e9 / timeSinceFirstFrameNs);
+ Logging.d(TAG, getResourceName() + "Average render time: "
+ (int) (renderTimeNs / (1000 * framesRendered)) + " us.");
}
}
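A sketch of the extended init() overload (names other than the WebRTC classes are illustrative). The two-argument init() above is equivalent to passing EglBase.CONFIG_PLAIN and a fresh GlRectDrawer; the renderer takes ownership of the drawer and releases it in release().

class RendererLifecycleSketch {
  // |sharedContext| would typically come from eglBase.getEglBaseContext() of
  // the context used for decoding, so texture frames can be shared.
  static void start(SurfaceViewRenderer renderer, EglBase.Context sharedContext,
      RendererCommon.RendererEvents events) {
    renderer.init(sharedContext, events, EglBase.CONFIG_PLAIN, new GlRectDrawer());
  }

  static void stop(SurfaceViewRenderer renderer) {
    renderer.release();  // Also releases the drawer that was passed to init().
  }
}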
diff --git a/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java b/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
index 0d8968aba9..e60ead9f00 100644
--- a/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
+++ b/talk/app/webrtc/java/android/org/webrtc/ThreadUtils.java
@@ -28,11 +28,13 @@
package org.webrtc;
import android.os.Handler;
+import android.os.SystemClock;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
-final class ThreadUtils {
+public class ThreadUtils {
/**
* Utility class to be used for checking that a method is called on the correct thread.
*/
@@ -86,6 +88,29 @@ final class ThreadUtils {
}
}
+ public static boolean joinUninterruptibly(final Thread thread, long timeoutMs) {
+ final long startTimeMs = SystemClock.elapsedRealtime();
+ long timeRemainingMs = timeoutMs;
+ boolean wasInterrupted = false;
+ while (timeRemainingMs > 0) {
+ try {
+ thread.join(timeRemainingMs);
+ break;
+ } catch (InterruptedException e) {
+ // Someone is asking us to return early at our convenience. We can't cancel this operation,
+ // but we should preserve the information and pass it along.
+ wasInterrupted = true;
+ final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+ timeRemainingMs = timeoutMs - elapsedTimeMs;
+ }
+ }
+ // Pass interruption information along.
+ if (wasInterrupted) {
+ Thread.currentThread().interrupt();
+ }
+ return !thread.isAlive();
+ }
+
public static void joinUninterruptibly(final Thread thread) {
executeUninterruptibly(new BlockingOperation() {
@Override
@@ -104,6 +129,30 @@ final class ThreadUtils {
});
}
+ public static boolean awaitUninterruptibly(CountDownLatch barrier, long timeoutMs) {
+ final long startTimeMs = SystemClock.elapsedRealtime();
+ long timeRemainingMs = timeoutMs;
+ boolean wasInterrupted = false;
+ boolean result = false;
+ do {
+ try {
+ result = barrier.await(timeRemainingMs, TimeUnit.MILLISECONDS);
+ break;
+ } catch (InterruptedException e) {
+ // Someone is asking us to return early at our convenience. We can't cancel this operation,
+ // but we should preserve the information and pass it along.
+ wasInterrupted = true;
+ final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+ timeRemainingMs = timeoutMs - elapsedTimeMs;
+ }
+ } while (timeRemainingMs > 0);
+ // Pass interruption information along.
+ if (wasInterrupted) {
+ Thread.currentThread().interrupt();
+ }
+ return result;
+ }
+
/**
* Post |callable| to |handler| and wait for the result.
*/
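A usage sketch of the two timed helpers added above (illustrative names). Both bound the wait, and both re-set the calling thread's interrupt flag if an InterruptedException arrived while waiting, so the interruption remains observable afterwards.

import java.util.concurrent.CountDownLatch;

class ShutdownSketch {
  // Waits up to one second each for cleanup to finish and the worker to die.
  static boolean stopWorker(Thread worker, CountDownLatch cleanupDone) {
    final boolean cleanedUp = ThreadUtils.awaitUninterruptibly(cleanupDone, 1000);
    final boolean exited = ThreadUtils.joinUninterruptibly(worker, 1000);
    if (Thread.currentThread().isInterrupted()) {
      // The helpers preserved the interrupt; handle or propagate it here.
    }
    return cleanedUp && exited;
  }
}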
diff --git a/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java b/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
index 4caefc513d..36f60edd5c 100644
--- a/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
+++ b/talk/app/webrtc/java/android/org/webrtc/VideoCapturerAndroid.java
@@ -28,9 +28,6 @@
package org.webrtc;
import android.content.Context;
-import android.graphics.SurfaceTexture;
-import android.hardware.Camera;
-import android.hardware.Camera.PreviewCallback;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.SystemClock;
@@ -53,9 +50,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import javax.microedition.khronos.egl.EGLContext;
-import javax.microedition.khronos.egl.EGL10;
-
// Android specific implementation of VideoCapturer.
// An instance of this class can be created by an application using
// VideoCapturerAndroid.create();
@@ -68,21 +62,22 @@ import javax.microedition.khronos.egl.EGL10;
// camera thread. The internal *OnCameraThread() methods must check |camera| for null to check if
// the camera has been stopped.
@SuppressWarnings("deprecation")
-public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallback,
+public class VideoCapturerAndroid extends VideoCapturer implements
+ android.hardware.Camera.PreviewCallback,
SurfaceTextureHelper.OnTextureFrameAvailableListener {
private final static String TAG = "VideoCapturerAndroid";
private final static int CAMERA_OBSERVER_PERIOD_MS = 2000;
+  private final static int CAMERA_FREEZE_REPORT_TIMEOUT_MS = 6000;
- private Camera camera; // Only non-null while capturing.
+ private android.hardware.Camera camera; // Only non-null while capturing.
private HandlerThread cameraThread;
private final Handler cameraThreadHandler;
private Context applicationContext;
// Synchronization lock for |id|.
private final Object cameraIdLock = new Object();
private int id;
- private Camera.CameraInfo info;
- private final FramePool videoBuffers;
- private final CameraStatistics cameraStatistics = new CameraStatistics();
+ private android.hardware.Camera.CameraInfo info;
+ private final CameraStatistics cameraStatistics;
// Remember the requested format in case we want to switch cameras.
private int requestedWidth;
private int requestedHeight;
@@ -94,17 +89,28 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private CapturerObserver frameObserver = null;
private final CameraEventsHandler eventsHandler;
private boolean firstFrameReported;
+ // Arbitrary queue depth. Higher number means more memory allocated & held,
+ // lower number means more sensitivity to processing time in the client (and
+ // potentially stalling the capturer if it runs out of buffers to write to).
+ private static final int NUMBER_OF_CAPTURE_BUFFERS = 3;
+ private final Set<byte[]> queuedBuffers = new HashSet<byte[]>();
private final boolean isCapturingToTexture;
- private final SurfaceTextureHelper surfaceHelper;
+ final SurfaceTextureHelper surfaceHelper; // Package visible for testing purposes.
// The camera API can output one old frame after the camera has been switched or the resolution
// has been changed. This flag is used for dropping the first frame after camera restart.
private boolean dropNextFrame = false;
+  // |openCameraOnCodecThreadRunner| is used to retry opening the camera if it is in use by
+ // another application when startCaptureOnCameraThread is called.
+ private Runnable openCameraOnCodecThreadRunner;
+ private final static int MAX_OPEN_CAMERA_ATTEMPTS = 3;
+ private final static int OPEN_CAMERA_DELAY_MS = 500;
+ private int openCameraAttempts;
// Camera error callback.
- private final Camera.ErrorCallback cameraErrorCallback =
- new Camera.ErrorCallback() {
+ private final android.hardware.Camera.ErrorCallback cameraErrorCallback =
+ new android.hardware.Camera.ErrorCallback() {
@Override
- public void onError(int error, Camera camera) {
+ public void onError(int error, android.hardware.Camera camera) {
String errorMessage;
if (error == android.hardware.Camera.CAMERA_ERROR_SERVER_DIED) {
errorMessage = "Camera server died!";
@@ -120,47 +126,45 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Camera observer - monitors camera framerate. Observer is executed on camera thread.
private final Runnable cameraObserver = new Runnable() {
+ private int freezePeriodCount;
@Override
public void run() {
int cameraFramesCount = cameraStatistics.getAndResetFrameCount();
int cameraFps = (cameraFramesCount * 1000 + CAMERA_OBSERVER_PERIOD_MS / 2)
/ CAMERA_OBSERVER_PERIOD_MS;
- Logging.d(TAG, "Camera fps: " + cameraFps +
- ". Pending buffers: " + cameraStatistics.pendingFramesTimeStamps());
+ Logging.d(TAG, "Camera fps: " + cameraFps +".");
if (cameraFramesCount == 0) {
- Logging.e(TAG, "Camera freezed.");
- if (eventsHandler != null) {
- eventsHandler.onCameraError("Camera failure.");
+ ++freezePeriodCount;
+        if (CAMERA_OBSERVER_PERIOD_MS * freezePeriodCount > CAMERA_FREEZE_REPORT_TIMEOUT_MS
+ && eventsHandler != null) {
+ Logging.e(TAG, "Camera freezed.");
+ if (surfaceHelper.isTextureInUse()) {
+ // This can only happen if we are capturing to textures.
+ eventsHandler.onCameraFreezed("Camera failure. Client must return video buffers.");
+ } else {
+ eventsHandler.onCameraFreezed("Camera failure.");
+ }
+ return;
}
} else {
- cameraThreadHandler.postDelayed(this, CAMERA_OBSERVER_PERIOD_MS);
+ freezePeriodCount = 0;
}
+ cameraThreadHandler.postDelayed(this, CAMERA_OBSERVER_PERIOD_MS);
}
};
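The observer's arithmetic can be checked in isolation: the fps expression rounds to the nearest integer, and a freeze is only reported once the accumulated zero-frame time strictly exceeds the report timeout. With the constants above that is the fourth consecutive empty 2-second period, i.e. after 8 seconds of silence. A small sketch:

class FreezeMathSketch {
  static final int PERIOD_MS = 2000;   // CAMERA_OBSERVER_PERIOD_MS
  static final int TIMEOUT_MS = 6000;  // CAMERA_FREEZE_REPORT_TIMEOUT_MS

  // Rounded integer fps, as in cameraObserver:
  // 30 frames in one period -> (30000 + 1000) / 2000 = 15.
  static int fps(int framesInPeriod) {
    return (framesInPeriod * 1000 + PERIOD_MS / 2) / PERIOD_MS;
  }

  // True on the first period where the silence exceeds the timeout:
  // 1..3 empty periods -> 2000/4000/6000 ms, not reported; 4 -> 8000 ms, reported.
  static boolean shouldReportFreeze(int consecutiveEmptyPeriods) {
    return PERIOD_MS * consecutiveEmptyPeriods > TIMEOUT_MS;
  }
}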
private static class CameraStatistics {
private int frameCount = 0;
private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
- private final Set<Long> timeStampsNs = new HashSet<Long>();
CameraStatistics() {
threadChecker.detachThread();
}
- public void addPendingFrame(long timestamp) {
+ public void addFrame() {
threadChecker.checkIsOnValidThread();
++frameCount;
- timeStampsNs.add(timestamp);
- }
-
- public void frameReturned(long timestamp) {
- threadChecker.checkIsOnValidThread();
- if (!timeStampsNs.contains(timestamp)) {
- throw new IllegalStateException(
- "CameraStatistics.frameReturned called with unknown timestamp " + timestamp);
- }
- timeStampsNs.remove(timestamp);
}
public int getAndResetFrameCount() {
@@ -169,28 +173,16 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
frameCount = 0;
return count;
}
-
- // Return number of pending frames that have not been returned.
- public int pendingFramesCount() {
- threadChecker.checkIsOnValidThread();
- return timeStampsNs.size();
- }
-
- public String pendingFramesTimeStamps() {
- threadChecker.checkIsOnValidThread();
- List<Long> timeStampsMs = new ArrayList<Long>();
- for (long ts : timeStampsNs) {
- timeStampsMs.add(TimeUnit.NANOSECONDS.toMillis(ts));
- }
- return timeStampsMs.toString();
- }
}
public static interface CameraEventsHandler {
- // Camera error handler - invoked when camera stops receiving frames
+ // Camera error handler - invoked when camera can not be opened
// or any camera exception happens on camera thread.
void onCameraError(String errorDescription);
+ // Invoked when camera stops receiving frames
+ void onCameraFreezed(String errorDescription);
+
// Callback invoked when camera is opening.
void onCameraOpening(int cameraId);
@@ -216,7 +208,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
public static VideoCapturerAndroid create(String name,
- CameraEventsHandler eventsHandler, EGLContext sharedEglContext) {
+ CameraEventsHandler eventsHandler, EglBase.Context sharedEglContext) {
final int cameraId = lookupDeviceName(name);
if (cameraId == -1) {
return null;
@@ -224,7 +216,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
final VideoCapturerAndroid capturer = new VideoCapturerAndroid(cameraId, eventsHandler,
sharedEglContext);
- capturer.setNativeCapturer(nativeCreateVideoCapturer(capturer));
+ capturer.setNativeCapturer(
+ nativeCreateVideoCapturer(capturer, capturer.surfaceHelper));
return capturer;
}
@@ -243,7 +236,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Switch camera to the next valid camera id. This can only be called while
// the camera is running.
public void switchCamera(final CameraSwitchHandler handler) {
- if (Camera.getNumberOfCameras() < 2) {
+ if (android.hardware.Camera.getNumberOfCameras() < 2) {
if (handler != null) {
handler.onCameraSwitchError("No camera to switch to.");
}
@@ -274,7 +267,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
pendingCameraSwitch = false;
}
if (handler != null) {
- handler.onCameraSwitchDone(info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT);
+ handler.onCameraSwitchDone(
+ info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT);
}
}
});
@@ -282,6 +276,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Requests a new output format from the video capturer. Captured frames
// by the camera will be scaled and/or dropped by the video capturer.
+ // It does not matter if width and height are flipped. E.g., |width| = 640, |height| = 480
+ // produces the same result as |width| = 480, |height| = 640.
// TODO(magjed/perkj): Document what this function does. Change name?
public void onOutputFormatRequest(final int width, final int height, final int framerate) {
cameraThreadHandler.post(new Runnable() {
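Per the comment above, flipped dimensions are equivalent; as a sketch (capturer hypothetical):

    capturer.onOutputFormatRequest(640, 480, 15);
    capturer.onOutputFormatRequest(480, 640, 15);  // Requests the same format.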
@@ -303,7 +299,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Helper function to retrieve the current camera id synchronously. Note that the camera id might
// be changed at any point by switchCamera() calls.
- private int getCurrentCameraId() {
+ int getCurrentCameraId() {
synchronized (cameraIdLock) {
return id;
}
@@ -329,20 +325,19 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
private VideoCapturerAndroid(int cameraId, CameraEventsHandler eventsHandler,
- EGLContext sharedContext) {
- Logging.d(TAG, "VideoCapturerAndroid");
+ EglBase.Context sharedContext) {
this.id = cameraId;
this.eventsHandler = eventsHandler;
cameraThread = new HandlerThread(TAG);
cameraThread.start();
cameraThreadHandler = new Handler(cameraThread.getLooper());
- videoBuffers = new FramePool(cameraThread);
isCapturingToTexture = (sharedContext != null);
- surfaceHelper = SurfaceTextureHelper.create(
- isCapturingToTexture ? sharedContext : EGL10.EGL_NO_CONTEXT, cameraThreadHandler);
+ cameraStatistics = new CameraStatistics();
+ surfaceHelper = SurfaceTextureHelper.create(sharedContext, cameraThreadHandler);
if (isCapturingToTexture) {
surfaceHelper.setListener(this);
}
+ Logging.d(TAG, "VideoCapturerAndroid isCapturingToTexture: " + isCapturingToTexture);
}
private void checkIsOnCameraThread() {
@@ -355,13 +350,13 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// found. If |deviceName| is empty, the first available device is used.
private static int lookupDeviceName(String deviceName) {
Logging.d(TAG, "lookupDeviceName: " + deviceName);
- if (deviceName == null || Camera.getNumberOfCameras() == 0) {
+ if (deviceName == null || android.hardware.Camera.getNumberOfCameras() == 0) {
return -1;
}
if (deviceName.isEmpty()) {
return 0;
}
- for (int i = 0; i < Camera.getNumberOfCameras(); ++i) {
+ for (int i = 0; i < android.hardware.Camera.getNumberOfCameras(); ++i) {
if (deviceName.equals(CameraEnumerationAndroid.getDeviceName(i))) {
return i;
}
@@ -382,14 +377,9 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
if (camera != null) {
throw new IllegalStateException("Release called while camera is running");
}
- if (cameraStatistics.pendingFramesCount() != 0) {
- throw new IllegalStateException("Release called with pending frames left");
- }
}
});
- surfaceHelper.disconnect();
- cameraThread.quit();
- ThreadUtils.joinUninterruptibly(cameraThread);
+ surfaceHelper.disconnect(cameraThreadHandler);
cameraThread = null;
}
@@ -413,6 +403,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
if (frameObserver == null) {
throw new RuntimeException("frameObserver not set.");
}
+
cameraThreadHandler.post(new Runnable() {
@Override public void run() {
startCaptureOnCameraThread(width, height, framerate, frameObserver,
@@ -422,8 +413,8 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
private void startCaptureOnCameraThread(
- int width, int height, int framerate, CapturerObserver frameObserver,
- Context applicationContext) {
+ final int width, final int height, final int framerate, final CapturerObserver frameObserver,
+ final Context applicationContext) {
Throwable error = null;
checkIsOnCameraThread();
if (camera != null) {
@@ -431,17 +422,36 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
this.applicationContext = applicationContext;
this.frameObserver = frameObserver;
+ this.firstFrameReported = false;
+
try {
- synchronized (cameraIdLock) {
- Logging.d(TAG, "Opening camera " + id);
- firstFrameReported = false;
- if (eventsHandler != null) {
- eventsHandler.onCameraOpening(id);
+ try {
+ synchronized (cameraIdLock) {
+ Logging.d(TAG, "Opening camera " + id);
+ if (eventsHandler != null) {
+ eventsHandler.onCameraOpening(id);
+ }
+ camera = android.hardware.Camera.open(id);
+ info = new android.hardware.Camera.CameraInfo();
+ android.hardware.Camera.getCameraInfo(id, info);
+ }
+ } catch (RuntimeException e) {
+ openCameraAttempts++;
+ if (openCameraAttempts < MAX_OPEN_CAMERA_ATTEMPTS) {
+ Logging.e(TAG, "Camera.open failed, retrying", e);
+ openCameraOnCodecThreadRunner = new Runnable() {
+ @Override public void run() {
+ startCaptureOnCameraThread(width, height, framerate, frameObserver,
+ applicationContext);
+ }
+ };
+ cameraThreadHandler.postDelayed(openCameraOnCodecThreadRunner, OPEN_CAMERA_DELAY_MS);
+ return;
}
- camera = Camera.open(id);
- info = new Camera.CameraInfo();
- Camera.getCameraInfo(id, info);
+ openCameraAttempts = 0;
+ throw e;
}
+
try {
camera.setPreviewTexture(surfaceHelper.getSurfaceTexture());
} catch (IOException e) {
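The block above retries android.hardware.Camera.open() a bounded number of times by re-posting the start runnable with a delay rather than blocking the camera thread. A standalone sketch of the same pattern; the class, constants, and openCamera() helper are illustrative assumptions, not part of the patch:

    class CameraOpener {
      private static final int MAX_ATTEMPTS = 3;
      private static final int RETRY_DELAY_MS = 500;
      private int attempts = 0;

      void tryOpen(final android.os.Handler handler) {
        try {
          openCamera();  // Assumed helper wrapping android.hardware.Camera.open().
          attempts = 0;  // Success: reset for the next open cycle.
        } catch (RuntimeException e) {
          if (++attempts < MAX_ATTEMPTS) {
            // Re-post with a delay instead of blocking the thread.
            handler.postDelayed(new Runnable() {
              @Override public void run() { tryOpen(handler); }
            }, RETRY_DELAY_MS);
            return;
          }
          attempts = 0;
          throw e;  // Out of attempts: surface the failure, as the patch does.
        }
      }

      private void openCamera() { /* Assumed to throw RuntimeException on failure. */ }
    }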
@@ -485,17 +495,18 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
requestedFramerate = framerate;
// Find closest supported format for |width| x |height| @ |framerate|.
- final Camera.Parameters parameters = camera.getParameters();
+ final android.hardware.Camera.Parameters parameters = camera.getParameters();
final int[] range = CameraEnumerationAndroid.getFramerateRange(parameters, framerate * 1000);
- final Camera.Size previewSize = CameraEnumerationAndroid.getClosestSupportedSize(
- parameters.getSupportedPreviewSizes(), width, height);
+ final android.hardware.Camera.Size previewSize =
+ CameraEnumerationAndroid.getClosestSupportedSize(
+ parameters.getSupportedPreviewSizes(), width, height);
final CaptureFormat captureFormat = new CaptureFormat(
previewSize.width, previewSize.height,
- range[Camera.Parameters.PREVIEW_FPS_MIN_INDEX],
- range[Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
+ range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX],
+ range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX]);
// Check if we are already using this capture format; if so, we don't need to do anything.
- if (captureFormat.equals(this.captureFormat)) {
+ if (captureFormat.isSameFormat(this.captureFormat)) {
return;
}
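A sketch of format selection using only the helpers visible in this hunk; camera is assumed to be an opened android.hardware.Camera, and getFramerateRange() takes its target in units of 1/1000 fps:

    android.hardware.Camera.Parameters parameters = camera.getParameters();
    int[] range = CameraEnumerationAndroid.getFramerateRange(parameters, 30 * 1000);
    android.hardware.Camera.Size previewSize =
        CameraEnumerationAndroid.getClosestSupportedSize(
            parameters.getSupportedPreviewSizes(), 1280, 720);
    android.util.Log.d("FormatDemo", "Using " + previewSize.width + "x"
        + previewSize.height + " @ ["
        + range[android.hardware.Camera.Parameters.PREVIEW_FPS_MIN_INDEX] + ", "
        + range[android.hardware.Camera.Parameters.PREVIEW_FPS_MAX_INDEX] + "]");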
@@ -511,11 +522,15 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
parameters.setPreviewFpsRange(captureFormat.minFramerate, captureFormat.maxFramerate);
}
parameters.setPreviewSize(captureFormat.width, captureFormat.height);
- parameters.setPreviewFormat(captureFormat.imageFormat);
+
+ if (!isCapturingToTexture) {
+ parameters.setPreviewFormat(captureFormat.imageFormat);
+ }
// Picture size is for taking pictures and not for preview/video, but we need to set it anyway
// as a workaround for an aspect ratio problem on Nexus 7.
- final Camera.Size pictureSize = CameraEnumerationAndroid.getClosestSupportedSize(
- parameters.getSupportedPictureSizes(), width, height);
+ final android.hardware.Camera.Size pictureSize =
+ CameraEnumerationAndroid.getClosestSupportedSize(
+ parameters.getSupportedPictureSizes(), width, height);
parameters.setPictureSize(pictureSize.width, pictureSize.height);
// Temporarily stop preview if it's already running.
@@ -532,13 +547,19 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
this.captureFormat = captureFormat;
List<String> focusModes = parameters.getSupportedFocusModes();
- if (focusModes.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
- parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
+ if (focusModes.contains(android.hardware.Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ parameters.setFocusMode(android.hardware.Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
}
camera.setParameters(parameters);
if (!isCapturingToTexture) {
- videoBuffers.queueCameraBuffers(captureFormat.frameSize(), camera);
+ queuedBuffers.clear();
+ final int frameSize = captureFormat.frameSize();
+ for (int i = 0; i < NUMBER_OF_CAPTURE_BUFFERS; ++i) {
+ final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
+ queuedBuffers.add(buffer.array());
+ camera.addCallbackBuffer(buffer.array());
+ }
camera.setPreviewCallbackWithBuffer(this);
}
camera.startPreview();
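The loop above replaces the old FramePool: a fixed number of direct ByteBuffers are allocated up front and their backing arrays handed to the camera as callback buffers, and onPreviewFrame() later re-queues each byte[] with addCallbackBuffer(data). A minimal sketch of the allocation; the buffer count, frame size, and previewCallback are assumptions:

    // On Android, direct ByteBuffers expose a backing array, so native code
    // can read the frame without a copy while the camera fills buffer.array().
    final int NUM_BUFFERS = 3;
    final int frameSize = 640 * 480 * 3 / 2;  // 12 bits per pixel; ignores stride alignment.
    final java.util.Set<byte[]> queued = new java.util.HashSet<byte[]>();
    for (int i = 0; i < NUM_BUFFERS; ++i) {
      java.nio.ByteBuffer buffer = java.nio.ByteBuffer.allocateDirect(frameSize);
      queued.add(buffer.array());
      camera.addCallbackBuffer(buffer.array());
    }
    camera.setPreviewCallbackWithBuffer(previewCallback);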
@@ -561,6 +582,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private void stopCaptureOnCameraThread() {
checkIsOnCameraThread();
Logging.d(TAG, "stopCaptureOnCameraThread");
+ if (openCameraOnCodecThreadRunner != null) {
+ cameraThreadHandler.removeCallbacks(openCameraOnCodecThreadRunner);
+ }
+ openCameraAttempts = 0;
if (camera == null) {
Logging.e(TAG, "Calling stopCapture() for already stopped camera.");
return;
@@ -571,13 +596,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
Logging.d(TAG, "Stop preview.");
camera.stopPreview();
camera.setPreviewCallbackWithBuffer(null);
- if (!isCapturingToTexture()) {
- videoBuffers.stopReturnBuffersToCamera();
- Logging.d(TAG, "stopReturnBuffersToCamera called."
- + (cameraStatistics.pendingFramesCount() == 0?
- " All buffers have been returned."
- : " Pending buffers: " + cameraStatistics.pendingFramesTimeStamps() + "."));
- }
+ queuedBuffers.clear();
captureFormat = null;
Logging.d(TAG, "Release camera.");
@@ -593,7 +612,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
Logging.d(TAG, "switchCameraOnCameraThread");
stopCaptureOnCameraThread();
synchronized (cameraIdLock) {
- id = (id + 1) % Camera.getNumberOfCameras();
+ id = (id + 1) % android.hardware.Camera.getNumberOfCameras();
}
dropNextFrame = true;
startCaptureOnCameraThread(requestedWidth, requestedHeight, requestedFramerate, frameObserver,
@@ -612,17 +631,9 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
frameObserver.onOutputFormatRequest(width, height, framerate);
}
- public void returnBuffer(final long timeStamp) {
- cameraThreadHandler.post(new Runnable() {
- @Override public void run() {
- cameraStatistics.frameReturned(timeStamp);
- if (isCapturingToTexture) {
- surfaceHelper.returnTextureFrame();
- } else {
- videoBuffers.returnBuffer(timeStamp);
- }
- }
- });
+ // Exposed for testing purposes only.
+ Handler getCameraThreadHandler() {
+ return cameraThreadHandler;
}
private int getDeviceOrientation() {
@@ -650,7 +661,7 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private int getFrameOrientation() {
int rotation = getDeviceOrientation();
- if (info.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
+ if (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_BACK) {
rotation = 360 - rotation;
}
return (info.orientation + rotation) % 360;
@@ -658,9 +669,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Called on cameraThread, so it must not be "synchronized".
@Override
- public void onPreviewFrame(byte[] data, Camera callbackCamera) {
+ public void onPreviewFrame(byte[] data, android.hardware.Camera callbackCamera) {
checkIsOnCameraThread();
- if (camera == null) {
+ if (camera == null || !queuedBuffers.contains(data)) {
+ // The camera has been stopped or |data| is an old invalid buffer.
return;
}
if (camera != callbackCamera) {
@@ -675,16 +687,10 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
firstFrameReported = true;
}
- // Mark the frame owning |data| as used.
- // Note that since data is directBuffer,
- // data.length >= videoBuffers.frameSize.
- if (videoBuffers.reserveByteBuffer(data, captureTimeNs)) {
- cameraStatistics.addPendingFrame(captureTimeNs);
- frameObserver.onByteBufferFrameCaptured(data, videoBuffers.frameSize, captureFormat.width,
- captureFormat.height, getFrameOrientation(), captureTimeNs);
- } else {
- Logging.w(TAG, "reserveByteBuffer failed - dropping frame.");
- }
+ cameraStatistics.addFrame();
+ frameObserver.onByteBufferFrameCaptured(data, captureFormat.width, captureFormat.height,
+ getFrameOrientation(), captureTimeNs);
+ camera.addCallbackBuffer(data);
}
@Override
@@ -696,135 +702,22 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
surfaceHelper.returnTextureFrame();
return;
}
- if (!dropNextFrame) {
+ if (dropNextFrame) {
surfaceHelper.returnTextureFrame();
- dropNextFrame = true;
+ dropNextFrame = false;
return;
}
int rotation = getFrameOrientation();
- if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
+ if (info.facing == android.hardware.Camera.CameraInfo.CAMERA_FACING_FRONT) {
// Undo the mirror that the OS "helps" us with.
// http://developer.android.com/reference/android/hardware/Camera.html#setDisplayOrientation(int)
transformMatrix =
RendererCommon.multiplyMatrices(transformMatrix, RendererCommon.horizontalFlipMatrix());
}
- transformMatrix = RendererCommon.rotateTextureMatrix(transformMatrix, rotation);
-
- final int rotatedWidth = (rotation % 180 == 0) ? captureFormat.width : captureFormat.height;
- final int rotatedHeight = (rotation % 180 == 0) ? captureFormat.height : captureFormat.width;
- cameraStatistics.addPendingFrame(timestampNs);
- frameObserver.onTextureFrameCaptured(rotatedWidth, rotatedHeight, oesTextureId,
- transformMatrix, timestampNs);
- }
-
- // Class used for allocating and bookkeeping video frames. All buffers are
- // direct allocated so that they can be directly used from native code. This class is
- // not thread-safe, and enforces single thread use.
- private static class FramePool {
- // Thread that all calls should be made on.
- private final Thread thread;
- // Arbitrary queue depth. Higher number means more memory allocated & held,
- // lower number means more sensitivity to processing time in the client (and
- // potentially stalling the capturer if it runs out of buffers to write to).
- private static final int numCaptureBuffers = 3;
- // This container tracks the buffers added as camera callback buffers. It is needed for finding
- // the corresponding ByteBuffer given a byte[].
- private final Map<byte[], ByteBuffer> queuedBuffers = new IdentityHashMap<byte[], ByteBuffer>();
- // This container tracks the frames that have been sent but not returned. It is needed for
- // keeping the buffers alive and for finding the corresponding ByteBuffer given a timestamp.
- private final Map<Long, ByteBuffer> pendingBuffers = new HashMap<Long, ByteBuffer>();
- private int frameSize = 0;
- private Camera camera;
-
- public FramePool(Thread thread) {
- this.thread = thread;
- }
-
- private void checkIsOnValidThread() {
- if (Thread.currentThread() != thread) {
- throw new IllegalStateException("Wrong thread");
- }
- }
-
- // Discards previous queued buffers and adds new callback buffers to camera.
- public void queueCameraBuffers(int frameSize, Camera camera) {
- checkIsOnValidThread();
- this.camera = camera;
- this.frameSize = frameSize;
-
- queuedBuffers.clear();
- for (int i = 0; i < numCaptureBuffers; ++i) {
- final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
- camera.addCallbackBuffer(buffer.array());
- queuedBuffers.put(buffer.array(), buffer);
- }
- Logging.d(TAG, "queueCameraBuffers enqueued " + numCaptureBuffers
- + " buffers of size " + frameSize + ".");
- }
-
- public void stopReturnBuffersToCamera() {
- checkIsOnValidThread();
- this.camera = null;
- queuedBuffers.clear();
- // Frames in |pendingBuffers| need to be kept alive until they are returned.
- }
-
- public boolean reserveByteBuffer(byte[] data, long timeStamp) {
- checkIsOnValidThread();
- final ByteBuffer buffer = queuedBuffers.remove(data);
- if (buffer == null) {
- // Frames might be posted to |onPreviewFrame| with the previous format while changing
- // capture format in |startPreviewOnCameraThread|. Drop these old frames.
- Logging.w(TAG, "Received callback buffer from previous configuration with length: "
- + (data == null ? "null" : data.length));
- return false;
- }
- if (buffer.capacity() != frameSize) {
- throw new IllegalStateException("Callback buffer has unexpected frame size");
- }
- if (pendingBuffers.containsKey(timeStamp)) {
- Logging.e(TAG, "Timestamp already present in pending buffers - they need to be unique");
- return false;
- }
- pendingBuffers.put(timeStamp, buffer);
- if (queuedBuffers.isEmpty()) {
- Logging.d(TAG, "Camera is running out of capture buffers.");
- }
- return true;
- }
-
- public void returnBuffer(long timeStamp) {
- checkIsOnValidThread();
- final ByteBuffer returnedFrame = pendingBuffers.remove(timeStamp);
- if (returnedFrame == null) {
- throw new RuntimeException("unknown data buffer with time stamp "
- + timeStamp + "returned?!?");
- }
-
- if (camera != null && returnedFrame.capacity() == frameSize) {
- camera.addCallbackBuffer(returnedFrame.array());
- if (queuedBuffers.isEmpty()) {
- Logging.d(TAG, "Frame returned when camera is running out of capture"
- + " buffers for TS " + TimeUnit.NANOSECONDS.toMillis(timeStamp));
- }
- queuedBuffers.put(returnedFrame.array(), returnedFrame);
- return;
- }
-
- if (returnedFrame.capacity() != frameSize) {
- Logging.d(TAG, "returnBuffer with time stamp "
- + TimeUnit.NANOSECONDS.toMillis(timeStamp)
- + " called with old frame size, " + returnedFrame.capacity() + ".");
- // Since this frame has the wrong size, don't requeue it. Frames with the correct size are
- // created in queueCameraBuffers so this must be an old buffer.
- return;
- }
-
- Logging.d(TAG, "returnBuffer with time stamp "
- + TimeUnit.NANOSECONDS.toMillis(timeStamp)
- + " called after camera has been stopped.");
- }
+ cameraStatistics.addFrame();
+ frameObserver.onTextureFrameCaptured(captureFormat.width, captureFormat.height, oesTextureId,
+ transformMatrix, rotation, timestampNs);
}
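Note the rotation change: the removed lines baked the rotation into the matrix and swapped the reported dimensions, whereas the new callback passes rotation as a separate argument. A consumer can recover the rotated dimensions the same way the removed code did (sketch):

    int rotatedWidth = (rotation % 180 == 0) ? width : height;
    int rotatedHeight = (rotation % 180 == 0) ? height : width;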
// Interface used for providing callbacks to an observer.
@@ -835,13 +728,14 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
// Delivers a captured frame. Called on a Java thread owned by
// VideoCapturerAndroid.
- abstract void onByteBufferFrameCaptured(byte[] data, int length, int width, int height,
- int rotation, long timeStamp);
+ abstract void onByteBufferFrameCaptured(byte[] data, int width, int height, int rotation,
+ long timeStamp);
// Delivers a captured frame in a texture with id |oesTextureId|. Called on a Java thread
// owned by VideoCapturerAndroid.
abstract void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timestamp);
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timestamp);
// Requests an output format from the video capturer. Captured frames
// by the camera will be scaled and/or dropped by the video capturer.
@@ -864,17 +758,18 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
}
@Override
- public void onByteBufferFrameCaptured(byte[] data, int length, int width, int height,
+ public void onByteBufferFrameCaptured(byte[] data, int width, int height,
int rotation, long timeStamp) {
- nativeOnByteBufferFrameCaptured(nativeCapturer, data, length, width, height, rotation,
+ nativeOnByteBufferFrameCaptured(nativeCapturer, data, data.length, width, height, rotation,
timeStamp);
}
@Override
public void onTextureFrameCaptured(
- int width, int height, int oesTextureId, float[] transformMatrix, long timestamp) {
+ int width, int height, int oesTextureId, float[] transformMatrix, int rotation,
+ long timestamp) {
nativeOnTextureFrameCaptured(nativeCapturer, width, height, oesTextureId, transformMatrix,
- timestamp);
+ rotation, timestamp);
}
@Override
@@ -887,10 +782,12 @@ public class VideoCapturerAndroid extends VideoCapturer implements PreviewCallba
private native void nativeOnByteBufferFrameCaptured(long nativeCapturer,
byte[] data, int length, int width, int height, int rotation, long timeStamp);
private native void nativeOnTextureFrameCaptured(long nativeCapturer, int width, int height,
- int oesTextureId, float[] transformMatrix, long timestamp);
+ int oesTextureId, float[] transformMatrix, int rotation, long timestamp);
private native void nativeOnOutputFormatRequest(long nativeCapturer,
int width, int height, int framerate);
}
- private static native long nativeCreateVideoCapturer(VideoCapturerAndroid videoCapturer);
+ private static native long nativeCreateVideoCapturer(
+ VideoCapturerAndroid videoCapturer,
+ SurfaceTextureHelper surfaceHelper);
}
diff --git a/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java b/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
index bacd0cf11f..bb6f01cea2 100644
--- a/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
+++ b/talk/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
@@ -38,7 +38,7 @@ import javax.microedition.khronos.opengles.GL10;
import android.annotation.SuppressLint;
import android.graphics.Point;
import android.graphics.Rect;
-import android.graphics.SurfaceTexture;
+import android.opengl.EGL14;
import android.opengl.GLES20;
import android.opengl.GLSurfaceView;
@@ -59,7 +59,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private static Runnable eglContextReady = null;
private static final String TAG = "VideoRendererGui";
private GLSurfaceView surface;
- private static EGLContext eglContext = null;
+ private static EglBase.Context eglContext = null;
// Indicates if SurfaceView.Renderer.onSurfaceCreated was called.
// If true then for every newly created yuv image renderer createTexture()
// should be called. The variable is accessed on multiple threads and
@@ -69,8 +69,6 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private int screenHeight;
// List of yuv renderers.
private final ArrayList<YuvImageRenderer> yuvImageRenderers;
- // |drawer| is synchronized on |yuvImageRenderers|.
- private GlRectDrawer drawer;
// Render and draw threads.
private static Thread renderFrameThread;
private static Thread drawThread;
@@ -99,6 +97,8 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
// currently leaking resources to avoid a rare crash in release() where the EGLContext has
// become invalid beforehand.
private int[] yuvTextures = { 0, 0, 0 };
+ private final RendererCommon.YuvUploader yuvUploader = new RendererCommon.YuvUploader();
+ private final RendererCommon.GlDrawer drawer;
// Resources for making a deep copy of incoming OES texture frame.
private GlTextureFrameBuffer textureCopy;
@@ -157,12 +157,13 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private YuvImageRenderer(
GLSurfaceView surface, int id,
int x, int y, int width, int height,
- RendererCommon.ScalingType scalingType, boolean mirror) {
+ RendererCommon.ScalingType scalingType, boolean mirror, RendererCommon.GlDrawer drawer) {
Logging.d(TAG, "YuvImageRenderer.Create id: " + id);
this.surface = surface;
this.id = id;
this.scalingType = scalingType;
this.mirror = mirror;
+ this.drawer = drawer;
layoutInPercentage = new Rect(x, y, Math.min(100, x + width), Math.min(100, y + height));
updateLayoutProperties = false;
rotationDegree = 0;
@@ -174,6 +175,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
private synchronized void release() {
surface = null;
+ drawer.release();
synchronized (pendingFrameLock) {
if (pendingFrame != null) {
VideoRenderer.renderFrameDone(pendingFrame);
@@ -226,7 +228,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
}
- private void draw(GlRectDrawer drawer) {
+ private void draw() {
if (!seenFrame) {
// No frame received yet - nothing to render.
return;
@@ -241,29 +243,15 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
if (isNewFrame) {
+ rotatedSamplingMatrix = RendererCommon.rotateTextureMatrix(
+ pendingFrame.samplingMatrix, pendingFrame.rotationDegree);
if (pendingFrame.yuvFrame) {
rendererType = RendererType.RENDERER_YUV;
- drawer.uploadYuvData(yuvTextures, pendingFrame.width, pendingFrame.height,
+ yuvUploader.uploadYuvData(yuvTextures, pendingFrame.width, pendingFrame.height,
pendingFrame.yuvStrides, pendingFrame.yuvPlanes);
- // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
- // top-left corner of the image, but in glTexImage2D() the first element corresponds to
- // the bottom-left corner. We correct this discrepancy by setting a vertical flip as
- // sampling matrix.
- final float[] samplingMatrix = RendererCommon.verticalFlipMatrix();
- rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, pendingFrame.rotationDegree);
} else {
rendererType = RendererType.RENDERER_TEXTURE;
- // External texture rendering. Update texture image to latest and make a deep copy of
- // the external texture.
- // TODO(magjed): Move updateTexImage() to the video source instead.
- final SurfaceTexture surfaceTexture = (SurfaceTexture) pendingFrame.textureObject;
- surfaceTexture.updateTexImage();
- final float[] samplingMatrix = new float[16];
- surfaceTexture.getTransformMatrix(samplingMatrix);
- rotatedSamplingMatrix =
- RendererCommon.rotateTextureMatrix(samplingMatrix, pendingFrame.rotationDegree);
-
+ // External texture rendering. Make a deep copy of the external texture.
// Reallocate offscreen texture if necessary.
textureCopy.setSize(pendingFrame.rotatedWidth(), pendingFrame.rotatedHeight());
@@ -272,12 +260,13 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
GlUtil.checkNoGLES2Error("glBindFramebuffer");
// Copy the OES texture content. This will also normalize the sampling matrix.
- GLES20.glViewport(0, 0, textureCopy.getWidth(), textureCopy.getHeight());
- drawer.drawOes(pendingFrame.textureId, rotatedSamplingMatrix);
+ drawer.drawOes(pendingFrame.textureId, rotatedSamplingMatrix,
+ 0, 0, textureCopy.getWidth(), textureCopy.getHeight());
rotatedSamplingMatrix = RendererCommon.identityMatrix();
// Restore normal framebuffer.
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
+ GLES20.glFinish();
}
copyTimeNs += (System.nanoTime() - now);
VideoRenderer.renderFrameDone(pendingFrame);
@@ -285,17 +274,17 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
}
- // OpenGL defaults to lower left origin - flip vertically.
- GLES20.glViewport(displayLayout.left, screenHeight - displayLayout.bottom,
- displayLayout.width(), displayLayout.height());
-
updateLayoutMatrix();
final float[] texMatrix =
RendererCommon.multiplyMatrices(rotatedSamplingMatrix, layoutMatrix);
+ // OpenGL defaults to lower left origin - flip viewport position vertically.
+ final int viewportY = screenHeight - displayLayout.bottom;
if (rendererType == RendererType.RENDERER_YUV) {
- drawer.drawYuv(yuvTextures, texMatrix);
+ drawer.drawYuv(yuvTextures, texMatrix,
+ displayLayout.left, viewportY, displayLayout.width(), displayLayout.height());
} else {
- drawer.drawRgb(textureCopy.getTextureId(), texMatrix);
+ drawer.drawRgb(textureCopy.getTextureId(), texMatrix,
+ displayLayout.left, viewportY, displayLayout.width(), displayLayout.height());
}
if (isNewFrame) {
@@ -314,7 +303,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
". Dropped: " + framesDropped + ". Rendered: " + framesRendered);
if (framesReceived > 0 && framesRendered > 0) {
Logging.d(TAG, "Duration: " + (int)(timeSinceFirstFrameNs / 1e6) +
- " ms. FPS: " + (float)framesRendered * 1e9 / timeSinceFirstFrameNs);
+ " ms. FPS: " + framesRendered * 1e9 / timeSinceFirstFrameNs);
Logging.d(TAG, "Draw time: " +
(int) (drawTimeNs / (1000 * framesRendered)) + " us. Copy time: " +
(int) (copyTimeNs / (1000 * framesReceived)) + " us");
@@ -429,7 +418,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
eglContextReady = eglContextReadyCallback;
}
- public static synchronized EGLContext getEGLContext() {
+ public static synchronized EglBase.Context getEglBaseContext() {
return eglContext;
}
@@ -477,6 +466,16 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
*/
public static synchronized YuvImageRenderer create(int x, int y, int width, int height,
RendererCommon.ScalingType scalingType, boolean mirror) {
+ return create(x, y, width, height, scalingType, mirror, new GlRectDrawer());
+ }
+
+ /**
+ * Creates VideoRenderer.Callbacks with top left corner at (x, y) and resolution (width, height).
+ * All parameters are in percentage of screen resolution. The custom |drawer| will be used for
+ * drawing frames on the EGLSurface. This class is responsible for calling release() on |drawer|.
+ */
+ public static synchronized YuvImageRenderer create(int x, int y, int width, int height,
+ RendererCommon.ScalingType scalingType, boolean mirror, RendererCommon.GlDrawer drawer) {
// Check display region parameters.
if (x < 0 || x > 100 || y < 0 || y > 100 ||
width < 0 || width > 100 || height < 0 || height > 100 ||
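Usage sketch for the new overload (hypothetical; the scaling type is an example value). Per the Javadoc above, the returned renderer owns the drawer and calls release() on it:

    VideoRendererGui.YuvImageRenderer renderer = VideoRendererGui.create(
        0, 0, 100, 100, RendererCommon.ScalingType.SCALE_ASPECT_FIT,
        false /* mirror */, new GlRectDrawer());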
@@ -490,7 +489,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
}
final YuvImageRenderer yuvImageRenderer = new YuvImageRenderer(
instance.surface, instance.yuvImageRenderers.size(),
- x, y, width, height, scalingType, mirror);
+ x, y, width, height, scalingType, mirror, drawer);
synchronized (instance.yuvImageRenderers) {
if (instance.onSurfaceCreatedCalled) {
// onSurfaceCreated has already been called for VideoRendererGui -
@@ -498,6 +497,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
// rendering list.
final CountDownLatch countDownLatch = new CountDownLatch(1);
instance.surface.queueEvent(new Runnable() {
+ @Override
public void run() {
yuvImageRenderer.createTextures();
yuvImageRenderer.setScreenSize(
@@ -608,13 +608,16 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
Logging.d(TAG, "VideoRendererGui.onSurfaceCreated");
// Store render EGL context.
synchronized (VideoRendererGui.class) {
- eglContext = ((EGL10) EGLContext.getEGL()).eglGetCurrentContext();
+ if (EglBase14.isEGL14Supported()) {
+ eglContext = new EglBase14.Context(EGL14.eglGetCurrentContext());
+ } else {
+ eglContext = new EglBase10.Context(((EGL10) EGLContext.getEGL()).eglGetCurrentContext());
+ }
+
Logging.d(TAG, "VideoRendererGui EGL Context: " + eglContext);
}
synchronized (yuvImageRenderers) {
- // Create drawer for YUV/OES frames.
- drawer = new GlRectDrawer();
// Create textures for all images.
for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
yuvImageRenderer.createTextures();
@@ -655,7 +658,7 @@ public class VideoRendererGui implements GLSurfaceView.Renderer {
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
synchronized (yuvImageRenderers) {
for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
- yuvImageRenderer.draw(drawer);
+ yuvImageRenderer.draw();
}
}
}
diff --git a/talk/app/webrtc/java/jni/androidmediacodeccommon.h b/talk/app/webrtc/java/jni/androidmediacodeccommon.h
index 348a716496..92ea135f12 100644
--- a/talk/app/webrtc/java/jni/androidmediacodeccommon.h
+++ b/talk/app/webrtc/java/jni/androidmediacodeccommon.h
@@ -72,6 +72,8 @@ enum { kMediaCodecTimeoutMs = 1000 };
enum { kMediaCodecStatisticsIntervalMs = 3000 };
// Maximum amount of pending frames for VP8 decoder.
enum { kMaxPendingFramesVp8 = 1 };
+// Maximum amount of pending frames for VP9 decoder.
+enum { kMaxPendingFramesVp9 = 1 };
// Maximum amount of pending frames for H.264 decoder.
enum { kMaxPendingFramesH264 = 30 };
// Maximum amount of decoded frames for which per-frame logging is enabled.
diff --git a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
index b664f16e2e..c3d287ce0d 100644
--- a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
@@ -33,14 +33,15 @@
#include "talk/app/webrtc/java/jni/androidmediacodeccommon.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/surfacetexturehelper_jni.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/timeutils.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/logcat_trace_context.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "third_party/libyuv/include/libyuv/convert.h"
@@ -62,6 +63,7 @@ using webrtc::VideoCodec;
using webrtc::VideoCodecType;
using webrtc::kVideoCodecH264;
using webrtc::kVideoCodecVP8;
+using webrtc::kVideoCodecVP9;
namespace webrtc_jni {
@@ -87,9 +89,14 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
int32_t Release() override;
int32_t Reset() override;
+
+ bool PrefersLateDecoding() const override { return true; }
+
// rtc::MessageHandler implementation.
void OnMessage(rtc::Message* msg) override;
+ const char* ImplementationName() const override;
+
private:
// CHECK-fail if not running on |codec_thread_|.
void CheckOnCodecThread();
@@ -105,13 +112,17 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
// Type of video codec.
VideoCodecType codecType_;
+ // Render EGL context - owned by factory, should not be allocated/destroyed
+ // by VideoDecoder.
+ jobject render_egl_context_;
+
bool key_frame_required_;
bool inited_;
bool sw_fallback_required_;
bool use_surface_;
VideoCodec codec_;
webrtc::I420BufferPool decoded_frame_pool_;
- NativeHandleImpl native_handle_;
+ rtc::scoped_refptr<SurfaceTextureHelper> surface_texture_helper_;
DecodedImageCallback* callback_;
int frames_received_; // Number of frames received by decoder.
int frames_decoded_; // Number of frames decoded by decoder.
@@ -120,10 +131,6 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
int current_bytes_; // Encoded bytes in the current statistics interval.
int current_decoding_time_ms_; // Overall decoding time in the current statistics interval.
uint32_t max_pending_frames_; // Maximum number of pending input frames.
- std::vector<int32_t> timestamps_;
- std::vector<int64_t> ntp_times_ms_;
- std::vector<int64_t> frame_rtc_times_ms_; // Time when video frame is sent to
- // decoder input.
// State that is constant for the lifetime of this object once the ctor
// returns.
@@ -134,7 +141,8 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
jmethodID j_release_method_;
jmethodID j_dequeue_input_buffer_method_;
jmethodID j_queue_input_buffer_method_;
- jmethodID j_dequeue_output_buffer_method_;
+ jmethodID j_dequeue_byte_buffer_method_;
+ jmethodID j_dequeue_texture_buffer_method_;
jmethodID j_return_decoded_byte_buffer_method_;
// MediaCodecVideoDecoder fields.
jfieldID j_input_buffers_field_;
@@ -144,24 +152,23 @@ class MediaCodecVideoDecoder : public webrtc::VideoDecoder,
jfieldID j_height_field_;
jfieldID j_stride_field_;
jfieldID j_slice_height_field_;
- jfieldID j_surface_texture_field_;
// MediaCodecVideoDecoder.DecodedTextureBuffer fields.
- jfieldID j_textureID_field_;
- jfieldID j_texture_presentation_timestamp_us_field_;
- // MediaCodecVideoDecoder.DecodedByteBuffer fields.
+ jfieldID j_texture_id_field_;
+ jfieldID j_transform_matrix_field_;
+ jfieldID j_texture_timestamp_ms_field_;
+ jfieldID j_texture_ntp_timestamp_ms_field_;
+ jfieldID j_texture_decode_time_ms_field_;
+ jfieldID j_texture_frame_delay_ms_field_;
+ // MediaCodecVideoDecoder.DecodedOutputBuffer fields.
jfieldID j_info_index_field_;
jfieldID j_info_offset_field_;
jfieldID j_info_size_field_;
- jfieldID j_info_presentation_timestamp_us_field_;
+ jfieldID j_info_timestamp_ms_field_;
+ jfieldID j_info_ntp_timestamp_ms_field_;
+ jfieldID j_byte_buffer_decode_time_ms_field_;
// Global references; must be deleted in Release().
std::vector<jobject> input_buffers_;
- jobject surface_texture_;
- jobject previous_surface_texture_;
-
- // Render EGL context - owned by factory, should not be allocated/destroyed
- // by VideoDecoder.
- jobject render_egl_context_;
};
MediaCodecVideoDecoder::MediaCodecVideoDecoder(
@@ -171,8 +178,6 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
key_frame_required_(true),
inited_(false),
sw_fallback_required_(false),
- surface_texture_(NULL),
- previous_surface_texture_(NULL),
codec_thread_(new Thread()),
j_media_codec_video_decoder_class_(
jni,
@@ -191,19 +196,22 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
j_init_decode_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "initDecode",
"(Lorg/webrtc/MediaCodecVideoDecoder$VideoCodecType;"
- "IILjavax/microedition/khronos/egl/EGLContext;)Z");
+ "IILorg/webrtc/SurfaceTextureHelper;)Z");
j_release_method_ =
GetMethodID(jni, *j_media_codec_video_decoder_class_, "release", "()V");
j_dequeue_input_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "dequeueInputBuffer", "()I");
j_queue_input_buffer_method_ = GetMethodID(
- jni, *j_media_codec_video_decoder_class_, "queueInputBuffer", "(IIJ)Z");
- j_dequeue_output_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_decoder_class_, "queueInputBuffer", "(IIJJJ)Z");
+ j_dequeue_byte_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_decoder_class_, "dequeueOutputBuffer",
- "(I)Ljava/lang/Object;");
+ "(I)Lorg/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer;");
+ j_dequeue_texture_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_decoder_class_, "dequeueTextureBuffer",
+ "(I)Lorg/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer;");
j_return_decoded_byte_buffer_method_ =
GetMethodID(jni, *j_media_codec_video_decoder_class_,
- "returnDecodedByteBuffer", "(I)V");
+ "returnDecodedOutputBuffer", "(I)V");
j_input_buffers_field_ = GetFieldID(
jni, *j_media_codec_video_decoder_class_,
@@ -221,28 +229,36 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
jni, *j_media_codec_video_decoder_class_, "stride", "I");
j_slice_height_field_ = GetFieldID(
jni, *j_media_codec_video_decoder_class_, "sliceHeight", "I");
- j_surface_texture_field_ = GetFieldID(
- jni, *j_media_codec_video_decoder_class_, "surfaceTexture",
- "Landroid/graphics/SurfaceTexture;");
- jclass j_decoder_decoded_texture_buffer_class = FindClass(jni,
+ jclass j_decoded_texture_buffer_class = FindClass(jni,
"org/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer");
- j_textureID_field_ = GetFieldID(
- jni, j_decoder_decoded_texture_buffer_class, "textureID", "I");
- j_texture_presentation_timestamp_us_field_ =
- GetFieldID(jni, j_decoder_decoded_texture_buffer_class,
- "presentationTimestampUs", "J");
-
- jclass j_decoder_decoded_byte_buffer_class = FindClass(jni,
- "org/webrtc/MediaCodecVideoDecoder$DecodedByteBuffer");
+ j_texture_id_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "textureID", "I");
+ j_transform_matrix_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "transformMatrix", "[F");
+ j_texture_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "timeStampMs", "J");
+ j_texture_ntp_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "ntpTimeStampMs", "J");
+ j_texture_decode_time_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "decodeTimeMs", "J");
+ j_texture_frame_delay_ms_field_ = GetFieldID(
+ jni, j_decoded_texture_buffer_class, "frameDelayMs", "J");
+
+ jclass j_decoded_output_buffer_class = FindClass(jni,
+ "org/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer");
j_info_index_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "index", "I");
+ jni, j_decoded_output_buffer_class, "index", "I");
j_info_offset_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "offset", "I");
+ jni, j_decoded_output_buffer_class, "offset", "I");
j_info_size_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "size", "I");
- j_info_presentation_timestamp_us_field_ = GetFieldID(
- jni, j_decoder_decoded_byte_buffer_class, "presentationTimestampUs", "J");
+ jni, j_decoded_output_buffer_class, "size", "I");
+ j_info_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "timeStampMs", "J");
+ j_info_ntp_timestamp_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "ntpTimeStampMs", "J");
+ j_byte_buffer_decode_time_ms_field_ = GetFieldID(
+ jni, j_decoded_output_buffer_class, "decodeTimeMs", "J");
CHECK_EXCEPTION(jni) << "MediaCodecVideoDecoder ctor failed";
use_surface_ = (render_egl_context_ != NULL);
@@ -254,14 +270,6 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
// Call Release() to ensure no more callbacks to us after we are deleted.
Release();
- // Delete global references.
- JNIEnv* jni = AttachCurrentThreadIfNeeded();
- if (previous_surface_texture_ != NULL) {
- jni->DeleteGlobalRef(previous_surface_texture_);
- }
- if (surface_texture_ != NULL) {
- jni->DeleteGlobalRef(surface_texture_);
- }
}
int32_t MediaCodecVideoDecoder::InitDecode(const VideoCodec* inst,
@@ -312,6 +320,21 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
frames_received_ = 0;
frames_decoded_ = 0;
+ jobject java_surface_texture_helper_ = nullptr;
+ if (use_surface_) {
+ java_surface_texture_helper_ = jni->CallStaticObjectMethod(
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ GetStaticMethodID(jni,
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ "create",
+ "(Lorg/webrtc/EglBase$Context;)"
+ "Lorg/webrtc/SurfaceTextureHelper;"),
+ render_egl_context_);
+ RTC_CHECK(java_surface_texture_helper_ != nullptr);
+ surface_texture_helper_ = new rtc::RefCountedObject<SurfaceTextureHelper>(
+ jni, java_surface_texture_helper_);
+ }
+
jobject j_video_codec_enum = JavaEnumFromIndex(
jni, "MediaCodecVideoDecoder$VideoCodecType", codecType_);
bool success = jni->CallBooleanMethod(
@@ -320,7 +343,7 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
j_video_codec_enum,
codec_.width,
codec_.height,
- use_surface_ ? render_egl_context_ : nullptr);
+ java_surface_texture_helper_);
if (CheckException(jni) || !success) {
ALOGE << "Codec initialization error - fallback to SW codec.";
sw_fallback_required_ = true;
@@ -332,6 +355,9 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
case kVideoCodecVP8:
max_pending_frames_ = kMaxPendingFramesVp8;
break;
+ case kVideoCodecVP9:
+ max_pending_frames_ = kMaxPendingFramesVp9;
+ break;
case kVideoCodecH264:
max_pending_frames_ = kMaxPendingFramesH264;
break;
@@ -342,9 +368,6 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
current_frames_ = 0;
current_bytes_ = 0;
current_decoding_time_ms_ = 0;
- timestamps_.clear();
- ntp_times_ms_.clear();
- frame_rtc_times_ms_.clear();
jobjectArray input_buffers = (jobjectArray)GetObjectField(
jni, *j_media_codec_video_decoder_, j_input_buffers_field_);
@@ -361,15 +384,6 @@ int32_t MediaCodecVideoDecoder::InitDecodeOnCodecThread() {
}
}
- if (use_surface_) {
- jobject surface_texture = GetObjectField(
- jni, *j_media_codec_video_decoder_, j_surface_texture_field_);
- if (previous_surface_texture_ != NULL) {
- jni->DeleteGlobalRef(previous_surface_texture_);
- }
- previous_surface_texture_ = surface_texture_;
- surface_texture_ = jni->NewGlobalRef(surface_texture);
- }
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
return WEBRTC_VIDEO_CODEC_OK;
@@ -395,6 +409,7 @@ int32_t MediaCodecVideoDecoder::ReleaseOnCodecThread() {
}
input_buffers_.clear();
jni->CallVoidMethod(*j_media_codec_video_decoder_, j_release_method_);
+ surface_texture_helper_ = nullptr;
inited_ = false;
rtc::MessageQueueManager::Clear(this);
if (CheckException(jni)) {
@@ -501,19 +516,21 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
// Try to drain the decoder and wait until output is not too
// much behind the input.
- if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+ const int64_t drain_start = GetCurrentTimeMs();
+ while ((frames_received_ > frames_decoded_ + max_pending_frames_) &&
+ (GetCurrentTimeMs() - drain_start) < kMediaCodecTimeoutMs) {
ALOGV("Received: %d. Decoded: %d. Wait for output...",
frames_received_, frames_decoded_);
- if (!DeliverPendingOutputs(jni, kMediaCodecTimeoutMs * 1000)) {
+ if (!DeliverPendingOutputs(jni, kMediaCodecPollMs)) {
ALOGE << "DeliverPendingOutputs error. Frames received: " <<
frames_received_ << ". Frames decoded: " << frames_decoded_;
return ProcessHWErrorOnCodecThread();
}
- if (frames_received_ > frames_decoded_ + max_pending_frames_) {
- ALOGE << "Output buffer dequeue timeout. Frames received: " <<
- frames_received_ << ". Frames decoded: " << frames_decoded_;
- return ProcessHWErrorOnCodecThread();
- }
+ }
+ if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+ ALOGE << "Output buffer dequeue timeout. Frames received: " <<
+ frames_received_ << ". Frames decoded: " << frames_decoded_;
+ return ProcessHWErrorOnCodecThread();
}
// Get input buffer.
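The rewrite above turns a one-shot wait into a bounded drain loop: output is polled in kMediaCodecPollMs steps until the decoder catches up or kMediaCodecTimeoutMs elapses, and only then is a hard error reported. The same control flow in a Java sketch, all names illustrative:

    final long TIMEOUT_MS = 1000;
    final int POLL_MS = 10;
    final long drainStart = System.currentTimeMillis();
    while (framesReceived > framesDecoded + maxPendingFrames
        && System.currentTimeMillis() - drainStart < TIMEOUT_MS) {
      // deliverPendingOutputs() is assumed to advance framesDecoded.
      if (!deliverPendingOutputs(POLL_MS)) {
        throw new RuntimeException("Decoder error while draining.");
      }
    }
    if (framesReceived > framesDecoded + maxPendingFrames) {
      throw new RuntimeException("Output buffer dequeue timeout.");
    }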
@@ -535,11 +552,14 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
" is bigger than buffer size " << buffer_capacity;
return ProcessHWErrorOnCodecThread();
}
- jlong timestamp_us = (frames_received_ * 1000000) / codec_.maxFramerate;
+ jlong presentation_timestamp_us =
+ (frames_received_ * 1000000) / codec_.maxFramerate;
if (frames_decoded_ < kMaxDecodedLogFrames) {
ALOGD << "Decoder frame in # " << frames_received_ << ". Type: "
<< inputImage._frameType << ". Buffer # " <<
- j_input_buffer_index << ". TS: " << (int)(timestamp_us / 1000)
+ j_input_buffer_index << ". pTS: "
+ << (int)(presentation_timestamp_us / 1000)
+ << ". TS: " << inputImage._timeStamp
<< ". Size: " << inputImage._length;
}
memcpy(buffer, inputImage._buffer, inputImage._length);
@@ -547,16 +567,16 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
// Save input image timestamps for later output.
frames_received_++;
current_bytes_ += inputImage._length;
- timestamps_.push_back(inputImage._timeStamp);
- ntp_times_ms_.push_back(inputImage.ntp_time_ms_);
- frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
// Feed input to decoder.
- bool success = jni->CallBooleanMethod(*j_media_codec_video_decoder_,
- j_queue_input_buffer_method_,
- j_input_buffer_index,
- inputImage._length,
- timestamp_us);
+ bool success = jni->CallBooleanMethod(
+ *j_media_codec_video_decoder_,
+ j_queue_input_buffer_method_,
+ j_input_buffer_index,
+ inputImage._length,
+ presentation_timestamp_us,
+ static_cast<int64_t>(inputImage._timeStamp),
+ inputImage.ntp_time_ms_);
if (CheckException(jni) || !success) {
ALOGE << "queueInputBuffer error";
return ProcessHWErrorOnCodecThread();
@@ -572,16 +592,18 @@ int32_t MediaCodecVideoDecoder::DecodeOnCodecThread(
}
bool MediaCodecVideoDecoder::DeliverPendingOutputs(
- JNIEnv* jni, int dequeue_timeout_us) {
+ JNIEnv* jni, int dequeue_timeout_ms) {
if (frames_received_ <= frames_decoded_) {
// No need to query for output buffers - decoder is drained.
return true;
}
// Get decoder output.
- jobject j_decoder_output_buffer = jni->CallObjectMethod(
- *j_media_codec_video_decoder_,
- j_dequeue_output_buffer_method_,
- dequeue_timeout_us);
+ jobject j_decoder_output_buffer =
+ jni->CallObjectMethod(*j_media_codec_video_decoder_,
+ use_surface_ ? j_dequeue_texture_buffer_method_
+ : j_dequeue_byte_buffer_method_,
+ dequeue_timeout_ms);
+
if (CheckException(jni)) {
ALOGE << "dequeueOutputBuffer() error";
return false;
@@ -601,19 +623,35 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
j_slice_height_field_);
rtc::scoped_refptr<webrtc::VideoFrameBuffer> frame_buffer;
- long output_timestamps_ms = 0;
+ int64_t output_timestamps_ms = 0;
+ int64_t output_ntp_timestamps_ms = 0;
+ int decode_time_ms = 0;
+ int64_t frame_delayed_ms = 0;
if (use_surface_) {
// Extract data from Java DecodedTextureBuffer.
const int texture_id =
- GetIntField(jni, j_decoder_output_buffer, j_textureID_field_);
- const int64_t timestamp_us =
- GetLongField(jni, j_decoder_output_buffer,
- j_texture_presentation_timestamp_us_field_);
- output_timestamps_ms = timestamp_us / rtc::kNumMicrosecsPerMillisec;
- // Create webrtc::VideoFrameBuffer with native texture handle.
- native_handle_.SetTextureObject(surface_texture_, texture_id);
- frame_buffer = new rtc::RefCountedObject<JniNativeHandleBuffer>(
- &native_handle_, width, height);
+ GetIntField(jni, j_decoder_output_buffer, j_texture_id_field_);
+ if (texture_id != 0) { // |texture_id| == 0 represents a dropped frame.
+ const jfloatArray j_transform_matrix =
+ reinterpret_cast<jfloatArray>(GetObjectField(
+ jni, j_decoder_output_buffer, j_transform_matrix_field_));
+ output_timestamps_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_timestamp_ms_field_);
+ output_ntp_timestamps_ms =
+ GetLongField(jni, j_decoder_output_buffer,
+ j_texture_ntp_timestamp_ms_field_);
+ decode_time_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_decode_time_ms_field_);
+ frame_delayed_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_texture_frame_delay_ms_field_);
+
+ // Create webrtc::VideoFrameBuffer with native texture handle.
+ frame_buffer = surface_texture_helper_->CreateTextureFrame(
+ width, height, NativeHandleImpl(jni, texture_id, j_transform_matrix));
+ }
} else {
// Extract data from Java ByteBuffer and create output yuv420 frame -
// for non-surface decoding only.
@@ -623,9 +661,14 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
GetIntField(jni, j_decoder_output_buffer, j_info_offset_field_);
const int output_buffer_size =
GetIntField(jni, j_decoder_output_buffer, j_info_size_field_);
- const int64_t timestamp_us = GetLongField(
- jni, j_decoder_output_buffer, j_info_presentation_timestamp_us_field_);
- output_timestamps_ms = timestamp_us / rtc::kNumMicrosecsPerMillisec;
+ output_timestamps_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_info_timestamp_ms_field_);
+ output_ntp_timestamps_ms =
+ GetLongField(jni, j_decoder_output_buffer,
+ j_info_ntp_timestamp_ms_field_);
+
+ decode_time_ms = GetLongField(jni, j_decoder_output_buffer,
+ j_byte_buffer_decode_time_ms_field_);
if (output_buffer_size < width * height * 3 / 2) {
ALOGE << "Insufficient output buffer size: " << output_buffer_size;
@@ -683,41 +726,31 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
j_return_decoded_byte_buffer_method_,
output_buffer_index);
if (CheckException(jni)) {
- ALOGE << "returnDecodedByteBuffer error";
+ ALOGE << "returnDecodedOutputBuffer error";
return false;
}
}
VideoFrame decoded_frame(frame_buffer, 0, 0, webrtc::kVideoRotation_0);
+ decoded_frame.set_timestamp(output_timestamps_ms);
+ decoded_frame.set_ntp_time_ms(output_ntp_timestamps_ms);
- // Get frame timestamps from a queue.
- if (timestamps_.size() > 0) {
- decoded_frame.set_timestamp(timestamps_.front());
- timestamps_.erase(timestamps_.begin());
- }
- if (ntp_times_ms_.size() > 0) {
- decoded_frame.set_ntp_time_ms(ntp_times_ms_.front());
- ntp_times_ms_.erase(ntp_times_ms_.begin());
- }
- int64_t frame_decoding_time_ms = 0;
- if (frame_rtc_times_ms_.size() > 0) {
- frame_decoding_time_ms = GetCurrentTimeMs() - frame_rtc_times_ms_.front();
- frame_rtc_times_ms_.erase(frame_rtc_times_ms_.begin());
- }
if (frames_decoded_ < kMaxDecodedLogFrames) {
ALOGD << "Decoder frame out # " << frames_decoded_ << ". " << width <<
" x " << height << ". " << stride << " x " << slice_height <<
- ". Color: " << color_format << ". TS:" << (int)output_timestamps_ms <<
- ". DecTime: " << (int)frame_decoding_time_ms;
+ ". Color: " << color_format << ". TS:" << decoded_frame.timestamp() <<
+ ". DecTime: " << (int)decode_time_ms <<
+ ". DelayTime: " << (int)frame_delayed_ms;
}
// Calculate and print decoding statistics - every 3 seconds.
frames_decoded_++;
current_frames_++;
- current_decoding_time_ms_ += frame_decoding_time_ms;
+ current_decoding_time_ms_ += decode_time_ms;
int statistic_time_ms = GetCurrentTimeMs() - start_time_ms_;
if (statistic_time_ms >= kMediaCodecStatisticsIntervalMs &&
current_frames_ > 0) {
- ALOGD << "Decoded frames: " << frames_decoded_ << ". Bitrate: " <<
+ ALOGD << "Decoded frames: " << frames_decoded_ << ". Received frames: "
+ << frames_received_ << ". Bitrate: " <<
(current_bytes_ * 8 / statistic_time_ms) << " kbps, fps: " <<
((current_frames_ * 1000 + statistic_time_ms / 2) / statistic_time_ms)
<< ". decTime: " << (current_decoding_time_ms_ / current_frames_) <<
@@ -728,12 +761,15 @@ bool MediaCodecVideoDecoder::DeliverPendingOutputs(
current_decoding_time_ms_ = 0;
}
- // Callback - output decoded frame.
- const int32_t callback_status = callback_->Decoded(decoded_frame);
- if (callback_status > 0) {
- ALOGE << "callback error";
+ // |IsZeroSize()| returns true when a frame has been dropped.
+ if (!decoded_frame.IsZeroSize()) {
+ // Callback - output decoded frame.
+ const int32_t callback_status =
+ callback_->Decoded(decoded_frame, decode_time_ms);
+ if (callback_status > 0) {
+ ALOGE << "callback error";
+ }
}
-
return true;
}
@@ -790,6 +826,17 @@ MediaCodecVideoDecoderFactory::MediaCodecVideoDecoderFactory() :
supported_codec_types_.push_back(kVideoCodecVP8);
}
+ bool is_vp9_hw_supported = jni->CallStaticBooleanMethod(
+ j_decoder_class,
+ GetStaticMethodID(jni, j_decoder_class, "isVp9HwSupported", "()Z"));
+ if (CheckException(jni)) {
+ is_vp9_hw_supported = false;
+ }
+ if (is_vp9_hw_supported) {
+ ALOGD << "VP9 HW Decoder supported.";
+ supported_codec_types_.push_back(kVideoCodecVP9);
+ }
+
bool is_h264_hw_supported = jni->CallStaticBooleanMethod(
j_decoder_class,
GetStaticMethodID(jni, j_decoder_class, "isH264HwSupported", "()Z"));
@@ -825,7 +872,7 @@ void MediaCodecVideoDecoderFactory::SetEGLContext(
render_egl_context_ = NULL;
} else {
jclass j_egl_context_class =
- FindClass(jni, "javax/microedition/khronos/egl/EGLContext");
+ FindClass(jni, "org/webrtc/EglBase$Context");
if (!jni->IsInstanceOf(render_egl_context_, j_egl_context_class)) {
ALOGE << "Wrong EGL Context.";
jni->DeleteGlobalRef(render_egl_context_);
@@ -841,7 +888,7 @@ void MediaCodecVideoDecoderFactory::SetEGLContext(
webrtc::VideoDecoder* MediaCodecVideoDecoderFactory::CreateVideoDecoder(
VideoCodecType type) {
if (supported_codec_types_.empty()) {
- ALOGE << "No HW video decoder for type " << (int)type;
+ ALOGW << "No HW video decoder for type " << (int)type;
return NULL;
}
for (VideoCodecType codec_type : supported_codec_types_) {
@@ -851,7 +898,7 @@ webrtc::VideoDecoder* MediaCodecVideoDecoderFactory::CreateVideoDecoder(
AttachCurrentThreadIfNeeded(), type, render_egl_context_);
}
}
- ALOGE << "Can not find HW video decoder for type " << (int)type;
+ ALOGW << "Can not find HW video decoder for type " << (int)type;
return NULL;
}
@@ -861,5 +908,9 @@ void MediaCodecVideoDecoderFactory::DestroyVideoDecoder(
delete decoder;
}
+const char* MediaCodecVideoDecoder::ImplementationName() const {
+ return "MediaCodec";
+}
+
} // namespace webrtc_jni
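The factory probes decoder support through static Java methods; this diff references isVp9HwSupported via the JNI signature ()Z. From the Java side the equivalent probe is a plain static call (sketch; assumes the method is accessible to the caller):

    if (MediaCodecVideoDecoder.isVp9HwSupported()) {
      android.util.Log.d("DecoderDemo", "VP9 HW decoder available.");
    }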
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
index ac349e7faf..64831c3174 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
@@ -29,14 +29,16 @@
#include "talk/app/webrtc/java/jni/androidmediaencoder_jni.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/androidmediacodeccommon.h"
+#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
#include "webrtc/system_wrappers/include/field_trial.h"
#include "webrtc/system_wrappers/include/logcat_trace_context.h"
#include "third_party/libyuv/include/libyuv/convert.h"
@@ -56,6 +58,7 @@ using webrtc::VideoCodec;
using webrtc::VideoCodecType;
using webrtc::kVideoCodecH264;
using webrtc::kVideoCodecVP8;
+using webrtc::kVideoCodecVP9;
namespace webrtc_jni {
@@ -79,7 +82,9 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
public rtc::MessageHandler {
public:
virtual ~MediaCodecVideoEncoder();
- explicit MediaCodecVideoEncoder(JNIEnv* jni, VideoCodecType codecType);
+ MediaCodecVideoEncoder(JNIEnv* jni,
+ VideoCodecType codecType,
+ jobject egl_context);
// webrtc::VideoEncoder implementation. Everything trampolines to
// |codec_thread_| for execution.
@@ -103,13 +108,18 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
int GetTargetFramerate() override;
+ bool SupportsNativeHandle() const override { return true; }
+ const char* ImplementationName() const override;
+
private:
// CHECK-fail if not running on |codec_thread_|.
void CheckOnCodecThread();
- // Release() and InitEncode() in an attempt to restore the codec to an
+ private:
+ // ResetCodecOnCodecThread() calls ReleaseOnCodecThread() and
+ // InitEncodeOnCodecThread() in an attempt to restore the codec to an
// operable state. Necessary after all manner of OMX-layer errors.
- void ResetCodec();
+ bool ResetCodecOnCodecThread();
// Implementation of webrtc::VideoEncoder methods above, all running on the
// codec thread exclusively.
@@ -117,10 +127,20 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// If width==0 then this is assumed to be a re-initialization and the
// previously-current values are reused instead of the passed parameters
// (makes it easier to reason about thread-safety).
- int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps);
+ int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps,
+ bool use_surface);
+ // Reconfigure to match |frame| in width, height. Also reconfigures the
+ // encoder if |frame| is a texture/byte buffer and the encoder is initialized
+ // for byte buffer/texture. Returns false if reconfiguring fails.
+ bool MaybeReconfigureEncoderOnCodecThread(const webrtc::VideoFrame& frame);
int32_t EncodeOnCodecThread(
const webrtc::VideoFrame& input_image,
const std::vector<webrtc::FrameType>* frame_types);
+ bool EncodeByteBufferOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame, int input_buffer_index);
+ bool EncodeTextureOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame);
+
int32_t RegisterEncodeCompleteCallbackOnCodecThread(
webrtc::EncodedImageCallback* callback);
int32_t ReleaseOnCodecThread();
@@ -150,11 +170,14 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// State that is constant for the lifetime of this object once the ctor
// returns.
scoped_ptr<Thread> codec_thread_; // Thread on which to operate MediaCodec.
+ rtc::ThreadChecker codec_thread_checker_;
ScopedGlobalRef<jclass> j_media_codec_video_encoder_class_;
ScopedGlobalRef<jobject> j_media_codec_video_encoder_;
jmethodID j_init_encode_method_;
+ jmethodID j_get_input_buffers_method_;
jmethodID j_dequeue_input_buffer_method_;
- jmethodID j_encode_method_;
+ jmethodID j_encode_buffer_method_;
+ jmethodID j_encode_texture_method_;
jmethodID j_release_method_;
jmethodID j_set_rates_method_;
jmethodID j_dequeue_output_buffer_method_;
@@ -170,6 +193,7 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
int width_; // Frame width in pixels.
int height_; // Frame height in pixels.
bool inited_;
+ bool use_surface_;
uint16_t picture_id_;
enum libyuv::FourCC encoder_fourcc_; // Encoder color space format.
int last_set_bitrate_kbps_; // Last-requested bitrate in kbps.
@@ -205,6 +229,16 @@ class MediaCodecVideoEncoder : public webrtc::VideoEncoder,
// H264 bitstream parser, used to extract QP from encoded bitstreams.
webrtc::H264BitstreamParser h264_bitstream_parser_;
+
+ // VP9 variables to populate codec specific structure.
+ webrtc::GofInfoVP9 gof_; // Contains each frame's temporal information for
+ // non-flexible VP9 mode.
+ uint8_t tl0_pic_idx_;
+ size_t gof_idx_;
+
+ // EGL context - owned by factory, should not be allocated/destroyed
+ // by MediaCodecVideoEncoder.
+ jobject egl_context_;
};
MediaCodecVideoEncoder::~MediaCodecVideoEncoder() {
@@ -213,11 +247,9 @@ MediaCodecVideoEncoder::~MediaCodecVideoEncoder() {
}
MediaCodecVideoEncoder::MediaCodecVideoEncoder(
- JNIEnv* jni, VideoCodecType codecType) :
+ JNIEnv* jni, VideoCodecType codecType, jobject egl_context) :
codecType_(codecType),
callback_(NULL),
- inited_(false),
- picture_id_(0),
codec_thread_(new Thread()),
j_media_codec_video_encoder_class_(
jni,
@@ -228,7 +260,11 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
GetMethodID(jni,
*j_media_codec_video_encoder_class_,
"<init>",
- "()V"))) {
+ "()V"))),
+ inited_(false),
+ use_surface_(false),
+ picture_id_(0),
+ egl_context_(egl_context) {
ScopedLocalRefFrame local_ref_frame(jni);
// It would be nice to avoid spinning up a new thread per MediaCodec, and
// instead re-use e.g. the PeerConnectionFactory's |worker_thread_|, but bug
@@ -239,19 +275,27 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
// thread.
codec_thread_->SetName("MediaCodecVideoEncoder", NULL);
RTC_CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoEncoder";
-
+ codec_thread_checker_.DetachFromThread();
jclass j_output_buffer_info_class =
FindClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
j_init_encode_method_ = GetMethodID(
jni,
*j_media_codec_video_encoder_class_,
"initEncode",
- "(Lorg/webrtc/MediaCodecVideoEncoder$VideoCodecType;IIII)"
- "[Ljava/nio/ByteBuffer;");
+ "(Lorg/webrtc/MediaCodecVideoEncoder$VideoCodecType;"
+ "IIIILorg/webrtc/EglBase14$Context;)Z");
+ j_get_input_buffers_method_ = GetMethodID(
+ jni,
+ *j_media_codec_video_encoder_class_,
+ "getInputBuffers",
+ "()[Ljava/nio/ByteBuffer;");
j_dequeue_input_buffer_method_ = GetMethodID(
jni, *j_media_codec_video_encoder_class_, "dequeueInputBuffer", "()I");
- j_encode_method_ = GetMethodID(
- jni, *j_media_codec_video_encoder_class_, "encode", "(ZIIJ)Z");
+ j_encode_buffer_method_ = GetMethodID(
+ jni, *j_media_codec_video_encoder_class_, "encodeBuffer", "(ZIIJ)Z");
+ j_encode_texture_method_ = GetMethodID(
+ jni, *j_media_codec_video_encoder_class_, "encodeTexture",
+ "(ZI[FJ)Z");
j_release_method_ =
GetMethodID(jni, *j_media_codec_video_encoder_class_, "release", "()V");
j_set_rates_method_ = GetMethodID(
@@ -275,6 +319,7 @@ MediaCodecVideoEncoder::MediaCodecVideoEncoder(
j_info_presentation_timestamp_us_field_ = GetFieldID(
jni, j_output_buffer_info_class, "presentationTimestampUs", "J");
CHECK_EXCEPTION(jni) << "MediaCodecVideoEncoder ctor failed";
+ srand(time(NULL));
AllowBlockingCalls();
}
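The strings passed to GetMethodID above are standard JNI type signatures: Z is boolean, I is int, J is long, [F is float[], and Lpkg/Class; is an object type. So the cached "(ZI[FJ)Z" for encodeTexture pairs with a call shaped like this sketch (a self-contained illustration, not code from this file):

    // Argument order follows the "(ZI[FJ)Z" signature cached above.
    jboolean EncodeTextureCall(JNIEnv* jni, jobject encoder, jmethodID mid,
                               jint texture_id, jfloatArray j_matrix,
                               jlong timestamp_us) {
      return jni->CallBooleanMethod(encoder, mid,
                                    static_cast<jboolean>(true),  // Z
                                    texture_id,                   // I
                                    j_matrix,                     // [F
                                    timestamp_us);                // J
    }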
@@ -295,8 +340,8 @@ int32_t MediaCodecVideoEncoder::InitEncode(
<< codecType_;
ALOGD << "InitEncode request";
- scale_ = webrtc::field_trial::FindFullName(
- "WebRTC-MediaCodecVideoEncoder-AutomaticResize") == "Enabled";
+ scale_ = (codecType_ != kVideoCodecVP9) && (webrtc::field_trial::FindFullName(
+ "WebRTC-MediaCodecVideoEncoder-AutomaticResize") == "Enabled");
ALOGD << "Encoder automatic resize " << (scale_ ? "enabled" : "disabled");
if (scale_) {
if (codecType_ == kVideoCodecVP8) {
@@ -331,7 +376,8 @@ int32_t MediaCodecVideoEncoder::InitEncode(
codec_settings->width,
codec_settings->height,
codec_settings->startBitrate,
- codec_settings->maxFramerate));
+ codec_settings->maxFramerate,
+ false /* use_surface */));
}
int32_t MediaCodecVideoEncoder::Encode(
@@ -374,6 +420,7 @@ int32_t MediaCodecVideoEncoder::SetRates(uint32_t new_bit_rate,
}
void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
@@ -381,7 +428,6 @@ void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
// functor), so expect no ID/data.
RTC_CHECK(!msg->message_id) << "Unexpected message!";
RTC_CHECK(!msg->pdata) << "Unexpected message!";
- CheckOnCodecThread();
if (!inited_) {
return;
}
@@ -393,26 +439,24 @@ void MediaCodecVideoEncoder::OnMessage(rtc::Message* msg) {
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
}
-void MediaCodecVideoEncoder::CheckOnCodecThread() {
- RTC_CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
- << "Running on wrong thread!";
-}
-
-void MediaCodecVideoEncoder::ResetCodec() {
- ALOGE << "ResetCodec";
- if (Release() != WEBRTC_VIDEO_CODEC_OK ||
- codec_thread_->Invoke<int32_t>(Bind(
- &MediaCodecVideoEncoder::InitEncodeOnCodecThread, this,
- width_, height_, 0, 0)) != WEBRTC_VIDEO_CODEC_OK) {
+bool MediaCodecVideoEncoder::ResetCodecOnCodecThread() {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ ALOGE << "ResetOnCodecThread";
+ if (ReleaseOnCodecThread() != WEBRTC_VIDEO_CODEC_OK ||
+ InitEncodeOnCodecThread(width_, height_, 0, 0, false) !=
+ WEBRTC_VIDEO_CODEC_OK) {
// TODO(fischman): wouldn't it be nice if there was a way to gracefully
// degrade to a SW encoder at this point? There isn't one AFAICT :(
// https://code.google.com/p/webrtc/issues/detail?id=2920
+ return false;
}
+ return true;
}
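The rtc::ThreadChecker that replaces CheckOnCodecThread() binds to whichever thread first calls CalledOnValidThread() after DetachFromThread(), then DCHECKs calls from any other thread. The pattern in isolation, independent of this class:

    #include "webrtc/base/checks.h"
    #include "webrtc/base/thread_checker.h"

    class CodecWorker {
     public:
      CodecWorker() { checker_.DetachFromThread(); }  // Bind lazily, not here.
      void WorkOnCodecThread() {
        // The first call binds |checker_| to the current thread; later calls
        // from any other thread fail RTC_DCHECK in debug builds.
        RTC_DCHECK(checker_.CalledOnValidThread());
      }
     private:
      rtc::ThreadChecker checker_;
    };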
int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
- int width, int height, int kbps, int fps) {
- CheckOnCodecThread();
+ int width, int height, int kbps, int fps, bool use_surface) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(!use_surface || egl_context_ != nullptr) << "EGL context not set.";
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
@@ -448,52 +492,63 @@ int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
render_times_ms_.clear();
frame_rtc_times_ms_.clear();
drop_next_input_frame_ = false;
+ use_surface_ = use_surface;
picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
+ gof_.SetGofInfoVP9(webrtc::TemporalStructureMode::kTemporalStructureMode1);
+ tl0_pic_idx_ = static_cast<uint8_t>(rand());
+ gof_idx_ = 0;
+
// We enforce no extra stride/padding in the format creation step.
jobject j_video_codec_enum = JavaEnumFromIndex(
jni, "MediaCodecVideoEncoder$VideoCodecType", codecType_);
- jobjectArray input_buffers = reinterpret_cast<jobjectArray>(
- jni->CallObjectMethod(*j_media_codec_video_encoder_,
- j_init_encode_method_,
- j_video_codec_enum,
- width_,
- height_,
- kbps,
- fps));
- CHECK_EXCEPTION(jni);
- if (IsNull(jni, input_buffers)) {
+ const bool encode_status = jni->CallBooleanMethod(
+ *j_media_codec_video_encoder_, j_init_encode_method_,
+ j_video_codec_enum, width, height, kbps, fps,
+ (use_surface ? egl_context_ : nullptr));
+ if (!encode_status) {
+ ALOGE << "Failed to configure encoder.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
+ CHECK_EXCEPTION(jni);
- inited_ = true;
- switch (GetIntField(jni, *j_media_codec_video_encoder_,
- j_color_format_field_)) {
- case COLOR_FormatYUV420Planar:
- encoder_fourcc_ = libyuv::FOURCC_YU12;
- break;
- case COLOR_FormatYUV420SemiPlanar:
- case COLOR_QCOM_FormatYUV420SemiPlanar:
- case COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m:
- encoder_fourcc_ = libyuv::FOURCC_NV12;
- break;
- default:
- LOG(LS_ERROR) << "Wrong color format.";
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- size_t num_input_buffers = jni->GetArrayLength(input_buffers);
- RTC_CHECK(input_buffers_.empty())
- << "Unexpected double InitEncode without Release";
- input_buffers_.resize(num_input_buffers);
- for (size_t i = 0; i < num_input_buffers; ++i) {
- input_buffers_[i] =
- jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
- int64_t yuv_buffer_capacity =
- jni->GetDirectBufferCapacity(input_buffers_[i]);
+ if (!use_surface) {
+ jobjectArray input_buffers = reinterpret_cast<jobjectArray>(
+ jni->CallObjectMethod(*j_media_codec_video_encoder_,
+ j_get_input_buffers_method_));
CHECK_EXCEPTION(jni);
- RTC_CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
+ if (IsNull(jni, input_buffers)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ switch (GetIntField(jni, *j_media_codec_video_encoder_,
+ j_color_format_field_)) {
+ case COLOR_FormatYUV420Planar:
+ encoder_fourcc_ = libyuv::FOURCC_YU12;
+ break;
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_QCOM_FormatYUV420SemiPlanar:
+ case COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m:
+ encoder_fourcc_ = libyuv::FOURCC_NV12;
+ break;
+ default:
+ LOG(LS_ERROR) << "Wrong color format.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ size_t num_input_buffers = jni->GetArrayLength(input_buffers);
+ RTC_CHECK(input_buffers_.empty())
+ << "Unexpected double InitEncode without Release";
+ input_buffers_.resize(num_input_buffers);
+ for (size_t i = 0; i < num_input_buffers; ++i) {
+ input_buffers_[i] =
+ jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
+ int64_t yuv_buffer_capacity =
+ jni->GetDirectBufferCapacity(input_buffers_[i]);
+ CHECK_EXCEPTION(jni);
+ RTC_CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
+ }
}
- CHECK_EXCEPTION(jni);
+ inited_ = true;
codec_thread_->PostDelayed(kMediaCodecPollMs, this);
return WEBRTC_VIDEO_CODEC_OK;
}
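The color-format switch above is where MediaCodec's integer formats meet libyuv. The same mapping as a standalone helper; a sketch mirroring the switch, with the COLOR_* constants coming from androidmediacodeccommon.h:

    // Returns true and sets |fourcc| for input formats the byte-buffer path
    // can convert into; anything else must be rejected.
    bool FourCCForColorFormat(int color_format, libyuv::FourCC* fourcc) {
      switch (color_format) {
        case COLOR_FormatYUV420Planar:
          *fourcc = libyuv::FOURCC_YU12;  // Planar I420 layout.
          return true;
        case COLOR_FormatYUV420SemiPlanar:
        case COLOR_QCOM_FormatYUV420SemiPlanar:
        case COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m:
          *fourcc = libyuv::FOURCC_NV12;  // Semi-planar, interleaved UV.
          return true;
        default:
          return false;
      }
    }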
@@ -501,40 +556,53 @@ int32_t MediaCodecVideoEncoder::InitEncodeOnCodecThread(
int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
const webrtc::VideoFrame& frame,
const std::vector<webrtc::FrameType>* frame_types) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
+
frames_received_++;
if (!DeliverPendingOutputs(jni)) {
- ResetCodec();
- // Continue as if everything's fine.
+ if (!ResetCodecOnCodecThread())
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
if (drop_next_input_frame_) {
- ALOGV("Encoder drop frame - failed callback.");
+ ALOGW << "Encoder drop frame - failed callback.";
drop_next_input_frame_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
RTC_CHECK(frame_types->size() == 1) << "Unexpected stream count";
- // Check framerate before spatial resolution change.
- if (scale_)
- quality_scaler_.OnEncodeFrame(frame);
- const VideoFrame& input_frame =
- scale_ ? quality_scaler_.GetScaledFrame(frame) : frame;
+ VideoFrame input_frame = frame;
+ if (scale_) {
+ // Check framerate before spatial resolution change.
+ quality_scaler_.OnEncodeFrame(frame);
+ const webrtc::QualityScaler::Resolution scaled_resolution =
+ quality_scaler_.GetScaledResolution();
+ if (scaled_resolution.width != frame.width() ||
+ scaled_resolution.height != frame.height()) {
+ if (frame.native_handle() != nullptr) {
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> scaled_buffer(
+ static_cast<AndroidTextureBuffer*>(
+ frame.video_frame_buffer().get())->ScaleAndRotate(
+ scaled_resolution.width,
+ scaled_resolution.height,
+ webrtc::kVideoRotation_0));
+ input_frame.set_video_frame_buffer(scaled_buffer);
+ } else {
+ input_frame = quality_scaler_.GetScaledFrame(frame);
+ }
+ }
+ }
- if (input_frame.width() != width_ || input_frame.height() != height_) {
- ALOGD << "Frame resolution change from " << width_ << " x " << height_ <<
- " to " << input_frame.width() << " x " << input_frame.height();
- width_ = input_frame.width();
- height_ = input_frame.height();
- ResetCodec();
- return WEBRTC_VIDEO_CODEC_OK;
+ if (!MaybeReconfigureEncoderOnCodecThread(input_frame)) {
+ ALOGE << "Failed to reconfigure encoder.";
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
// Check if we accumulated too many frames in encoder input buffers
@@ -552,65 +620,138 @@ int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
}
}
- int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_encoder_,
- j_dequeue_input_buffer_method_);
- CHECK_EXCEPTION(jni);
- if (j_input_buffer_index == -1) {
- // Video codec falls behind - no input buffer available.
- ALOGV("Encoder drop frame - no input buffers available");
- frames_dropped_++;
- // Report dropped frame to quality_scaler_.
- OnDroppedFrame();
- return WEBRTC_VIDEO_CODEC_OK; // TODO(fischman): see webrtc bug 2887.
- }
- if (j_input_buffer_index == -2) {
- ResetCodec();
+ const bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
+ bool encode_status = true;
+ if (!input_frame.native_handle()) {
+ int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_encoder_,
+ j_dequeue_input_buffer_method_);
+ CHECK_EXCEPTION(jni);
+ if (j_input_buffer_index == -1) {
+ // Video codec falls behind - no input buffer available.
+ ALOGW << "Encoder drop frame - no input buffers available";
+ frames_dropped_++;
+ // Report dropped frame to quality_scaler_.
+ OnDroppedFrame();
+ return WEBRTC_VIDEO_CODEC_OK; // TODO(fischman): see webrtc bug 2887.
+ }
+ if (j_input_buffer_index == -2) {
+ ResetCodecOnCodecThread();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ encode_status = EncodeByteBufferOnCodecThread(jni, key_frame, input_frame,
+ j_input_buffer_index);
+ } else {
+ encode_status = EncodeTextureOnCodecThread(jni, key_frame, input_frame);
+ }
+
+ if (!encode_status) {
+    ALOGE << "Failed to encode frame with timestamp: "
+          << input_frame.timestamp();
+ ResetCodecOnCodecThread();
return WEBRTC_VIDEO_CODEC_ERROR;
}
+ last_input_timestamp_ms_ =
+ current_timestamp_us_ / rtc::kNumMicrosecsPerMillisec;
+ frames_in_queue_++;
+
+ // Save input image timestamps for later output
+ timestamps_.push_back(input_frame.timestamp());
+ render_times_ms_.push_back(input_frame.render_time_ms());
+ frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
+ current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
+
+ if (!DeliverPendingOutputs(jni)) {
+    ALOGE << "Failed to deliver pending outputs.";
+ ResetCodecOnCodecThread();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+bool MediaCodecVideoEncoder::MaybeReconfigureEncoderOnCodecThread(
+ const webrtc::VideoFrame& frame) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+
+ const bool is_texture_frame = frame.native_handle() != nullptr;
+ const bool reconfigure_due_to_format = is_texture_frame != use_surface_;
+ const bool reconfigure_due_to_size =
+ frame.width() != width_ || frame.height() != height_;
+
+ if (reconfigure_due_to_format) {
+ ALOGD << "Reconfigure encoder due to format change. "
+ << (use_surface_ ?
+ "Reconfiguring to encode from byte buffer." :
+ "Reconfiguring to encode from texture.");
+ }
+ if (reconfigure_due_to_size) {
+ ALOGD << "Reconfigure encoder due to frame resolution change from "
+ << width_ << " x " << height_ << " to " << frame.width() << " x "
+ << frame.height();
+ width_ = frame.width();
+ height_ = frame.height();
+ }
+
+ if (!reconfigure_due_to_format && !reconfigure_due_to_size)
+ return true;
+
+ ReleaseOnCodecThread();
+
+  return InitEncodeOnCodecThread(width_, height_, 0, 0, is_texture_frame) ==
+ WEBRTC_VIDEO_CODEC_OK;
+}
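MaybeReconfigureEncoderOnCodecThread() restarts the codec on either of two independent triggers. The decision distilled into a pure function, as a sketch of the logic above rather than a separate API:

    bool NeedsReconfigure(bool frame_is_texture, bool encoder_uses_surface,
                          int frame_width, int frame_height,
                          int encoder_width, int encoder_height) {
      const bool format_changed = frame_is_texture != encoder_uses_surface;
      const bool size_changed = frame_width != encoder_width ||
                                frame_height != encoder_height;
      return format_changed || size_changed;  // Either forces Release+Init.
    }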
+
+bool MediaCodecVideoEncoder::EncodeByteBufferOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame, int input_buffer_index) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(!use_surface_);
+
ALOGV("Encoder frame in # %d. TS: %lld. Q: %d",
frames_received_ - 1, current_timestamp_us_ / 1000, frames_in_queue_);
- jobject j_input_buffer = input_buffers_[j_input_buffer_index];
+ jobject j_input_buffer = input_buffers_[input_buffer_index];
uint8_t* yuv_buffer =
reinterpret_cast<uint8_t*>(jni->GetDirectBufferAddress(j_input_buffer));
CHECK_EXCEPTION(jni);
RTC_CHECK(yuv_buffer) << "Indirect buffer??";
RTC_CHECK(!libyuv::ConvertFromI420(
- input_frame.buffer(webrtc::kYPlane), input_frame.stride(webrtc::kYPlane),
- input_frame.buffer(webrtc::kUPlane), input_frame.stride(webrtc::kUPlane),
- input_frame.buffer(webrtc::kVPlane), input_frame.stride(webrtc::kVPlane),
+ frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+ frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
yuv_buffer, width_, width_, height_, encoder_fourcc_))
<< "ConvertFromI420 failed";
- last_input_timestamp_ms_ = current_timestamp_us_ / 1000;
- frames_in_queue_++;
- // Save input image timestamps for later output
- timestamps_.push_back(input_frame.timestamp());
- render_times_ms_.push_back(input_frame.render_time_ms());
- frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
-
- bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
- j_encode_method_,
+ j_encode_buffer_method_,
key_frame,
- j_input_buffer_index,
+ input_buffer_index,
yuv_size_,
current_timestamp_us_);
CHECK_EXCEPTION(jni);
- current_timestamp_us_ += 1000000 / last_set_fps_;
+ return encode_status;
+}
- if (!encode_status || !DeliverPendingOutputs(jni)) {
- ResetCodec();
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
+bool MediaCodecVideoEncoder::EncodeTextureOnCodecThread(JNIEnv* jni,
+ bool key_frame, const webrtc::VideoFrame& frame) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
+ RTC_CHECK(use_surface_);
+ NativeHandleImpl* handle =
+ static_cast<NativeHandleImpl*>(frame.native_handle());
+ jfloatArray sampling_matrix = jni->NewFloatArray(16);
+ jni->SetFloatArrayRegion(sampling_matrix, 0, 16, handle->sampling_matrix);
- return WEBRTC_VIDEO_CODEC_OK;
+ bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
+ j_encode_texture_method_,
+ key_frame,
+ handle->oes_texture_id,
+ sampling_matrix,
+ current_timestamp_us_);
+ CHECK_EXCEPTION(jni);
+ return encode_status;
}
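EncodeTextureOnCodecThread() marshals the 4x4 texture transform to Java as a 16-float array every frame. That step in isolation; the local reference is reclaimed by the caller's ScopedLocalRefFrame:

    // |matrix| is column-major (OpenGL order), as produced by
    // SurfaceTexture.getTransformMatrix().
    jfloatArray ToJavaMatrix(JNIEnv* jni, const float matrix[16]) {
      jfloatArray j_matrix = jni->NewFloatArray(16);
      jni->SetFloatArrayRegion(j_matrix, 0, 16, matrix);
      return j_matrix;  // Local ref; freed with the enclosing local ref frame.
    }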
int32_t MediaCodecVideoEncoder::RegisterEncodeCompleteCallbackOnCodecThread(
webrtc::EncodedImageCallback* callback) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
callback_ = callback;
@@ -618,10 +759,10 @@ int32_t MediaCodecVideoEncoder::RegisterEncodeCompleteCallbackOnCodecThread(
}
int32_t MediaCodecVideoEncoder::ReleaseOnCodecThread() {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
if (!inited_) {
return WEBRTC_VIDEO_CODEC_OK;
}
- CheckOnCodecThread();
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ALOGD << "EncoderReleaseOnCodecThread: Frames received: " <<
frames_received_ << ". Encoded: " << frames_encoded_ <<
@@ -634,13 +775,14 @@ int32_t MediaCodecVideoEncoder::ReleaseOnCodecThread() {
CHECK_EXCEPTION(jni);
rtc::MessageQueueManager::Clear(this);
inited_ = false;
+ use_surface_ = false;
ALOGD << "EncoderReleaseOnCodecThread done.";
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t MediaCodecVideoEncoder::SetRatesOnCodecThread(uint32_t new_bit_rate,
uint32_t frame_rate) {
- CheckOnCodecThread();
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
if (last_set_bitrate_kbps_ == new_bit_rate &&
last_set_fps_ == frame_rate) {
return WEBRTC_VIDEO_CODEC_OK;
@@ -659,7 +801,7 @@ int32_t MediaCodecVideoEncoder::SetRatesOnCodecThread(uint32_t new_bit_rate,
last_set_fps_);
CHECK_EXCEPTION(jni);
if (!ret) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
@@ -691,6 +833,7 @@ jlong MediaCodecVideoEncoder::GetOutputBufferInfoPresentationTimestampUs(
}
bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
+ RTC_DCHECK(codec_thread_checker_.CalledOnValidThread());
while (true) {
jobject j_output_buffer_info = jni->CallObjectMethod(
*j_media_codec_video_encoder_, j_dequeue_output_buffer_method_);
@@ -702,7 +845,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
int output_buffer_index =
GetOutputBufferInfoIndex(jni, j_output_buffer_info);
if (output_buffer_index == -1) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
@@ -786,19 +929,42 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
info.codecSpecific.VP8.layerSync = false;
info.codecSpecific.VP8.tl0PicIdx = webrtc::kNoTl0PicIdx;
info.codecSpecific.VP8.keyIdx = webrtc::kNoKeyIdx;
- picture_id_ = (picture_id_ + 1) & 0x7FFF;
+ } else if (codecType_ == kVideoCodecVP9) {
+ if (key_frame) {
+ gof_idx_ = 0;
+ }
+ info.codecSpecific.VP9.picture_id = picture_id_;
+      info.codecSpecific.VP9.inter_pic_predicted = !key_frame;
+      info.codecSpecific.VP9.flexible_mode = false;
+      info.codecSpecific.VP9.ss_data_available = key_frame;
+ info.codecSpecific.VP9.tl0_pic_idx = tl0_pic_idx_++;
+ info.codecSpecific.VP9.temporal_idx = webrtc::kNoTemporalIdx;
+ info.codecSpecific.VP9.spatial_idx = webrtc::kNoSpatialIdx;
+ info.codecSpecific.VP9.temporal_up_switch = true;
+ info.codecSpecific.VP9.inter_layer_predicted = false;
+ info.codecSpecific.VP9.gof_idx =
+ static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ info.codecSpecific.VP9.num_spatial_layers = 1;
+ info.codecSpecific.VP9.spatial_layer_resolution_present = false;
+ if (info.codecSpecific.VP9.ss_data_available) {
+ info.codecSpecific.VP9.spatial_layer_resolution_present = true;
+ info.codecSpecific.VP9.width[0] = width_;
+ info.codecSpecific.VP9.height[0] = height_;
+ info.codecSpecific.VP9.gof.CopyGofInfoVP9(gof_);
+ }
}
+ picture_id_ = (picture_id_ + 1) & 0x7FFF;
// Generate a header describing a single fragment.
webrtc::RTPFragmentationHeader header;
memset(&header, 0, sizeof(header));
- if (codecType_ == kVideoCodecVP8) {
+ if (codecType_ == kVideoCodecVP8 || codecType_ == kVideoCodecVP9) {
header.VerifyAndAllocateFragmentationHeader(1);
header.fragmentationOffset[0] = 0;
header.fragmentationLength[0] = image->_length;
header.fragmentationPlType[0] = 0;
header.fragmentationTimeDiff[0] = 0;
- if (scale_) {
+ if (codecType_ == kVideoCodecVP8 && scale_) {
int qp;
if (webrtc::vp8::GetQp(payload, payload_size, &qp))
quality_scaler_.ReportQP(qp);
@@ -829,7 +995,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
ALOGE << "Data:" << image->_buffer[0] << " " << image->_buffer[1]
<< " " << image->_buffer[2] << " " << image->_buffer[3]
<< " " << image->_buffer[4] << " " << image->_buffer[5];
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
scPositions[scPositionsLength] = payload_size;
@@ -852,7 +1018,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
output_buffer_index);
CHECK_EXCEPTION(jni);
if (!success) {
- ResetCodec();
+ ResetCodecOnCodecThread();
return false;
}
@@ -907,7 +1073,12 @@ int MediaCodecVideoEncoder::GetTargetFramerate() {
return scale_ ? quality_scaler_.GetTargetFramerate() : -1;
}
-MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
+const char* MediaCodecVideoEncoder::ImplementationName() const {
+ return "MediaCodec";
+}
+
+MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory()
+ : egl_context_(nullptr) {
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedLocalRefFrame local_ref_frame(jni);
jclass j_encoder_class = FindClass(jni, "org/webrtc/MediaCodecVideoEncoder");
@@ -923,6 +1094,16 @@ MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT, MAX_VIDEO_FPS));
}
+ bool is_vp9_hw_supported = jni->CallStaticBooleanMethod(
+ j_encoder_class,
+ GetStaticMethodID(jni, j_encoder_class, "isVp9HwSupported", "()Z"));
+ CHECK_EXCEPTION(jni);
+ if (is_vp9_hw_supported) {
+ ALOGD << "VP9 HW Encoder supported.";
+ supported_codecs_.push_back(VideoCodec(kVideoCodecVP9, "VP9",
+ MAX_VIDEO_WIDTH, MAX_VIDEO_HEIGHT, MAX_VIDEO_FPS));
+ }
+
bool is_h264_hw_supported = jni->CallStaticBooleanMethod(
j_encoder_class,
GetStaticMethodID(jni, j_encoder_class, "isH264HwSupported", "()Z"));
@@ -936,9 +1117,37 @@ MediaCodecVideoEncoderFactory::MediaCodecVideoEncoderFactory() {
MediaCodecVideoEncoderFactory::~MediaCodecVideoEncoderFactory() {}
+void MediaCodecVideoEncoderFactory::SetEGLContext(
+ JNIEnv* jni, jobject render_egl_context) {
+ ALOGD << "MediaCodecVideoEncoderFactory::SetEGLContext";
+ if (egl_context_) {
+ jni->DeleteGlobalRef(egl_context_);
+ egl_context_ = NULL;
+ }
+ if (!IsNull(jni, render_egl_context)) {
+ egl_context_ = jni->NewGlobalRef(render_egl_context);
+ if (CheckException(jni)) {
+      ALOGE << "Error calling NewGlobalRef for EGL Context.";
+ egl_context_ = NULL;
+ } else {
+ jclass j_egl_context_class =
+ FindClass(jni, "org/webrtc/EglBase14$Context");
+ if (!jni->IsInstanceOf(egl_context_, j_egl_context_class)) {
+ ALOGE << "Wrong EGL Context.";
+ jni->DeleteGlobalRef(egl_context_);
+ egl_context_ = NULL;
+ }
+ }
+ }
+ if (egl_context_ == NULL) {
+    ALOGW << "NULL VideoEncoder EGL context - HW surface encoding is disabled.";
+ }
+}
+
webrtc::VideoEncoder* MediaCodecVideoEncoderFactory::CreateVideoEncoder(
VideoCodecType type) {
if (supported_codecs_.empty()) {
+ ALOGW << "No HW video encoder for type " << (int)type;
return NULL;
}
for (std::vector<VideoCodec>::const_iterator it = supported_codecs_.begin();
@@ -946,9 +1155,11 @@ webrtc::VideoEncoder* MediaCodecVideoEncoderFactory::CreateVideoEncoder(
if (it->type == type) {
ALOGD << "Create HW video encoder for type " << (int)type <<
" (" << it->name << ").";
- return new MediaCodecVideoEncoder(AttachCurrentThreadIfNeeded(), type);
+ return new MediaCodecVideoEncoder(AttachCurrentThreadIfNeeded(), type,
+ egl_context_);
}
}
+ ALOGW << "Can not find HW video encoder for type " << (int)type;
return NULL;
}
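Putting the new encoder-factory surface together: construct, optionally hand it an EGL context, then create per-codec encoders. A hedged usage sketch, where jni and j_egl_context are assumed locals and the latter is a Java org.webrtc.EglBase14$Context reference:

    MediaCodecVideoEncoderFactory factory;
    factory.SetEGLContext(jni, j_egl_context);  // NULL disables surface input.
    webrtc::VideoEncoder* encoder =
        factory.CreateVideoEncoder(webrtc::kVideoCodecVP8);
    if (encoder == NULL) {
      // No hardware support for this codec; fall back to software.
    }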
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.h b/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
index ff124aa146..8ff8164c3b 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.h
@@ -43,6 +43,8 @@ class MediaCodecVideoEncoderFactory
MediaCodecVideoEncoderFactory();
virtual ~MediaCodecVideoEncoderFactory();
+ void SetEGLContext(JNIEnv* jni, jobject render_egl_context);
+
// WebRtcVideoEncoderFactory implementation.
webrtc::VideoEncoder* CreateVideoEncoder(webrtc::VideoCodecType type)
override;
@@ -50,6 +52,7 @@ class MediaCodecVideoEncoderFactory
void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) override;
private:
+ jobject egl_context_;
// Empty if platform support is lacking, const after ctor returns.
std::vector<VideoCodec> supported_codecs_;
};
diff --git a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
index 02b9f22015..8813c89de4 100644
--- a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
+++ b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
@@ -29,8 +29,9 @@
#include "talk/app/webrtc/java/jni/androidvideocapturer_jni.h"
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/surfacetexturehelper_jni.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
#include "webrtc/base/bind.h"
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
namespace webrtc_jni {
@@ -47,15 +48,19 @@ int AndroidVideoCapturerJni::SetAndroidObjects(JNIEnv* jni,
return 0;
}
-AndroidVideoCapturerJni::AndroidVideoCapturerJni(JNIEnv* jni,
- jobject j_video_capturer)
- : j_capturer_global_(jni, j_video_capturer),
+AndroidVideoCapturerJni::AndroidVideoCapturerJni(
+ JNIEnv* jni,
+ jobject j_video_capturer,
+ jobject j_surface_texture_helper)
+ : j_video_capturer_(jni, j_video_capturer),
j_video_capturer_class_(
jni, FindClass(jni, "org/webrtc/VideoCapturerAndroid")),
j_observer_class_(
jni,
FindClass(jni,
"org/webrtc/VideoCapturerAndroid$NativeObserver")),
+ surface_texture_helper_(new rtc::RefCountedObject<SurfaceTextureHelper>(
+ jni, j_surface_texture_helper)),
capturer_(nullptr) {
LOG(LS_INFO) << "AndroidVideoCapturerJni ctor";
thread_checker_.DetachFromThread();
@@ -64,7 +69,7 @@ AndroidVideoCapturerJni::AndroidVideoCapturerJni(JNIEnv* jni,
AndroidVideoCapturerJni::~AndroidVideoCapturerJni() {
LOG(LS_INFO) << "AndroidVideoCapturerJni dtor";
jni()->CallVoidMethod(
- *j_capturer_global_,
+ *j_video_capturer_,
GetMethodID(jni(), *j_video_capturer_class_, "release", "()V"));
CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.release()";
}
@@ -90,7 +95,7 @@ void AndroidVideoCapturerJni::Start(int width, int height, int framerate,
jni(), *j_video_capturer_class_, "startCapture",
"(IIILandroid/content/Context;"
"Lorg/webrtc/VideoCapturerAndroid$CapturerObserver;)V");
- jni()->CallVoidMethod(*j_capturer_global_,
+ jni()->CallVoidMethod(*j_video_capturer_,
m, width, height,
framerate,
application_context_,
@@ -109,7 +114,7 @@ void AndroidVideoCapturerJni::Stop() {
}
jmethodID m = GetMethodID(jni(), *j_video_capturer_class_,
"stopCapture", "()V");
- jni()->CallVoidMethod(*j_capturer_global_, m);
+ jni()->CallVoidMethod(*j_video_capturer_, m);
CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.stopCapture";
LOG(LS_INFO) << "AndroidVideoCapturerJni stop done";
}
@@ -127,19 +132,12 @@ void AndroidVideoCapturerJni::AsyncCapturerInvoke(
invoker_->AsyncInvoke<void>(rtc::Bind(method, capturer_, args...));
}
-void AndroidVideoCapturerJni::ReturnBuffer(int64_t time_stamp) {
- jmethodID m = GetMethodID(jni(), *j_video_capturer_class_,
- "returnBuffer", "(J)V");
- jni()->CallVoidMethod(*j_capturer_global_, m, time_stamp);
- CHECK_EXCEPTION(jni()) << "error during VideoCapturerAndroid.returnBuffer";
-}
-
std::string AndroidVideoCapturerJni::GetSupportedFormats() {
jmethodID m =
GetMethodID(jni(), *j_video_capturer_class_,
"getSupportedFormatsAsJson", "()Ljava/lang/String;");
jstring j_json_caps =
- (jstring) jni()->CallObjectMethod(*j_capturer_global_, m);
+ (jstring) jni()->CallObjectMethod(*j_video_capturer_, m);
CHECK_EXCEPTION(jni()) << "error during supportedFormatsAsJson";
return JavaToStdString(jni(), j_json_caps);
}
@@ -158,46 +156,33 @@ void AndroidVideoCapturerJni::OnMemoryBufferFrame(void* video_frame,
int rotation,
int64_t timestamp_ns) {
const uint8_t* y_plane = static_cast<uint8_t*>(video_frame);
- // Android guarantees that the stride is a multiple of 16.
- // http://developer.android.com/reference/android/hardware/Camera.Parameters.html#setPreviewFormat%28int%29
- int y_stride;
- int uv_stride;
- webrtc::Calc16ByteAlignedStride(width, &y_stride, &uv_stride);
- const uint8_t* v_plane = y_plane + y_stride * height;
- const uint8_t* u_plane =
- v_plane + uv_stride * webrtc::AlignInt(height, 2) / 2;
-
- // Wrap the Java buffer, and call ReturnBuffer() in the wrapped
- // VideoFrameBuffer destructor.
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
- new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
- width, height, y_plane, y_stride, u_plane, uv_stride, v_plane,
- uv_stride,
- rtc::Bind(&AndroidVideoCapturerJni::ReturnBuffer, this,
- timestamp_ns)));
+ const uint8_t* vu_plane = y_plane + width * height;
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ buffer_pool_.CreateBuffer(width, height);
+ libyuv::NV21ToI420(
+ y_plane, width,
+ vu_plane, width,
+ buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
+ buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
+ buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
+ width, height);
AsyncCapturerInvoke("OnIncomingFrame",
&webrtc::AndroidVideoCapturer::OnIncomingFrame,
buffer, rotation, timestamp_ns);
}
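The capture path now copies NV21 camera frames into pooled I420 buffers instead of wrapping the Java-owned memory. The conversion in isolation, under the same assumption the code above makes (a packed NV21 frame with no row padding):

    #include "third_party/libyuv/include/libyuv/convert.h"
    #include "webrtc/common_video/include/i420_buffer_pool.h"

    rtc::scoped_refptr<webrtc::VideoFrameBuffer> Nv21ToI420(
        webrtc::I420BufferPool* pool, const uint8_t* nv21,
        int width, int height) {
      const uint8_t* y_plane = nv21;
      const uint8_t* vu_plane = nv21 + width * height;  // Interleaved V/U.
      rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
          pool->CreateBuffer(width, height);
      libyuv::NV21ToI420(
          y_plane, width,
          vu_plane, width,
          buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
          buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
          buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
          width, height);
      return buffer;
    }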
-void AndroidVideoCapturerJni::OnTextureFrame(
- int width,
- int height,
- int64_t timestamp_ns,
- const NativeTextureHandleImpl& handle) {
- // TODO(magjed): Fix this. See bug webrtc:4993.
- RTC_NOTREACHED()
- << "The rest of the stack for Android expects the native "
- "handle to be a NativeHandleImpl with a SurfaceTexture, not a "
- "NativeTextureHandleImpl";
+void AndroidVideoCapturerJni::OnTextureFrame(int width,
+ int height,
+ int rotation,
+ int64_t timestamp_ns,
+ const NativeHandleImpl& handle) {
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
- new rtc::RefCountedObject<AndroidTextureBuffer>(
- width, height, handle,
- rtc::Bind(&AndroidVideoCapturerJni::ReturnBuffer, this,
- timestamp_ns)));
+ surface_texture_helper_->CreateTextureFrame(width, height, handle));
+
AsyncCapturerInvoke("OnIncomingFrame",
&webrtc::AndroidVideoCapturer::OnIncomingFrame,
- buffer, 0, timestamp_ns);
+ buffer, rotation, timestamp_ns);
}
void AndroidVideoCapturerJni::OnOutputFormatRequest(int width,
@@ -216,13 +201,6 @@ JOW(void,
jint width, jint height, jint rotation, jlong timestamp) {
jboolean is_copy = true;
jbyte* bytes = jni->GetByteArrayElements(j_frame, &is_copy);
- // If this is a copy of the original frame, it means that the memory
- // is not direct memory and thus VideoCapturerAndroid does not guarantee
- // that the memory is valid when we have released |j_frame|.
- // TODO(magjed): Move ReleaseByteArrayElements() into ReturnBuffer() and
- // remove this check.
- RTC_CHECK(!is_copy)
- << "NativeObserver_nativeOnFrameCaptured: frame is a copy";
reinterpret_cast<AndroidVideoCapturerJni*>(j_capturer)
->OnMemoryBufferFrame(bytes, length, width, height, rotation, timestamp);
jni->ReleaseByteArrayElements(j_frame, bytes, JNI_ABORT);
@@ -231,11 +209,11 @@ JOW(void,
JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeOnTextureFrameCaptured)
(JNIEnv* jni, jclass, jlong j_capturer, jint j_width, jint j_height,
jint j_oes_texture_id, jfloatArray j_transform_matrix,
- jlong j_timestamp) {
+ jint j_rotation, jlong j_timestamp) {
reinterpret_cast<AndroidVideoCapturerJni*>(j_capturer)
- ->OnTextureFrame(j_width, j_height, j_timestamp,
- NativeTextureHandleImpl(jni, j_oes_texture_id,
- j_transform_matrix));
+ ->OnTextureFrame(j_width, j_height, j_rotation, j_timestamp,
+ NativeHandleImpl(jni, j_oes_texture_id,
+ j_transform_matrix));
}
JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeCapturerStarted)
@@ -254,9 +232,11 @@ JOW(void, VideoCapturerAndroid_00024NativeObserver_nativeOnOutputFormatRequest)
}
JOW(jlong, VideoCapturerAndroid_nativeCreateVideoCapturer)
- (JNIEnv* jni, jclass, jobject j_video_capturer) {
+ (JNIEnv* jni, jclass,
+ jobject j_video_capturer, jobject j_surface_texture_helper) {
rtc::scoped_refptr<webrtc::AndroidVideoCapturerDelegate> delegate =
- new rtc::RefCountedObject<AndroidVideoCapturerJni>(jni, j_video_capturer);
+ new rtc::RefCountedObject<AndroidVideoCapturerJni>(
+ jni, j_video_capturer, j_surface_texture_helper);
rtc::scoped_ptr<cricket::VideoCapturer> capturer(
new webrtc::AndroidVideoCapturer(delegate));
// Caller takes ownership of the cricket::VideoCapturer* pointer.
diff --git a/talk/app/webrtc/java/jni/androidvideocapturer_jni.h b/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
index d1eb3a0ad0..89ecacb3a5 100644
--- a/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
+++ b/talk/app/webrtc/java/jni/androidvideocapturer_jni.h
@@ -36,10 +36,12 @@
#include "webrtc/base/asyncinvoker.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_checker.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
namespace webrtc_jni {
-class NativeTextureHandleImpl;
+struct NativeHandleImpl;
+class SurfaceTextureHelper;
// AndroidVideoCapturerJni implements AndroidVideoCapturerDelegate.
// The purpose of the delegate is to hide the JNI specifics from the C++ only
@@ -48,7 +50,9 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
public:
static int SetAndroidObjects(JNIEnv* jni, jobject appliction_context);
- AndroidVideoCapturerJni(JNIEnv* jni, jobject j_video_capturer);
+ AndroidVideoCapturerJni(JNIEnv* jni,
+ jobject j_video_capturer,
+ jobject j_surface_texture_helper);
void Start(int width, int height, int framerate,
webrtc::AndroidVideoCapturer* capturer) override;
@@ -60,15 +64,14 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
void OnCapturerStarted(bool success);
void OnMemoryBufferFrame(void* video_frame, int length, int width,
int height, int rotation, int64_t timestamp_ns);
- void OnTextureFrame(int width, int height, int64_t timestamp_ns,
- const NativeTextureHandleImpl& handle);
+ void OnTextureFrame(int width, int height, int rotation, int64_t timestamp_ns,
+ const NativeHandleImpl& handle);
void OnOutputFormatRequest(int width, int height, int fps);
protected:
~AndroidVideoCapturerJni();
private:
- void ReturnBuffer(int64_t time_stamp);
JNIEnv* jni();
// To avoid deducing Args from the 3rd parameter of AsyncCapturerInvoke.
@@ -85,10 +88,13 @@ class AndroidVideoCapturerJni : public webrtc::AndroidVideoCapturerDelegate {
void (webrtc::AndroidVideoCapturer::*method)(Args...),
typename Identity<Args>::type... args);
- const ScopedGlobalRef<jobject> j_capturer_global_;
+ const ScopedGlobalRef<jobject> j_video_capturer_;
const ScopedGlobalRef<jclass> j_video_capturer_class_;
const ScopedGlobalRef<jclass> j_observer_class_;
+ // Used on the Java thread running the camera.
+ webrtc::I420BufferPool buffer_pool_;
+ rtc::scoped_refptr<SurfaceTextureHelper> surface_texture_helper_;
rtc::ThreadChecker thread_checker_;
// |capturer| is a guaranteed to be a valid pointer between a call to
diff --git a/talk/app/webrtc/java/jni/classreferenceholder.cc b/talk/app/webrtc/java/jni/classreferenceholder.cc
index 4c836f8252..5fe8ec707c 100644
--- a/talk/app/webrtc/java/jni/classreferenceholder.cc
+++ b/talk/app/webrtc/java/jni/classreferenceholder.cc
@@ -72,20 +72,21 @@ ClassReferenceHolder::ClassReferenceHolder(JNIEnv* jni) {
LoadClass(jni, "org/webrtc/IceCandidate");
#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
LoadClass(jni, "android/graphics/SurfaceTexture");
- LoadClass(jni, "javax/microedition/khronos/egl/EGLContext");
LoadClass(jni, "org/webrtc/CameraEnumerator");
LoadClass(jni, "org/webrtc/Camera2Enumerator");
LoadClass(jni, "org/webrtc/CameraEnumerationAndroid");
LoadClass(jni, "org/webrtc/VideoCapturerAndroid");
LoadClass(jni, "org/webrtc/VideoCapturerAndroid$NativeObserver");
LoadClass(jni, "org/webrtc/EglBase");
+ LoadClass(jni, "org/webrtc/EglBase$Context");
+ LoadClass(jni, "org/webrtc/EglBase14$Context");
LoadClass(jni, "org/webrtc/NetworkMonitor");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder$VideoCodecType");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedTextureBuffer");
- LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedByteBuffer");
+ LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecodedOutputBuffer");
LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$VideoCodecType");
LoadClass(jni, "org/webrtc/SurfaceTextureHelper");
#endif
diff --git a/talk/app/webrtc/java/jni/jni_helpers.cc b/talk/app/webrtc/java/jni/jni_helpers.cc
index 755698e379..3a7ff21e77 100644
--- a/talk/app/webrtc/java/jni/jni_helpers.cc
+++ b/talk/app/webrtc/java/jni/jni_helpers.cc
@@ -1,4 +1,3 @@
-
/*
* libjingle
* Copyright 2015 Google Inc.
@@ -33,8 +32,6 @@
#include <sys/syscall.h>
#include <unistd.h>
-#include "unicode/unistr.h"
-
namespace webrtc_jni {
static JavaVM* g_jvm = nullptr;
@@ -46,8 +43,6 @@ static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
// were attached by the JVM because of a Java->native call.
static pthread_key_t g_jni_ptr;
-using icu::UnicodeString;
-
JavaVM *GetJVM() {
RTC_CHECK(g_jvm) << "JNI_OnLoad failed to run?";
return g_jvm;
@@ -232,22 +227,20 @@ bool IsNull(JNIEnv* jni, jobject obj) {
// Given a UTF-8 encoded |native| string return a new (UTF-16) jstring.
jstring JavaStringFromStdString(JNIEnv* jni, const std::string& native) {
- UnicodeString ustr(UnicodeString::fromUTF8(native));
- jstring jstr = jni->NewString(ustr.getBuffer(), ustr.length());
- CHECK_EXCEPTION(jni) << "error during NewString";
+ jstring jstr = jni->NewStringUTF(native.c_str());
+ CHECK_EXCEPTION(jni) << "error during NewStringUTF";
return jstr;
}
// Given a (UTF-16) jstring return a new UTF-8 native string.
std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
- const jchar* jchars = jni->GetStringChars(j_string, NULL);
- CHECK_EXCEPTION(jni) << "Error during GetStringChars";
- UnicodeString ustr(jchars, jni->GetStringLength(j_string));
- CHECK_EXCEPTION(jni) << "Error during GetStringLength";
- jni->ReleaseStringChars(j_string, jchars);
- CHECK_EXCEPTION(jni) << "Error during ReleaseStringChars";
- std::string ret;
- return ustr.toUTF8String(ret);
+ const char* chars = jni->GetStringUTFChars(j_string, NULL);
+ CHECK_EXCEPTION(jni) << "Error during GetStringUTFChars";
+ std::string str(chars, jni->GetStringUTFLength(j_string));
+ CHECK_EXCEPTION(jni) << "Error during GetStringUTFLength";
+ jni->ReleaseStringUTFChars(j_string, chars);
+ CHECK_EXCEPTION(jni) << "Error during ReleaseStringUTFChars";
+ return str;
}
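The ICU-free conversions above rely on JNI's "modified UTF-8", which matches standard UTF-8 for everything SDP and stats strings normally contain but diverges for embedded U+0000 and for supplementary (4-byte) characters. A round-trip sketch under that assumption:

    // Safe for BMP text without embedded NULs; supplementary characters
    // would need the UTF-16 GetStringChars() path instead.
    std::string RoundTrip(JNIEnv* jni, const std::string& utf8) {
      jstring j_str = jni->NewStringUTF(utf8.c_str());
      const char* chars = jni->GetStringUTFChars(j_str, NULL);
      std::string back(chars, jni->GetStringUTFLength(j_str));
      jni->ReleaseStringUTFChars(j_str, chars);
      return back;
    }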
// Return the (singleton) Java Enum object corresponding to |index|;
diff --git a/talk/app/webrtc/java/jni/jni_onload.cc b/talk/app/webrtc/java/jni/jni_onload.cc
new file mode 100644
index 0000000000..9664ecdca6
--- /dev/null
+++ b/talk/app/webrtc/java/jni/jni_onload.cc
@@ -0,0 +1,55 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <jni.h>
+#undef JNIEXPORT
+#define JNIEXPORT __attribute__((visibility("default")))
+
+#include "talk/app/webrtc/java/jni/classreferenceholder.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/ssladapter.h"
+
+namespace webrtc_jni {
+
+extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+ jint ret = InitGlobalJniVariables(jvm);
+ RTC_DCHECK_GE(ret, 0);
+ if (ret < 0)
+ return -1;
+
+ RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
+ LoadGlobalClassReferenceHolder();
+
+ return ret;
+}
+
+extern "C" void JNIEXPORT JNICALL JNI_OnUnload(JavaVM *jvm, void *reserved) {
+ FreeGlobalClassReferenceHolder();
+ RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
+}
+
+} // namespace webrtc_jni
diff --git a/talk/app/webrtc/java/jni/native_handle_impl.cc b/talk/app/webrtc/java/jni/native_handle_impl.cc
index ac3e0455df..1757184154 100644
--- a/talk/app/webrtc/java/jni/native_handle_impl.cc
+++ b/talk/app/webrtc/java/jni/native_handle_impl.cc
@@ -27,14 +27,65 @@
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/logging.h"
+
+using webrtc::NativeHandleBuffer;
+
+namespace {
+
+void RotateMatrix(float a[16], webrtc::VideoRotation rotation) {
+  // Texture coordinates are in the range 0 to 1. The transformation of the
+  // last row in each rotation matrix is needed for proper translation, e.g.,
+  // to mirror x, we don't replace x by -x, but by 1-x.
+ switch (rotation) {
+ case webrtc::kVideoRotation_0:
+ break;
+ case webrtc::kVideoRotation_90: {
+ const float ROTATE_90[16] =
+ { a[4], a[5], a[6], a[7],
+ -a[0], -a[1], -a[2], -a[3],
+ a[8], a[9], a[10], a[11],
+ a[0] + a[12], a[1] + a[13], a[2] + a[14], a[3] + a[15]};
+ memcpy(a, ROTATE_90, sizeof(ROTATE_90));
+ } break;
+ case webrtc::kVideoRotation_180: {
+ const float ROTATE_180[16] =
+ { -a[0], -a[1], -a[2], -a[3],
+ -a[4], -a[5], -a[6], -a[7],
+ a[8], a[9], a[10], a[11],
+          a[0] + a[4] + a[12], a[1] + a[5] + a[13], a[2] + a[6] + a[14],
+          a[3] + a[7] + a[15]};
+ memcpy(a, ROTATE_180, sizeof(ROTATE_180));
+ }
+ break;
+ case webrtc::kVideoRotation_270: {
+ const float ROTATE_270[16] =
+ { -a[4], -a[5], -a[6], -a[7],
+ a[0], a[1], a[2], a[3],
+ a[8], a[9], a[10], a[11],
+ a[4] + a[12], a[5] + a[13], a[6] + a[14], a[7] + a[15]};
+ memcpy(a, ROTATE_270, sizeof(ROTATE_270));
+ } break;
+ }
+}
+
+}  // anonymous namespace
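The matrices are column-major (OpenGL convention), so a quick sanity check for RotateMatrix() is to push a texture-space point through the result. With the identity rotated by kVideoRotation_90, (x, y) should map to (1 - y, x):

    // Column-major 4x4 multiply restricted to points (x, y, 0, 1); test-only.
    void MapPoint(const float m[16], float x, float y, float* ox, float* oy) {
      *ox = m[0] * x + m[4] * y + m[12];
      *oy = m[1] * x + m[5] * y + m[13];
    }
    // Identity rotated 90 degrees maps the unit square's corners:
    //   (0, 0) -> (1, 0), (1, 0) -> (1, 1), (1, 1) -> (0, 1).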
namespace webrtc_jni {
-NativeTextureHandleImpl::NativeTextureHandleImpl(JNIEnv* jni,
- jint j_oes_texture_id,
- jfloatArray j_transform_matrix)
- : oes_texture_id(j_oes_texture_id) {
+// Aligning pointer to 64 bytes for improved performance, e.g. use SIMD.
+static const int kBufferAlignment = 64;
+
+NativeHandleImpl::NativeHandleImpl(JNIEnv* jni,
+ jint j_oes_texture_id,
+ jfloatArray j_transform_matrix)
+ : oes_texture_id(j_oes_texture_id) {
RTC_CHECK_EQ(16, jni->GetArrayLength(j_transform_matrix));
jfloat* transform_matrix_ptr =
jni->GetFloatArrayElements(j_transform_matrix, nullptr);
@@ -44,41 +95,15 @@ NativeTextureHandleImpl::NativeTextureHandleImpl(JNIEnv* jni,
jni->ReleaseFloatArrayElements(j_transform_matrix, transform_matrix_ptr, 0);
}
-NativeHandleImpl::NativeHandleImpl() : texture_object_(NULL), texture_id_(-1) {}
-
-void* NativeHandleImpl::GetHandle() {
- return texture_object_;
-}
-
-int NativeHandleImpl::GetTextureId() {
- return texture_id_;
-}
-
-void NativeHandleImpl::SetTextureObject(void* texture_object, int texture_id) {
- texture_object_ = reinterpret_cast<jobject>(texture_object);
- texture_id_ = texture_id;
-}
-
-JniNativeHandleBuffer::JniNativeHandleBuffer(void* native_handle,
- int width,
- int height)
- : NativeHandleBuffer(native_handle, width, height) {}
-
-rtc::scoped_refptr<webrtc::VideoFrameBuffer>
-JniNativeHandleBuffer::NativeToI420Buffer() {
- // TODO(pbos): Implement before using this in the encoder pipeline (or
- // remove the RTC_CHECK() in VideoCapture).
- RTC_NOTREACHED();
- return nullptr;
-}
-
AndroidTextureBuffer::AndroidTextureBuffer(
int width,
int height,
- const NativeTextureHandleImpl& native_handle,
+ const NativeHandleImpl& native_handle,
+ jobject surface_texture_helper,
const rtc::Callback0<void>& no_longer_used)
: webrtc::NativeHandleBuffer(&native_handle_, width, height),
native_handle_(native_handle),
+ surface_texture_helper_(surface_texture_helper),
no_longer_used_cb_(no_longer_used) {}
AndroidTextureBuffer::~AndroidTextureBuffer() {
@@ -87,9 +112,75 @@ AndroidTextureBuffer::~AndroidTextureBuffer() {
rtc::scoped_refptr<webrtc::VideoFrameBuffer>
AndroidTextureBuffer::NativeToI420Buffer() {
- RTC_NOTREACHED()
- << "AndroidTextureBuffer::NativeToI420Buffer not implemented.";
- return nullptr;
+  int uv_width = (width() + 7) / 8;
+  int stride = 8 * uv_width;
+  int uv_height = (height() + 1) / 2;
+ size_t size = stride * (height() + uv_height);
+ // The data is owned by the frame, and the normal case is that the
+ // data is deleted by the frame's destructor callback.
+ //
+ // TODO(nisse): Use an I420BufferPool. We then need to extend that
+ // class, and I420Buffer, to support our memory layout.
+ rtc::scoped_ptr<uint8_t, webrtc::AlignedFreeDeleter> yuv_data(
+ static_cast<uint8_t*>(webrtc::AlignedMalloc(size, kBufferAlignment)));
+ // See SurfaceTextureHelper.java for the required layout.
+ uint8_t* y_data = yuv_data.get();
+ uint8_t* u_data = y_data + height() * stride;
+  uint8_t* v_data = u_data + stride / 2;
+
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> copy =
+ new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
+ width(), height(),
+ y_data, stride,
+ u_data, stride,
+ v_data, stride,
+ rtc::Bind(&webrtc::AlignedFree, yuv_data.release()));
+
+ JNIEnv* jni = AttachCurrentThreadIfNeeded();
+ ScopedLocalRefFrame local_ref_frame(jni);
+
+ jmethodID transform_mid = GetMethodID(
+ jni,
+ GetObjectClass(jni, surface_texture_helper_),
+ "textureToYUV",
+ "(Ljava/nio/ByteBuffer;IIII[F)V");
+
+ jobject byte_buffer = jni->NewDirectByteBuffer(y_data, size);
+
+ // TODO(nisse): Keep java transform matrix around.
+ jfloatArray sampling_matrix = jni->NewFloatArray(16);
+ jni->SetFloatArrayRegion(sampling_matrix, 0, 16,
+ native_handle_.sampling_matrix);
+
+ jni->CallVoidMethod(surface_texture_helper_,
+ transform_mid,
+ byte_buffer, width(), height(), stride,
+ native_handle_.oes_texture_id, sampling_matrix);
+  CHECK_EXCEPTION(jni) << "textureToYUV threw an exception";
+
+ return copy;
+}
+
+rtc::scoped_refptr<AndroidTextureBuffer>
+AndroidTextureBuffer::ScaleAndRotate(int dst_width,
+                                     int dst_height,
+                                     webrtc::VideoRotation rotation) {
+  if (width() == dst_width && height() == dst_height &&
+      rotation == webrtc::kVideoRotation_0) {
+    return this;
+  }
+  int rotated_width = (rotation % 180 == 0) ? dst_width : dst_height;
+  int rotated_height = (rotation % 180 == 0) ? dst_height : dst_width;
+
+  // Here we use Bind magic to add a reference count to |this| until the
+  // newly created AndroidTextureBuffer is destroyed.
+ rtc::scoped_refptr<AndroidTextureBuffer> buffer(
+ new rtc::RefCountedObject<AndroidTextureBuffer>(
+ rotated_width, rotated_height, native_handle_,
+ surface_texture_helper_, rtc::KeepRefUntilDone(this)));
+
+ RotateMatrix(buffer->native_handle_.sampling_matrix, rotation);
+ return buffer;
}
} // namespace webrtc_jni
diff --git a/talk/app/webrtc/java/jni/native_handle_impl.h b/talk/app/webrtc/java/jni/native_handle_impl.h
index dd04bc20b1..1d0f601d0d 100644
--- a/talk/app/webrtc/java/jni/native_handle_impl.h
+++ b/talk/app/webrtc/java/jni/native_handle_impl.h
@@ -31,56 +31,44 @@
#include <jni.h>
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
+#include "webrtc/common_video/rotation.h"
namespace webrtc_jni {
// Wrapper for texture object.
-struct NativeTextureHandleImpl {
- NativeTextureHandleImpl(JNIEnv* jni,
- jint j_oes_texture_id,
- jfloatArray j_transform_matrix);
+struct NativeHandleImpl {
+ NativeHandleImpl(JNIEnv* jni,
+ jint j_oes_texture_id,
+ jfloatArray j_transform_matrix);
const int oes_texture_id;
float sampling_matrix[16];
};
-// Native handle for SurfaceTexture + texture id.
-class NativeHandleImpl {
- public:
- NativeHandleImpl();
-
- void* GetHandle();
- int GetTextureId();
- void SetTextureObject(void* texture_object, int texture_id);
-
- private:
- jobject texture_object_;
- int32_t texture_id_;
-};
-
-class JniNativeHandleBuffer : public webrtc::NativeHandleBuffer {
- public:
- JniNativeHandleBuffer(void* native_handle, int width, int height);
-
- // TODO(pbos): Override destructor to release native handle, at the moment the
- // native handle is not released based on refcount.
-
- private:
- rtc::scoped_refptr<webrtc::VideoFrameBuffer> NativeToI420Buffer() override;
-};
-
class AndroidTextureBuffer : public webrtc::NativeHandleBuffer {
public:
AndroidTextureBuffer(int width,
int height,
- const NativeTextureHandleImpl& native_handle,
+ const NativeHandleImpl& native_handle,
+ jobject surface_texture_helper,
const rtc::Callback0<void>& no_longer_used);
~AndroidTextureBuffer();
rtc::scoped_refptr<VideoFrameBuffer> NativeToI420Buffer() override;
+ rtc::scoped_refptr<AndroidTextureBuffer> ScaleAndRotate(
+      int dst_width,
+ int dst_height,
+ webrtc::VideoRotation rotation);
+
private:
- NativeTextureHandleImpl native_handle_;
+ NativeHandleImpl native_handle_;
+ // Raw object pointer, relying on the caller, i.e.,
+ // AndroidVideoCapturerJni or the C++ SurfaceTextureHelper, to keep
+ // a global reference. TODO(nisse): Make this a reference to the C++
+ // SurfaceTextureHelper instead, but that requires some refactoring
+ // of AndroidVideoCapturerJni.
+ jobject surface_texture_helper_;
rtc::Callback0<void> no_longer_used_cb_;
};
diff --git a/talk/app/webrtc/java/jni/peerconnection_jni.cc b/talk/app/webrtc/java/jni/peerconnection_jni.cc
index e75cd553b6..5ea63f74ae 100644
--- a/talk/app/webrtc/java/jni/peerconnection_jni.cc
+++ b/talk/app/webrtc/java/jni/peerconnection_jni.cc
@@ -57,6 +57,7 @@
#define JNIEXPORT __attribute__((visibility("default")))
#include <limits>
+#include <utility>
#include "talk/app/webrtc/java/jni/classreferenceholder.h"
#include "talk/app/webrtc/java/jni/jni_helpers.h"
@@ -74,10 +75,11 @@
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/event_tracer.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/logsinks.h"
-#include "webrtc/base/networkmonitor.h"
#include "webrtc/base/messagequeue.h"
+#include "webrtc/base/networkmonitor.h"
#include "webrtc/base/ssladapter.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/system_wrappers/include/field_trial_default.h"
@@ -141,22 +143,6 @@ static bool factory_static_initialized = false;
static bool video_hw_acceleration_enabled = true;
#endif
-extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
- jint ret = InitGlobalJniVariables(jvm);
- if (ret < 0)
- return -1;
-
- RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
- LoadGlobalClassReferenceHolder();
-
- return ret;
-}
-
-extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM *jvm, void *reserved) {
- FreeGlobalClassReferenceHolder();
- RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
-}
-
// Return the (singleton) Java Enum object corresponding to |index|;
// |state_class_fragment| is something like "MediaSource$State".
static jobject JavaEnumFromIndex(
@@ -545,7 +531,7 @@ class SdpObserverWrapper : public T {
protected:
// Common implementation for failure of Set & Create types, distinguished by
// |op| being "Set" or "Create".
- void OnFailure(const std::string& op, const std::string& error) {
+ void DoOnFailure(const std::string& op, const std::string& error) {
jmethodID m = GetMethodID(jni(), *j_observer_class_, "on" + op + "Failure",
"(Ljava/lang/String;)V");
jstring j_error_string = JavaStringFromStdString(jni(), error);
@@ -572,7 +558,7 @@ class CreateSdpObserverWrapper
void OnFailure(const std::string& error) override {
ScopedLocalRefFrame local_ref_frame(jni());
- SdpObserverWrapper::OnFailure(std::string("Create"), error);
+ SdpObserverWrapper::DoOnFailure(std::string("Create"), error);
}
};
@@ -585,7 +571,7 @@ class SetSdpObserverWrapper
void OnFailure(const std::string& error) override {
ScopedLocalRefFrame local_ref_frame(jni());
- SdpObserverWrapper::OnFailure(std::string("Set"), error);
+ SdpObserverWrapper::DoOnFailure(std::string("Set"), error);
}
};
@@ -773,7 +759,7 @@ class JavaVideoRendererWrapper : public VideoRendererInterface {
jni, *j_frame_class_, "<init>", "(III[I[Ljava/nio/ByteBuffer;J)V")),
j_texture_frame_ctor_id_(GetMethodID(
jni, *j_frame_class_, "<init>",
- "(IIILjava/lang/Object;IJ)V")),
+ "(IIII[FJ)V")),
j_byte_buffer_class_(jni, FindClass(jni, "java/nio/ByteBuffer")) {
CHECK_EXCEPTION(jni);
}
@@ -829,13 +815,13 @@ class JavaVideoRendererWrapper : public VideoRendererInterface {
jobject CricketToJavaTextureFrame(const cricket::VideoFrame* frame) {
NativeHandleImpl* handle =
reinterpret_cast<NativeHandleImpl*>(frame->GetNativeHandle());
- jobject texture_object = reinterpret_cast<jobject>(handle->GetHandle());
- int texture_id = handle->GetTextureId();
+ jfloatArray sampling_matrix = jni()->NewFloatArray(16);
+ jni()->SetFloatArrayRegion(sampling_matrix, 0, 16, handle->sampling_matrix);
return jni()->NewObject(
*j_frame_class_, j_texture_frame_ctor_id_,
frame->GetWidth(), frame->GetHeight(),
static_cast<int>(frame->GetVideoRotation()),
- texture_object, texture_id, javaShallowCopy(frame));
+ handle->oes_texture_id, sampling_matrix, javaShallowCopy(frame));
}
JNIEnv* jni() {
@@ -1054,6 +1040,32 @@ JOW(void, PeerConnectionFactory_initializeFieldTrials)(
webrtc::field_trial::InitFieldTrialsFromString(field_trials_init_string);
}
+JOW(void, PeerConnectionFactory_initializeInternalTracer)(JNIEnv* jni, jclass) {
+ rtc::tracing::SetupInternalTracer();
+}
+
+JOW(jboolean, PeerConnectionFactory_startInternalTracingCapture)(
+ JNIEnv* jni, jclass, jstring j_event_tracing_filename) {
+ if (!j_event_tracing_filename)
+ return false;
+
+ const char* init_string =
+ jni->GetStringUTFChars(j_event_tracing_filename, NULL);
+ LOG(LS_INFO) << "Starting internal tracing to: " << init_string;
+ bool ret = rtc::tracing::StartInternalCapture(init_string);
+ jni->ReleaseStringUTFChars(j_event_tracing_filename, init_string);
+ return ret;
+}
+
+JOW(void, PeerConnectionFactory_stopInternalTracingCapture)(
+ JNIEnv* jni, jclass) {
+ rtc::tracing::StopInternalCapture();
+}
+
+JOW(void, PeerConnectionFactory_shutdownInternalTracer)(JNIEnv* jni, jclass) {
+ rtc::tracing::ShutdownInternalTracer();
+}
+
// Helper struct for working around the fact that CreatePeerConnectionFactory()
// comes in two flavors: either entirely automagical (constructing its own
// threads and deleting them on teardown, but no external codec factory support)
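For orientation, a minimal sketch (not part of the patch) of how an application might drive the tracing hooks added above, through the Java wrappers introduced in PeerConnectionFactory.java further down; the trace file path is illustrative:

    // Must run before the PeerConnectionFactory is created, to avoid racing
    // with code that emits trace events.
    PeerConnectionFactory.initializeInternalTracer();
    PeerConnectionFactory.startInternalTracingCapture("/sdcard/webrtc-trace.txt");
    // ... create the factory and run the call ...
    PeerConnectionFactory.stopInternalTracingCapture();
    // Must run after the factory is disposed, to prevent resource leaks.
    PeerConnectionFactory.shutdownInternalTracer();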
@@ -1251,6 +1263,46 @@ JOW(jlong, PeerConnectionFactory_nativeCreateAudioTrack)(
return (jlong)track.release();
}
+JOW(jboolean, PeerConnectionFactory_nativeStartAecDump)(
+ JNIEnv* jni, jclass, jlong native_factory, jint file) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ return factory->StartAecDump(file);
+#else
+ return false;
+#endif
+}
+
+JOW(void, PeerConnectionFactory_nativeStopAecDump)(
+ JNIEnv* jni, jclass, jlong native_factory) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ factory->StopAecDump();
+#endif
+}
+
+JOW(jboolean, PeerConnectionFactory_nativeStartRtcEventLog)(
+ JNIEnv* jni, jclass, jlong native_factory, jint file) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ return factory->StartRtcEventLog(file);
+#else
+ return false;
+#endif
+}
+
+JOW(void, PeerConnectionFactory_nativeStopRtcEventLog)(
+ JNIEnv* jni, jclass, jlong native_factory) {
+#if defined(ANDROID)
+ rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
+ factoryFromJava(native_factory));
+ factory->StopRtcEventLog();
+#endif
+}
+
JOW(void, PeerConnectionFactory_nativeSetOptions)(
JNIEnv* jni, jclass, jlong native_factory, jobject options) {
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
@@ -1292,21 +1344,35 @@ JOW(void, PeerConnectionFactory_nativeSetOptions)(
}
JOW(void, PeerConnectionFactory_nativeSetVideoHwAccelerationOptions)(
- JNIEnv* jni, jclass, jlong native_factory, jobject render_egl_context) {
+ JNIEnv* jni, jclass, jlong native_factory, jobject local_egl_context,
+ jobject remote_egl_context) {
#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
OwnedFactoryAndThreads* owned_factory =
reinterpret_cast<OwnedFactoryAndThreads*>(native_factory);
+
+ jclass j_eglbase14_context_class =
+ FindClass(jni, "org/webrtc/EglBase14$Context");
+
+ MediaCodecVideoEncoderFactory* encoder_factory =
+ static_cast<MediaCodecVideoEncoderFactory*>
+ (owned_factory->encoder_factory());
+ if (encoder_factory &&
+ jni->IsInstanceOf(local_egl_context, j_eglbase14_context_class)) {
+ LOG(LS_INFO) << "Set EGL context for HW encoding.";
+ encoder_factory->SetEGLContext(jni, local_egl_context);
+ }
+
MediaCodecVideoDecoderFactory* decoder_factory =
static_cast<MediaCodecVideoDecoderFactory*>
(owned_factory->decoder_factory());
- if (decoder_factory) {
- LOG(LS_INFO) << "Set EGL context for HW acceleration.";
- decoder_factory->SetEGLContext(jni, render_egl_context);
+ if (decoder_factory &&
+ jni->IsInstanceOf(remote_egl_context, j_eglbase14_context_class)) {
+ LOG(LS_INFO) << "Set EGL context for HW decoding.";
+ decoder_factory->SetEGLContext(jni, remote_egl_context);
}
#endif
}
-
static std::string
GetJavaEnumName(JNIEnv* jni, const std::string& className, jobject j_enum) {
jclass enumClass = FindClass(jni, className.c_str());
@@ -1503,6 +1569,9 @@ static void JavaRTCConfigurationToJsepRTCConfiguration(
jfieldID j_ice_connection_receiving_timeout_id =
GetFieldID(jni, j_rtc_config_class, "iceConnectionReceivingTimeout", "I");
+ jfieldID j_ice_backup_candidate_pair_ping_interval_id = GetFieldID(
+ jni, j_rtc_config_class, "iceBackupCandidatePairPingInterval", "I");
+
jfieldID j_continual_gathering_policy_id =
GetFieldID(jni, j_rtc_config_class, "continualGatheringPolicy",
"Lorg/webrtc/PeerConnection$ContinualGatheringPolicy;");
@@ -1524,6 +1593,8 @@ static void JavaRTCConfigurationToJsepRTCConfiguration(
jni, j_rtc_config, j_audio_jitter_buffer_fast_accelerate_id);
rtc_config->ice_connection_receiving_timeout =
GetIntField(jni, j_rtc_config, j_ice_connection_receiving_timeout_id);
+ rtc_config->ice_backup_candidate_pair_ping_interval = GetIntField(
+ jni, j_rtc_config, j_ice_backup_candidate_pair_ping_interval_id);
rtc_config->continual_gathering_policy =
JavaContinualGatheringPolicyToNativeType(
jni, j_continual_gathering_policy);
@@ -1550,7 +1621,7 @@ JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
rtc::SSLIdentity::Generate(webrtc::kIdentityName, rtc::KT_ECDSA));
if (ssl_identity.get()) {
rtc_config.certificates.push_back(
- rtc::RTCCertificate::Create(ssl_identity.Pass()));
+ rtc::RTCCertificate::Create(std::move(ssl_identity)));
LOG(LS_INFO) << "ECDSA certificate created.";
} else {
// Failing to create certificate should not abort peer connection
@@ -1704,6 +1775,29 @@ JOW(void, PeerConnection_nativeRemoveLocalStream)(
reinterpret_cast<MediaStreamInterface*>(native_stream));
}
+JOW(jobject, PeerConnection_nativeCreateSender)(
+ JNIEnv* jni, jobject j_pc, jstring j_kind, jstring j_stream_id) {
+ jclass j_rtp_sender_class = FindClass(jni, "org/webrtc/RtpSender");
+ jmethodID j_rtp_sender_ctor =
+ GetMethodID(jni, j_rtp_sender_class, "<init>", "(J)V");
+
+ std::string kind = JavaToStdString(jni, j_kind);
+ std::string stream_id = JavaToStdString(jni, j_stream_id);
+ rtc::scoped_refptr<RtpSenderInterface> sender =
+ ExtractNativePC(jni, j_pc)->CreateSender(kind, stream_id);
+ if (!sender.get()) {
+ return nullptr;
+ }
+ jlong nativeSenderPtr = jlongFromPointer(sender.get());
+ jobject j_sender =
+ jni->NewObject(j_rtp_sender_class, j_rtp_sender_ctor, nativeSenderPtr);
+ CHECK_EXCEPTION(jni) << "error during NewObject";
+ // Sender is now owned by the Java object, and will be freed from
+ // RtpSender.dispose(), called by PeerConnection.dispose() or getSenders().
+ sender->AddRef();
+ return j_sender;
+}
+
JOW(jobject, PeerConnection_nativeGetSenders)(JNIEnv* jni, jobject j_pc) {
jclass j_array_list_class = FindClass(jni, "java/util/ArrayList");
jmethodID j_array_list_ctor =
@@ -1723,7 +1817,8 @@ JOW(jobject, PeerConnection_nativeGetSenders)(JNIEnv* jni, jobject j_pc) {
jobject j_sender =
jni->NewObject(j_rtp_sender_class, j_rtp_sender_ctor, nativeSenderPtr);
CHECK_EXCEPTION(jni) << "error during NewObject";
- // Sender is now owned by Java object, and will be freed from there.
+ // Sender is now owned by the Java object, and will be freed from
+ // RtpSender.dispose(), called by PeerConnection.dispose() or getSenders().
sender->AddRef();
jni->CallBooleanMethod(j_senders, j_array_list_add, j_sender);
CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
@@ -1802,6 +1897,7 @@ JOW(jobject, VideoCapturer_nativeCreateVideoCapturer)(
// Since we can't create platform specific java implementations in Java, we
// defer the creation to C land.
#if defined(ANDROID)
+ // TODO(nisse): This case is intended to be deleted.
jclass j_video_capturer_class(
FindClass(jni, "org/webrtc/VideoCapturerAndroid"));
const int camera_id = jni->CallStaticIntMethod(
@@ -1816,8 +1912,13 @@ JOW(jobject, VideoCapturer_nativeCreateVideoCapturer)(
j_video_capturer_class,
GetMethodID(jni, j_video_capturer_class, "<init>", "(I)V"), camera_id);
CHECK_EXCEPTION(jni) << "error during creation of VideoCapturerAndroid";
+ jfieldID helper_fid = GetFieldID(jni, j_video_capturer_class, "surfaceHelper",
+ "Lorg/webrtc/SurfaceTextureHelper;");
+
rtc::scoped_refptr<webrtc::AndroidVideoCapturerDelegate> delegate =
- new rtc::RefCountedObject<AndroidVideoCapturerJni>(jni, j_video_capturer);
+ new rtc::RefCountedObject<AndroidVideoCapturerJni>(
+ jni, j_video_capturer,
+ GetObjectField(jni, j_video_capturer, helper_fid));
rtc::scoped_ptr<cricket::VideoCapturer> capturer(
new webrtc::AndroidVideoCapturer(delegate));
@@ -2003,11 +2104,11 @@ JOW(jbyteArray, CallSessionFileRotatingLogSink_nativeGetLogData)(
return result;
}
-JOW(void, RtpSender_nativeSetTrack)(JNIEnv* jni,
+JOW(jboolean, RtpSender_nativeSetTrack)(JNIEnv* jni,
jclass,
jlong j_rtp_sender_pointer,
jlong j_track_pointer) {
- reinterpret_cast<RtpSenderInterface*>(j_rtp_sender_pointer)
+ return reinterpret_cast<RtpSenderInterface*>(j_rtp_sender_pointer)
->SetTrack(reinterpret_cast<MediaStreamTrackInterface*>(j_track_pointer));
}
diff --git a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
index 05f1b23768..3e32b9a6fe 100644
--- a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
+++ b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.cc
@@ -35,25 +35,14 @@
namespace webrtc_jni {
-SurfaceTextureHelper::SurfaceTextureHelper(JNIEnv* jni,
- jobject egl_shared_context)
- : j_surface_texture_helper_class_(
- jni,
- FindClass(jni, "org/webrtc/SurfaceTextureHelper")),
- j_surface_texture_helper_(
- jni,
- jni->CallStaticObjectMethod(
- *j_surface_texture_helper_class_,
- GetStaticMethodID(jni,
- *j_surface_texture_helper_class_,
- "create",
- "(Ljavax/microedition/khronos/egl/EGLContext;)"
- "Lorg/webrtc/SurfaceTextureHelper;"),
- egl_shared_context)),
- j_return_texture_method_(GetMethodID(jni,
- *j_surface_texture_helper_class_,
- "returnTextureFrame",
- "()V")) {
+SurfaceTextureHelper::SurfaceTextureHelper(
+ JNIEnv* jni, jobject surface_texture_helper)
+ : j_surface_texture_helper_(jni, surface_texture_helper),
+ j_return_texture_method_(
+ GetMethodID(jni,
+ FindClass(jni, "org/webrtc/SurfaceTextureHelper"),
+ "returnTextureFrame",
+ "()V")) {
CHECK_EXCEPTION(jni) << "error during initialization of SurfaceTextureHelper";
}
@@ -70,9 +59,9 @@ void SurfaceTextureHelper::ReturnTextureFrame() const {
rtc::scoped_refptr<webrtc::VideoFrameBuffer>
SurfaceTextureHelper::CreateTextureFrame(int width, int height,
- const NativeTextureHandleImpl& native_handle) {
+ const NativeHandleImpl& native_handle) {
return new rtc::RefCountedObject<AndroidTextureBuffer>(
- width, height, native_handle,
+ width, height, native_handle, *j_surface_texture_helper_,
rtc::Bind(&SurfaceTextureHelper::ReturnTextureFrame, this));
}
diff --git a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
index dc9d2b853d..8dde2b54ed 100644
--- a/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
+++ b/talk/app/webrtc/java/jni/surfacetexturehelper_jni.h
@@ -35,7 +35,7 @@
#include "talk/app/webrtc/java/jni/native_handle_impl.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
namespace webrtc_jni {
@@ -49,24 +49,19 @@ namespace webrtc_jni {
// destroyed while a VideoFrameBuffer is in use.
// This class is the C++ counterpart of the java class SurfaceTextureHelper.
// Usage:
-// 1. Create an instance of this class.
-// 2. Call GetJavaSurfaceTextureHelper to get the Java SurfaceTextureHelper.
+// 1. Create a Java instance of SurfaceTextureHelper.
+// 2. Create an instance of this class.
// 3. Register a listener to the Java SurfaceListener and start producing
// new buffers.
-// 3. Call CreateTextureFrame to wrap the Java texture in a VideoFrameBuffer.
+// 4. Call CreateTextureFrame to wrap the Java texture in a VideoFrameBuffer.
class SurfaceTextureHelper : public rtc::RefCountInterface {
public:
- SurfaceTextureHelper(JNIEnv* jni, jobject shared_egl_context);
-
- // Returns the Java SurfaceTextureHelper.
- jobject GetJavaSurfaceTextureHelper() const {
- return *j_surface_texture_helper_;
- }
+ SurfaceTextureHelper(JNIEnv* jni, jobject surface_texture_helper);
rtc::scoped_refptr<webrtc::VideoFrameBuffer> CreateTextureFrame(
int width,
int height,
- const NativeTextureHandleImpl& native_handle);
+ const NativeHandleImpl& native_handle);
protected:
~SurfaceTextureHelper();
@@ -75,7 +70,6 @@ class SurfaceTextureHelper : public rtc::RefCountInterface {
// May be called on arbitrary thread.
void ReturnTextureFrame() const;
- const ScopedGlobalRef<jclass> j_surface_texture_helper_class_;
const ScopedGlobalRef<jobject> j_surface_texture_helper_;
const jmethodID j_return_texture_method_;
};
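A rough Java-side sketch of steps 1 and 3 from the usage comment above. This is hypothetical: it assumes the static SurfaceTextureHelper.create(EGLContext) factory referenced by the removed JNI lookup is still available on the Java class; setListener(), returnTextureFrame(), and the listener signature are as used elsewhere in this patch:

    final SurfaceTextureHelper helper = SurfaceTextureHelper.create(eglContext);
    helper.setListener(new SurfaceTextureHelper.OnTextureFrameAvailableListener() {
      @Override
      public void onTextureFrameAvailable(
          int oesTextureId, float[] transformMatrix, long timestampNs) {
        // Consume or wrap the texture frame, then hand the texture back so
        // the next frame can be delivered.
        helper.returnTextureFrame();
      }
    });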
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
index 42af9c7fd0..19002f70e1 100644
--- a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
@@ -33,23 +33,23 @@ import android.media.MediaCodecInfo;
import android.media.MediaCodecInfo.CodecCapabilities;
import android.media.MediaCodecList;
import android.media.MediaFormat;
-import android.opengl.GLES11Ext;
-import android.opengl.GLES20;
import android.os.Build;
+import android.os.SystemClock;
import android.view.Surface;
import org.webrtc.Logging;
import java.nio.ByteBuffer;
import java.util.Arrays;
+import java.util.LinkedList;
import java.util.List;
-
-import javax.microedition.khronos.egl.EGLContext;
+import java.util.Queue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
// Java-side of peerconnection_jni.cc:MediaCodecVideoDecoder.
// This class is an implementation detail of the Java PeerConnection API.
-// MediaCodec is thread-hostile so this class must be operated on a single
-// thread.
+@SuppressWarnings("deprecation")
public class MediaCodecVideoDecoder {
// This class is constructed, operated, and destroyed by its C++ incarnation,
// so the class and its methods have non-public visibility. The API this
@@ -66,18 +66,26 @@ public class MediaCodecVideoDecoder {
}
private static final int DEQUEUE_INPUT_TIMEOUT = 500000; // 500 ms timeout.
+ private static final int MEDIA_CODEC_RELEASE_TIMEOUT_MS = 5000; // Timeout for codec releasing.
// Active running decoder instance. Set in initDecode() (called from native code)
// and reset to null in release() call.
private static MediaCodecVideoDecoder runningInstance = null;
+ private static MediaCodecVideoDecoderErrorCallback errorCallback = null;
+ private static int codecErrors = 0;
+
private Thread mediaCodecThread;
private MediaCodec mediaCodec;
private ByteBuffer[] inputBuffers;
private ByteBuffer[] outputBuffers;
private static final String VP8_MIME_TYPE = "video/x-vnd.on2.vp8";
+ private static final String VP9_MIME_TYPE = "video/x-vnd.on2.vp9";
private static final String H264_MIME_TYPE = "video/avc";
// List of supported HW VP8 decoders.
private static final String[] supportedVp8HwCodecPrefixes =
{"OMX.qcom.", "OMX.Nvidia.", "OMX.Exynos.", "OMX.Intel." };
+ // List of supported HW VP9 decoders.
+ private static final String[] supportedVp9HwCodecPrefixes =
+ {"OMX.qcom.", "OMX.Exynos." };
// List of supported HW H.264 decoders.
private static final String[] supportedH264HwCodecPrefixes =
{"OMX.qcom.", "OMX.Intel." };
@@ -96,13 +104,29 @@ public class MediaCodecVideoDecoder {
private int height;
private int stride;
private int sliceHeight;
+ private boolean hasDecodedFirstFrame;
+ private final Queue<TimeStamps> decodeStartTimeMs = new LinkedList<TimeStamps>();
private boolean useSurface;
- private int textureID = 0;
- private SurfaceTexture surfaceTexture = null;
+
+ // The below variables are only used when decoding to a Surface.
+ private TextureListener textureListener;
+ // Max number of output buffers queued before starting to drop decoded frames.
+ private static final int MAX_QUEUED_OUTPUTBUFFERS = 3;
+ private int droppedFrames;
private Surface surface = null;
- private EglBase eglBase;
+ private final Queue<DecodedOutputBuffer>
+ dequeuedSurfaceOutputBuffers = new LinkedList<DecodedOutputBuffer>();
+
+ // MediaCodec error handler - invoked when a critical error happens which may prevent
+ // further use of the MediaCodec API. Currently this means that one of the MediaCodec
+ // instances is hanging and can no longer be used in the next call.
+ public static interface MediaCodecVideoDecoderErrorCallback {
+ void onMediaCodecVideoDecoderCriticalError(int codecErrors);
+ }
- private MediaCodecVideoDecoder() {
+ public static void setErrorCallback(MediaCodecVideoDecoderErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ MediaCodecVideoDecoder.errorCallback = errorCallback;
}
// Helper struct for findVp8Decoder() below.
@@ -120,6 +144,7 @@ public class MediaCodecVideoDecoder {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.KITKAT) {
return null; // MediaCodec.setParameters is missing.
}
+ Logging.d(TAG, "Trying to find HW decoder for mime " + mime);
for (int i = 0; i < MediaCodecList.getCodecCount(); ++i) {
MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
if (info.isEncoder()) {
@@ -135,7 +160,7 @@ public class MediaCodecVideoDecoder {
if (name == null) {
continue; // No HW support in this codec; try the next one.
}
- Logging.v(TAG, "Found candidate decoder " + name);
+ Logging.d(TAG, "Found candidate decoder " + name);
// Check if this is supported decoder.
boolean supportedCodec = false;
@@ -166,6 +191,7 @@ public class MediaCodecVideoDecoder {
}
}
}
+ Logging.d(TAG, "No HW decoder found for mime " + mime);
return null; // No HW decoder.
}
@@ -173,6 +199,10 @@ public class MediaCodecVideoDecoder {
return findDecoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes) != null;
}
+ public static boolean isVp9HwSupported() {
+ return findDecoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes) != null;
+ }
+
public static boolean isH264HwSupported() {
return findDecoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes) != null;
}
@@ -197,17 +227,21 @@ public class MediaCodecVideoDecoder {
}
}
- // Pass null in |sharedContext| to configure the codec for ByteBuffer output.
- private boolean initDecode(VideoCodecType type, int width, int height, EGLContext sharedContext) {
+ // Pass null in |surfaceTextureHelper| to configure the codec for ByteBuffer output.
+ private boolean initDecode(
+ VideoCodecType type, int width, int height, SurfaceTextureHelper surfaceTextureHelper) {
if (mediaCodecThread != null) {
throw new RuntimeException("Forgot to release()?");
}
- useSurface = (sharedContext != null);
+ useSurface = (surfaceTextureHelper != null);
String mime = null;
String[] supportedCodecPrefixes = null;
if (type == VideoCodecType.VIDEO_CODEC_VP8) {
mime = VP8_MIME_TYPE;
supportedCodecPrefixes = supportedVp8HwCodecPrefixes;
+ } else if (type == VideoCodecType.VIDEO_CODEC_VP9) {
+ mime = VP9_MIME_TYPE;
+ supportedCodecPrefixes = supportedVp9HwCodecPrefixes;
} else if (type == VideoCodecType.VIDEO_CODEC_H264) {
mime = H264_MIME_TYPE;
supportedCodecPrefixes = supportedH264HwCodecPrefixes;
@@ -221,9 +255,6 @@ public class MediaCodecVideoDecoder {
Logging.d(TAG, "Java initDecode: " + type + " : "+ width + " x " + height +
". Color: 0x" + Integer.toHexString(properties.colorFormat) +
". Use Surface: " + useSurface);
- if (sharedContext != null) {
- Logging.d(TAG, "Decoder shared EGL Context: " + sharedContext);
- }
runningInstance = this; // Decoder is now running and can be queried for stack traces.
mediaCodecThread = Thread.currentThread();
try {
@@ -233,16 +264,8 @@ public class MediaCodecVideoDecoder {
sliceHeight = height;
if (useSurface) {
- // Create shared EGL context.
- eglBase = new EglBase(sharedContext, EglBase.ConfigType.PIXEL_BUFFER);
- eglBase.createDummyPbufferSurface();
- eglBase.makeCurrent();
-
- // Create output surface
- textureID = GlUtil.generateTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
- Logging.d(TAG, "Video decoder TextureID = " + textureID);
- surfaceTexture = new SurfaceTexture(textureID);
- surface = new Surface(surfaceTexture);
+ textureListener = new TextureListener(surfaceTextureHelper);
+ surface = new Surface(surfaceTextureHelper.getSurfaceTexture());
}
MediaFormat format = MediaFormat.createVideoFormat(mime, width, height);
@@ -261,6 +284,10 @@ public class MediaCodecVideoDecoder {
colorFormat = properties.colorFormat;
outputBuffers = mediaCodec.getOutputBuffers();
inputBuffers = mediaCodec.getInputBuffers();
+ decodeStartTimeMs.clear();
+ hasDecodedFirstFrame = false;
+ dequeuedSurfaceOutputBuffers.clear();
+ droppedFrames = 0;
Logging.d(TAG, "Input buffers: " + inputBuffers.length +
". Output buffers: " + outputBuffers.length);
return true;
@@ -271,25 +298,45 @@ public class MediaCodecVideoDecoder {
}
private void release() {
- Logging.d(TAG, "Java releaseDecoder");
+ Logging.d(TAG, "Java releaseDecoder. Total number of dropped frames: " + droppedFrames);
checkOnMediaCodecThread();
- try {
- mediaCodec.stop();
- mediaCodec.release();
- } catch (IllegalStateException e) {
- Logging.e(TAG, "release failed", e);
+
+ // Run MediaCodec stop() and release() on a separate thread since sometimes
+ // MediaCodec.stop() may hang.
+ final CountDownLatch releaseDone = new CountDownLatch(1);
+
+ Runnable runMediaCodecRelease = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Logging.d(TAG, "Java releaseDecoder on release thread");
+ mediaCodec.stop();
+ mediaCodec.release();
+ Logging.d(TAG, "Java releaseDecoder on release thread done");
+ } catch (Exception e) {
+ Logging.e(TAG, "Media decoder release failed", e);
+ }
+ releaseDone.countDown();
+ }
+ };
+ new Thread(runMediaCodecRelease).start();
+
+ if (!ThreadUtils.awaitUninterruptibly(releaseDone, MEDIA_CODEC_RELEASE_TIMEOUT_MS)) {
+ Logging.e(TAG, "Media decoder release timeout");
+ codecErrors++;
+ if (errorCallback != null) {
+ Logging.e(TAG, "Invoke codec error callback. Errors: " + codecErrors);
+ errorCallback.onMediaCodecVideoDecoderCriticalError(codecErrors);
+ }
}
+
mediaCodec = null;
mediaCodecThread = null;
runningInstance = null;
if (useSurface) {
surface.release();
surface = null;
- Logging.d(TAG, "Delete video decoder TextureID " + textureID);
- GLES20.glDeleteTextures(1, new int[] {textureID}, 0);
- textureID = 0;
- eglBase.release();
- eglBase = null;
+ textureListener.release();
}
Logging.d(TAG, "Java releaseDecoder done");
}
@@ -306,13 +353,15 @@ public class MediaCodecVideoDecoder {
}
}
- private boolean queueInputBuffer(
- int inputBufferIndex, int size, long timestampUs) {
+ private boolean queueInputBuffer(int inputBufferIndex, int size, long presentationTimeStampUs,
+ long timeStampMs, long ntpTimeStamp) {
checkOnMediaCodecThread();
try {
inputBuffers[inputBufferIndex].position(0);
inputBuffers[inputBufferIndex].limit(size);
- mediaCodec.queueInputBuffer(inputBufferIndex, 0, size, timestampUs, 0);
+ decodeStartTimeMs.add(new TimeStamps(SystemClock.elapsedRealtime(), timeStampMs,
+ ntpTimeStamp));
+ mediaCodec.queueInputBuffer(inputBufferIndex, 0, size, presentationTimeStampUs, 0);
return true;
}
catch (IllegalStateException e) {
@@ -321,56 +370,183 @@ public class MediaCodecVideoDecoder {
}
}
- // Helper structs for dequeueOutputBuffer() below.
- private static class DecodedByteBuffer {
- public DecodedByteBuffer(int index, int offset, int size, long presentationTimestampUs) {
+ private static class TimeStamps {
+ public TimeStamps(long decodeStartTimeMs, long timeStampMs, long ntpTimeStampMs) {
+ this.decodeStartTimeMs = decodeStartTimeMs;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ }
+ private final long decodeStartTimeMs; // Time when this frame was queued for decoding.
+ private final long timeStampMs; // Only used for bookkeeping in Java; the actual consumer is C++.
+ private final long ntpTimeStampMs; // Only used for bookkeeping in Java; the actual consumer is C++.
+ }
+
+ // Helper struct for dequeueOutputBuffer() below.
+ private static class DecodedOutputBuffer {
+ public DecodedOutputBuffer(int index, int offset, int size, long timeStampMs,
+ long ntpTimeStampMs, long decodeTime, long endDecodeTime) {
this.index = index;
this.offset = offset;
this.size = size;
- this.presentationTimestampUs = presentationTimestampUs;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ this.decodeTimeMs = decodeTime;
+ this.endDecodeTimeMs = endDecodeTime;
}
private final int index;
private final int offset;
private final int size;
- private final long presentationTimestampUs;
+ private final long timeStampMs;
+ private final long ntpTimeStampMs;
+ // Number of ms it took to decode this frame.
+ private final long decodeTimeMs;
+ // System time when this frame finished decoding.
+ private final long endDecodeTimeMs;
}
+ // Helper struct for dequeueTextureBuffer() below.
private static class DecodedTextureBuffer {
private final int textureID;
- private final long presentationTimestampUs;
+ private final float[] transformMatrix;
+ private final long timeStampMs;
+ private final long ntpTimeStampMs;
+ private final long decodeTimeMs;
+ // Interval from when the frame finished decoding until this buffer has been created.
+ // Since there is only one texture, this interval depends on the time from when
+ // a frame is decoded and provided to C++ until that frame is returned to the MediaCodec
+ // so that the texture can be updated with the next decoded frame.
+ private final long frameDelayMs;
- public DecodedTextureBuffer(int textureID, long presentationTimestampUs) {
+ // A DecodedTextureBuffer with zero |textureID| has special meaning and represents a frame
+ // that was dropped.
+ public DecodedTextureBuffer(int textureID, float[] transformMatrix, long timeStampMs,
+ long ntpTimeStampMs, long decodeTimeMs, long frameDelay) {
this.textureID = textureID;
- this.presentationTimestampUs = presentationTimestampUs;
+ this.transformMatrix = transformMatrix;
+ this.timeStampMs = timeStampMs;
+ this.ntpTimeStampMs = ntpTimeStampMs;
+ this.decodeTimeMs = decodeTimeMs;
+ this.frameDelayMs = frameDelay;
}
}
- // Returns null if no decoded buffer is available, and otherwise either a DecodedByteBuffer or
- // DecodedTexturebuffer depending on |useSurface| configuration.
+ // Poll based texture listener.
+ private static class TextureListener
+ implements SurfaceTextureHelper.OnTextureFrameAvailableListener {
+ private final SurfaceTextureHelper surfaceTextureHelper;
+ // |newFrameLock| is used to synchronize arrival of new frames with wait()/notifyAll().
+ private final Object newFrameLock = new Object();
+ // |bufferToRender| is non-null when waiting for transition between addBufferToRender() to
+ // onTextureFrameAvailable().
+ private DecodedOutputBuffer bufferToRender;
+ private DecodedTextureBuffer renderedBuffer;
+
+ public TextureListener(SurfaceTextureHelper surfaceTextureHelper) {
+ this.surfaceTextureHelper = surfaceTextureHelper;
+ surfaceTextureHelper.setListener(this);
+ }
+
+ public void addBufferToRender(DecodedOutputBuffer buffer) {
+ if (bufferToRender != null) {
+ Logging.e(TAG,
+ "Unexpected addBufferToRender() called while waiting for a texture.");
+ throw new IllegalStateException("Waiting for a texture.");
+ }
+ bufferToRender = buffer;
+ }
+
+ public boolean isWaitingForTexture() {
+ synchronized (newFrameLock) {
+ return bufferToRender != null;
+ }
+ }
+
+ // Callback from |surfaceTextureHelper|. May be called on an arbitrary thread.
+ @Override
+ public void onTextureFrameAvailable(
+ int oesTextureId, float[] transformMatrix, long timestampNs) {
+ synchronized (newFrameLock) {
+ if (renderedBuffer != null) {
+ Logging.e(TAG,
+ "Unexpected onTextureFrameAvailable() called while already holding a texture.");
+ throw new IllegalStateException("Already holding a texture.");
+ }
+ // |timestampNs| is always zero on some Android versions.
+ renderedBuffer = new DecodedTextureBuffer(oesTextureId, transformMatrix,
+ bufferToRender.timeStampMs, bufferToRender.ntpTimeStampMs, bufferToRender.decodeTimeMs,
+ SystemClock.elapsedRealtime() - bufferToRender.endDecodeTimeMs);
+ bufferToRender = null;
+ newFrameLock.notifyAll();
+ }
+ }
+
+ // Dequeues and returns a DecodedTextureBuffer if available, or null otherwise.
+ public DecodedTextureBuffer dequeueTextureBuffer(int timeoutMs) {
+ synchronized (newFrameLock) {
+ if (renderedBuffer == null && timeoutMs > 0 && isWaitingForTexture()) {
+ try {
+ newFrameLock.wait(timeoutMs);
+ } catch (InterruptedException e) {
+ // Restore the interrupted status by reinterrupting the thread.
+ Thread.currentThread().interrupt();
+ }
+ }
+ DecodedTextureBuffer returnedBuffer = renderedBuffer;
+ renderedBuffer = null;
+ return returnedBuffer;
+ }
+ }
+
+ public void release() {
+ // SurfaceTextureHelper.disconnect() will block until any onTextureFrameAvailable() in
+ // progress is done. Therefore, the call to disconnect() must be outside any synchronized
+ // statement that is also used in the onTextureFrameAvailable() above to avoid deadlocks.
+ surfaceTextureHelper.disconnect();
+ synchronized (newFrameLock) {
+ if (renderedBuffer != null) {
+ surfaceTextureHelper.returnTextureFrame();
+ renderedBuffer = null;
+ }
+ }
+ }
+ }
+
+ // Returns null if no decoded buffer is available, and otherwise a DecodedOutputBuffer.
// Throws IllegalStateException if call is made on the wrong thread, if color format changes to an
// unsupported format, or if |mediaCodec| is not in the Executing state. Throws CodecException
// upon codec error.
- private Object dequeueOutputBuffer(int dequeueTimeoutUs)
- throws IllegalStateException, MediaCodec.CodecException {
+ private DecodedOutputBuffer dequeueOutputBuffer(int dequeueTimeoutMs) {
checkOnMediaCodecThread();
+ if (decodeStartTimeMs.isEmpty()) {
+ return null;
+ }
// Drain the decoder until receiving a decoded buffer or hitting
// MediaCodec.INFO_TRY_AGAIN_LATER.
final MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
while (true) {
- final int result = mediaCodec.dequeueOutputBuffer(info, dequeueTimeoutUs);
+ final int result = mediaCodec.dequeueOutputBuffer(
+ info, TimeUnit.MILLISECONDS.toMicros(dequeueTimeoutMs));
switch (result) {
- case MediaCodec.INFO_TRY_AGAIN_LATER:
- return null;
case MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED:
outputBuffers = mediaCodec.getOutputBuffers();
Logging.d(TAG, "Decoder output buffers changed: " + outputBuffers.length);
+ if (hasDecodedFirstFrame) {
+ throw new RuntimeException("Unexpected output buffer change event.");
+ }
break;
case MediaCodec.INFO_OUTPUT_FORMAT_CHANGED:
MediaFormat format = mediaCodec.getOutputFormat();
Logging.d(TAG, "Decoder format changed: " + format.toString());
+ int new_width = format.getInteger(MediaFormat.KEY_WIDTH);
+ int new_height = format.getInteger(MediaFormat.KEY_HEIGHT);
+ if (hasDecodedFirstFrame && (new_width != width || new_height != height)) {
+ throw new RuntimeException("Unexpected size change. Configured " + width + "*" +
+ height + ". New " + new_width + "*" + new_height);
+ }
width = format.getInteger(MediaFormat.KEY_WIDTH);
height = format.getInteger(MediaFormat.KEY_HEIGHT);
+
if (!useSurface && format.containsKey(MediaFormat.KEY_COLOR_FORMAT)) {
colorFormat = format.getInteger(MediaFormat.KEY_COLOR_FORMAT);
Logging.d(TAG, "Color: 0x" + Integer.toHexString(colorFormat));
@@ -388,18 +564,76 @@ public class MediaCodecVideoDecoder {
stride = Math.max(width, stride);
sliceHeight = Math.max(height, sliceHeight);
break;
+ case MediaCodec.INFO_TRY_AGAIN_LATER:
+ return null;
default:
- // Output buffer decoded.
- if (useSurface) {
- mediaCodec.releaseOutputBuffer(result, true /* render */);
- // TODO(magjed): Wait for SurfaceTexture.onFrameAvailable() before returning a texture
- // frame.
- return new DecodedTextureBuffer(textureID, info.presentationTimeUs);
- } else {
- return new DecodedByteBuffer(result, info.offset, info.size, info.presentationTimeUs);
- }
+ hasDecodedFirstFrame = true;
+ TimeStamps timeStamps = decodeStartTimeMs.remove();
+ return new DecodedOutputBuffer(result, info.offset, info.size, timeStamps.timeStampMs,
+ timeStamps.ntpTimeStampMs,
+ SystemClock.elapsedRealtime() - timeStamps.decodeStartTimeMs,
+ SystemClock.elapsedRealtime());
+ }
+ }
+ }
+
+ // Returns null if no decoded buffer is available, and otherwise a DecodedTextureBuffer.
+ // Throws IllegalStateException if call is made on the wrong thread, if color format changes to an
+ // unsupported format, or if |mediaCodec| is not in the Executing state. Throws CodecException
+ // upon codec error. If |dequeueTimeoutMs| > 0, the oldest decoded frame will be dropped if
+ // a frame can't be returned.
+ private DecodedTextureBuffer dequeueTextureBuffer(int dequeueTimeoutMs) {
+ checkOnMediaCodecThread();
+ if (!useSurface) {
+ throw new IllegalStateException("dequeueTexture() called for byte buffer decoding.");
+ }
+ DecodedOutputBuffer outputBuffer = dequeueOutputBuffer(dequeueTimeoutMs);
+ if (outputBuffer != null) {
+ dequeuedSurfaceOutputBuffers.add(outputBuffer);
+ }
+
+ maybeRenderDecodedTextureBuffer();
+ // Check if a texture is ready now, waiting at most |dequeueTimeoutMs|.
+ DecodedTextureBuffer renderedBuffer = textureListener.dequeueTextureBuffer(dequeueTimeoutMs);
+ if (renderedBuffer != null) {
+ maybeRenderDecodedTextureBuffer();
+ return renderedBuffer;
+ }
+
+ if ((dequeuedSurfaceOutputBuffers.size()
+ >= Math.min(MAX_QUEUED_OUTPUTBUFFERS, outputBuffers.length)
+ || (dequeueTimeoutMs > 0 && !dequeuedSurfaceOutputBuffers.isEmpty()))) {
+ ++droppedFrames;
+ // Drop the oldest frame still in dequeuedSurfaceOutputBuffers.
+ // The oldest frame is owned by |textureListener| and can't be dropped since
+ // mediaCodec.releaseOutputBuffer has already been called.
+ final DecodedOutputBuffer droppedFrame = dequeuedSurfaceOutputBuffers.remove();
+ if (dequeueTimeoutMs > 0) {
+ // TODO(perkj): Re-add the below log when VideoRenderGUI has been removed or fixed to
+ // return the one and only texture even if it does not render.
+ // Logging.w(TAG, "Draining decoder. Dropping frame with TS: "
+ // + droppedFrame.timeStampMs + ". Total number of dropped frames: " + droppedFrames);
+ } else {
+ Logging.w(TAG, "Too many output buffers. Dropping frame with TS: "
+ + droppedFrame.timeStampMs + ". Total number of dropped frames: " + droppedFrames);
}
+
+ mediaCodec.releaseOutputBuffer(droppedFrame.index, false /* render */);
+ return new DecodedTextureBuffer(0, null, droppedFrame.timeStampMs,
+ droppedFrame.ntpTimeStampMs, droppedFrame.decodeTimeMs,
+ SystemClock.elapsedRealtime() - droppedFrame.endDecodeTimeMs);
+ }
+ return null;
+ }
+
+ private void maybeRenderDecodedTextureBuffer() {
+ if (dequeuedSurfaceOutputBuffers.isEmpty() || textureListener.isWaitingForTexture()) {
+ return;
}
+ // Get the first frame in the queue and render to the decoder output surface.
+ final DecodedOutputBuffer buffer = dequeuedSurfaceOutputBuffers.remove();
+ textureListener.addBufferToRender(buffer);
+ mediaCodec.releaseOutputBuffer(buffer.index, true /* render */);
}
// Release a dequeued output byte buffer back to the codec for re-use. Should only be called for
@@ -407,11 +641,11 @@ public class MediaCodecVideoDecoder {
// Throws IllegalStateException if the call is made on the wrong thread, if codec is configured
// for surface decoding, or if |mediaCodec| is not in the Executing state. Throws
// MediaCodec.CodecException upon codec error.
- private void returnDecodedByteBuffer(int index)
+ private void returnDecodedOutputBuffer(int index)
throws IllegalStateException, MediaCodec.CodecException {
checkOnMediaCodecThread();
if (useSurface) {
- throw new IllegalStateException("returnDecodedByteBuffer() called for surface decoding.");
+ throw new IllegalStateException("returnDecodedOutputBuffer() called for surface decoding.");
}
mediaCodec.releaseOutputBuffer(index, false /* render */);
}
diff --git a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
index f3f03c1d20..5c8f9dc77e 100644
--- a/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
+++ b/talk/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
@@ -27,24 +27,29 @@
package org.webrtc;
+import android.annotation.TargetApi;
import android.media.MediaCodec;
import android.media.MediaCodecInfo.CodecCapabilities;
import android.media.MediaCodecInfo;
import android.media.MediaCodecList;
import android.media.MediaFormat;
+import android.opengl.GLES20;
import android.os.Build;
import android.os.Bundle;
+import android.view.Surface;
import org.webrtc.Logging;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
// Java-side of peerconnection_jni.cc:MediaCodecVideoEncoder.
// This class is an implementation detail of the Java PeerConnection API.
-// MediaCodec is thread-hostile so this class must be operated on a single
-// thread.
+@TargetApi(19)
+@SuppressWarnings("deprecation")
public class MediaCodecVideoEncoder {
// This class is constructed, operated, and destroyed by its C++ incarnation,
// so the class and its methods have non-public visibility. The API this
@@ -60,18 +65,31 @@ public class MediaCodecVideoEncoder {
VIDEO_CODEC_H264
}
+ private static final int MEDIA_CODEC_RELEASE_TIMEOUT_MS = 5000; // Timeout for codec releasing.
private static final int DEQUEUE_TIMEOUT = 0; // Non-blocking, no wait.
- // Active running encoder instance. Set in initDecode() (called from native code)
+ // Active running encoder instance. Set in initEncode() (called from native code)
// and reset to null in release() call.
private static MediaCodecVideoEncoder runningInstance = null;
+ private static MediaCodecVideoEncoderErrorCallback errorCallback = null;
+ private static int codecErrors = 0;
+
private Thread mediaCodecThread;
private MediaCodec mediaCodec;
private ByteBuffer[] outputBuffers;
+ private EglBase14 eglBase;
+ private int width;
+ private int height;
+ private Surface inputSurface;
+ private GlRectDrawer drawer;
private static final String VP8_MIME_TYPE = "video/x-vnd.on2.vp8";
+ private static final String VP9_MIME_TYPE = "video/x-vnd.on2.vp9";
private static final String H264_MIME_TYPE = "video/avc";
// List of supported HW VP8 codecs.
private static final String[] supportedVp8HwCodecPrefixes =
{"OMX.qcom.", "OMX.Intel." };
+ // List of supported HW VP9 encoders.
+ private static final String[] supportedVp9HwCodecPrefixes =
+ {"OMX.qcom."};
// List of supported HW H.264 codecs.
private static final String[] supportedH264HwCodecPrefixes =
{"OMX.qcom." };
@@ -99,13 +117,25 @@ public class MediaCodecVideoEncoder {
CodecCapabilities.COLOR_QCOM_FormatYUV420SemiPlanar,
COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m
};
- private int colorFormat;
- // Video encoder type.
+ private static final int[] supportedSurfaceColorList = {
+ CodecCapabilities.COLOR_FormatSurface
+ };
private VideoCodecType type;
+ private int colorFormat; // Used by native code.
+
// SPS and PPS NALs (Config frame) for H.264.
private ByteBuffer configData = null;
- private MediaCodecVideoEncoder() {
+ // MediaCodec error handler - invoked when a critical error happens which may prevent
+ // further use of the MediaCodec API. Currently this means that one of the MediaCodec
+ // instances is hanging and can no longer be used in the next call.
+ public static interface MediaCodecVideoEncoderErrorCallback {
+ void onMediaCodecVideoEncoderCriticalError(int codecErrors);
+ }
+
+ public static void setErrorCallback(MediaCodecVideoEncoderErrorCallback errorCallback) {
+ Logging.d(TAG, "Set error callback");
+ MediaCodecVideoEncoder.errorCallback = errorCallback;
}
// Helper struct for findHwEncoder() below.
@@ -119,7 +149,7 @@ public class MediaCodecVideoEncoder {
}
private static EncoderProperties findHwEncoder(
- String mime, String[] supportedHwCodecPrefixes) {
+ String mime, String[] supportedHwCodecPrefixes, int[] colorList) {
// MediaCodec.setParameters is missing for JB and below, so bitrate
// can not be adjusted dynamically.
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.KITKAT) {
@@ -130,8 +160,7 @@ public class MediaCodecVideoEncoder {
if (mime.equals(H264_MIME_TYPE)) {
List<String> exceptionModels = Arrays.asList(H264_HW_EXCEPTION_MODELS);
if (exceptionModels.contains(Build.MODEL)) {
- Logging.w(TAG, "Model: " + Build.MODEL +
- " has black listed H.264 encoder.");
+ Logging.w(TAG, "Model: " + Build.MODEL + " has black listed H.264 encoder.");
return null;
}
}
@@ -170,8 +199,7 @@ public class MediaCodecVideoEncoder {
Logging.v(TAG, " Color: 0x" + Integer.toHexString(colorFormat));
}
- // Check if codec supports either yuv420 or nv12.
- for (int supportedColorFormat : supportedColorList) {
+ for (int supportedColorFormat : colorList) {
for (int codecColorFormat : capabilities.colorFormats) {
if (codecColorFormat == supportedColorFormat) {
// Found supported HW encoder.
@@ -182,15 +210,34 @@ public class MediaCodecVideoEncoder {
}
}
}
- return null; // No HW VP8 encoder.
+ return null; // No HW encoder.
}
public static boolean isVp8HwSupported() {
- return findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes) != null;
+ return findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes, supportedColorList) != null;
+ }
+
+ public static boolean isVp9HwSupported() {
+ return findHwEncoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes, supportedColorList) != null;
}
public static boolean isH264HwSupported() {
- return findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes) != null;
+ return findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes, supportedColorList) != null;
+ }
+
+ public static boolean isVp8HwSupportedUsingTextures() {
+ return findHwEncoder(
+ VP8_MIME_TYPE, supportedVp8HwCodecPrefixes, supportedSurfaceColorList) != null;
+ }
+
+ public static boolean isVp9HwSupportedUsingTextures() {
+ return findHwEncoder(
+ VP9_MIME_TYPE, supportedVp9HwCodecPrefixes, supportedSurfaceColorList) != null;
+ }
+
+ public static boolean isH264HwSupportedUsingTextures() {
+ return findHwEncoder(
+ H264_MIME_TYPE, supportedH264HwCodecPrefixes, supportedSurfaceColorList) != null;
}
private void checkOnMediaCodecThread() {
@@ -223,32 +270,43 @@ public class MediaCodecVideoEncoder {
}
}
- // Return the array of input buffers, or null on failure.
- private ByteBuffer[] initEncode(
- VideoCodecType type, int width, int height, int kbps, int fps) {
+ boolean initEncode(VideoCodecType type, int width, int height, int kbps, int fps,
+ EglBase14.Context sharedContext) {
+ final boolean useSurface = sharedContext != null;
Logging.d(TAG, "Java initEncode: " + type + " : " + width + " x " + height +
- ". @ " + kbps + " kbps. Fps: " + fps +
- ". Color: 0x" + Integer.toHexString(colorFormat));
+ ". @ " + kbps + " kbps. Fps: " + fps + ". Encode from texture : " + useSurface);
+
+ this.width = width;
+ this.height = height;
if (mediaCodecThread != null) {
throw new RuntimeException("Forgot to release()?");
}
- this.type = type;
EncoderProperties properties = null;
String mime = null;
int keyFrameIntervalSec = 0;
if (type == VideoCodecType.VIDEO_CODEC_VP8) {
mime = VP8_MIME_TYPE;
- properties = findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes);
+ properties = findHwEncoder(VP8_MIME_TYPE, supportedVp8HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
+ keyFrameIntervalSec = 100;
+ } else if (type == VideoCodecType.VIDEO_CODEC_VP9) {
+ mime = VP9_MIME_TYPE;
+ properties = findHwEncoder(VP9_MIME_TYPE, supportedVp9HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
keyFrameIntervalSec = 100;
} else if (type == VideoCodecType.VIDEO_CODEC_H264) {
mime = H264_MIME_TYPE;
- properties = findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes);
+ properties = findHwEncoder(H264_MIME_TYPE, supportedH264HwCodecPrefixes,
+ useSurface ? supportedSurfaceColorList : supportedColorList);
keyFrameIntervalSec = 20;
}
if (properties == null) {
throw new RuntimeException("Can not find HW encoder for " + type);
}
runningInstance = this; // Encoder is now running and can be queried for stack traces.
+ colorFormat = properties.colorFormat;
+ Logging.d(TAG, "Color format: " + colorFormat);
+
mediaCodecThread = Thread.currentThread();
try {
MediaFormat format = MediaFormat.createVideoFormat(mime, width, height);
@@ -259,26 +317,39 @@ public class MediaCodecVideoEncoder {
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, keyFrameIntervalSec);
Logging.d(TAG, " Format: " + format);
mediaCodec = createByCodecName(properties.codecName);
+ this.type = type;
if (mediaCodec == null) {
Logging.e(TAG, "Can not create media encoder");
- return null;
+ return false;
}
mediaCodec.configure(
format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
+
+ if (useSurface) {
+ eglBase = new EglBase14(sharedContext, EglBase.CONFIG_RECORDABLE);
+ // Create an input surface and keep a reference since we must release the surface when done.
+ inputSurface = mediaCodec.createInputSurface();
+ eglBase.createSurface(inputSurface);
+ drawer = new GlRectDrawer();
+ }
mediaCodec.start();
- colorFormat = properties.colorFormat;
outputBuffers = mediaCodec.getOutputBuffers();
- ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
- Logging.d(TAG, "Input buffers: " + inputBuffers.length +
- ". Output buffers: " + outputBuffers.length);
- return inputBuffers;
+ Logging.d(TAG, "Output buffers: " + outputBuffers.length);
+
} catch (IllegalStateException e) {
Logging.e(TAG, "initEncode failed", e);
- return null;
+ return false;
}
+ return true;
+ }
+
+ ByteBuffer[] getInputBuffers() {
+ ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
+ Logging.d(TAG, "Input buffers: " + inputBuffers.length);
+ return inputBuffers;
}
- private boolean encode(
+ boolean encodeBuffer(
boolean isKeyframe, int inputBuffer, int size,
long presentationTimestampUs) {
checkOnMediaCodecThread();
@@ -298,22 +369,82 @@ public class MediaCodecVideoEncoder {
return true;
}
catch (IllegalStateException e) {
- Logging.e(TAG, "encode failed", e);
+ Logging.e(TAG, "encodeBuffer failed", e);
return false;
}
}
- private void release() {
- Logging.d(TAG, "Java releaseEncoder");
+ boolean encodeTexture(boolean isKeyframe, int oesTextureId, float[] transformationMatrix,
+ long presentationTimestampUs) {
checkOnMediaCodecThread();
try {
- mediaCodec.stop();
- mediaCodec.release();
- } catch (IllegalStateException e) {
- Logging.e(TAG, "release failed", e);
+ if (isKeyframe) {
+ Logging.d(TAG, "Sync frame request");
+ Bundle b = new Bundle();
+ b.putInt(MediaCodec.PARAMETER_KEY_REQUEST_SYNC_FRAME, 0);
+ mediaCodec.setParameters(b);
+ }
+ eglBase.makeCurrent();
+ // TODO(perkj): glClear() shouldn't be necessary since every pixel is covered anyway,
+ // but it's a workaround for bug webrtc:5147.
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ drawer.drawOes(oesTextureId, transformationMatrix, 0, 0, width, height);
+ eglBase.swapBuffers(TimeUnit.MICROSECONDS.toNanos(presentationTimestampUs));
+ return true;
+ }
+ catch (RuntimeException e) {
+ Logging.e(TAG, "encodeTexture failed", e);
+ return false;
}
+ }
+
+ void release() {
+ Logging.d(TAG, "Java releaseEncoder");
+ checkOnMediaCodecThread();
+
+ // Run MediaCodec stop() and release() on a separate thread since sometimes
+ // MediaCodec.stop() may hang.
+ final CountDownLatch releaseDone = new CountDownLatch(1);
+
+ Runnable runMediaCodecRelease = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Logging.d(TAG, "Java releaseEncoder on release thread");
+ mediaCodec.stop();
+ mediaCodec.release();
+ Logging.d(TAG, "Java releaseEncoder on release thread done");
+ } catch (Exception e) {
+ Logging.e(TAG, "Media encoder release failed", e);
+ }
+ releaseDone.countDown();
+ }
+ };
+ new Thread(runMediaCodecRelease).start();
+
+ if (!ThreadUtils.awaitUninterruptibly(releaseDone, MEDIA_CODEC_RELEASE_TIMEOUT_MS)) {
+ Logging.e(TAG, "Media encoder release timeout");
+ codecErrors++;
+ if (errorCallback != null) {
+ Logging.e(TAG, "Invoke codec error callback. Errors: " + codecErrors);
+ errorCallback.onMediaCodecVideoEncoderCriticalError(codecErrors);
+ }
+ }
+
mediaCodec = null;
mediaCodecThread = null;
+ if (drawer != null) {
+ drawer.release();
+ drawer = null;
+ }
+ if (eglBase != null) {
+ eglBase.release();
+ eglBase = null;
+ }
+ if (inputSurface != null) {
+ inputSurface.release();
+ inputSurface = null;
+ }
runningInstance = null;
Logging.d(TAG, "Java releaseEncoder done");
}
@@ -336,7 +467,7 @@ public class MediaCodecVideoEncoder {
// Dequeue an input buffer and return its index, -1 if no input buffer is
// available, or -2 if the codec is no longer operative.
- private int dequeueInputBuffer() {
+ int dequeueInputBuffer() {
checkOnMediaCodecThread();
try {
return mediaCodec.dequeueInputBuffer(DEQUEUE_TIMEOUT);
@@ -347,7 +478,7 @@ public class MediaCodecVideoEncoder {
}
// Helper struct for dequeueOutputBuffer() below.
- private static class OutputBufferInfo {
+ static class OutputBufferInfo {
public OutputBufferInfo(
int index, ByteBuffer buffer,
boolean isKeyFrame, long presentationTimestampUs) {
@@ -357,15 +488,15 @@ public class MediaCodecVideoEncoder {
this.presentationTimestampUs = presentationTimestampUs;
}
- private final int index;
- private final ByteBuffer buffer;
- private final boolean isKeyFrame;
- private final long presentationTimestampUs;
+ public final int index;
+ public final ByteBuffer buffer;
+ public final boolean isKeyFrame;
+ public final long presentationTimestampUs;
}
// Dequeue and return an output buffer, or null if no output is ready. Return
// a fake OutputBufferInfo with index -1 if the codec is no longer operable.
- private OutputBufferInfo dequeueOutputBuffer() {
+ OutputBufferInfo dequeueOutputBuffer() {
checkOnMediaCodecThread();
try {
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
@@ -434,7 +565,7 @@ public class MediaCodecVideoEncoder {
// Release a dequeued output buffer back to the codec for re-use. Return
// false if the codec is no longer operable.
- private boolean releaseOutputBuffer(int index) {
+ boolean releaseOutputBuffer(int index) {
checkOnMediaCodecThread();
try {
mediaCodec.releaseOutputBuffer(index, false);
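The encoder gains the same style of error callback plus texture-mode capability probes; a small sketch of how an application might use them (which codecs to probe is up to the application):

    // Decide whether camera texture frames can go straight to the HW encoder.
    boolean encodeFromTexture = MediaCodecVideoEncoder.isVp8HwSupportedUsingTextures();

    MediaCodecVideoEncoder.setErrorCallback(
        new MediaCodecVideoEncoder.MediaCodecVideoEncoderErrorCallback() {
          @Override
          public void onMediaCodecVideoEncoderCriticalError(int codecErrors) {
            // Mirrors the decoder callback: a codec instance is likely hung,
            // so e.g. disable HW encoding for subsequent calls.
          }
        });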
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
index 50023001d7..36cd07595c 100644
--- a/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnection.java
@@ -28,7 +28,6 @@
package org.webrtc;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
@@ -151,6 +150,7 @@ public class PeerConnection {
public int audioJitterBufferMaxPackets;
public boolean audioJitterBufferFastAccelerate;
public int iceConnectionReceivingTimeout;
+ public int iceBackupCandidatePairPingInterval;
public KeyType keyType;
public ContinualGatheringPolicy continualGatheringPolicy;
@@ -163,6 +163,7 @@ public class PeerConnection {
audioJitterBufferMaxPackets = 50;
audioJitterBufferFastAccelerate = false;
iceConnectionReceivingTimeout = -1;
+ iceBackupCandidatePairPingInterval = -1;
keyType = KeyType.ECDSA;
continualGatheringPolicy = ContinualGatheringPolicy.GATHER_ONCE;
}
@@ -223,6 +224,14 @@ public class PeerConnection {
localStreams.remove(stream);
}
+ public RtpSender createSender(String kind, String stream_id) {
+ RtpSender new_sender = nativeCreateSender(kind, stream_id);
+ if (new_sender != null) {
+ senders.add(new_sender);
+ }
+ return new_sender;
+ }
+
// Note that calling getSenders will dispose of the senders previously
// returned (and same goes for getReceivers).
public List<RtpSender> getSenders() {
@@ -288,6 +297,8 @@ public class PeerConnection {
private native boolean nativeGetStats(
StatsObserver observer, long nativeTrack);
+ private native RtpSender nativeCreateSender(String kind, String stream_id);
+
private native List<RtpSender> nativeGetSenders();
private native List<RtpReceiver> nativeGetReceivers();
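createSender() lets an application negotiate a sender before its track exists; a rough sketch (the kind and stream-id values are illustrative, and the two-argument setTrack() is inferred from the |takeOwnership| comment in RtpSender.java below):

    // Create a video sender up front, e.g. before the camera is opened.
    RtpSender videoSender = peerConnection.createSender("video", "mediaStream0");
    // ... later, once a track exists, attach it without transferring ownership:
    videoSender.setTrack(videoTrack, false);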
diff --git a/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
index 83999ece98..d759c69271 100644
--- a/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
+++ b/talk/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
@@ -73,6 +73,15 @@ public class PeerConnectionFactory {
// Field trial initialization. Must be called before PeerConnectionFactory
// is created.
public static native void initializeFieldTrials(String fieldTrialsInitString);
+ // Internal tracing initialization. Must be called before PeerConnectionFactory is created to
+ // prevent racing with tracing code.
+ public static native void initializeInternalTracer();
+ // Internal tracing shutdown, called to prevent resource leaks. Must be called after
+ // PeerConnectionFactory is gone to prevent races with code performing tracing.
+ public static native void shutdownInternalTracer();
+ // Start/stop internal capturing of internal tracing.
+ public static native boolean startInternalTracingCapture(String tracing_filename);
+ public static native void stopInternalTracingCapture();
public PeerConnectionFactory() {
nativeFactory = nativeCreatePeerConnectionFactory();
@@ -131,12 +140,52 @@ public class PeerConnectionFactory {
nativeFactory, id, source.nativeSource));
}
+ // Starts recording an AEC dump. Ownership of the file is transferred to the
+ // native code. If an AEC dump is already in progress, it will be stopped and
+ // a new one will start using the provided file.
+ public boolean startAecDump(int file_descriptor) {
+ return nativeStartAecDump(nativeFactory, file_descriptor);
+ }
+
+ // Stops recording an AEC dump. If no AEC dump is currently being recorded,
+ // this call will have no effect.
+ public void stopAecDump() {
+ nativeStopAecDump(nativeFactory);
+ }
+
+ // Starts recording an RTC event log. Ownership of the file is transferred to
+ // the native code. If an RTC event log is already being recorded, it will be
+ // stopped and a new one will start using the provided file.
+ public boolean startRtcEventLog(int file_descriptor) {
+ return nativeStartRtcEventLog(nativeFactory, file_descriptor);
+ }
+
+ // Stops recording an RTC event log. If no RTC event log is currently being
+ // recorded, this call will have no effect.
+ public void stopRtcEventLog() {
+ nativeStopRtcEventLog(nativeFactory);
+ }
+
public void setOptions(Options options) {
nativeSetOptions(nativeFactory, options);
}
+ @Deprecated
public void setVideoHwAccelerationOptions(Object renderEGLContext) {
- nativeSetVideoHwAccelerationOptions(nativeFactory, renderEGLContext);
+ nativeSetVideoHwAccelerationOptions(nativeFactory, renderEGLContext, renderEGLContext);
+ }
+
+ /** Set the EGL context used by HW Video encoding and decoding.
+ *
+ * @param localEGLContext An instance of javax.microedition.khronos.egl.EGLContext.
+ * Must be the same as used by VideoCapturerAndroid and any local
+ * video renderer.
+ * @param remoteEGLContext An instance of javax.microedition.khronos.egl.EGLContext.
+ * Must be the same as used by any remote video renderer.
+ */
+ public void setVideoHwAccelerationOptions(Object localEGLContext, Object remoteEGLContext) {
+ nativeSetVideoHwAccelerationOptions(nativeFactory, localEGLContext, remoteEGLContext);
}
public void dispose() {
@@ -201,10 +250,18 @@ public class PeerConnectionFactory {
private static native long nativeCreateAudioTrack(
long nativeFactory, String id, long nativeSource);
+ private static native boolean nativeStartAecDump(long nativeFactory, int file_descriptor);
+
+ private static native void nativeStopAecDump(long nativeFactory);
+
+ private static native boolean nativeStartRtcEventLog(long nativeFactory, int file_descriptor);
+
+ private static native void nativeStopRtcEventLog(long nativeFactory);
+
public native void nativeSetOptions(long nativeFactory, Options options);
private static native void nativeSetVideoHwAccelerationOptions(
- long nativeFactory, Object renderEGLContext);
+ long nativeFactory, Object localEGLContext, Object remoteEGLContext);
private static native void nativeThreadsCallbacks(long nativeFactory);
diff --git a/talk/app/webrtc/java/src/org/webrtc/RtpSender.java b/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
index 37357c0657..9ac2e7034f 100644
--- a/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
+++ b/talk/app/webrtc/java/src/org/webrtc/RtpSender.java
@@ -32,6 +32,7 @@ public class RtpSender {
final long nativeRtpSender;
private MediaStreamTrack cachedTrack;
+ private boolean ownsTrack = true;
public RtpSender(long nativeRtpSender) {
this.nativeRtpSender = nativeRtpSender;
@@ -40,14 +41,22 @@ public class RtpSender {
cachedTrack = (track == 0) ? null : new MediaStreamTrack(track);
}
- // NOTE: This should not be called with a track that's already used by
- // another RtpSender, because then it would be double-disposed.
- public void setTrack(MediaStreamTrack track) {
- if (cachedTrack != null) {
+ // If |takeOwnership| is true, the RtpSender takes ownership of the track
+ // from the caller, and will auto-dispose of it when no longer needed.
+ // |takeOwnership| should only be used if the caller owns the track; it is
+ // not appropriate when the track is owned by, for example, another RtpSender
+ // or a MediaStream.
+ public boolean setTrack(MediaStreamTrack track, boolean takeOwnership) {
+ if (!nativeSetTrack(nativeRtpSender,
+ (track == null) ? 0 : track.nativeTrack)) {
+ return false;
+ }
+ if (cachedTrack != null && ownsTrack) {
cachedTrack.dispose();
}
cachedTrack = track;
- nativeSetTrack(nativeRtpSender, (track == null) ? 0 : track.nativeTrack);
+ ownsTrack = takeOwnership;
+ return true;
}
public MediaStreamTrack track() {
@@ -59,14 +68,14 @@ public class RtpSender {
}
public void dispose() {
- if (cachedTrack != null) {
+ if (cachedTrack != null && ownsTrack) {
cachedTrack.dispose();
}
free(nativeRtpSender);
}
- private static native void nativeSetTrack(long nativeRtpSender,
- long nativeTrack);
+ private static native boolean nativeSetTrack(long nativeRtpSender,
+ long nativeTrack);
// This should increment the reference count of the track.
// Will be released in dispose() or setTrack().
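
The |takeOwnership| flag above decides who disposes of the track. A short sketch of the three cases (the sender and tracks are assumed to already exist):

    import org.webrtc.MediaStreamTrack;
    import org.webrtc.RtpSender;

    class SetTrackSketch {
      static void swapTracks(RtpSender sender, MediaStreamTrack callerOwned,
          MediaStreamTrack sharedWithStream) {
        // Caller-owned track: the sender takes ownership and will dispose of
        // it when replaced or when the sender itself is disposed.
        sender.setTrack(callerOwned, true);
        // Track owned elsewhere (e.g. by a MediaStream or another RtpSender):
        // the sender must not dispose of it.
        sender.setTrack(sharedWithStream, false);
        // Detach by setting a null track; setTrack returns false if the
        // native call rejected the change.
        boolean ok = sender.setTrack(null, false);
      }
    }
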
diff --git a/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
index 3c255dd123..2e307fc54b 100644
--- a/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
+++ b/talk/app/webrtc/java/src/org/webrtc/VideoRenderer.java
@@ -46,7 +46,11 @@ public class VideoRenderer {
public final int[] yuvStrides;
public ByteBuffer[] yuvPlanes;
public final boolean yuvFrame;
- public Object textureObject;
+ // Matrix that transforms standard coordinates to their proper sampling locations in
+ // the texture. This transform compensates for any properties of the video source that
+ // cause it to appear different from a normalized texture. This matrix does not take
+ // |rotationDegree| into account.
+ public final float[] samplingMatrix;
public int textureId;
// Frame pointer in C++.
private long nativeFramePointer;
@@ -70,19 +74,27 @@ public class VideoRenderer {
if (rotationDegree % 90 != 0) {
throw new IllegalArgumentException("Rotation degree not multiple of 90: " + rotationDegree);
}
+ // The convention in WebRTC is that the first element in a ByteBuffer corresponds to the
+ // top-left corner of the image, but in glTexImage2D() the first element corresponds to the
+ // bottom-left corner. This discrepancy is corrected by setting a vertical flip as the
+ // sampling matrix.
+ samplingMatrix = new float[] {
+ 1, 0, 0, 0,
+ 0, -1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 1, 0, 1};
}
/**
* Construct a texture frame of the given dimensions with data in SurfaceTexture
*/
- I420Frame(
- int width, int height, int rotationDegree,
- Object textureObject, int textureId, long nativeFramePointer) {
+ I420Frame(int width, int height, int rotationDegree, int textureId, float[] samplingMatrix,
+ long nativeFramePointer) {
this.width = width;
this.height = height;
this.yuvStrides = null;
this.yuvPlanes = null;
- this.textureObject = textureObject;
+ this.samplingMatrix = samplingMatrix;
this.textureId = textureId;
this.yuvFrame = false;
this.rotationDegree = rotationDegree;
@@ -125,7 +137,6 @@ public class VideoRenderer {
*/
public static void renderFrameDone(I420Frame frame) {
frame.yuvPlanes = null;
- frame.textureObject = null;
frame.textureId = 0;
if (frame.nativeFramePointer != 0) {
releaseNativeFrame(frame.nativeFramePointer);
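
Because samplingMatrix deliberately excludes |rotationDegree|, a renderer has to compose the two itself before handing a texture transform to its shader. One possible composition using android.opengl.Matrix (the rotation pivot and multiplication order are illustrative assumptions, not mandated by this patch):

    import android.opengl.Matrix;

    class SamplingMatrixSketch {
      // Combines a frame's samplingMatrix with its rotation into one 4x4
      // column-major texture transform.
      static float[] finalTextureMatrix(float[] samplingMatrix, int rotationDegree) {
        float[] rotation = new float[16];
        Matrix.setIdentityM(rotation, 0);
        // Rotate around the texture center (0.5, 0.5) rather than the origin.
        Matrix.translateM(rotation, 0, 0.5f, 0.5f, 0);
        Matrix.rotateM(rotation, 0, rotationDegree, 0, 0, 1);
        Matrix.translateM(rotation, 0, -0.5f, -0.5f, 0);
        float[] result = new float[16];
        Matrix.multiplyMM(result, 0, samplingMatrix, 0, rotation, 0);
        return result;
      }
    }
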
diff --git a/talk/app/webrtc/jsepsessiondescription.cc b/talk/app/webrtc/jsepsessiondescription.cc
index 24bd9d4195..226432db69 100644
--- a/talk/app/webrtc/jsepsessiondescription.cc
+++ b/talk/app/webrtc/jsepsessiondescription.cc
@@ -29,6 +29,7 @@
#include "talk/app/webrtc/webrtcsdp.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/stringencode.h"
using rtc::scoped_ptr;
@@ -44,7 +45,7 @@ static const char* kSupportedTypes[] = {
static bool IsTypeSupported(const std::string& type) {
bool type_supported = false;
- for (size_t i = 0; i < ARRAY_SIZE(kSupportedTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kSupportedTypes); ++i) {
if (kSupportedTypes[i] == type) {
type_supported = true;
break;
diff --git a/talk/app/webrtc/localaudiosource.cc b/talk/app/webrtc/localaudiosource.cc
index 63c6f13a3d..591877aa8b 100644
--- a/talk/app/webrtc/localaudiosource.cc
+++ b/talk/app/webrtc/localaudiosource.cc
@@ -49,7 +49,7 @@ void FromConstraints(const MediaConstraintsInterface::Constraints& constraints,
// a different algorithm will be required.
struct {
const char* name;
- cricket::Settable<bool>& value;
+ rtc::Optional<bool>& value;
} key_to_value[] = {
{MediaConstraintsInterface::kGoogEchoCancellation,
options->echo_cancellation},
@@ -78,7 +78,7 @@ void FromConstraints(const MediaConstraintsInterface::Constraints& constraints,
for (auto& entry : key_to_value) {
if (constraint.key.compare(entry.name) == 0)
- entry.value.Set(value);
+ entry.value = rtc::Optional<bool>(value);
}
}
}
diff --git a/talk/app/webrtc/localaudiosource.h b/talk/app/webrtc/localaudiosource.h
index 557745b8b8..5158eb1215 100644
--- a/talk/app/webrtc/localaudiosource.h
+++ b/talk/app/webrtc/localaudiosource.h
@@ -48,16 +48,17 @@ class LocalAudioSource : public Notifier<AudioSourceInterface> {
const PeerConnectionFactoryInterface::Options& options,
const MediaConstraintsInterface* constraints);
- virtual SourceState state() const { return source_state_; }
+ SourceState state() const override { return source_state_; }
+ bool remote() const override { return false; }
+
virtual const cricket::AudioOptions& options() const { return options_; }
- protected:
- LocalAudioSource()
- : source_state_(kInitializing) {
- }
+ void AddSink(AudioTrackSinkInterface* sink) override {}
+ void RemoveSink(AudioTrackSinkInterface* sink) override {}
- ~LocalAudioSource() {
- }
+ protected:
+ LocalAudioSource() : source_state_(kInitializing) {}
+ ~LocalAudioSource() override {}
private:
void Initialize(const PeerConnectionFactoryInterface::Options& options,
diff --git a/talk/app/webrtc/localaudiosource_unittest.cc b/talk/app/webrtc/localaudiosource_unittest.cc
index 8e05c18287..75d0c35462 100644
--- a/talk/app/webrtc/localaudiosource_unittest.cc
+++ b/talk/app/webrtc/localaudiosource_unittest.cc
@@ -58,23 +58,14 @@ TEST(LocalAudioSourceTest, SetValidOptions) {
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
- EXPECT_FALSE(value);
- EXPECT_TRUE(source->options().extended_filter_aec.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().delay_agnostic_aec.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().auto_gain_control.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().experimental_agc.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().noise_suppression.Get(&value));
- EXPECT_FALSE(value);
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_TRUE(value);
- EXPECT_TRUE(source->options().aec_dump.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().echo_cancellation);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().extended_filter_aec);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().delay_agnostic_aec);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().auto_gain_control);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().experimental_agc);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().noise_suppression);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().highpass_filter);
+ EXPECT_EQ(rtc::Optional<bool>(true), source->options().aec_dump);
}
TEST(LocalAudioSourceTest, OptionNotSet) {
@@ -82,8 +73,7 @@ TEST(LocalAudioSourceTest, OptionNotSet) {
rtc::scoped_refptr<LocalAudioSource> source =
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_FALSE(source->options().highpass_filter.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source->options().highpass_filter);
}
TEST(LocalAudioSourceTest, MandatoryOverridesOptional) {
@@ -97,9 +87,7 @@ TEST(LocalAudioSourceTest, MandatoryOverridesOptional) {
LocalAudioSource::Create(PeerConnectionFactoryInterface::Options(),
&constraints);
- bool value;
- EXPECT_TRUE(source->options().echo_cancellation.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().echo_cancellation);
}
TEST(LocalAudioSourceTest, InvalidOptional) {
@@ -112,9 +100,7 @@ TEST(LocalAudioSourceTest, InvalidOptional) {
&constraints);
EXPECT_EQ(MediaSourceInterface::kLive, source->state());
- bool value;
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().highpass_filter);
}
TEST(LocalAudioSourceTest, InvalidMandatory) {
@@ -127,7 +113,5 @@ TEST(LocalAudioSourceTest, InvalidMandatory) {
&constraints);
EXPECT_EQ(MediaSourceInterface::kLive, source->state());
- bool value;
- EXPECT_TRUE(source->options().highpass_filter.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false), source->options().highpass_filter);
}
diff --git a/talk/app/webrtc/mediacontroller.cc b/talk/app/webrtc/mediacontroller.cc
index f7d85116b1..24f5877483 100644
--- a/talk/app/webrtc/mediacontroller.cc
+++ b/talk/app/webrtc/mediacontroller.cc
@@ -47,11 +47,10 @@ class MediaController : public webrtc::MediaControllerInterface,
RTC_DCHECK(nullptr != worker_thread);
worker_thread_->Invoke<void>(
rtc::Bind(&MediaController::Construct_w, this,
- channel_manager_->media_engine()->GetVoE()));
+ channel_manager_->media_engine()));
}
~MediaController() override {
- worker_thread_->Invoke<void>(
- rtc::Bind(&MediaController::Destruct_w, this));
+ worker_thread_->Invoke<void>(rtc::Bind(&MediaController::Destruct_w, this));
}
webrtc::Call* call_w() override {
@@ -64,10 +63,11 @@ class MediaController : public webrtc::MediaControllerInterface,
}
private:
- void Construct_w(webrtc::VoiceEngine* voice_engine) {
+ void Construct_w(cricket::MediaEngineInterface* media_engine) {
RTC_DCHECK(worker_thread_->IsCurrent());
+ RTC_DCHECK(media_engine);
webrtc::Call::Config config;
- config.voice_engine = voice_engine;
+ config.audio_state = media_engine->GetAudioState();
config.bitrate_config.min_bitrate_bps = kMinBandwidthBps;
config.bitrate_config.start_bitrate_bps = kStartBandwidthBps;
config.bitrate_config.max_bitrate_bps = kMaxBandwidthBps;
@@ -84,7 +84,7 @@ class MediaController : public webrtc::MediaControllerInterface,
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MediaController);
};
-} // namespace {
+} // namespace {
namespace webrtc {
@@ -93,4 +93,4 @@ MediaControllerInterface* MediaControllerInterface::Create(
cricket::ChannelManager* channel_manager) {
return new MediaController(worker_thread, channel_manager);
}
-} // namespace webrtc
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastream_unittest.cc b/talk/app/webrtc/mediastream_unittest.cc
index 2cf930c4c0..f19b9456a6 100644
--- a/talk/app/webrtc/mediastream_unittest.cc
+++ b/talk/app/webrtc/mediastream_unittest.cc
@@ -48,9 +48,23 @@ namespace webrtc {
// Helper class to test Observer.
class MockObserver : public ObserverInterface {
public:
- MockObserver() {}
+ explicit MockObserver(NotifierInterface* notifier) : notifier_(notifier) {
+ notifier_->RegisterObserver(this);
+ }
+
+ ~MockObserver() { Unregister(); }
+
+ void Unregister() {
+ if (notifier_) {
+ notifier_->UnregisterObserver(this);
+ notifier_ = nullptr;
+ }
+ }
MOCK_METHOD0(OnChanged, void());
+
+ private:
+ NotifierInterface* notifier_;
};
class MediaStreamTest: public testing::Test {
@@ -75,8 +89,7 @@ class MediaStreamTest: public testing::Test {
}
void ChangeTrack(MediaStreamTrackInterface* track) {
- MockObserver observer;
- track->RegisterObserver(&observer);
+ MockObserver observer(track);
EXPECT_CALL(observer, OnChanged())
.Times(Exactly(1));
@@ -127,8 +140,7 @@ TEST_F(MediaStreamTest, GetTrackInfo) {
}
TEST_F(MediaStreamTest, RemoveTrack) {
- MockObserver observer;
- stream_->RegisterObserver(&observer);
+ MockObserver observer(stream_);
EXPECT_CALL(observer, OnChanged())
.Times(Exactly(2));
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
index 5911e85e8e..9b137d9f76 100644
--- a/talk/app/webrtc/mediastreaminterface.h
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -71,8 +71,6 @@ class NotifierInterface {
// Base class for sources. A MediaStreamTrack has an underlying source that
// provides media. A source can be shared with multiple tracks.
-// TODO(perkj): Implement sources for local and remote audio tracks and
-// remote video tracks.
class MediaSourceInterface : public rtc::RefCountInterface,
public NotifierInterface {
public:
@@ -85,6 +83,8 @@ class MediaSourceInterface : public rtc::RefCountInterface,
virtual SourceState state() const = 0;
+ virtual bool remote() const = 0;
+
protected:
virtual ~MediaSourceInterface() {}
};
@@ -100,6 +100,9 @@ class MediaStreamTrackInterface : public rtc::RefCountInterface,
kFailed = 3, // Track negotiation failed.
};
+ static const char kAudioKind[];
+ static const char kVideoKind[];
+
virtual std::string kind() const = 0;
virtual std::string id() const = 0;
virtual bool enabled() const = 0;
@@ -115,13 +118,6 @@ class MediaStreamTrackInterface : public rtc::RefCountInterface,
// Interface for rendering VideoFrames from a VideoTrack
class VideoRendererInterface {
public:
- // TODO(guoweis): Remove this function. Obsolete. The implementation of
- // VideoRendererInterface should be able to handle different frame size as
- // well as pending rotation. If it can't apply the frame rotation by itself,
- // it should call |frame|.GetCopyWithRotationApplied() to get a frame that has
- // the rotation applied.
- virtual void SetSize(int width, int height) {}
-
// |frame| may have pending rotation. For clients which can't apply rotation,
// |frame|->GetCopyWithRotationApplied() will return a frame that has the
// rotation applied.
@@ -149,6 +145,19 @@ class VideoTrackInterface : public MediaStreamTrackInterface {
virtual ~VideoTrackInterface() {}
};
+// Interface for receiving audio data from an AudioTrack.
+class AudioTrackSinkInterface {
+ public:
+ virtual void OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) = 0;
+
+ protected:
+ virtual ~AudioTrackSinkInterface() {}
+};
+
// AudioSourceInterface is a reference counted source used for AudioTracks.
// The same source can be used in multiple AudioTracks.
class AudioSourceInterface : public MediaSourceInterface {
@@ -164,23 +173,17 @@ class AudioSourceInterface : public MediaSourceInterface {
// TODO(xians): Makes all the interface pure virtual after Chrome has their
// implementations.
// Sets the volume to the source. |volume| is in the range of [0, 10].
+ // TODO(tommi): This method should be on the track and ideally volume should
+ // be applied in the track in a way that does not affect clones of the track.
virtual void SetVolume(double volume) {}
// Registers/unregisters observer to the audio source.
virtual void RegisterAudioObserver(AudioObserver* observer) {}
virtual void UnregisterAudioObserver(AudioObserver* observer) {}
-};
-// Interface for receiving audio data from a AudioTrack.
-class AudioTrackSinkInterface {
- public:
- virtual void OnData(const void* audio_data,
- int bits_per_sample,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames) = 0;
- protected:
- virtual ~AudioTrackSinkInterface() {}
+ // TODO(tommi): Make pure virtual.
+ virtual void AddSink(AudioTrackSinkInterface* sink) {}
+ virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
};
// Interface of the audio processor used by the audio track to collect
diff --git a/talk/app/webrtc/mediastreamobserver.cc b/talk/app/webrtc/mediastreamobserver.cc
new file mode 100644
index 0000000000..2650b9a6f7
--- /dev/null
+++ b/talk/app/webrtc/mediastreamobserver.cc
@@ -0,0 +1,101 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/mediastreamobserver.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+MediaStreamObserver::MediaStreamObserver(MediaStreamInterface* stream)
+ : stream_(stream),
+ cached_audio_tracks_(stream->GetAudioTracks()),
+ cached_video_tracks_(stream->GetVideoTracks()) {
+ stream_->RegisterObserver(this);
+}
+
+MediaStreamObserver::~MediaStreamObserver() {
+ stream_->UnregisterObserver(this);
+}
+
+void MediaStreamObserver::OnChanged() {
+ AudioTrackVector new_audio_tracks = stream_->GetAudioTracks();
+ VideoTrackVector new_video_tracks = stream_->GetVideoTracks();
+
+ // Find removed audio tracks.
+ for (const auto& cached_track : cached_audio_tracks_) {
+ auto it = std::find_if(
+ new_audio_tracks.begin(), new_audio_tracks.end(),
+ [cached_track](const AudioTrackVector::value_type& new_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == new_audio_tracks.end()) {
+ SignalAudioTrackRemoved(cached_track.get(), stream_);
+ }
+ }
+
+ // Find added audio tracks.
+ for (const auto& new_track : new_audio_tracks) {
+ auto it = std::find_if(
+ cached_audio_tracks_.begin(), cached_audio_tracks_.end(),
+ [new_track](const AudioTrackVector::value_type& cached_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == cached_audio_tracks_.end()) {
+ SignalAudioTrackAdded(new_track.get(), stream_);
+ }
+ }
+
+ // Find removed video tracks.
+ for (const auto& cached_track : cached_video_tracks_) {
+ auto it = std::find_if(
+ new_video_tracks.begin(), new_video_tracks.end(),
+ [cached_track](const VideoTrackVector::value_type& new_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == new_video_tracks.end()) {
+ SignalVideoTrackRemoved(cached_track.get(), stream_);
+ }
+ }
+
+ // Find added video tracks.
+ for (const auto& new_track : new_video_tracks) {
+ auto it = std::find_if(
+ cached_video_tracks_.begin(), cached_video_tracks_.end(),
+ [new_track](const VideoTrackVector::value_type& cached_track) {
+ return new_track->id().compare(cached_track->id()) == 0;
+ });
+ if (it == cached_video_tracks_.end()) {
+ SignalVideoTrackAdded(new_track.get(), stream_);
+ }
+ }
+
+ cached_audio_tracks_ = new_audio_tracks;
+ cached_video_tracks_ = new_video_tracks;
+}
+
+} // namespace webrtc
diff --git a/talk/app/webrtc/mediastreamobserver.h b/talk/app/webrtc/mediastreamobserver.h
new file mode 100644
index 0000000000..1dd6c4c118
--- /dev/null
+++ b/talk/app/webrtc/mediastreamobserver.h
@@ -0,0 +1,65 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
+#define TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
+
+#include "talk/app/webrtc/mediastreaminterface.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/base/sigslot.h"
+
+namespace webrtc {
+
+// Helper class which will listen for changes to a stream and emit the
+// corresponding signals.
+class MediaStreamObserver : public ObserverInterface {
+ public:
+ explicit MediaStreamObserver(MediaStreamInterface* stream);
+ ~MediaStreamObserver();
+
+ const MediaStreamInterface* stream() const { return stream_; }
+
+ void OnChanged() override;
+
+ sigslot::signal2<AudioTrackInterface*, MediaStreamInterface*>
+ SignalAudioTrackAdded;
+ sigslot::signal2<AudioTrackInterface*, MediaStreamInterface*>
+ SignalAudioTrackRemoved;
+ sigslot::signal2<VideoTrackInterface*, MediaStreamInterface*>
+ SignalVideoTrackAdded;
+ sigslot::signal2<VideoTrackInterface*, MediaStreamInterface*>
+ SignalVideoTrackRemoved;
+
+ private:
+ rtc::scoped_refptr<MediaStreamInterface> stream_;
+ AudioTrackVector cached_audio_tracks_;
+ VideoTrackVector cached_video_tracks_;
+};
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_MEDIASTREAMOBSERVER_H_
diff --git a/talk/app/webrtc/mediastreamprovider.h b/talk/app/webrtc/mediastreamprovider.h
index 1c62daf9f1..585d51bcc8 100644
--- a/talk/app/webrtc/mediastreamprovider.h
+++ b/talk/app/webrtc/mediastreamprovider.h
@@ -29,6 +29,7 @@
#define TALK_APP_WEBRTC_MEDIASTREAMPROVIDER_H_
#include "webrtc/base/basictypes.h"
+#include "webrtc/base/scoped_ptr.h"
namespace cricket {
@@ -42,6 +43,8 @@ struct VideoOptions;
namespace webrtc {
+class AudioSinkInterface;
+
// TODO(deadbeef): Change the key from an ssrc to a "sender_id" or
// "receiver_id" string, which will be the MSID in the short term and MID in
// the long term.
@@ -50,8 +53,8 @@ namespace webrtc {
// RtpSenders/Receivers to get to the BaseChannels. These interfaces should be
// refactored away eventually, as the classes converge.
-// This interface is called by AudioTrackHandler classes in mediastreamhandler.h
-// to change the settings of an audio track connected to certain PeerConnection.
+// This interface is called by AudioRtpSender/Receivers to change the settings
+// of an audio track connected to certain PeerConnection.
class AudioProviderInterface {
public:
// Enable/disable the audio playout of a remote audio track with |ssrc|.
@@ -67,13 +70,19 @@ class AudioProviderInterface {
// |volume| is in the range of [0, 10].
virtual void SetAudioPlayoutVolume(uint32_t ssrc, double volume) = 0;
+ // Allows for setting a direct audio sink for an incoming audio source.
+ // Only one audio sink is supported per ssrc and ownership of the sink is
+ // passed to the provider.
+ virtual void SetRawAudioSink(
+ uint32_t ssrc,
+ rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) = 0;
+
protected:
virtual ~AudioProviderInterface() {}
};
-// This interface is called by VideoTrackHandler classes in mediastreamhandler.h
-// to change the settings of a video track connected to a certain
-// PeerConnection.
+// This interface is called by VideoRtpSender/Receivers to change the settings
+// of a video track connected to a certain PeerConnection.
class VideoProviderInterface {
public:
virtual bool SetCaptureDevice(uint32_t ssrc,
diff --git a/talk/app/webrtc/objc/README b/talk/app/webrtc/objc/README
index 692fbbc564..c323e73ed1 100644
--- a/talk/app/webrtc/objc/README
+++ b/talk/app/webrtc/objc/README
@@ -12,69 +12,59 @@ Prerequisites:
up for building for iOS-device, iOS-simulator, and Mac (resp) are:
function wrbase() {
cd /path/to/webrtc/trunk
- export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0 libjingle_objc=1"
+ export GYP_DEFINES="build_with_libjingle=1 build_with_chromium=0"
export GYP_GENERATORS="ninja"
}
function wrios() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=ios target_arch=armv7"
+ export GYP_DEFINES="$GYP_DEFINES OS=ios"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_ios"
export GYP_CROSSCOMPILE=1
}
+function wrios32() {
+ wrios
+ export GYP_DEFINES="$GYP_DEFINES target_arch=arm"
+}
+
+function wrios64() {
+ wrios
+ export GYP_DEFINES="$GYP_DEFINES target_arch=arm64"
+}
+
function wrsim() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=ios target_arch=ia32"
+ export GYP_DEFINES="$GYP_DEFINES OS=ios target_subarch=arm32 target_arch=ia32"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_sim"
export GYP_CROSSCOMPILE=1
}
function wrmac() {
wrbase
- export GYP_DEFINES="$GYP_DEFINES OS=mac target_arch=x64"
+ export GYP_DEFINES="$GYP_DEFINES OS=mac target_subarch=arm64 target_arch=x64"
export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_mac"
}
-- Finally, run "gclient runhooks" to generate ninja files.
+- Finally, run "webrtc/build/gyp_webrtc" to generate ninja files.
Example of building & using the unittest & app:
- To build & run the unittest (must target mac):
- wrmac && gclient runhooks && \
+ wrmac && ./webrtc/build/gyp_webrtc && \
ninja -C out_mac/Debug libjingle_peerconnection_objc_test && \
./out_mac/Debug/libjingle_peerconnection_objc_test.app/Contents/MacOS/libjingle_peerconnection_objc_test
- To build & launch the sample app on OSX:
- wrmac && gclient runhooks && ninja -C out_mac/Debug AppRTCDemo && \
+ wrmac && ./webrtc/build/gyp_webrtc && ninja -C out_mac/Debug AppRTCDemo && \
./out_mac/Debug/AppRTCDemo.app/Contents/MacOS/AppRTCDemo
- To build & launch the sample app on the iOS simulator:
- wrsim && gclient runhooks && ninja -C out_sim/Debug iossim AppRTCDemo && \
+ wrsim && ./webrtc/build/gyp_webrtc && ninja -C out_sim/Debug iossim AppRTCDemo && \
./out_sim/Debug/iossim out_sim/Debug/AppRTCDemo.app
-- To build & sign the sample app for an iOS device:
- wrios && gclient runhooks && ninja -C out_ios/Debug-iphoneos AppRTCDemo
-
-- To install the sample app on an iOS device:
- ideviceinstaller -i out_ios/Debug-iphoneos/AppRTCDemo.app
- (if installing ideviceinstaller from brew, use --HEAD to get support
- for .app directories)
-- Alternatively, use iPhone Configuration Utility:
- - Open "iPhone Configuration Utility" (http://support.apple.com/kb/DL1465)
- - Click the "Add" icon (command-o)
- - Open the app under out_ios/Debug-iphoneos/AppRTCDemo (should be added to the Applications tab)
- - Click the device's name in the left-hand panel and select the Applications tab
- - Click Install on the AppRTCDemo line.
- (If you have any problems deploying for the first time, check
- the Info.plist file to ensure that the Bundle Identifier matches
- your phone provisioning profile, or use a development wildcard
- provisioning profile.)
-- Alternately, use ios-deploy:
- ios-deploy -d -b out_ios/Debug-iphoneos/AppRTCDemo.app
+- To build & sign the sample app for an iOS device (32 bit):
+ wrios32 && ./webrtc/build/gyp_webrtc && ninja -C out_ios/Debug-iphoneos AppRTCDemo
-- Once installed:
- - Tap AppRTCDemo on the iOS device's home screen (might have to scroll to find it).
- - In desktop chrome, navigate to http://apprtc.appspot.com and note
- the r=<NNN> room number in the resulting URL; enter that number
- into the text field on the phone.
+- To build & sign the sample app for an iOS device (64 bit):
+ wrios64 && ./webrtc/build/gyp_webrtc && ninja -C out_ios/Debug-iphoneos AppRTCDemo
diff --git a/talk/app/webrtc/objc/RTCFileLogger.mm b/talk/app/webrtc/objc/RTCFileLogger.mm
index c4e469655d..44ada3e22e 100644
--- a/talk/app/webrtc/objc/RTCFileLogger.mm
+++ b/talk/app/webrtc/objc/RTCFileLogger.mm
@@ -35,15 +35,17 @@
NSString *const kDefaultLogDirName = @"webrtc_logs";
NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
+const char *kRTCFileLoggerRotatingLogPrefix = "rotating_log";
@implementation RTCFileLogger {
BOOL _hasStarted;
NSString *_dirPath;
NSUInteger _maxFileSize;
- rtc::scoped_ptr<rtc::CallSessionFileRotatingLogSink> _logSink;
+ rtc::scoped_ptr<rtc::FileRotatingLogSink> _logSink;
}
@synthesize severity = _severity;
+@synthesize rotationType = _rotationType;
- (instancetype)init {
NSArray *paths = NSSearchPathForDirectoriesInDomains(
@@ -57,6 +59,14 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
- (instancetype)initWithDirPath:(NSString *)dirPath
maxFileSize:(NSUInteger)maxFileSize {
+ return [self initWithDirPath:dirPath
+ maxFileSize:maxFileSize
+ rotationType:kRTCFileLoggerTypeCall];
+}
+
+- (instancetype)initWithDirPath:(NSString *)dirPath
+ maxFileSize:(NSUInteger)maxFileSize
+ rotationType:(RTCFileLoggerRotationType)rotationType {
NSParameterAssert(dirPath.length);
NSParameterAssert(maxFileSize);
if (self = [super init]) {
@@ -91,8 +101,20 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
if (_hasStarted) {
return;
}
- _logSink.reset(new rtc::CallSessionFileRotatingLogSink(_dirPath.UTF8String,
- _maxFileSize));
+ switch (_rotationType) {
+ case kRTCFileLoggerTypeApp:
+ _logSink.reset(
+ new rtc::FileRotatingLogSink(_dirPath.UTF8String,
+ kRTCFileLoggerRotatingLogPrefix,
+ _maxFileSize,
+ _maxFileSize / 10));
+ break;
+ case kRTCFileLoggerTypeCall:
+ _logSink.reset(
+ new rtc::CallSessionFileRotatingLogSink(_dirPath.UTF8String,
+ _maxFileSize));
+ break;
+ }
if (!_logSink->Init()) {
LOG(LS_ERROR) << "Failed to open log files at path: "
<< _dirPath.UTF8String;
@@ -120,8 +142,17 @@ NSUInteger const kDefaultMaxFileSize = 10 * 1024 * 1024; // 10MB.
return nil;
}
NSMutableData* logData = [NSMutableData data];
- rtc::scoped_ptr<rtc::CallSessionFileRotatingStream> stream(
- new rtc::CallSessionFileRotatingStream(_dirPath.UTF8String));
+ rtc::scoped_ptr<rtc::FileRotatingStream> stream;
+ switch (_rotationType) {
+ case kRTCFileLoggerTypeApp:
+ stream.reset(
+ new rtc::FileRotatingStream(_dirPath.UTF8String,
+ kRTCFileLoggerRotatingLogPrefix));
+ break;
+ case kRTCFileLoggerTypeCall:
+ stream.reset(new rtc::CallSessionFileRotatingStream(_dirPath.UTF8String));
+ break;
+ }
if (!stream->Open()) {
return logData;
}
diff --git a/talk/app/webrtc/objc/RTCPeerConnection.mm b/talk/app/webrtc/objc/RTCPeerConnection.mm
index 44d39cb090..f814f06ad8 100644
--- a/talk/app/webrtc/objc/RTCPeerConnection.mm
+++ b/talk/app/webrtc/objc/RTCPeerConnection.mm
@@ -271,11 +271,13 @@ class RTCStatsObserver : public StatsObserver {
- (instancetype)initWithFactory:(webrtc::PeerConnectionFactoryInterface*)factory
iceServers:(const webrtc::PeerConnectionInterface::IceServers&)iceServers
constraints:(const webrtc::MediaConstraintsInterface*)constraints {
- NSParameterAssert(factory != NULL);
+ NSParameterAssert(factory != nullptr);
if (self = [super init]) {
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
+ config.servers = iceServers;
_observer.reset(new webrtc::RTCPeerConnectionObserver(self));
_peerConnection = factory->CreatePeerConnection(
- iceServers, constraints, NULL, NULL, _observer.get());
+ config, constraints, nullptr, nullptr, _observer.get());
_localStreams = [[NSMutableArray alloc] init];
}
return self;
diff --git a/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm b/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
index 58d12ace4c..ff45bd2bac 100644
--- a/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
+++ b/talk/app/webrtc/objc/RTCPeerConnectionInterface.mm
@@ -39,6 +39,7 @@
@synthesize tcpCandidatePolicy = _tcpCandidatePolicy;
@synthesize audioJitterBufferMaxPackets = _audioJitterBufferMaxPackets;
@synthesize iceConnectionReceivingTimeout = _iceConnectionReceivingTimeout;
+@synthesize iceBackupCandidatePairPingInterval = _iceBackupCandidatePairPingInterval;
- (instancetype)init {
if (self = [super init]) {
@@ -51,6 +52,7 @@
[RTCEnumConverter tcpCandidatePolicyForNativeEnum:config.tcp_candidate_policy];
_audioJitterBufferMaxPackets = config.audio_jitter_buffer_max_packets;
_iceConnectionReceivingTimeout = config.ice_connection_receiving_timeout;
+ _iceBackupCandidatePairPingInterval = config.ice_backup_candidate_pair_ping_interval;
}
return self;
}
@@ -60,7 +62,8 @@
rtcpMuxPolicy:(RTCRtcpMuxPolicy)rtcpMuxPolicy
tcpCandidatePolicy:(RTCTcpCandidatePolicy)tcpCandidatePolicy
audioJitterBufferMaxPackets:(int)audioJitterBufferMaxPackets
- iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout {
+ iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout
+ iceBackupCandidatePairPingInterval:(int)iceBackupCandidatePairPingInterval {
if (self = [super init]) {
_iceTransportsType = iceTransportsType;
_bundlePolicy = bundlePolicy;
@@ -68,6 +71,7 @@
_tcpCandidatePolicy = tcpCandidatePolicy;
_audioJitterBufferMaxPackets = audioJitterBufferMaxPackets;
_iceConnectionReceivingTimeout = iceConnectionReceivingTimeout;
+ _iceBackupCandidatePairPingInterval = iceBackupCandidatePairPingInterval;
}
return self;
}
@@ -85,8 +89,8 @@
nativeConfig.tcp_candidate_policy =
[RTCEnumConverter nativeEnumForTcpCandidatePolicy:_tcpCandidatePolicy];
nativeConfig.audio_jitter_buffer_max_packets = _audioJitterBufferMaxPackets;
- nativeConfig.ice_connection_receiving_timeout =
- _iceConnectionReceivingTimeout;
+ nativeConfig.ice_connection_receiving_timeout = _iceConnectionReceivingTimeout;
+ nativeConfig.ice_backup_candidate_pair_ping_interval = _iceBackupCandidatePairPingInterval;
return nativeConfig;
}
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.h b/talk/app/webrtc/objc/avfoundationvideocapturer.h
index ded80f6647..32de09aadd 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.h
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.h
@@ -71,7 +71,6 @@ class AVFoundationVideoCapturer : public cricket::VideoCapturer {
RTCAVFoundationVideoCapturerInternal* _capturer;
rtc::Thread* _startThread; // Set in Start(), unset in Stop().
- uint64_t _startTime;
}; // AVFoundationVideoCapturer
} // namespace webrtc
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.mm b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
index e1b0f88fb6..0f9dc6825e 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.mm
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
@@ -33,6 +33,8 @@
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
+#import "webrtc/base/objc/RTCDispatcher.h"
+
// TODO(tkchin): support other formats.
static NSString* const kDefaultPreset = AVCaptureSessionPreset640x480;
static cricket::VideoFormat const kDefaultFormat =
@@ -41,11 +43,6 @@ static cricket::VideoFormat const kDefaultFormat =
cricket::VideoFormat::FpsToInterval(30),
cricket::FOURCC_NV12);
-// This queue is used to start and stop the capturer without blocking the
-// calling thread. -[AVCaptureSession startRunning] blocks until the camera is
-// running.
-static dispatch_queue_t kBackgroundQueue = nil;
-
// This class is used to capture frames using AVFoundation APIs on iOS. It is meant
// to be owned by an instance of AVFoundationVideoCapturer. The reason for this is
// that other webrtc objects own cricket::VideoCapturer, which is not
@@ -80,15 +77,6 @@ static dispatch_queue_t kBackgroundQueue = nil;
@synthesize useBackCamera = _useBackCamera;
@synthesize isRunning = _isRunning;
-+ (void)initialize {
- static dispatch_once_t onceToken;
- dispatch_once(&onceToken, ^{
- kBackgroundQueue = dispatch_queue_create(
- "com.google.webrtc.RTCAVFoundationCapturerBackground",
- DISPATCH_QUEUE_SERIAL);
- });
-}
-
- (instancetype)initWithCapturer:(webrtc::AVFoundationVideoCapturer*)capturer {
NSParameterAssert(capturer);
if (self = [super init]) {
@@ -132,9 +120,10 @@ static dispatch_queue_t kBackgroundQueue = nil;
_orientationHasChanged = NO;
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
AVCaptureSession* session = _captureSession;
- dispatch_async(kBackgroundQueue, ^{
+ [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
[session startRunning];
- });
+ }];
_isRunning = YES;
}
@@ -144,9 +133,10 @@ static dispatch_queue_t kBackgroundQueue = nil;
}
[_videoOutput setSampleBufferDelegate:nil queue:nullptr];
AVCaptureSession* session = _captureSession;
- dispatch_async(kBackgroundQueue, ^{
+ [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
[session stopRunning];
- });
+ }];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
_isRunning = NO;
}
diff --git a/talk/app/webrtc/objc/public/RTCFileLogger.h b/talk/app/webrtc/objc/public/RTCFileLogger.h
index 3900cb6fbe..70b3825307 100644
--- a/talk/app/webrtc/objc/public/RTCFileLogger.h
+++ b/talk/app/webrtc/objc/public/RTCFileLogger.h
@@ -39,21 +39,38 @@ typedef NS_ENUM(NSUInteger, RTCFileLoggerSeverity) {
kRTCFileLoggerSeverityError
};
+typedef NS_ENUM(NSUInteger, RTCFileLoggerRotationType) {
+ kRTCFileLoggerTypeCall,
+ kRTCFileLoggerTypeApp,
+};
+
// This class intercepts WebRTC logs and saves them to a file. The file size
// will not exceed the given maximum bytesize. When the maximum bytesize is
-// reached logs from the beginning and the end are preserved while the middle
-// section is overwritten instead.
+// reached, logs are rotated according to the rotationType specified.
+// For kRTCFileLoggerTypeCall, logs from the beginning and the end
+// are preserved while the middle section is overwritten instead.
+// For kRTCFileLoggerTypeApp, the oldest log is overwritten.
// This class is not threadsafe.
@interface RTCFileLogger : NSObject
// The severity level to capture. The default is kRTCFileLoggerSeverityInfo.
@property(nonatomic, assign) RTCFileLoggerSeverity severity;
-// Default constructor provides default settings for dir path and file size.
+// The rotation type for this file logger. The default is
+// kRTCFileLoggerTypeCall.
+@property(nonatomic, readonly) RTCFileLoggerRotationType rotationType;
+
+// Default constructor provides default settings for dir path, file size and
+// rotation type.
- (instancetype)init;
+// Create file logger with default rotation type.
+- (instancetype)initWithDirPath:(NSString *)dirPath
+ maxFileSize:(NSUInteger)maxFileSize;
+
- (instancetype)initWithDirPath:(NSString *)dirPath
maxFileSize:(NSUInteger)maxFileSize
+ rotationType:(RTCFileLoggerRotationType)rotationType
NS_DESIGNATED_INITIALIZER;
// Starts writing WebRTC logs to disk if not already started. Overwrites any
diff --git a/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h b/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
index b0cc72b5b7..44b971c85e 100644
--- a/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
+++ b/talk/app/webrtc/objc/public/RTCPeerConnectionInterface.h
@@ -64,12 +64,14 @@ typedef NS_ENUM(NSInteger, RTCTcpCandidatePolicy) {
@property(nonatomic, assign) RTCTcpCandidatePolicy tcpCandidatePolicy;
@property(nonatomic, assign) int audioJitterBufferMaxPackets;
@property(nonatomic, assign) int iceConnectionReceivingTimeout;
+@property(nonatomic, assign) int iceBackupCandidatePairPingInterval;
- (instancetype)initWithIceTransportsType:(RTCIceTransportsType)iceTransportsType
bundlePolicy:(RTCBundlePolicy)bundlePolicy
rtcpMuxPolicy:(RTCRtcpMuxPolicy)rtcpMuxPolicy
tcpCandidatePolicy:(RTCTcpCandidatePolicy)tcpCandidatePolicy
audioJitterBufferMaxPackets:(int)audioJitterBufferMaxPackets
- iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout;
+ iceConnectionReceivingTimeout:(int)iceConnectionReceivingTimeout
+ iceBackupCandidatePairPingInterval:(int)iceBackupCandidatePairPingInterval;
@end
diff --git a/talk/app/webrtc/peerconnection.cc b/talk/app/webrtc/peerconnection.cc
index 0d519b280b..ccca18af67 100644
--- a/talk/app/webrtc/peerconnection.cc
+++ b/talk/app/webrtc/peerconnection.cc
@@ -27,8 +27,10 @@
#include "talk/app/webrtc/peerconnection.h"
-#include <vector>
+#include <algorithm>
#include <cctype> // for isdigit
+#include <utility>
+#include <vector>
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/dtmfsender.h"
@@ -36,6 +38,7 @@
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediaconstraintsinterface.h"
#include "talk/app/webrtc/mediastream.h"
+#include "talk/app/webrtc/mediastreamobserver.h"
#include "talk/app/webrtc/mediastreamproxy.h"
#include "talk/app/webrtc/mediastreamtrackproxy.h"
#include "talk/app/webrtc/remoteaudiosource.h"
@@ -46,11 +49,13 @@
#include "talk/app/webrtc/videosource.h"
#include "talk/app/webrtc/videotrack.h"
#include "talk/media/sctp/sctpdataengine.h"
-#include "webrtc/p2p/client/basicportallocator.h"
#include "talk/session/media/channelmanager.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/base/stringutils.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/p2p/client/basicportallocator.h"
#include "webrtc/system_wrappers/include/field_trial.h"
namespace {
@@ -59,13 +64,8 @@ using webrtc::DataChannel;
using webrtc::MediaConstraintsInterface;
using webrtc::MediaStreamInterface;
using webrtc::PeerConnectionInterface;
+using webrtc::RtpSenderInterface;
using webrtc::StreamCollection;
-using webrtc::StunConfigurations;
-using webrtc::TurnConfigurations;
-typedef webrtc::PortAllocatorFactoryInterface::StunConfiguration
- StunConfiguration;
-typedef webrtc::PortAllocatorFactoryInterface::TurnConfiguration
- TurnConfiguration;
static const char kDefaultStreamLabel[] = "default";
static const char kDefaultAudioTrackLabel[] = "defaulta0";
@@ -80,8 +80,6 @@ static const size_t kTurnTransportTokensNum = 2;
static const int kDefaultStunPort = 3478;
static const int kDefaultStunTlsPort = 5349;
static const char kTransport[] = "transport";
-static const char kUdpTransportType[] = "udp";
-static const char kTcpTransportType[] = "tcp";
// NOTE: Must be in the same order as the ServiceType enum.
static const char* kValidIceServiceTypes[] = {"stun", "stuns", "turn", "turns"};
@@ -95,7 +93,7 @@ enum ServiceType {
TURNS, // Indicates a TURN server used with a TLS session.
INVALID, // Unknown.
};
-static_assert(INVALID == ARRAY_SIZE(kValidIceServiceTypes),
+static_assert(INVALID == arraysize(kValidIceServiceTypes),
"kValidIceServiceTypes must have as many strings as ServiceType "
"has values.");
@@ -104,6 +102,7 @@ enum {
MSG_SET_SESSIONDESCRIPTION_FAILED,
MSG_CREATE_SESSIONDESCRIPTION_FAILED,
MSG_GETSTATS,
+ MSG_FREE_DATACHANNELS,
};
struct SetSessionDescriptionMsg : public rtc::MessageData {
@@ -156,7 +155,7 @@ bool GetServiceTypeAndHostnameFromUri(const std::string& in_str,
return false;
}
*service_type = INVALID;
- for (size_t i = 0; i < ARRAY_SIZE(kValidIceServiceTypes); ++i) {
+ for (size_t i = 0; i < arraysize(kValidIceServiceTypes); ++i) {
if (in_str.compare(0, colonpos, kValidIceServiceTypes[i]) == 0) {
*service_type = static_cast<ServiceType>(i);
break;
@@ -216,12 +215,12 @@ bool ParseHostnameAndPortFromString(const std::string& in_str,
return !host->empty();
}
-// Adds a StunConfiguration or TurnConfiguration to the appropriate list,
+// Adds a STUN or TURN server to the appropriate list,
// by parsing |url| and using the username/password in |server|.
bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
const std::string& url,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config) {
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers) {
// draft-nandakumar-rtcweb-stun-uri-01
// stunURI = scheme ":" stun-host [ ":" stun-port ]
// scheme = "stun" / "stuns"
@@ -236,10 +235,10 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
// transport-ext = 1*unreserved
// turn-host = IP-literal / IPv4address / reg-name
// turn-port = *DIGIT
- RTC_DCHECK(stun_config != nullptr);
- RTC_DCHECK(turn_config != nullptr);
+ RTC_DCHECK(stun_servers != nullptr);
+ RTC_DCHECK(turn_servers != nullptr);
std::vector<std::string> tokens;
- std::string turn_transport_type = kUdpTransportType;
+ cricket::ProtocolType turn_transport_type = cricket::PROTO_UDP;
RTC_DCHECK(!url.empty());
rtc::tokenize(url, '?', &tokens);
std::string uri_without_transport = tokens[0];
@@ -250,11 +249,12 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
if (tokens[0] == kTransport) {
// As per the above grammar, the transport param will consist of lower case
// letters.
- if (tokens[1] != kUdpTransportType && tokens[1] != kTcpTransportType) {
+ if (!cricket::StringToProto(tokens[1].c_str(), &turn_transport_type) ||
+ (turn_transport_type != cricket::PROTO_UDP &&
+ turn_transport_type != cricket::PROTO_TCP)) {
LOG(LS_WARNING) << "Transport param should always be udp or tcp.";
return false;
}
- turn_transport_type = tokens[1];
}
}
@@ -293,7 +293,7 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
int port = kDefaultStunPort;
if (service_type == TURNS) {
port = kDefaultStunTlsPort;
- turn_transport_type = kTcpTransportType;
+ turn_transport_type = cricket::PROTO_TCP;
}
std::string address;
@@ -310,16 +310,14 @@ bool ParseIceServerUrl(const PeerConnectionInterface::IceServer& server,
switch (service_type) {
case STUN:
case STUNS:
- stun_config->push_back(StunConfiguration(address, port));
+ stun_servers->insert(rtc::SocketAddress(address, port));
break;
case TURN:
case TURNS: {
bool secure = (service_type == TURNS);
- turn_config->push_back(TurnConfiguration(address, port,
- username,
- server.password,
- turn_transport_type,
- secure));
+ turn_servers->push_back(
+ cricket::RelayServerConfig(address, port, username, server.password,
+ turn_transport_type, secure));
break;
}
case INVALID:
@@ -365,25 +363,15 @@ bool IsValidOfferToReceiveMedia(int value) {
}
// Add the stream and RTP data channel info to |session_options|.
-void SetStreams(cricket::MediaSessionOptions* session_options,
- rtc::scoped_refptr<StreamCollection> streams,
- const std::map<std::string, rtc::scoped_refptr<DataChannel>>&
- rtp_data_channels) {
+void AddSendStreams(
+ cricket::MediaSessionOptions* session_options,
+ const std::vector<rtc::scoped_refptr<RtpSenderInterface>>& senders,
+ const std::map<std::string, rtc::scoped_refptr<DataChannel>>&
+ rtp_data_channels) {
session_options->streams.clear();
- if (streams != nullptr) {
- for (size_t i = 0; i < streams->count(); ++i) {
- MediaStreamInterface* stream = streams->at(i);
- // For each audio track in the stream, add it to the MediaSessionOptions.
- for (const auto& track : stream->GetAudioTracks()) {
- session_options->AddSendStream(cricket::MEDIA_TYPE_AUDIO, track->id(),
- stream->label());
- }
- // For each video track in the stream, add it to the MediaSessionOptions.
- for (const auto& track : stream->GetVideoTracks()) {
- session_options->AddSendStream(cricket::MEDIA_TYPE_VIDEO, track->id(),
- stream->label());
- }
- }
+ for (const auto& sender : senders) {
+ session_options->AddSendStream(sender->media_type(), sender->id(),
+ sender->stream_id());
}
// Check for data channels.
@@ -421,10 +409,12 @@ class RemoteMediaStreamFactory {
MediaStream::Create(stream_label));
}
- AudioTrackInterface* AddAudioTrack(webrtc::MediaStreamInterface* stream,
+ AudioTrackInterface* AddAudioTrack(uint32_t ssrc,
+ AudioProviderInterface* provider,
+ webrtc::MediaStreamInterface* stream,
const std::string& track_id) {
return AddTrack<AudioTrackInterface, AudioTrack, AudioTrackProxy>(
- stream, track_id, RemoteAudioSource::Create().get());
+ stream, track_id, RemoteAudioSource::Create(ssrc, provider));
}
VideoTrackInterface* AddVideoTrack(webrtc::MediaStreamInterface* stream,
@@ -432,7 +422,7 @@ class RemoteMediaStreamFactory {
return AddTrack<VideoTrackInterface, VideoTrack, VideoTrackProxy>(
stream, track_id,
VideoSource::Create(channel_manager_, new RemoteVideoCapturer(),
- nullptr)
+ nullptr, true)
.get());
}
@@ -440,7 +430,7 @@ class RemoteMediaStreamFactory {
template <typename TI, typename T, typename TP, typename S>
TI* AddTrack(MediaStreamInterface* stream,
const std::string& track_id,
- S* source) {
+ const S& source) {
rtc::scoped_refptr<TI> track(
TP::Create(signaling_thread_, T::Create(track_id, source)));
track->set_state(webrtc::MediaStreamTrackInterface::kLive);
@@ -471,7 +461,11 @@ bool ConvertRtcOptionsForOffer(
}
session_options->vad_enabled = rtc_options.voice_activity_detection;
- session_options->transport_options.ice_restart = rtc_options.ice_restart;
+ session_options->audio_transport_options.ice_restart =
+ rtc_options.ice_restart;
+ session_options->video_transport_options.ice_restart =
+ rtc_options.ice_restart;
+ session_options->data_transport_options.ice_restart = rtc_options.ice_restart;
session_options->bundle_enabled = rtc_options.use_rtp_mux;
return true;
@@ -517,10 +511,14 @@ bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
if (FindConstraint(constraints, MediaConstraintsInterface::kIceRestart,
&value, &mandatory_constraints_satisfied)) {
- session_options->transport_options.ice_restart = value;
+ session_options->audio_transport_options.ice_restart = value;
+ session_options->video_transport_options.ice_restart = value;
+ session_options->data_transport_options.ice_restart = value;
} else {
// kIceRestart defaults to false according to spec.
- session_options->transport_options.ice_restart = false;
+ session_options->audio_transport_options.ice_restart = false;
+ session_options->video_transport_options.ice_restart = false;
+ session_options->data_transport_options.ice_restart = false;
}
if (!constraints) {
@@ -530,8 +528,8 @@ bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
}
bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config) {
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers) {
for (const webrtc::PeerConnectionInterface::IceServer& server : servers) {
if (!server.urls.empty()) {
for (const std::string& url : server.urls) {
@@ -539,13 +537,13 @@ bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
LOG(LS_ERROR) << "Empty uri.";
return false;
}
- if (!ParseIceServerUrl(server, url, stun_config, turn_config)) {
+ if (!ParseIceServerUrl(server, url, stun_servers, turn_servers)) {
return false;
}
}
} else if (!server.uri.empty()) {
// Fallback to old .uri if new .urls isn't present.
- if (!ParseIceServerUrl(server, server.uri, stun_config, turn_config)) {
+ if (!ParseIceServerUrl(server, server.uri, stun_servers, turn_servers)) {
return false;
}
} else {
@@ -553,6 +551,13 @@ bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
return false;
}
}
+ // Candidates must have unique priorities, so that connectivity checks
+ // are performed in a well-defined order.
+ int priority = static_cast<int>(turn_servers->size() - 1);
+ for (cricket::RelayServerConfig& turn_server : *turn_servers) {
+ // First in the list gets highest priority.
+ turn_server.priority = priority--;
+ }
return true;
}
@@ -568,6 +573,7 @@ PeerConnection::PeerConnection(PeerConnectionFactory* factory)
remote_streams_(StreamCollection::Create()) {}
PeerConnection::~PeerConnection() {
+ TRACE_EVENT0("webrtc", "PeerConnection::~PeerConnection");
RTC_DCHECK(signaling_thread()->IsCurrent());
// Need to detach RTP senders/receivers from WebRtcSession,
// since it's about to be destroyed.
@@ -582,22 +588,24 @@ PeerConnection::~PeerConnection() {
bool PeerConnection::Initialize(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer) {
+ TRACE_EVENT0("webrtc", "PeerConnection::Initialize");
RTC_DCHECK(observer != nullptr);
if (!observer) {
return false;
}
observer_ = observer;
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stun_config;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turn_config;
- if (!ParseIceServers(configuration.servers, &stun_config, &turn_config)) {
+ port_allocator_ = std::move(allocator);
+
+ cricket::ServerAddresses stun_servers;
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ if (!ParseIceServers(configuration.servers, &stun_servers, &turn_servers)) {
return false;
}
- port_allocator_.reset(
- allocator_factory->CreatePortAllocator(stun_config, turn_config));
+ port_allocator_->SetIceServers(stun_servers, turn_servers);
  // To handle both internally and externally created port allocators, we
  // will enable BUNDLE here.
@@ -637,7 +645,7 @@ bool PeerConnection::Initialize(
// Initialize the WebRtcSession. It creates transport channels etc.
if (!session_->Initialize(factory_->options(), constraints,
- dtls_identity_store.Pass(), configuration)) {
+ std::move(dtls_identity_store), configuration)) {
return false;
}
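// With this change the caller constructs and owns the PortAllocator up front
// and Initialize() takes it by scoped_ptr. A calling-side sketch;
// |network_manager|, |socket_factory| and the other arguments are assumed to
// exist elsewhere and are not part of this CL:
rtc::scoped_ptr<cricket::PortAllocator> allocator(
    new cricket::BasicPortAllocator(&network_manager, &socket_factory));
pc->Initialize(configuration, constraints, std::move(allocator),
               std::move(dtls_identity_store), observer);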
@@ -668,9 +676,8 @@ PeerConnection::remote_streams() {
return remote_streams_;
}
-// TODO(deadbeef): Create RtpSenders immediately here, even if local
-// description hasn't yet been set.
bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
+ TRACE_EVENT0("webrtc", "PeerConnection::AddStream");
if (IsClosed()) {
return false;
}
@@ -679,25 +686,22 @@ bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
}
local_streams_->AddStream(local_stream);
+ MediaStreamObserver* observer = new MediaStreamObserver(local_stream);
+ observer->SignalAudioTrackAdded.connect(this,
+ &PeerConnection::OnAudioTrackAdded);
+ observer->SignalAudioTrackRemoved.connect(
+ this, &PeerConnection::OnAudioTrackRemoved);
+ observer->SignalVideoTrackAdded.connect(this,
+ &PeerConnection::OnVideoTrackAdded);
+ observer->SignalVideoTrackRemoved.connect(
+ this, &PeerConnection::OnVideoTrackRemoved);
+ stream_observers_.push_back(rtc::scoped_ptr<MediaStreamObserver>(observer));
- // Find tracks that have already been configured in SDP. This can occur if a
- // local session description that contains the MSID of these tracks is set
- // before AddLocalStream is called. It can also occur if the local session
- // description is not changed and RemoveLocalStream is called and later
- // AddLocalStream is called again with the same stream.
for (const auto& track : local_stream->GetAudioTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_audio_tracks_, local_stream->label(), track->id());
- if (track_info) {
- CreateAudioSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnAudioTrackAdded(track.get(), local_stream);
}
for (const auto& track : local_stream->GetVideoTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_video_tracks_, local_stream->label(), track->id());
- if (track_info) {
- CreateVideoSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnVideoTrackAdded(track.get(), local_stream);
}
stats_->AddStream(local_stream);
@@ -705,25 +709,24 @@ bool PeerConnection::AddStream(MediaStreamInterface* local_stream) {
return true;
}
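// The observer wiring in AddStream above uses sigslot. A self-contained
// sketch of the mechanism (names invented for illustration; the receiving
// class must derive from sigslot::has_slots<>, as PeerConnection does):
#include "webrtc/base/sigslot.h"

class Listener : public sigslot::has_slots<> {
 public:
  void OnAdded(int id) { last_id_ = id; }
  int last_id_ = 0;
};

void SigslotSketch() {
  sigslot::signal1<int> SignalAdded;
  Listener listener;
  SignalAdded.connect(&listener, &Listener::OnAdded);
  SignalAdded(42);  // fires synchronously; listener.last_id_ is now 42
}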
-// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around
-// indefinitely.
void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) {
+ TRACE_EVENT0("webrtc", "PeerConnection::RemoveStream");
for (const auto& track : local_stream->GetAudioTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_audio_tracks_, local_stream->label(), track->id());
- if (track_info) {
- DestroyAudioSender(local_stream, track.get(), track_info->ssrc);
- }
+ OnAudioTrackRemoved(track.get(), local_stream);
}
for (const auto& track : local_stream->GetVideoTracks()) {
- const TrackInfo* track_info =
- FindTrackInfo(local_video_tracks_, local_stream->label(), track->id());
- if (track_info) {
- DestroyVideoSender(local_stream, track.get());
- }
+ OnVideoTrackRemoved(track.get(), local_stream);
}
local_streams_->RemoveStream(local_stream);
+ stream_observers_.erase(
+ std::remove_if(
+ stream_observers_.begin(), stream_observers_.end(),
+ [local_stream](const rtc::scoped_ptr<MediaStreamObserver>& observer) {
+ return observer->stream()->label().compare(local_stream->label()) ==
+ 0;
+ }),
+ stream_observers_.end());
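// std::remove_if + erase above is the standard erase-remove idiom: remove_if
// partitions the observers to keep toward the front and returns the new
// logical end, and erase() drops the matched observers in one pass. Note
// that label().compare(x) == 0 in the predicate is equivalent to the
// plainer label() == x.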
if (IsClosed()) {
return;
@@ -733,6 +736,7 @@ void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) {
rtc::scoped_refptr<DtmfSenderInterface> PeerConnection::CreateDtmfSender(
AudioTrackInterface* track) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateDtmfSender");
if (!track) {
LOG(LS_ERROR) << "CreateDtmfSender - track is NULL.";
return NULL;
@@ -751,6 +755,26 @@ rtc::scoped_refptr<DtmfSenderInterface> PeerConnection::CreateDtmfSender(
return DtmfSenderProxy::Create(signaling_thread(), sender.get());
}
+rtc::scoped_refptr<RtpSenderInterface> PeerConnection::CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateSender");
+ RtpSenderInterface* new_sender;
+ if (kind == MediaStreamTrackInterface::kAudioKind) {
+ new_sender = new AudioRtpSender(session_.get(), stats_.get());
+ } else if (kind == MediaStreamTrackInterface::kVideoKind) {
+ new_sender = new VideoRtpSender(session_.get());
+ } else {
+ LOG(LS_ERROR) << "CreateSender called with invalid kind: " << kind;
+ return rtc::scoped_refptr<RtpSenderInterface>();
+ }
+ if (!stream_id.empty()) {
+ new_sender->set_stream_id(stream_id);
+ }
+ senders_.push_back(new_sender);
+ return RtpSenderProxy::Create(signaling_thread(), new_sender);
+}
+
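// Usage sketch for the new CreateSender API; |pc| and |audio_track| are
// assumed to exist elsewhere, and this fragment is illustrative rather than
// code from this CL. It creates a sender before any track or session
// description exists and attaches a track afterwards:
rtc::scoped_refptr<RtpSenderInterface> sender =
    pc->CreateSender(MediaStreamTrackInterface::kAudioKind, "stream_1");
sender->SetTrack(audio_track);  // SetSsrc() later binds it to the transport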
std::vector<rtc::scoped_refptr<RtpSenderInterface>> PeerConnection::GetSenders()
const {
std::vector<rtc::scoped_refptr<RtpSenderInterface>> senders;
@@ -773,6 +797,7 @@ PeerConnection::GetReceivers() const {
bool PeerConnection::GetStats(StatsObserver* observer,
MediaStreamTrackInterface* track,
StatsOutputLevel level) {
+ TRACE_EVENT0("webrtc", "PeerConnection::GetStats");
RTC_DCHECK(signaling_thread()->IsCurrent());
if (!VERIFY(observer != NULL)) {
LOG(LS_ERROR) << "GetStats - observer is NULL.";
@@ -807,6 +832,7 @@ rtc::scoped_refptr<DataChannelInterface>
PeerConnection::CreateDataChannel(
const std::string& label,
const DataChannelInit* config) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateDataChannel");
bool first_datachannel = !HasDataChannels();
rtc::scoped_ptr<InternalDataChannelInit> internal_config;
@@ -830,6 +856,7 @@ PeerConnection::CreateDataChannel(
void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
const MediaConstraintsInterface* constraints) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateOffer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateOffer - observer is NULL.";
return;
@@ -881,6 +908,7 @@ void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
const RTCOfferAnswerOptions& options) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateOffer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateOffer - observer is NULL.";
return;
@@ -900,6 +928,7 @@ void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer,
void PeerConnection::CreateAnswer(
CreateSessionDescriptionObserver* observer,
const MediaConstraintsInterface* constraints) {
+ TRACE_EVENT0("webrtc", "PeerConnection::CreateAnswer");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "CreateAnswer - observer is NULL.";
return;
@@ -919,6 +948,7 @@ void PeerConnection::CreateAnswer(
void PeerConnection::SetLocalDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetLocalDescription");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "SetLocalDescription - observer is NULL.";
return;
@@ -940,7 +970,7 @@ void PeerConnection::SetLocalDescription(
// SCTP sids.
rtc::SSLRole role;
if (session_->data_channel_type() == cricket::DCT_SCTP &&
- session_->GetSslRole(&role)) {
+ session_->GetSslRole(session_->data_channel(), &role)) {
AllocateSctpSids(role);
}
@@ -949,19 +979,27 @@ void PeerConnection::SetLocalDescription(
const cricket::ContentInfo* audio_content =
GetFirstAudioContent(desc->description());
if (audio_content) {
- const cricket::AudioContentDescription* audio_desc =
- static_cast<const cricket::AudioContentDescription*>(
- audio_content->description);
- UpdateLocalTracks(audio_desc->streams(), audio_desc->type());
+ if (audio_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_AUDIO);
+ } else {
+ const cricket::AudioContentDescription* audio_desc =
+ static_cast<const cricket::AudioContentDescription*>(
+ audio_content->description);
+ UpdateLocalTracks(audio_desc->streams(), audio_desc->type());
+ }
}
const cricket::ContentInfo* video_content =
GetFirstVideoContent(desc->description());
if (video_content) {
- const cricket::VideoContentDescription* video_desc =
- static_cast<const cricket::VideoContentDescription*>(
- video_content->description);
- UpdateLocalTracks(video_desc->streams(), video_desc->type());
+ if (video_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_VIDEO);
+ } else {
+ const cricket::VideoContentDescription* video_desc =
+ static_cast<const cricket::VideoContentDescription*>(
+ video_content->description);
+ UpdateLocalTracks(video_desc->streams(), video_desc->type());
+ }
}
const cricket::ContentInfo* data_content =
@@ -988,6 +1026,7 @@ void PeerConnection::SetLocalDescription(
void PeerConnection::SetRemoteDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetRemoteDescription");
if (!VERIFY(observer != nullptr)) {
LOG(LS_ERROR) << "SetRemoteDescription - observer is NULL.";
return;
@@ -1009,11 +1048,27 @@ void PeerConnection::SetRemoteDescription(
// SCTP sids.
rtc::SSLRole role;
if (session_->data_channel_type() == cricket::DCT_SCTP &&
- session_->GetSslRole(&role)) {
+ session_->GetSslRole(session_->data_channel(), &role)) {
AllocateSctpSids(role);
}
const cricket::SessionDescription* remote_desc = desc->description();
+ const cricket::ContentInfo* audio_content = GetFirstAudioContent(remote_desc);
+ const cricket::ContentInfo* video_content = GetFirstVideoContent(remote_desc);
+ const cricket::AudioContentDescription* audio_desc =
+ GetFirstAudioContentDescription(remote_desc);
+ const cricket::VideoContentDescription* video_desc =
+ GetFirstVideoContentDescription(remote_desc);
+ const cricket::DataContentDescription* data_desc =
+ GetFirstDataContentDescription(remote_desc);
+
+ // Check if the descriptions include streams, just in case the peer supports
+ // MSID, but doesn't indicate so with "a=msid-semantic".
+ if (remote_desc->msid_supported() ||
+ (audio_desc && !audio_desc->streams().empty()) ||
+ (video_desc && !video_desc->streams().empty())) {
+ remote_peer_supports_msid_ = true;
+ }
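  // For reference, the session-level attribute being worked around here
  // (illustrative SDP):
  //
  //   a=msid-semantic: WMS stream_label
  //
  // A peer that omits the attribute but still fills in per-m-section
  // StreamParams is treated as MSID-capable by the check above.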
// We wait to signal new streams until we finish processing the description,
// since only at that point will new streams have all their tracks.
@@ -1021,39 +1076,39 @@ void PeerConnection::SetRemoteDescription(
// Find all audio rtp streams and create corresponding remote AudioTracks
// and MediaStreams.
- const cricket::ContentInfo* audio_content = GetFirstAudioContent(remote_desc);
if (audio_content) {
- const cricket::AudioContentDescription* desc =
- static_cast<const cricket::AudioContentDescription*>(
- audio_content->description);
- UpdateRemoteStreamsList(GetActiveStreams(desc), desc->type(), new_streams);
- remote_info_.default_audio_track_needed =
- !remote_desc->msid_supported() && desc->streams().empty() &&
- MediaContentDirectionHasSend(desc->direction());
+ if (audio_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_AUDIO);
+ } else {
+ bool default_audio_track_needed =
+ !remote_peer_supports_msid_ &&
+ MediaContentDirectionHasSend(audio_desc->direction());
+ UpdateRemoteStreamsList(GetActiveStreams(audio_desc),
+ default_audio_track_needed, audio_desc->type(),
+ new_streams);
+ }
}
// Find all video rtp streams and create corresponding remote VideoTracks
// and MediaStreams.
- const cricket::ContentInfo* video_content = GetFirstVideoContent(remote_desc);
if (video_content) {
- const cricket::VideoContentDescription* desc =
- static_cast<const cricket::VideoContentDescription*>(
- video_content->description);
- UpdateRemoteStreamsList(GetActiveStreams(desc), desc->type(), new_streams);
- remote_info_.default_video_track_needed =
- !remote_desc->msid_supported() && desc->streams().empty() &&
- MediaContentDirectionHasSend(desc->direction());
+ if (video_content->rejected) {
+ RemoveTracks(cricket::MEDIA_TYPE_VIDEO);
+ } else {
+ bool default_video_track_needed =
+ !remote_peer_supports_msid_ &&
+ MediaContentDirectionHasSend(video_desc->direction());
+ UpdateRemoteStreamsList(GetActiveStreams(video_desc),
+ default_video_track_needed, video_desc->type(),
+ new_streams);
+ }
}
// Update the DataChannels with the information from the remote peer.
- const cricket::ContentInfo* data_content = GetFirstDataContent(remote_desc);
- if (data_content) {
- const cricket::DataContentDescription* desc =
- static_cast<const cricket::DataContentDescription*>(
- data_content->description);
- if (rtc::starts_with(desc->protocol().data(),
+ if (data_desc) {
+ if (rtc::starts_with(data_desc->protocol().data(),
cricket::kMediaProtocolRtpPrefix)) {
- UpdateRemoteRtpDataChannels(GetActiveStreams(desc));
+ UpdateRemoteRtpDataChannels(GetActiveStreams(data_desc));
}
}
@@ -1064,58 +1119,21 @@ void PeerConnection::SetRemoteDescription(
observer_->OnAddStream(new_stream);
}
- // Find removed MediaStreams.
- if (remote_info_.IsDefaultMediaStreamNeeded() &&
- remote_streams_->find(kDefaultStreamLabel) != nullptr) {
- // The default media stream already exists. No need to do anything.
- } else {
- UpdateEndedRemoteMediaStreams();
- remote_info_.msid_supported |= remote_streams_->count() > 0;
- }
- MaybeCreateDefaultStream();
+ UpdateEndedRemoteMediaStreams();
SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer);
signaling_thread()->Post(this, MSG_SET_SESSIONDESCRIPTION_SUCCESS, msg);
}
bool PeerConnection::SetConfiguration(const RTCConfiguration& config) {
+ TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration");
if (port_allocator_) {
- std::vector<PortAllocatorFactoryInterface::StunConfiguration> stuns;
- std::vector<PortAllocatorFactoryInterface::TurnConfiguration> turns;
- if (!ParseIceServers(config.servers, &stuns, &turns)) {
+ cricket::ServerAddresses stun_servers;
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ if (!ParseIceServers(config.servers, &stun_servers, &turn_servers)) {
return false;
}
-
- std::vector<rtc::SocketAddress> stun_hosts;
- typedef std::vector<StunConfiguration>::const_iterator StunIt;
- for (StunIt stun_it = stuns.begin(); stun_it != stuns.end(); ++stun_it) {
- stun_hosts.push_back(stun_it->server);
- }
-
- rtc::SocketAddress stun_addr;
- if (!stun_hosts.empty()) {
- stun_addr = stun_hosts.front();
- LOG(LS_INFO) << "SetConfiguration: StunServer Address: "
- << stun_addr.ToString();
- }
-
- for (size_t i = 0; i < turns.size(); ++i) {
- cricket::RelayCredentials credentials(turns[i].username,
- turns[i].password);
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
- cricket::ProtocolType protocol;
- if (cricket::StringToProto(turns[i].transport_type.c_str(), &protocol)) {
- relay_server.ports.push_back(cricket::ProtocolAddress(
- turns[i].server, protocol, turns[i].secure));
- relay_server.credentials = credentials;
- LOG(LS_INFO) << "SetConfiguration: TurnServer Address: "
- << turns[i].server.ToString();
- } else {
- LOG(LS_WARNING) << "Ignoring TURN server " << turns[i].server << ". "
- << "Reason= Incorrect " << turns[i].transport_type
- << " transport parameter.";
- }
- }
+ port_allocator_->SetIceServers(stun_servers, turn_servers);
}
session_->SetIceConfig(session_->ParseIceConfig(config));
return session_->SetIceTransports(config.type);
@@ -1123,10 +1141,12 @@ bool PeerConnection::SetConfiguration(const RTCConfiguration& config) {
bool PeerConnection::AddIceCandidate(
const IceCandidateInterface* ice_candidate) {
+ TRACE_EVENT0("webrtc", "PeerConnection::AddIceCandidate");
return session_->ProcessIceMessage(ice_candidate);
}
void PeerConnection::RegisterUMAObserver(UMAObserver* observer) {
+ TRACE_EVENT0("webrtc", "PeerConnection::RegisterUmaObserver");
uma_observer_ = observer;
if (session_) {
@@ -1156,6 +1176,7 @@ const SessionDescriptionInterface* PeerConnection::remote_description() const {
}
void PeerConnection::Close() {
+ TRACE_EVENT0("webrtc", "PeerConnection::Close");
// Update stats here so that we have the most recent stats for tracks and
// streams before the channels are closed.
stats_->UpdateStats(kStatsOutputLevelStandard);
@@ -1223,6 +1244,10 @@ void PeerConnection::OnMessage(rtc::Message* msg) {
delete param;
break;
}
+ case MSG_FREE_DATACHANNELS: {
+ sctp_data_channels_to_free_.clear();
+ break;
+ }
default:
RTC_DCHECK(false && "Not implemented");
break;
@@ -1267,49 +1292,6 @@ void PeerConnection::DestroyVideoReceiver(MediaStreamInterface* stream,
}
}
-void PeerConnection::CreateAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc) {
- senders_.push_back(new AudioRtpSender(audio_track, ssrc, session_.get()));
- stats_->AddLocalAudioTrack(audio_track, ssrc);
-}
-
-void PeerConnection::CreateVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track,
- uint32_t ssrc) {
- senders_.push_back(new VideoRtpSender(video_track, ssrc, session_.get()));
-}
-
-// TODO(deadbeef): Keep RtpSenders around even if track goes away in local
-// description.
-void PeerConnection::DestroyAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc) {
- auto it = FindSenderForTrack(audio_track);
- if (it == senders_.end()) {
- LOG(LS_WARNING) << "RtpSender for track with id " << audio_track->id()
- << " doesn't exist.";
- return;
- } else {
- (*it)->Stop();
- senders_.erase(it);
- }
- stats_->RemoveLocalAudioTrack(audio_track, ssrc);
-}
-
-void PeerConnection::DestroyVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track) {
- auto it = FindSenderForTrack(video_track);
- if (it == senders_.end()) {
- LOG(LS_WARNING) << "RtpSender for track with id " << video_track->id()
- << " doesn't exist.";
- return;
- } else {
- (*it)->Stop();
- senders_.erase(it);
- }
-}
-
void PeerConnection::OnIceConnectionChange(
PeerConnectionInterface::IceConnectionState new_state) {
RTC_DCHECK(signaling_thread()->IsCurrent());
@@ -1362,6 +1344,80 @@ void PeerConnection::ChangeSignalingState(
observer_->OnStateChange(PeerConnectionObserver::kSignalingState);
}
+void PeerConnection::OnAudioTrackAdded(AudioTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender != senders_.end()) {
+ // We already have a sender for this track, so just change the stream_id
+ // so that it's correct in the next call to CreateOffer.
+ (*sender)->set_stream_id(stream->label());
+ return;
+ }
+
+ // Normal case; we've never seen this track before.
+ AudioRtpSender* new_sender =
+ new AudioRtpSender(track, stream->label(), session_.get(), stats_.get());
+ senders_.push_back(new_sender);
+ // If the sender has already been configured in SDP, we call SetSsrc,
+ // which will connect the sender to the underlying transport. This can
+ // occur if a local session description that contains the ID of the sender
+ // is set before AddStream is called. It can also occur if the local
+ // session description is not changed and RemoveStream is called, and
+ // later AddStream is called again with the same stream.
+ const TrackInfo* track_info =
+ FindTrackInfo(local_audio_tracks_, stream->label(), track->id());
+ if (track_info) {
+ new_sender->SetSsrc(track_info->ssrc);
+ }
+}
+
+// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around
+// indefinitely, when we have unified plan SDP.
+void PeerConnection::OnAudioTrackRemoved(AudioTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender == senders_.end()) {
+ LOG(LS_WARNING) << "RtpSender for track with id " << track->id()
+ << " doesn't exist.";
+ return;
+ }
+ (*sender)->Stop();
+ senders_.erase(sender);
+}
+
+void PeerConnection::OnVideoTrackAdded(VideoTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender != senders_.end()) {
+ // We already have a sender for this track, so just change the stream_id
+ // so that it's correct in the next call to CreateOffer.
+ (*sender)->set_stream_id(stream->label());
+ return;
+ }
+
+ // Normal case; we've never seen this track before.
+ VideoRtpSender* new_sender =
+ new VideoRtpSender(track, stream->label(), session_.get());
+ senders_.push_back(new_sender);
+ const TrackInfo* track_info =
+ FindTrackInfo(local_video_tracks_, stream->label(), track->id());
+ if (track_info) {
+ new_sender->SetSsrc(track_info->ssrc);
+ }
+}
+
+void PeerConnection::OnVideoTrackRemoved(VideoTrackInterface* track,
+ MediaStreamInterface* stream) {
+ auto sender = FindSenderForTrack(track);
+ if (sender == senders_.end()) {
+ LOG(LS_WARNING) << "RtpSender for track with id " << track->id()
+ << " doesn't exist.";
+ return;
+ }
+ (*sender)->Stop();
+ senders_.erase(sender);
+}
+
void PeerConnection::PostSetSessionDescriptionFailure(
SetSessionDescriptionObserver* observer,
const std::string& error) {
@@ -1385,7 +1441,7 @@ bool PeerConnection::GetOptionsForOffer(
return false;
}
- SetStreams(session_options, local_streams_, rtp_data_channels_);
+ AddSendStreams(session_options, senders_, rtp_data_channels_);
// Offer to receive audio/video if the constraint is not set and there are
// send streams, or we're currently receiving.
if (rtc_options.offer_to_receive_audio == RTCOfferAnswerOptions::kUndefined) {
@@ -1418,7 +1474,7 @@ bool PeerConnection::GetOptionsForAnswer(
return false;
}
- SetStreams(session_options, local_streams_, rtp_data_channels_);
+ AddSendStreams(session_options, senders_, rtp_data_channels_);
session_options->bundle_enabled =
session_options->bundle_enabled &&
(session_options->has_audio() || session_options->has_video() ||
@@ -1433,25 +1489,34 @@ bool PeerConnection::GetOptionsForAnswer(
return true;
}
+void PeerConnection::RemoveTracks(cricket::MediaType media_type) {
+ UpdateLocalTracks(std::vector<cricket::StreamParams>(), media_type);
+ UpdateRemoteStreamsList(std::vector<cricket::StreamParams>(), false,
+ media_type, nullptr);
+}
+
void PeerConnection::UpdateRemoteStreamsList(
const cricket::StreamParamsVec& streams,
+ bool default_track_needed,
cricket::MediaType media_type,
StreamCollection* new_streams) {
TrackInfos* current_tracks = GetRemoteTracks(media_type);
// Find removed tracks. I.e., tracks where the track id or ssrc don't match
- // the
- // new StreamParam.
+ // the new StreamParam.
auto track_it = current_tracks->begin();
while (track_it != current_tracks->end()) {
const TrackInfo& info = *track_it;
const cricket::StreamParams* params =
cricket::GetStreamBySsrc(streams, info.ssrc);
- if (!params || params->id != info.track_id) {
+ bool track_exists = params && params->id == info.track_id;
+ // If this is a default track, and we still need it, don't remove it.
+ if ((info.stream_label == kDefaultStreamLabel && default_track_needed) ||
+ track_exists) {
+ ++track_it;
+ } else {
OnRemoteTrackRemoved(info.stream_label, info.track_id, media_type);
track_it = current_tracks->erase(track_it);
- } else {
- ++track_it;
}
}
@@ -1479,6 +1544,29 @@ void PeerConnection::UpdateRemoteStreamsList(
OnRemoteTrackSeen(stream_label, track_id, ssrc, media_type);
}
}
+
+ // Add default track if necessary.
+ if (default_track_needed) {
+ rtc::scoped_refptr<MediaStreamInterface> default_stream =
+ remote_streams_->find(kDefaultStreamLabel);
+ if (!default_stream) {
+ // Create the new default MediaStream.
+ default_stream =
+ remote_stream_factory_->CreateMediaStream(kDefaultStreamLabel);
+ remote_streams_->AddStream(default_stream);
+ new_streams->AddStream(default_stream);
+ }
+ std::string default_track_id = (media_type == cricket::MEDIA_TYPE_AUDIO)
+ ? kDefaultAudioTrackLabel
+ : kDefaultVideoTrackLabel;
+ const TrackInfo* default_track_info =
+ FindTrackInfo(*current_tracks, kDefaultStreamLabel, default_track_id);
+ if (!default_track_info) {
+ current_tracks->push_back(
+ TrackInfo(kDefaultStreamLabel, default_track_id, 0));
+ OnRemoteTrackSeen(kDefaultStreamLabel, default_track_id, 0, media_type);
+ }
+ }
}
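// Decision summary for the default-track handling above (illustrative):
//
//   remote peer supports MSID?   m-section has send direction?   result
//   yes                          (any)                           no default track
//   no                           yes                             ensure default track (ssrc 0)
//   no                           no                              remove default track if present
//
// The default track lives in the synthesized MediaStream named
// kDefaultStreamLabel, so pre-MSID endpoints still surface their media.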
void PeerConnection::OnRemoteTrackSeen(const std::string& stream_label,
@@ -1488,8 +1576,8 @@ void PeerConnection::OnRemoteTrackSeen(const std::string& stream_label,
MediaStreamInterface* stream = remote_streams_->find(stream_label);
if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track =
- remote_stream_factory_->AddAudioTrack(stream, track_id);
+ AudioTrackInterface* audio_track = remote_stream_factory_->AddAudioTrack(
+ ssrc, session_.get(), stream, track_id);
CreateAudioReceiver(stream, audio_track, ssrc);
} else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
VideoTrackInterface* video_track =
@@ -1541,41 +1629,6 @@ void PeerConnection::UpdateEndedRemoteMediaStreams() {
}
}
-void PeerConnection::MaybeCreateDefaultStream() {
- if (!remote_info_.IsDefaultMediaStreamNeeded()) {
- return;
- }
-
- bool default_created = false;
-
- rtc::scoped_refptr<MediaStreamInterface> default_remote_stream =
- remote_streams_->find(kDefaultStreamLabel);
- if (default_remote_stream == nullptr) {
- default_created = true;
- default_remote_stream =
- remote_stream_factory_->CreateMediaStream(kDefaultStreamLabel);
- remote_streams_->AddStream(default_remote_stream);
- }
- if (remote_info_.default_audio_track_needed &&
- default_remote_stream->GetAudioTracks().size() == 0) {
- remote_audio_tracks_.push_back(
- TrackInfo(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0));
- OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultAudioTrackLabel, 0,
- cricket::MEDIA_TYPE_AUDIO);
- }
- if (remote_info_.default_video_track_needed &&
- default_remote_stream->GetVideoTracks().size() == 0) {
- remote_video_tracks_.push_back(
- TrackInfo(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0));
- OnRemoteTrackSeen(kDefaultStreamLabel, kDefaultVideoTrackLabel, 0,
- cricket::MEDIA_TYPE_VIDEO);
- }
- if (default_created) {
- stats_->AddStream(default_remote_stream);
- observer_->OnAddStream(default_remote_stream);
- }
-}
-
void PeerConnection::EndRemoteTracks(cricket::MediaType media_type) {
TrackInfos* current_tracks = GetRemoteTracks(media_type);
for (TrackInfos::iterator track_it = current_tracks->begin();
@@ -1643,62 +1696,44 @@ void PeerConnection::OnLocalTrackSeen(const std::string& stream_label,
const std::string& track_id,
uint32_t ssrc,
cricket::MediaType media_type) {
- MediaStreamInterface* stream = local_streams_->find(stream_label);
- if (!stream) {
- LOG(LS_WARNING) << "An unknown local MediaStream with label "
- << stream_label << " has been configured.";
+ RtpSenderInterface* sender = FindSenderById(track_id);
+ if (!sender) {
+ LOG(LS_WARNING) << "An unknown RtpSender with id " << track_id
+ << " has been configured in the local description.";
return;
}
- if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
- if (!audio_track) {
- LOG(LS_WARNING) << "An unknown local AudioTrack with id , " << track_id
- << " has been configured.";
- return;
- }
- CreateAudioSender(stream, audio_track, ssrc);
- } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
- VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
- if (!video_track) {
- LOG(LS_WARNING) << "An unknown local VideoTrack with id , " << track_id
- << " has been configured.";
- return;
- }
- CreateVideoSender(stream, video_track, ssrc);
- } else {
- RTC_DCHECK(false && "Invalid media type");
+ if (sender->media_type() != media_type) {
+ LOG(LS_WARNING) << "An RtpSender has been configured in the local"
+ << " description with an unexpected media type.";
+ return;
}
+
+ sender->set_stream_id(stream_label);
+ sender->SetSsrc(ssrc);
}
void PeerConnection::OnLocalTrackRemoved(const std::string& stream_label,
const std::string& track_id,
uint32_t ssrc,
cricket::MediaType media_type) {
- MediaStreamInterface* stream = local_streams_->find(stream_label);
- if (!stream) {
- // This is the normal case. I.e., RemoveLocalStream has been called and the
+ RtpSenderInterface* sender = FindSenderById(track_id);
+ if (!sender) {
+ // This is the normal case. I.e., RemoveStream has been called and the
// SessionDescriptions have been renegotiated.
return;
}
- // A track has been removed from the SessionDescription but the MediaStream
- // is still associated with PeerConnection. This only occurs if the SDP
- // doesn't match with the calls to AddLocalStream and RemoveLocalStream.
- if (media_type == cricket::MEDIA_TYPE_AUDIO) {
- AudioTrackInterface* audio_track = stream->FindAudioTrack(track_id);
- if (!audio_track) {
- return;
- }
- DestroyAudioSender(stream, audio_track, ssrc);
- } else if (media_type == cricket::MEDIA_TYPE_VIDEO) {
- VideoTrackInterface* video_track = stream->FindVideoTrack(track_id);
- if (!video_track) {
- return;
- }
- DestroyVideoSender(stream, video_track);
- } else {
- RTC_DCHECK(false && "Invalid media type.");
+
+ // A sender has been removed from the SessionDescription but it's still
+ // associated with the PeerConnection. This only occurs if the SDP doesn't
+  // match the calls to CreateSender, AddStream, and RemoveStream.
+ if (sender->media_type() != media_type) {
+ LOG(LS_WARNING) << "An RtpSender has been configured in the local"
+ << " description with an unexpected media type.";
+ return;
}
+
+ sender->SetSsrc(0);
}
void PeerConnection::UpdateLocalRtpDataChannels(
@@ -1806,7 +1841,7 @@ rtc::scoped_refptr<DataChannel> PeerConnection::InternalCreateDataChannel(
if (session_->data_channel_type() == cricket::DCT_SCTP) {
if (new_config.id < 0) {
rtc::SSLRole role;
- if (session_->GetSslRole(&role) &&
+ if ((session_->GetSslRole(session_->data_channel(), &role)) &&
!sid_allocator_.AllocateSid(role, &new_config.id)) {
LOG(LS_ERROR) << "No id can be allocated for the SCTP data channel.";
return nullptr;
@@ -1860,13 +1895,18 @@ void PeerConnection::AllocateSctpSids(rtc::SSLRole role) {
}
void PeerConnection::OnSctpDataChannelClosed(DataChannel* channel) {
+ RTC_DCHECK(signaling_thread()->IsCurrent());
for (auto it = sctp_data_channels_.begin(); it != sctp_data_channels_.end();
++it) {
if (it->get() == channel) {
if (channel->id() >= 0) {
sid_allocator_.ReleaseSid(channel->id());
}
+ // Since this method is triggered by a signal from the DataChannel,
+ // we can't free it directly here; we need to free it asynchronously.
+ sctp_data_channels_to_free_.push_back(*it);
sctp_data_channels_.erase(it);
+ signaling_thread()->Post(this, MSG_FREE_DATACHANNELS, nullptr);
return;
}
}
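// The deferred-free pattern above, in miniature: a handler running inside a
// signal from |channel| must not destroy it synchronously, so ownership is
// moved to |sctp_data_channels_to_free_| and a MSG_FREE_DATACHANNELS message
// posted to the signaling thread clears that list once the current call
// stack has unwound (see the matching case in PeerConnection::OnMessage
// earlier in this diff).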
@@ -1916,6 +1956,15 @@ void PeerConnection::OnDataChannelOpenMessage(
DataChannelProxy::Create(signaling_thread(), channel));
}
+RtpSenderInterface* PeerConnection::FindSenderById(const std::string& id) {
+ auto it =
+ std::find_if(senders_.begin(), senders_.end(),
+ [id](const rtc::scoped_refptr<RtpSenderInterface>& sender) {
+ return sender->id() == id;
+ });
+ return it != senders_.end() ? it->get() : nullptr;
+}
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>>::iterator
PeerConnection::FindSenderForTrack(MediaStreamTrackInterface* track) {
return std::find_if(
diff --git a/talk/app/webrtc/peerconnection.h b/talk/app/webrtc/peerconnection.h
index 2d388ae9f9..6e2b967fb4 100644
--- a/talk/app/webrtc/peerconnection.h
+++ b/talk/app/webrtc/peerconnection.h
@@ -42,13 +42,9 @@
namespace webrtc {
+class MediaStreamObserver;
class RemoteMediaStreamFactory;
-typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
- StunConfigurations;
-typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
- TurnConfigurations;
-
// Populates |session_options| from |rtc_options|, and returns true if options
// are valid.
bool ConvertRtcOptionsForOffer(
@@ -60,11 +56,11 @@ bool ConvertRtcOptionsForOffer(
bool ParseConstraintsForAnswer(const MediaConstraintsInterface* constraints,
cricket::MediaSessionOptions* session_options);
-// Parses the URLs for each server in |servers| to build |stun_config| and
-// |turn_config|.
+// Parses the URLs for each server in |servers| to build |stun_servers| and
+// |turn_servers|.
bool ParseIceServers(const PeerConnectionInterface::IceServers& servers,
- StunConfigurations* stun_config,
- TurnConfigurations* turn_config);
+ cricket::ServerAddresses* stun_servers,
+ std::vector<cricket::RelayServerConfig>* turn_servers);
// PeerConnection implements the PeerConnectionInterface interface.
// It uses WebRtcSession to implement the PeerConnection functionality.
@@ -78,9 +74,10 @@ class PeerConnection : public PeerConnectionInterface,
bool Initialize(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer);
+
rtc::scoped_refptr<StreamCollectionInterface> local_streams() override;
rtc::scoped_refptr<StreamCollectionInterface> remote_streams() override;
bool AddStream(MediaStreamInterface* local_stream) override;
@@ -91,6 +88,10 @@ class PeerConnection : public PeerConnectionInterface,
rtc::scoped_refptr<DtmfSenderInterface> CreateDtmfSender(
AudioTrackInterface* track) override;
+ rtc::scoped_refptr<RtpSenderInterface> CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) override;
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>> GetSenders()
const override;
std::vector<rtc::scoped_refptr<RtpReceiverInterface>> GetReceivers()
@@ -148,32 +149,16 @@ class PeerConnection : public PeerConnectionInterface,
const std::string track_id,
uint32_t ssrc)
: stream_label(stream_label), track_id(track_id), ssrc(ssrc) {}
+ bool operator==(const TrackInfo& other) {
+ return this->stream_label == other.stream_label &&
+ this->track_id == other.track_id && this->ssrc == other.ssrc;
+ }
std::string stream_label;
std::string track_id;
uint32_t ssrc;
};
typedef std::vector<TrackInfo> TrackInfos;
- struct RemotePeerInfo {
- RemotePeerInfo()
- : msid_supported(false),
- default_audio_track_needed(false),
- default_video_track_needed(false) {}
- // True if it has been discovered that the remote peer support MSID.
- bool msid_supported;
- // The remote peer indicates in the session description that audio will be
- // sent but no MSID is given.
- bool default_audio_track_needed;
- // The remote peer indicates in the session description that video will be
- // sent but no MSID is given.
- bool default_video_track_needed;
-
- bool IsDefaultMediaStreamNeeded() {
- return !msid_supported &&
- (default_audio_track_needed || default_video_track_needed);
- }
- };
-
// Implements MessageHandler.
void OnMessage(rtc::Message* msg) override;
@@ -187,12 +172,6 @@ class PeerConnection : public PeerConnectionInterface,
AudioTrackInterface* audio_track);
void DestroyVideoReceiver(MediaStreamInterface* stream,
VideoTrackInterface* video_track);
- void CreateAudioSender(MediaStreamInterface* stream,
- AudioTrackInterface* audio_track,
- uint32_t ssrc);
- void CreateVideoSender(MediaStreamInterface* stream,
- VideoTrackInterface* video_track,
- uint32_t ssrc);
void DestroyAudioSender(MediaStreamInterface* stream,
AudioTrackInterface* audio_track,
uint32_t ssrc);
@@ -210,6 +189,16 @@ class PeerConnection : public PeerConnectionInterface,
void OnSessionStateChange(WebRtcSession* session, WebRtcSession::State state);
void ChangeSignalingState(SignalingState signaling_state);
+ // Signals from MediaStreamObserver.
+ void OnAudioTrackAdded(AudioTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnAudioTrackRemoved(AudioTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnVideoTrackAdded(VideoTrackInterface* track,
+ MediaStreamInterface* stream);
+ void OnVideoTrackRemoved(VideoTrackInterface* track,
+ MediaStreamInterface* stream);
+
rtc::Thread* signaling_thread() const {
return factory_->signaling_thread();
}
@@ -236,12 +225,19 @@ class PeerConnection : public PeerConnectionInterface,
const MediaConstraintsInterface* constraints,
cricket::MediaSessionOptions* session_options);
- // Makes sure a MediaStream Track is created for each StreamParam in
- // |streams|. |media_type| is the type of the |streams| and can be either
- // audio or video.
+ // Remove all local and remote tracks of type |media_type|.
+ // Called when a media type is rejected (m-line set to port 0).
+ void RemoveTracks(cricket::MediaType media_type);
+
+ // Makes sure a MediaStreamTrack is created for each StreamParam in |streams|,
+ // and existing MediaStreamTracks are removed if there is no corresponding
+ // StreamParam. If |default_track_needed| is true, a default MediaStreamTrack
+ // is created if it doesn't exist; if false, it's removed if it exists.
+ // |media_type| is the type of the |streams| and can be either audio or video.
// If a new MediaStream is created it is added to |new_streams|.
void UpdateRemoteStreamsList(
const std::vector<cricket::StreamParams>& streams,
+ bool default_track_needed,
cricket::MediaType media_type,
StreamCollection* new_streams);
@@ -265,8 +261,6 @@ class PeerConnection : public PeerConnectionInterface,
// exist.
void UpdateEndedRemoteMediaStreams();
- void MaybeCreateDefaultStream();
-
// Set the MediaStreamTrackInterface::TrackState to |kEnded| on all remote
// tracks of type |media_type|.
void EndRemoteTracks(cricket::MediaType media_type);
@@ -328,6 +322,8 @@ class PeerConnection : public PeerConnectionInterface,
void OnDataChannelOpenMessage(const std::string& label,
const InternalDataChannelInit& config);
+ RtpSenderInterface* FindSenderById(const std::string& id);
+
std::vector<rtc::scoped_refptr<RtpSenderInterface>>::iterator
FindSenderForTrack(MediaStreamTrackInterface* track);
std::vector<rtc::scoped_refptr<RtpReceiverInterface>>::iterator
@@ -366,6 +362,8 @@ class PeerConnection : public PeerConnectionInterface,
// Streams created as a result of SetRemoteDescription.
rtc::scoped_refptr<StreamCollection> remote_streams_;
+ std::vector<rtc::scoped_ptr<MediaStreamObserver>> stream_observers_;
+
// These lists store track info seen in local/remote descriptions.
TrackInfos remote_audio_tracks_;
TrackInfos remote_video_tracks_;
@@ -376,8 +374,9 @@ class PeerConnection : public PeerConnectionInterface,
// label -> DataChannel
std::map<std::string, rtc::scoped_refptr<DataChannel>> rtp_data_channels_;
std::vector<rtc::scoped_refptr<DataChannel>> sctp_data_channels_;
+ std::vector<rtc::scoped_refptr<DataChannel>> sctp_data_channels_to_free_;
- RemotePeerInfo remote_info_;
+ bool remote_peer_supports_msid_ = false;
rtc::scoped_ptr<RemoteMediaStreamFactory> remote_stream_factory_;
std::vector<rtc::scoped_refptr<RtpSenderInterface>> senders_;
diff --git a/talk/app/webrtc/peerconnection_unittest.cc b/talk/app/webrtc/peerconnection_unittest.cc
index 3cf66d64d8..8d0793e25f 100644
--- a/talk/app/webrtc/peerconnection_unittest.cc
+++ b/talk/app/webrtc/peerconnection_unittest.cc
@@ -30,11 +30,11 @@
#include <algorithm>
#include <list>
#include <map>
+#include <utility>
#include <vector>
#include "talk/app/webrtc/dtmfsender.h"
#include "talk/app/webrtc/fakemetricsobserver.h"
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/peerconnection.h"
@@ -58,6 +58,7 @@
#include "webrtc/base/virtualsocketserver.h"
#include "webrtc/p2p/base/constants.h"
#include "webrtc/p2p/base/sessiondescription.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
#define MAYBE_SKIP_TEST(feature) \
if (!(feature())) { \
@@ -78,11 +79,13 @@ using webrtc::DtmfSenderInterface;
using webrtc::DtmfSenderObserverInterface;
using webrtc::FakeConstraints;
using webrtc::MediaConstraintsInterface;
+using webrtc::MediaStreamInterface;
using webrtc::MediaStreamTrackInterface;
using webrtc::MockCreateSessionDescriptionObserver;
using webrtc::MockDataChannelObserver;
using webrtc::MockSetSessionDescriptionObserver;
using webrtc::MockStatsObserver;
+using webrtc::ObserverInterface;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionFactory;
using webrtc::SessionDescriptionInterface;
@@ -96,6 +99,7 @@ static const int kMaxWaitMs = 10000;
#if !defined(THREAD_SANITIZER)
static const int kMaxWaitForStatsMs = 3000;
#endif
+static const int kMaxWaitForActivationMs = 5000;
static const int kMaxWaitForFramesMs = 10000;
static const int kEndAudioFrameCount = 3;
static const int kEndVideoFrameCount = 3;
@@ -111,7 +115,7 @@ static const char kDataChannelLabel[] = "data_channel";
#if !defined(THREAD_SANITIZER)
// SRTP cipher name negotiated by the tests. This must be updated if the
// default changes.
-static const char kDefaultSrtpCipher[] = "AES_CM_128_HMAC_SHA1_32";
+static const int kDefaultSrtpCryptoSuite = rtc::SRTP_AES128_CM_SHA1_32;
#endif
static void RemoveLinesFromSdp(const std::string& line_start,
@@ -139,26 +143,35 @@ class SignalingMessageReceiver {
};
class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
- public SignalingMessageReceiver {
+ public SignalingMessageReceiver,
+ public ObserverInterface {
public:
- static PeerConnectionTestClient* CreateClient(
+ static PeerConnectionTestClient* CreateClientWithDtlsIdentityStore(
const std::string& id,
const MediaConstraintsInterface* constraints,
- const PeerConnectionFactory::Options* options) {
+ const PeerConnectionFactory::Options* options,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
PeerConnectionTestClient* client(new PeerConnectionTestClient(id));
- if (!client->Init(constraints, options)) {
+ if (!client->Init(constraints, options, std::move(dtls_identity_store))) {
delete client;
return nullptr;
}
return client;
}
+ static PeerConnectionTestClient* CreateClient(
+ const std::string& id,
+ const MediaConstraintsInterface* constraints,
+ const PeerConnectionFactory::Options* options) {
+ rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
+ rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
+ : nullptr);
+
+ return CreateClientWithDtlsIdentityStore(id, constraints, options,
+ std::move(dtls_identity_store));
+ }
+
~PeerConnectionTestClient() {
- while (!fake_video_renderers_.empty()) {
- RenderMap::iterator it = fake_video_renderers_.begin();
- delete it->second;
- fake_video_renderers_.erase(it);
- }
}
void Negotiate() { Negotiate(true, true); }
@@ -206,16 +219,17 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
webrtc::PeerConnectionInterface::SignalingState new_state) override {
EXPECT_EQ(pc()->signaling_state(), new_state);
}
- void OnAddStream(webrtc::MediaStreamInterface* media_stream) override {
+ void OnAddStream(MediaStreamInterface* media_stream) override {
+ media_stream->RegisterObserver(this);
for (size_t i = 0; i < media_stream->GetVideoTracks().size(); ++i) {
const std::string id = media_stream->GetVideoTracks()[i]->id();
ASSERT_TRUE(fake_video_renderers_.find(id) ==
fake_video_renderers_.end());
- fake_video_renderers_[id] =
- new webrtc::FakeVideoTrackRenderer(media_stream->GetVideoTracks()[i]);
+ fake_video_renderers_[id].reset(new webrtc::FakeVideoTrackRenderer(
+ media_stream->GetVideoTracks()[i]));
}
}
- void OnRemoveStream(webrtc::MediaStreamInterface* media_stream) override {}
+ void OnRemoveStream(MediaStreamInterface* media_stream) override {}
void OnRenegotiationNeeded() override {}
void OnIceConnectionChange(
webrtc::PeerConnectionInterface::IceConnectionState new_state) override {
@@ -238,6 +252,40 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
candidate->sdp_mid(), candidate->sdp_mline_index(), ice_sdp);
}
+ // MediaStreamInterface callback
+ void OnChanged() override {
+ // Track added or removed from MediaStream, so update our renderers.
+ rtc::scoped_refptr<StreamCollectionInterface> remote_streams =
+ pc()->remote_streams();
+ // Remove renderers for tracks that were removed.
+ for (auto it = fake_video_renderers_.begin();
+ it != fake_video_renderers_.end();) {
+ if (remote_streams->FindVideoTrack(it->first) == nullptr) {
+ auto to_remove = it++;
+ removed_fake_video_renderers_.push_back(std::move(to_remove->second));
+ fake_video_renderers_.erase(to_remove);
+ } else {
+ ++it;
+ }
+ }
+ // Create renderers for new video tracks.
+ for (size_t stream_index = 0; stream_index < remote_streams->count();
+ ++stream_index) {
+ MediaStreamInterface* remote_stream = remote_streams->at(stream_index);
+ for (size_t track_index = 0;
+ track_index < remote_stream->GetVideoTracks().size();
+ ++track_index) {
+ const std::string id =
+ remote_stream->GetVideoTracks()[track_index]->id();
+ if (fake_video_renderers_.find(id) != fake_video_renderers_.end()) {
+ continue;
+ }
+ fake_video_renderers_[id].reset(new webrtc::FakeVideoTrackRenderer(
+ remote_stream->GetVideoTracks()[track_index]));
+ }
+ }
+ }
+
void SetVideoConstraints(const webrtc::FakeConstraints& video_constraint) {
video_constraints_ = video_constraint;
}
@@ -246,22 +294,11 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
std::string stream_label =
kStreamLabelBase +
rtc::ToString<int>(static_cast<int>(pc()->local_streams()->count()));
- rtc::scoped_refptr<webrtc::MediaStreamInterface> stream =
+ rtc::scoped_refptr<MediaStreamInterface> stream =
peer_connection_factory_->CreateLocalMediaStream(stream_label);
if (audio && can_receive_audio()) {
- FakeConstraints constraints;
- // Disable highpass filter so that we can get all the test audio frames.
- constraints.AddMandatory(
- MediaConstraintsInterface::kHighpassFilter, false);
- rtc::scoped_refptr<webrtc::AudioSourceInterface> source =
- peer_connection_factory_->CreateAudioSource(&constraints);
- // TODO(perkj): Test audio source when it is implemented. Currently audio
- // always use the default input.
- std::string label = stream_label + kAudioTrackLabelBase;
- rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- peer_connection_factory_->CreateAudioTrack(label, source));
- stream->AddTrack(audio_track);
+ stream->AddTrack(CreateLocalAudioTrack(stream_label));
}
if (video && can_receive_video()) {
stream->AddTrack(CreateLocalVideoTrack(stream_label));
@@ -276,6 +313,12 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return pc()->signaling_state() == webrtc::PeerConnectionInterface::kStable;
}
+ // Automatically add a stream when receiving an offer, if we don't have one.
+ // Defaults to true.
+ void set_auto_add_stream(bool auto_add_stream) {
+ auto_add_stream_ = auto_add_stream;
+ }
+
void set_signaling_message_receiver(
SignalingMessageReceiver* signaling_message_receiver) {
signaling_message_receiver_ = signaling_message_receiver;
@@ -357,6 +400,35 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
data_observer_.reset(new MockDataChannelObserver(data_channel_));
}
+ rtc::scoped_refptr<webrtc::AudioTrackInterface> CreateLocalAudioTrack(
+ const std::string& stream_label) {
+ FakeConstraints constraints;
+ // Disable highpass filter so that we can get all the test audio frames.
+ constraints.AddMandatory(MediaConstraintsInterface::kHighpassFilter, false);
+ rtc::scoped_refptr<webrtc::AudioSourceInterface> source =
+ peer_connection_factory_->CreateAudioSource(&constraints);
+ // TODO(perkj): Test audio source when it is implemented. Currently audio
+    // always uses the default input.
+ std::string label = stream_label + kAudioTrackLabelBase;
+ return peer_connection_factory_->CreateAudioTrack(label, source);
+ }
+
+ rtc::scoped_refptr<webrtc::VideoTrackInterface> CreateLocalVideoTrack(
+ const std::string& stream_label) {
+    // Set max frame rate to 10fps to reduce the risk of the tests being flaky.
+ FakeConstraints source_constraints = video_constraints_;
+ source_constraints.SetMandatoryMaxFrameRate(10);
+
+ cricket::FakeVideoCapturer* fake_capturer =
+ new webrtc::FakePeriodicVideoCapturer();
+ video_capturers_.push_back(fake_capturer);
+ rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
+ peer_connection_factory_->CreateVideoSource(fake_capturer,
+ &source_constraints);
+ std::string label = stream_label + kVideoTrackLabelBase;
+ return peer_connection_factory_->CreateVideoTrack(label, source);
+ }
+
DataChannelInterface* data_channel() { return data_channel_; }
const MockDataChannelObserver* data_observer() const {
return data_observer_.get();
@@ -376,6 +448,10 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= fake_audio_capture_module_->frames_received();
}
+ int audio_frames_received() const {
+ return fake_audio_capture_module_->frames_received();
+ }
+
bool VideoFramesReceivedCheck(int number_of_frames) {
if (video_decoder_factory_enabled_) {
const std::vector<FakeWebRtcVideoDecoder*>& decoders
@@ -384,9 +460,8 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= 0;
}
- for (std::vector<FakeWebRtcVideoDecoder*>::const_iterator
- it = decoders.begin(); it != decoders.end(); ++it) {
- if (number_of_frames > (*it)->GetNumFramesReceived()) {
+ for (FakeWebRtcVideoDecoder* decoder : decoders) {
+ if (number_of_frames > decoder->GetNumFramesReceived()) {
return false;
}
}
@@ -396,9 +471,8 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
return number_of_frames <= 0;
}
- for (RenderMap::const_iterator it = fake_video_renderers_.begin();
- it != fake_video_renderers_.end(); ++it) {
- if (number_of_frames > it->second->num_rendered_frames()) {
+ for (const auto& pair : fake_video_renderers_) {
+ if (number_of_frames > pair.second->num_rendered_frames()) {
return false;
}
}
@@ -406,6 +480,25 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
}
}
+ int video_frames_received() const {
+ int total = 0;
+ if (video_decoder_factory_enabled_) {
+ const std::vector<FakeWebRtcVideoDecoder*>& decoders =
+ fake_video_decoder_factory_->decoders();
+ for (const FakeWebRtcVideoDecoder* decoder : decoders) {
+ total += decoder->GetNumFramesReceived();
+ }
+ } else {
+ for (const auto& pair : fake_video_renderers_) {
+ total += pair.second->num_rendered_frames();
+ }
+ for (const auto& renderer : removed_fake_video_renderers_) {
+ total += renderer->num_rendered_frames();
+ }
+ }
+ return total;
+ }
+
// Verify the CreateDtmfSender interface
void VerifyDtmf() {
rtc::scoped_ptr<DummyDtmfObserver> observer(new DummyDtmfObserver());
@@ -641,14 +734,14 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
explicit PeerConnectionTestClient(const std::string& id) : id_(id) {}
- bool Init(const MediaConstraintsInterface* constraints,
- const PeerConnectionFactory::Options* options) {
+ bool Init(
+ const MediaConstraintsInterface* constraints,
+ const PeerConnectionFactory::Options* options,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
EXPECT_TRUE(!peer_connection_);
EXPECT_TRUE(!peer_connection_factory_);
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
- if (!allocator_factory_) {
- return false;
- }
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
fake_audio_capture_module_ = FakeAudioCaptureModule::Create();
if (fake_audio_capture_module_ == nullptr) {
@@ -666,46 +759,29 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
if (options) {
peer_connection_factory_->SetOptions(*options);
}
- peer_connection_ = CreatePeerConnection(allocator_factory_.get(),
- constraints);
+ peer_connection_ = CreatePeerConnection(
+ std::move(port_allocator), constraints, std::move(dtls_identity_store));
return peer_connection_.get() != nullptr;
}
- rtc::scoped_refptr<webrtc::VideoTrackInterface>
- CreateLocalVideoTrack(const std::string stream_label) {
- // Set max frame rate to 10fps to reduce the risk of the tests to be flaky.
- FakeConstraints source_constraints = video_constraints_;
- source_constraints.SetMandatoryMaxFrameRate(10);
-
- cricket::FakeVideoCapturer* fake_capturer =
- new webrtc::FakePeriodicVideoCapturer();
- video_capturers_.push_back(fake_capturer);
- rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
- peer_connection_factory_->CreateVideoSource(
- fake_capturer, &source_constraints);
- std::string label = stream_label + kVideoTrackLabelBase;
- return peer_connection_factory_->CreateVideoTrack(label, source);
- }
-
rtc::scoped_refptr<webrtc::PeerConnectionInterface> CreatePeerConnection(
- webrtc::PortAllocatorFactoryInterface* factory,
- const MediaConstraintsInterface* constraints) {
- // CreatePeerConnection with IceServers.
- webrtc::PeerConnectionInterface::IceServers ice_servers;
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store) {
+ // CreatePeerConnection with RTCConfiguration.
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
webrtc::PeerConnectionInterface::IceServer ice_server;
ice_server.uri = "stun:stun.l.google.com:19302";
- ice_servers.push_back(ice_server);
+ config.servers.push_back(ice_server);
- rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store(
- rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
- : nullptr);
return peer_connection_factory_->CreatePeerConnection(
- ice_servers, constraints, factory, dtls_identity_store.Pass(), this);
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), this);
}
void HandleIncomingOffer(const std::string& msg) {
LOG(INFO) << id_ << "HandleIncomingOffer ";
- if (NumberOfLocalMediaStreams() == 0) {
+ if (NumberOfLocalMediaStreams() == 0 && auto_add_stream_) {
// If we are not sending any streams ourselves it is time to add some.
AddMediaStream(true, true);
}
@@ -807,20 +883,24 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
std::string id_;
- rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface> allocator_factory_;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
peer_connection_factory_;
+ bool auto_add_stream_ = true;
+
typedef std::pair<std::string, std::string> IceUfragPwdPair;
std::map<int, IceUfragPwdPair> ice_ufrag_pwd_;
bool expect_ice_restart_ = false;
- // Needed to keep track of number of frames send.
+ // Needed to keep track of number of frames sent.
rtc::scoped_refptr<FakeAudioCaptureModule> fake_audio_capture_module_;
// Needed to keep track of number of frames received.
- typedef std::map<std::string, webrtc::FakeVideoTrackRenderer*> RenderMap;
- RenderMap fake_video_renderers_;
+ std::map<std::string, rtc::scoped_ptr<webrtc::FakeVideoTrackRenderer>>
+ fake_video_renderers_;
+ // Needed to ensure frames aren't received for removed tracks.
+ std::vector<rtc::scoped_ptr<webrtc::FakeVideoTrackRenderer>>
+ removed_fake_video_renderers_;
// Needed to keep track of number of frames received when external decoder
// used.
FakeWebRtcVideoDecoderFactory* fake_video_decoder_factory_ = nullptr;
@@ -846,11 +926,9 @@ class PeerConnectionTestClient : public webrtc::PeerConnectionObserver,
rtc::scoped_ptr<MockDataChannelObserver> data_observer_;
};
-// TODO(deadbeef): Rename this to P2PTestConductor once the Linux memcheck and
-// Windows DrMemory Full bots' blacklists are updated.
-class JsepPeerConnectionP2PTestClient : public testing::Test {
+class P2PTestConductor : public testing::Test {
public:
- JsepPeerConnectionP2PTestClient()
+ P2PTestConductor()
: pss_(new rtc::PhysicalSocketServer),
ss_(new rtc::VirtualSocketServer(pss_.get())),
ss_scope_(ss_.get()) {}
@@ -882,13 +960,26 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
}
void TestUpdateOfferWithRejectedContent() {
+ // Renegotiate, rejecting the video m-line.
initiating_client_->Negotiate(true, false);
- EXPECT_TRUE_WAIT(
- FramesNotPending(kEndAudioFrameCount * 2, kEndVideoFrameCount),
- kMaxWaitForFramesMs);
- // There shouldn't be any more video frame after the new offer is
- // negotiated.
- EXPECT_FALSE(VideoFramesReceivedCheck(kEndVideoFrameCount + 1));
+ ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
+
+ int pc1_audio_received = initiating_client_->audio_frames_received();
+ int pc1_video_received = initiating_client_->video_frames_received();
+ int pc2_audio_received = receiving_client_->audio_frames_received();
+ int pc2_video_received = receiving_client_->video_frames_received();
+
+ // Wait for some additional audio frames to be received.
+ EXPECT_TRUE_WAIT(initiating_client_->AudioFramesReceivedCheck(
+ pc1_audio_received + kEndAudioFrameCount) &&
+ receiving_client_->AudioFramesReceivedCheck(
+ pc2_audio_received + kEndAudioFrameCount),
+ kMaxWaitForFramesMs);
+
+ // During this time, we shouldn't have received any additional video frames
+ // for the rejected video tracks.
+ EXPECT_EQ(pc1_video_received, initiating_client_->video_frames_received());
+ EXPECT_EQ(pc2_video_received, receiving_client_->video_frames_received());
}
void VerifyRenderedSize(int width, int height) {
@@ -905,7 +996,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
receiving_client_->VerifyLocalIceUfragAndPassword();
}
- ~JsepPeerConnectionP2PTestClient() {
+ ~P2PTestConductor() {
if (initiating_client_) {
initiating_client_->set_signaling_message_receiver(nullptr);
}
@@ -922,6 +1013,11 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
nullptr);
}
+ void SetSignalingReceivers() {
+ initiating_client_->set_signaling_message_receiver(receiving_client_.get());
+ receiving_client_->set_signaling_message_receiver(initiating_client_.get());
+ }
+
bool CreateTestClients(MediaConstraintsInterface* init_constraints,
PeerConnectionFactory::Options* init_options,
MediaConstraintsInterface* recv_constraints,
@@ -933,8 +1029,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
if (!initiating_client_ || !receiving_client_) {
return false;
}
- initiating_client_->set_signaling_message_receiver(receiving_client_.get());
- receiving_client_->set_signaling_message_receiver(initiating_client_.get());
+ SetSignalingReceivers();
return true;
}
@@ -957,13 +1052,11 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
initiating_client_->AddMediaStream(true, true);
}
initiating_client_->Negotiate();
- const int kMaxWaitForActivationMs = 5000;
// ASSERT_TRUE is used here since the following checks are guaranteed to
// fail and would eat up 5 seconds.
ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
VerifySessionDescriptions();
-
int audio_frame_count = kEndAudioFrameCount;
// TODO(ronghuawu): Add test to cover the case of sendonly and recvonly.
if (!initiating_client_->can_receive_audio() ||
@@ -1013,6 +1106,32 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
kMaxWaitForFramesMs);
}
+ void SetupAndVerifyDtlsCall() {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ FakeConstraints setup_constraints;
+ setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+ ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
+ }
+
+ PeerConnectionTestClient* CreateDtlsClientWithAlternateKey() {
+ FakeConstraints setup_constraints;
+ setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+
+ rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
+ rtc::SSLStreamAdapter::HaveDtlsSrtp() ? new FakeDtlsIdentityStore()
+ : nullptr);
+ dtls_identity_store->use_alternate_key();
+
+ // Make sure the new client is using a different certificate.
+ return PeerConnectionTestClient::CreateClientWithDtlsIdentityStore(
+ "New Peer: ", &setup_constraints, nullptr,
+ std::move(dtls_identity_store));
+ }
+
void SendRtpData(webrtc::DataChannelInterface* dc, const std::string& data) {
// Messages may get lost on the unreliable DataChannel, so we send multiple
// times to avoid test flakiness.
@@ -1026,10 +1145,29 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
PeerConnectionTestClient* initializing_client() {
return initiating_client_.get();
}
+
+ // Set the |initiating_client_| to the |client| passed in and return the
+ // original |initiating_client_|.
+ PeerConnectionTestClient* set_initializing_client(
+ PeerConnectionTestClient* client) {
+ PeerConnectionTestClient* old = initiating_client_.release();
+ initiating_client_.reset(client);
+ return old;
+ }
+
PeerConnectionTestClient* receiving_client() {
return receiving_client_.get();
}
+ // Set the |receiving_client_| to the |client| passed in and return the
+ // original |receiving_client_|.
+ PeerConnectionTestClient* set_receiving_client(
+ PeerConnectionTestClient* client) {
+ PeerConnectionTestClient* old = receiving_client_.release();
+ receiving_client_.reset(client);
+ return old;
+ }
+
private:
rtc::scoped_ptr<rtc::PhysicalSocketServer> pss_;
rtc::scoped_ptr<rtc::VirtualSocketServer> ss_;
@@ -1045,7 +1183,7 @@ class JsepPeerConnectionP2PTestClient : public testing::Test {
// This test sets up a Jsep call between two parties and tests DTMF.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestDtmf) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
VerifyDtmf();
@@ -1053,7 +1191,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestDtmf) {
// This test sets up a Jsep call between two parties and tests that we can get
// a video aspect ratio of 16:9.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
+TEST_F(P2PTestConductor, LocalP2PTest16To9) {
ASSERT_TRUE(CreateTestClients());
FakeConstraints constraint;
double requested_ratio = 640.0/360;
@@ -1078,7 +1216,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTest16To9) {
// received video has a resolution of 1280*720.
// TODO(mallinath): Enable when
// http://code.google.com/p/webrtc/issues/detail?id=981 is fixed.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTest1280By720) {
ASSERT_TRUE(CreateTestClients());
FakeConstraints constraint;
constraint.SetMandatoryMinWidth(1280);
@@ -1090,34 +1228,84 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTest1280By720) {
// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. As a result, DTLS is negotiated and used for transport.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtls) {
+TEST_F(P2PTestConductor, LocalP2PTestDtls) {
+ SetupAndVerifyDtlsCall();
+}
+
+// This test sets up an audio call initially and then upgrades to audio/video,
+// using DTLS.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsRenegotiate) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+ receiving_client()->SetReceiveAudioVideo(true, false);
+ LocalP2PTest();
+ receiving_client()->SetReceiveAudioVideo(true, true);
+ receiving_client()->Negotiate();
+}
+
+// This test sets up a call transfer to a new caller with a different DTLS
+// fingerprint.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsTransferCallee) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ SetupAndVerifyDtlsCall();
+
+ // Keep the original peer around; it will still send packets to the
+ // receiving client. These SRTP packets will be dropped.
+ rtc::scoped_ptr<PeerConnectionTestClient> original_peer(
+ set_initializing_client(CreateDtlsClientWithAlternateKey()));
+ original_peer->pc()->Close();
+
+ SetSignalingReceivers();
+ receiving_client()->SetExpectIceRestart(true);
LocalP2PTest();
VerifyRenderedSize(640, 480);
}
-// This test sets up a audio call initially and then upgrades to audio/video,
-// using DTLS.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDtlsRenegotiate) {
+// This test sets up a non-bundle call and applies bundle during ICE restart.
+// When bundle takes effect in the restart, the channel can successfully reset
+// its DTLS-SRTP context.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsBundleInIceRestart) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
true);
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
- receiving_client()->SetReceiveAudioVideo(true, false);
+ receiving_client()->RemoveBundleFromReceivedSdp(true);
LocalP2PTest();
- receiving_client()->SetReceiveAudioVideo(true, true);
- receiving_client()->Negotiate();
+ VerifyRenderedSize(640, 480);
+
+ initializing_client()->IceRestart();
+ receiving_client()->SetExpectIceRestart(true);
+ receiving_client()->RemoveBundleFromReceivedSdp(false);
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
+}
+
+// This test sets up a call transfer to a new callee with a different DTLS
+// fingerprint.
+TEST_F(P2PTestConductor, LocalP2PTestDtlsTransferCaller) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+ SetupAndVerifyDtlsCall();
+
+ // Keep the original peer around; it will still send packets to the
+ // receiving client. These SRTP packets will be dropped.
+ rtc::scoped_ptr<PeerConnectionTestClient> original_peer(
+ set_receiving_client(CreateDtlsClientWithAlternateKey()));
+ original_peer->pc()->Close();
+
+ SetSignalingReceivers();
+ initializing_client()->IceRestart();
+ LocalP2PTest();
+ VerifyRenderedSize(640, 480);
}
// This test sets up a call between two endpoints that are configured to use
// DTLS key agreement. The offerer doesn't support SDES. As a result, DTLS is
// negotiated and used for transport.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
+TEST_F(P2PTestConductor, LocalP2PTestOfferDtlsButNotSdes) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints setup_constraints;
setup_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
@@ -1130,7 +1318,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestOfferDtlsButNotSdes) {
// This test sets up a Jsep call between two parties, and the callee only
// accepts receiving video.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerVideo) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(false, true);
LocalP2PTest();
@@ -1138,7 +1326,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerVideo) {
// This test sets up a Jsep call between two parties, and the callee only
// accepts receiving audio.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerAudio) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(true, false);
LocalP2PTest();
@@ -1146,7 +1334,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerAudio) {
// This test sets up a Jsep call between two parties, and the callee rejects
// both audio and video.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
+TEST_F(P2PTestConductor, LocalP2PTestAnswerNone) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->SetReceiveAudioVideo(false, false);
LocalP2PTest();
@@ -1156,9 +1344,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestAnswerNone) {
// runs for a while (10 frames), the caller sends an updated offer with video
// being rejected. Once the re-negotiation is done, the video flow should stop
// and the audio flow should continue.
-// Disabled due to b/14955157.
-TEST_F(JsepPeerConnectionP2PTestClient,
- DISABLED_UpdateOfferWithRejectedContent) {
+TEST_F(P2PTestConductor, UpdateOfferWithRejectedContent) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
TestUpdateOfferWithRejectedContent();
@@ -1166,8 +1352,7 @@ TEST_F(JsepPeerConnectionP2PTestClient,
// This test sets up a Jsep call between two parties. The MSID is removed from
// the SDP strings sent by the caller.
-// Disabled due to b/14955157.
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestWithoutMsid) {
+TEST_F(P2PTestConductor, LocalP2PTestWithoutMsid) {
ASSERT_TRUE(CreateTestClients());
receiving_client()->RemoveMsidFromReceivedSdp(true);
// TODO(perkj): Currently there is a bug that causes audio to stop playing if
@@ -1182,7 +1367,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestWithoutMsid) {
// sends two streams.
// TODO(perkj): Disabled due to
// https://code.google.com/p/webrtc/issues/detail?id=1454
-TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestTwoStreams) {
ASSERT_TRUE(CreateTestClients());
// Set optional video constraint to max 320 pixels to decrease CPU usage.
FakeConstraints constraint;
@@ -1196,7 +1381,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, DISABLED_LocalP2PTestTwoStreams) {
}
// Test that we can receive the audio output level from a remote audio track.
-TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
+TEST_F(P2PTestConductor, GetAudioOutputLevelStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1215,7 +1400,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetAudioOutputLevelStats) {
}
// Test that an audio input level is reported.
-TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
+TEST_F(P2PTestConductor, GetAudioInputLevelStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1226,7 +1411,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetAudioInputLevelStats) {
}
// Test that we can get incoming byte counts from both audio and video tracks.
-TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
+TEST_F(P2PTestConductor, GetBytesReceivedStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1248,7 +1433,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetBytesReceivedStats) {
}
// Test that we can get outgoing byte counts from both audio and video tracks.
-TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
+TEST_F(P2PTestConductor, GetBytesSentStats) {
ASSERT_TRUE(CreateTestClients());
LocalP2PTest();
@@ -1270,7 +1455,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetBytesSentStats) {
}
// Test that DTLS 1.0 is used if both sides only support DTLS 1.0.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
+TEST_F(P2PTestConductor, GetDtls12None) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
PeerConnectionFactory::Options recv_options;
@@ -1282,7 +1467,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1292,16 +1477,23 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12None) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
+#if defined(MEMORY_SANITIZER)
+// Fails under MemorySanitizer:
+// See https://code.google.com/p/webrtc/issues/detail?id=5381.
+#define MAYBE_GetDtls12Both DISABLED_GetDtls12Both
+#else
+#define MAYBE_GetDtls12Both GetDtls12Both
+#endif
// Test that DTLS 1.2 is used if both ends support it.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
+TEST_F(P2PTestConductor, MAYBE_GetDtls12Both) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
PeerConnectionFactory::Options recv_options;
@@ -1313,7 +1505,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_12, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1323,17 +1515,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Both) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_12, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
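The MAYBE_ guard used for GetDtls12Both above is the stock gtest idiom for
disabling a test under one build configuration while keeping a single TEST_F
definition; the DISABLED_ prefix makes gtest register the test but skip it by
default. A sketch with a hypothetical test name:

  #if defined(MEMORY_SANITIZER)
  #define MAYBE_MyFeatureTest DISABLED_MyFeatureTest
  #else
  #define MAYBE_MyFeatureTest MyFeatureTest
  #endif
  // Runs everywhere except under MSan, where it is registered but skipped.
  TEST_F(P2PTestConductor, MAYBE_MyFeatureTest) { /* ... */ }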
// Test that DTLS 1.0 is used if the initiator supports DTLS 1.2 and the
// receiver supports 1.0.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
+TEST_F(P2PTestConductor, GetDtls12Init) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
PeerConnectionFactory::Options recv_options;
@@ -1345,7 +1537,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1355,17 +1547,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Init) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
// Test that DTLS 1.0 is used if the initiator supports DTLS 1.0 and the
// receiver supports 1.2.
-TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
+TEST_F(P2PTestConductor, GetDtls12Recv) {
PeerConnectionFactory::Options init_options;
init_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10;
PeerConnectionFactory::Options recv_options;
@@ -1377,7 +1569,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
initializing_client()->pc()->RegisterUMAObserver(init_observer);
LocalP2PTest();
- EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ EXPECT_EQ_WAIT(rtc::SSLStreamAdapter::SslCipherSuiteToName(
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)),
initializing_client()->GetDtlsCipherStats(),
@@ -1387,16 +1579,17 @@ TEST_F(JsepPeerConnectionP2PTestClient, GetDtls12Recv) {
rtc::SSLStreamAdapter::GetDefaultSslCipherForTest(
rtc::SSL_PROTOCOL_DTLS_10, rtc::KT_DEFAULT)));
- EXPECT_EQ_WAIT(kDefaultSrtpCipher,
+ EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite),
initializing_client()->GetSrtpCipherStats(),
kMaxWaitForStatsMs);
- EXPECT_EQ(1, init_observer->GetEnumCounter(
- webrtc::kEnumCounterAudioSrtpCipher,
- rtc::GetSrtpCryptoSuiteFromName(kDefaultSrtpCipher)));
+ EXPECT_EQ(1,
+ init_observer->GetEnumCounter(webrtc::kEnumCounterAudioSrtpCipher,
+ kDefaultSrtpCryptoSuite));
}
-// This test sets up a call between two parties with audio, video and data.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
+// This test sets up a call between two parties with audio, video and an RTP
+// data channel.
+TEST_F(P2PTestConductor, LocalP2PTestRtpDataChannel) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1426,6 +1619,34 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
EXPECT_FALSE(receiving_client()->data_observer()->IsOpen());
}
+// This test sets up a call between two parties with audio, video and an SCTP
+// data channel.
+TEST_F(P2PTestConductor, LocalP2PTestSctpDataChannel) {
+ ASSERT_TRUE(CreateTestClients());
+ initializing_client()->CreateDataChannel();
+ LocalP2PTest();
+ ASSERT_TRUE(initializing_client()->data_channel() != nullptr);
+ EXPECT_TRUE_WAIT(receiving_client()->data_channel() != nullptr, kMaxWaitMs);
+ EXPECT_TRUE_WAIT(initializing_client()->data_observer()->IsOpen(),
+ kMaxWaitMs);
+ EXPECT_TRUE_WAIT(receiving_client()->data_observer()->IsOpen(), kMaxWaitMs);
+
+ std::string data = "hello world";
+
+ initializing_client()->data_channel()->Send(DataBuffer(data));
+ EXPECT_EQ_WAIT(data, receiving_client()->data_observer()->last_message(),
+ kMaxWaitMs);
+
+ receiving_client()->data_channel()->Send(DataBuffer(data));
+ EXPECT_EQ_WAIT(data, initializing_client()->data_observer()->last_message(),
+ kMaxWaitMs);
+
+ receiving_client()->data_channel()->Close();
+ EXPECT_TRUE_WAIT(!initializing_client()->data_observer()->IsOpen(),
+ kMaxWaitMs);
+ EXPECT_TRUE_WAIT(!receiving_client()->data_observer()->IsOpen(), kMaxWaitMs);
+}
+
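Unlike the RTP test above, the SCTP variant needs no SetAllowRtpDataChannels()
constraint: when DTLS is available, CreateDataChannel() on a PeerConnection
produces an SCTP channel by default. A minimal sketch, assuming the standard
CreateDataChannel(label, config) signature:

  webrtc::DataChannelInit init;  // defaults: ordered and reliable
  rtc::scoped_refptr<webrtc::DataChannelInterface> dc(
      pc->CreateDataChannel("data", &init));
  // Once the channel reports open, payloads can be sent as DataBuffers.
  dc->Send(webrtc::DataBuffer(std::string("hello world")));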
// This test sets up a call between two parties and creates a data channel.
// The test verifies that received data is buffered unless an observer has
// been registered.
@@ -1433,7 +1654,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestDataChannel) {
// transport has detected that a channel is writable and thus data can be
// received before the data channel state changes to open. That is hard to test
// but the same buffering is used in that case.
-TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
+TEST_F(P2PTestConductor, RegisterDataChannelObserver) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1463,7 +1684,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, RegisterDataChannelObserver) {
// This test sets up a call between two parties with audio and video, but only
// the initiating client supports data.
-TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
+TEST_F(P2PTestConductor, LocalP2PTestReceiverDoesntSupportData) {
FakeConstraints setup_constraints_1;
setup_constraints_1.SetAllowRtpDataChannels();
// Must disable DTLS to make negotiation succeed.
@@ -1482,7 +1703,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, LocalP2PTestReceiverDoesntSupportData) {
// This test sets up a call between two parties with audio and video. Once
// audio and video are set up and flowing, a data channel is negotiated.
-TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
+TEST_F(P2PTestConductor, AddDataChannelAfterRenegotiation) {
FakeConstraints setup_constraints;
setup_constraints.SetAllowRtpDataChannels();
ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
@@ -1501,7 +1722,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, AddDataChannelAfterRenegotiation) {
// This test sets up a Jsep call with an SCTP DataChannel and verifies the
// negotiation is completed without error.
#ifdef HAVE_SCTP
-TEST_F(JsepPeerConnectionP2PTestClient, CreateOfferWithSctpDataChannel) {
+TEST_F(P2PTestConductor, CreateOfferWithSctpDataChannel) {
MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
FakeConstraints constraints;
constraints.SetMandatory(
@@ -1515,7 +1736,7 @@ TEST_F(JsepPeerConnectionP2PTestClient, CreateOfferWithSctpDataChannel) {
// This test sets up a call between two parties with audio and video.
// During the call, the initializing side restarts ICE, and the test verifies
// that new ICE candidates are generated and audio and video can still flow.
-TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
+TEST_F(P2PTestConductor, IceRestart) {
ASSERT_TRUE(CreateTestClients());
// Negotiate and wait for ice completion and make sure audio and video plays.
@@ -1562,17 +1783,69 @@ TEST_F(JsepPeerConnectionP2PTestClient, IceRestart) {
EXPECT_NE(receiver_candidate, receiver_candidate_restart);
}
+// This test sets up a call between two parties with audio and video.
+// It then renegotiates, setting the video m-line to "port 0", and later
+// renegotiates again, enabling video.
+TEST_F(P2PTestConductor, LocalP2PTestVideoDisableEnable) {
+ ASSERT_TRUE(CreateTestClients());
+
+ // Do initial negotiation. Will result in video and audio sendonly m-lines.
+ receiving_client()->set_auto_add_stream(false);
+ initializing_client()->AddMediaStream(true, true);
+ initializing_client()->Negotiate();
+
+ // Negotiate again, disabling the video m-line (receiving client will
+ // set port to 0 due to mandatory "OfferToReceiveVideo: false" constraint).
+ receiving_client()->SetReceiveVideo(false);
+ initializing_client()->Negotiate();
+
+ // Enable video and do negotiation again, making sure video is received
+ // end-to-end.
+ receiving_client()->SetReceiveVideo(true);
+ receiving_client()->AddMediaStream(true, true);
+ LocalP2PTest();
+}
+
// This test sets up a Jsep call between two parties with external
// VideoDecoderFactory.
// TODO(holmer): Disabled due to sometimes crashing on buildbots.
// See issue webrtc/2378.
-TEST_F(JsepPeerConnectionP2PTestClient,
- DISABLED_LocalP2PTestWithVideoDecoderFactory) {
+TEST_F(P2PTestConductor, DISABLED_LocalP2PTestWithVideoDecoderFactory) {
ASSERT_TRUE(CreateTestClients());
EnableVideoDecoderFactory();
LocalP2PTest();
}
+// This tests that if we negotiate after calling CreateSender but before we
+// have a track, then set a track later, frames from the newly-set track are
+// received end-to-end.
+TEST_F(P2PTestConductor, EarlyWarmupTest) {
+ ASSERT_TRUE(CreateTestClients());
+ auto audio_sender =
+ initializing_client()->pc()->CreateSender("audio", "stream_id");
+ auto video_sender =
+ initializing_client()->pc()->CreateSender("video", "stream_id");
+ initializing_client()->Negotiate();
+ // Wait for ICE connection to complete, without any tracks.
+ // Note that the receiving client WILL (in HandleIncomingOffer) create
+ // tracks, so it's only the initiator here that's doing early warmup.
+ ASSERT_TRUE_WAIT(SessionActive(), kMaxWaitForActivationMs);
+ VerifySessionDescriptions();
+ EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionCompleted,
+ initializing_client()->ice_connection_state(),
+ kMaxWaitForFramesMs);
+ EXPECT_EQ_WAIT(webrtc::PeerConnectionInterface::kIceConnectionConnected,
+ receiving_client()->ice_connection_state(),
+ kMaxWaitForFramesMs);
+ // Now set the tracks, and expect frames to immediately start flowing.
+ EXPECT_TRUE(
+ audio_sender->SetTrack(initializing_client()->CreateLocalAudioTrack("")));
+ EXPECT_TRUE(
+ video_sender->SetTrack(initializing_client()->CreateLocalVideoTrack("")));
+ EXPECT_TRUE_WAIT(FramesNotPending(kEndAudioFrameCount, kEndVideoFrameCount),
+ kMaxWaitForFramesMs);
+}
+
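The early-warmup flow exercised above leans on the new RtpSender surface:
CreateSender() yields a sender whose m-line is negotiated with no track
attached, and SetTrack() later attaches media without another offer/answer
round. A condensed sketch of just that sequence (the "audio" kind string and
ids mirror the test; the null source is an assumption of these test helpers):

  auto sender = pc->CreateSender("audio", "stream_id");
  // ... negotiate and wait for the ICE connection to complete ...
  rtc::scoped_refptr<webrtc::AudioTrackInterface> track(
      factory->CreateAudioTrack("audio_label", nullptr /* source */));
  sender->SetTrack(track);  // frames start flowing without renegotiation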
class IceServerParsingTest : public testing::Test {
public:
// Convenience for parsing a single URL.
@@ -1589,38 +1862,37 @@ class IceServerParsingTest : public testing::Test {
server.username = username;
server.password = password;
servers.push_back(server);
- return webrtc::ParseIceServers(servers, &stun_configurations_,
- &turn_configurations_);
+ return webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_);
}
protected:
- webrtc::StunConfigurations stun_configurations_;
- webrtc::TurnConfigurations turn_configurations_;
+ cricket::ServerAddresses stun_servers_;
+ std::vector<cricket::RelayServerConfig> turn_servers_;
};
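The fixture above drives webrtc::ParseIceServers directly with the new output
types. A standalone sketch of the same call, using a hypothetical TURN host:

  webrtc::PeerConnectionInterface::IceServers servers;
  webrtc::PeerConnectionInterface::IceServer server;
  server.uri = "turn:user@turn.example.org?transport=tcp";
  servers.push_back(server);
  cricket::ServerAddresses stun_servers;
  std::vector<cricket::RelayServerConfig> turn_servers;
  if (webrtc::ParseIceServers(servers, &stun_servers, &turn_servers)) {
    // Per the tests below, expect turn_servers[0].ports[0].proto to be
    // cricket::PROTO_TCP and turn_servers[0].credentials.username "user".
  }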
// Make sure all STUN/TURN prefixes are parsed correctly.
TEST_F(IceServerParsingTest, ParseStunPrefixes) {
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(0U, turn_configurations_.size());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(0U, turn_servers_.size());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stuns:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(0U, turn_configurations_.size());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(0U, turn_servers_.size());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("turn:hostname"));
- EXPECT_EQ(0U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_FALSE(turn_configurations_[0].secure);
- turn_configurations_.clear();
+ EXPECT_EQ(0U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_FALSE(turn_servers_[0].ports[0].secure);
+ turn_servers_.clear();
EXPECT_TRUE(ParseUrl("turns:hostname"));
- EXPECT_EQ(0U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_TRUE(turn_configurations_[0].secure);
- turn_configurations_.clear();
+ EXPECT_EQ(0U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_TRUE(turn_servers_[0].ports[0].secure);
+ turn_servers_.clear();
// Invalid prefixes.
EXPECT_FALSE(ParseUrl("stunn:hostname"));
@@ -1632,67 +1904,69 @@ TEST_F(IceServerParsingTest, ParseStunPrefixes) {
TEST_F(IceServerParsingTest, VerifyDefaults) {
// TURNS defaults
EXPECT_TRUE(ParseUrl("turns:hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ(5349, turn_configurations_[0].server.port());
- EXPECT_EQ("tcp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(5349, turn_servers_[0].ports[0].address.port());
+ EXPECT_EQ(cricket::PROTO_TCP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
// TURN defaults
EXPECT_TRUE(ParseUrl("turn:hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ(3478, turn_configurations_[0].server.port());
- EXPECT_EQ("udp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(3478, turn_servers_[0].ports[0].address.port());
+ EXPECT_EQ(cricket::PROTO_UDP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
// STUN defaults
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
}
// Check that the 6 combinations of IPv4/IPv6/hostname and with/without port
// can be parsed correctly.
TEST_F(IceServerParsingTest, ParseHostnameAndPort) {
EXPECT_TRUE(ParseUrl("stun:1.2.3.4:1234"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1.2.3.4", stun_configurations_[0].server.hostname());
- EXPECT_EQ(1234, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1.2.3.4", stun_servers_.begin()->hostname());
+ EXPECT_EQ(1234, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:[1:2:3:4:5:6:7:8]:4321"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1:2:3:4:5:6:7:8", stun_configurations_[0].server.hostname());
- EXPECT_EQ(4321, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1:2:3:4:5:6:7:8", stun_servers_.begin()->hostname());
+ EXPECT_EQ(4321, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:hostname:9999"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("hostname", stun_configurations_[0].server.hostname());
- EXPECT_EQ(9999, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("hostname", stun_servers_.begin()->hostname());
+ EXPECT_EQ(9999, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:1.2.3.4"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1.2.3.4", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1.2.3.4", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:[1:2:3:4:5:6:7:8]"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("1:2:3:4:5:6:7:8", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("1:2:3:4:5:6:7:8", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
EXPECT_TRUE(ParseUrl("stun:hostname"));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ("hostname", stun_configurations_[0].server.hostname());
- EXPECT_EQ(3478, stun_configurations_[0].server.port());
- stun_configurations_.clear();
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ("hostname", stun_servers_.begin()->hostname());
+ EXPECT_EQ(3478, stun_servers_.begin()->port());
+ stun_servers_.clear();
// Try some invalid hostname:port strings.
EXPECT_FALSE(ParseUrl("stun:hostname:99a99"));
EXPECT_FALSE(ParseUrl("stun:hostname:-1"));
+ EXPECT_FALSE(ParseUrl("stun:hostname:port:more"));
+ EXPECT_FALSE(ParseUrl("stun:hostname:port more"));
EXPECT_FALSE(ParseUrl("stun:hostname:"));
EXPECT_FALSE(ParseUrl("stun:[1:2:3:4:5:6:7:8]junk:1000"));
EXPECT_FALSE(ParseUrl("stun::5555"));
@@ -1702,14 +1976,14 @@ TEST_F(IceServerParsingTest, ParseHostnameAndPort) {
// Test parsing the "?transport=xxx" part of the URL.
TEST_F(IceServerParsingTest, ParseTransport) {
EXPECT_TRUE(ParseUrl("turn:hostname:1234?transport=tcp"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("tcp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(cricket::PROTO_TCP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
EXPECT_TRUE(ParseUrl("turn:hostname?transport=udp"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("udp", turn_configurations_[0].transport_type);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ(cricket::PROTO_UDP, turn_servers_[0].ports[0].proto);
+ turn_servers_.clear();
EXPECT_FALSE(ParseUrl("turn:hostname?transport=invalid"));
}
@@ -1717,9 +1991,9 @@ TEST_F(IceServerParsingTest, ParseTransport) {
// Test parsing ICE username contained in URL.
TEST_F(IceServerParsingTest, ParseUsername) {
EXPECT_TRUE(ParseUrl("turn:user@hostname"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("user", turn_configurations_[0].username);
- turn_configurations_.clear();
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ("user", turn_servers_[0].credentials.username);
+ turn_servers_.clear();
EXPECT_FALSE(ParseUrl("turn:@hostname"));
EXPECT_FALSE(ParseUrl("turn:username@"));
@@ -1728,12 +2002,12 @@ TEST_F(IceServerParsingTest, ParseUsername) {
}
// Test that username and password from IceServer are copied into the resulting
-// TurnConfiguration.
+// RelayServerConfig.
TEST_F(IceServerParsingTest, CopyUsernameAndPasswordFromIceServer) {
EXPECT_TRUE(ParseUrl("turn:hostname", "username", "password"));
- EXPECT_EQ(1U, turn_configurations_.size());
- EXPECT_EQ("username", turn_configurations_[0].username);
- EXPECT_EQ("password", turn_configurations_[0].password);
+ EXPECT_EQ(1U, turn_servers_.size());
+ EXPECT_EQ("username", turn_servers_[0].credentials.username);
+ EXPECT_EQ("password", turn_servers_[0].credentials.password);
}
// Ensure that if a server has multiple URLs, each one is parsed.
@@ -1743,10 +2017,22 @@ TEST_F(IceServerParsingTest, ParseMultipleUrls) {
server.urls.push_back("stun:hostname");
server.urls.push_back("turn:hostname");
servers.push_back(server);
- EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_configurations_,
- &turn_configurations_));
- EXPECT_EQ(1U, stun_configurations_.size());
- EXPECT_EQ(1U, turn_configurations_.size());
+ EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_));
+ EXPECT_EQ(1U, stun_servers_.size());
+ EXPECT_EQ(1U, turn_servers_.size());
+}
+
+// Ensure that TURN servers are given unique priorities,
+// so that their resulting candidates have unique priorities.
+TEST_F(IceServerParsingTest, TurnServerPrioritiesUnique) {
+ PeerConnectionInterface::IceServers servers;
+ PeerConnectionInterface::IceServer server;
+ server.urls.push_back("turn:hostname");
+ server.urls.push_back("turn:hostname2");
+ servers.push_back(server);
+ EXPECT_TRUE(webrtc::ParseIceServers(servers, &stun_servers_, &turn_servers_));
+ EXPECT_EQ(2U, turn_servers_.size());
+ EXPECT_NE(turn_servers_[0].priority, turn_servers_[1].priority);
}
#endif // if !defined(THREAD_SANITIZER)
diff --git a/talk/app/webrtc/peerconnectionendtoend_unittest.cc b/talk/app/webrtc/peerconnectionendtoend_unittest.cc
index eacedd4eea..1a180317ac 100644
--- a/talk/app/webrtc/peerconnectionendtoend_unittest.cc
+++ b/talk/app/webrtc/peerconnectionendtoend_unittest.cc
@@ -27,6 +27,9 @@
#include "talk/app/webrtc/test/peerconnectiontestwrapper.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "webrtc/base/gunit.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/ssladapter.h"
@@ -50,56 +53,6 @@ namespace {
const size_t kMaxWait = 10000;
-void RemoveLinesFromSdp(const std::string& line_start,
- std::string* sdp) {
- const char kSdpLineEnd[] = "\r\n";
- size_t ssrc_pos = 0;
- while ((ssrc_pos = sdp->find(line_start, ssrc_pos)) !=
- std::string::npos) {
- size_t end_ssrc = sdp->find(kSdpLineEnd, ssrc_pos);
- sdp->erase(ssrc_pos, end_ssrc - ssrc_pos + strlen(kSdpLineEnd));
- }
-}
-
-// Add |newlines| to the |message| after |line|.
-void InjectAfter(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- const std::string tmp = line + newlines;
- rtc::replace_substrs(line.c_str(), line.length(),
- tmp.c_str(), tmp.length(), message);
-}
-
-void Replace(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- rtc::replace_substrs(line.c_str(), line.length(),
- newlines.c_str(), newlines.length(), message);
-}
-
-void UseExternalSdes(std::string* sdp) {
- // Remove current crypto specification.
- RemoveLinesFromSdp("a=crypto", sdp);
- RemoveLinesFromSdp("a=fingerprint", sdp);
- // Add external crypto.
- const char kAudioSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR\r\n";
- const char kVideoSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:d0RmdmcmVCspeEc3QGZiNWpVLFJhQX1cfHAwJSoj\r\n";
- const char kDataSdes[] =
- "a=crypto:1 AES_CM_128_HMAC_SHA1_80 "
- "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj\r\n";
- InjectAfter("a=mid:audio\r\n", kAudioSdes, sdp);
- InjectAfter("a=mid:video\r\n", kVideoSdes, sdp);
- InjectAfter("a=mid:data\r\n", kDataSdes, sdp);
-}
-
-void RemoveBundle(std::string* sdp) {
- RemoveLinesFromSdp("a=group:BUNDLE", sdp);
-}
-
} // namespace
class PeerConnectionEndToEndTest
@@ -114,6 +67,9 @@ class PeerConnectionEndToEndTest
"caller")),
callee_(new rtc::RefCountedObject<PeerConnectionTestWrapper>(
"callee")) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
}
void CreatePcs() {
@@ -217,15 +173,20 @@ class PeerConnectionEndToEndTest
DataChannelList callee_signaled_data_channels_;
};
+// Disabled for TSan v2, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=4719 for details.
+// Disabled for Mac, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5231 for details.
+#if !defined(THREAD_SANITIZER) && !defined(WEBRTC_MAC)
TEST_F(PeerConnectionEndToEndTest, Call) {
CreatePcs();
GetAndAddUserMedia();
Negotiate();
WaitForCallEstablished();
}
+#endif // if !defined(THREAD_SANITIZER) && !defined(WEBRTC_MAC)
-// Disabled per b/14899892
-TEST_F(PeerConnectionEndToEndTest, DISABLED_CallWithLegacySdp) {
+TEST_F(PeerConnectionEndToEndTest, CallWithLegacySdp) {
FakeConstraints pc_constraints;
pc_constraints.AddMandatory(MediaConstraintsInterface::kEnableDtlsSrtp,
false);
@@ -396,3 +357,30 @@ TEST_F(PeerConnectionEndToEndTest,
CloseDataChannels(caller_dc, callee_signaled_data_channels_, 1);
}
+
+// This tests that if a data channel is closed remotely while not referenced
+// by the application (meaning only the PeerConnection contributes to its
+// reference count), no memory access violation will occur.
+// See: https://code.google.com/p/chromium/issues/detail?id=565048
+TEST_F(PeerConnectionEndToEndTest, CloseDataChannelRemotelyWhileNotReferenced) {
+ MAYBE_SKIP_TEST(rtc::SSLStreamAdapter::HaveDtlsSrtp);
+
+ CreatePcs();
+
+ webrtc::DataChannelInit init;
+ rtc::scoped_refptr<DataChannelInterface> caller_dc(
+ caller_->CreateDataChannel("data", init));
+
+ Negotiate();
+ WaitForConnection();
+
+ WaitForDataChannelsToOpen(caller_dc, callee_signaled_data_channels_, 0);
+ // This removes the reference to the remote data channel that we hold.
+ callee_signaled_data_channels_.clear();
+ caller_dc->Close();
+ EXPECT_EQ_WAIT(DataChannelInterface::kClosed, caller_dc->state(), kMaxWait);
+
+ // Wait for a bit longer so the remote data channel will receive the
+ // close message and be destroyed.
+ rtc::Thread::Current()->ProcessMessages(100);
+}
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
index b46b4b68d3..c58f88cb41 100644
--- a/talk/app/webrtc/peerconnectionfactory.cc
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/peerconnectionfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastream.h"
@@ -35,7 +37,6 @@
#include "talk/app/webrtc/peerconnection.h"
#include "talk/app/webrtc/peerconnectionfactoryproxy.h"
#include "talk/app/webrtc/peerconnectionproxy.h"
-#include "talk/app/webrtc/portallocatorfactory.h"
#include "talk/app/webrtc/videosource.h"
#include "talk/app/webrtc/videosourceproxy.h"
#include "talk/app/webrtc/videotrack.h"
@@ -44,6 +45,8 @@
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "webrtc/base/bind.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/p2p/base/basicpacketsocketfactory.h"
+#include "webrtc/p2p/client/basicportallocator.h"
namespace webrtc {
@@ -153,11 +156,13 @@ PeerConnectionFactory::PeerConnectionFactory(
PeerConnectionFactory::~PeerConnectionFactory() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_.reset(nullptr);
- default_allocator_factory_ = nullptr;
// Make sure |worker_thread_| and |signaling_thread_| outlive
- // |dtls_identity_store_|.
+ // |dtls_identity_store_|, |default_socket_factory_| and
+ // |default_network_manager_|.
dtls_identity_store_ = nullptr;
+ default_socket_factory_ = nullptr;
+ default_network_manager_ = nullptr;
if (owns_ptrs_) {
if (wraps_current_thread_)
@@ -170,9 +175,16 @@ bool PeerConnectionFactory::Initialize() {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::InitRandom(rtc::Time());
- default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
- if (!default_allocator_factory_)
+ default_network_manager_.reset(new rtc::BasicNetworkManager());
+ if (!default_network_manager_) {
return false;
+ }
+
+ default_socket_factory_.reset(
+ new rtc::BasicPacketSocketFactory(worker_thread_));
+ if (!default_socket_factory_) {
+ return false;
+ }
// TODO: Need to make sure only one VoE is created inside
// WebRtcMediaEngine.
@@ -208,8 +220,8 @@ PeerConnectionFactory::CreateVideoSource(
cricket::VideoCapturer* capturer,
const MediaConstraintsInterface* constraints) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- rtc::scoped_refptr<VideoSource> source(
- VideoSource::Create(channel_manager_.get(), capturer, constraints));
+ rtc::scoped_refptr<VideoSource> source(VideoSource::Create(
+ channel_manager_.get(), capturer, constraints, false));
return VideoSourceProxy::Create(signaling_thread_, source);
}
@@ -237,11 +249,10 @@ rtc::scoped_refptr<PeerConnectionInterface>
PeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- RTC_DCHECK(allocator_factory || default_allocator_factory_);
if (!dtls_identity_store.get()) {
// Because |pc|->Initialize takes ownership of the store we need a new
@@ -251,19 +262,17 @@ PeerConnectionFactory::CreatePeerConnection(
new DtlsIdentityStoreWrapper(dtls_identity_store_));
}
- PortAllocatorFactoryInterface* chosen_allocator_factory =
- allocator_factory ? allocator_factory : default_allocator_factory_.get();
- chosen_allocator_factory->SetNetworkIgnoreMask(options_.network_ignore_mask);
+ if (!allocator) {
+ allocator.reset(new cricket::BasicPortAllocator(
+ default_network_manager_.get(), default_socket_factory_.get()));
+ }
+ allocator->SetNetworkIgnoreMask(options_.network_ignore_mask);
rtc::scoped_refptr<PeerConnection> pc(
new rtc::RefCountedObject<PeerConnection>(this));
- if (!pc->Initialize(
- configuration,
- constraints,
- chosen_allocator_factory,
- dtls_identity_store.Pass(),
- observer)) {
- return NULL;
+ if (!pc->Initialize(configuration, constraints, std::move(allocator),
+ std::move(dtls_identity_store), observer)) {
+ return nullptr;
}
return PeerConnectionProxy::Create(signaling_thread(), pc);
}
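Because a null allocator now falls back to a BasicPortAllocator built from the
factory's default network manager and socket factory, callers that previously
relied on the default PortAllocatorFactory can simply pass nullptr; a sketch
mirroring the internal-modules unit test further down:

  rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> store(
      new FakeDtlsIdentityStore());
  rtc::scoped_refptr<webrtc::PeerConnectionInterface> pc(
      factory->CreatePeerConnection(config, nullptr /* constraints */,
                                    nullptr /* allocator: use the default */,
                                    std::move(store), &observer));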
@@ -289,8 +298,7 @@ rtc::scoped_refptr<AudioTrackInterface>
PeerConnectionFactory::CreateAudioTrack(const std::string& id,
AudioSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
- rtc::scoped_refptr<AudioTrackInterface> track(
- AudioTrack::Create(id, source));
+ rtc::scoped_refptr<AudioTrackInterface> track(AudioTrack::Create(id, source));
return AudioTrackProxy::Create(signaling_thread_, track);
}
diff --git a/talk/app/webrtc/peerconnectionfactory.h b/talk/app/webrtc/peerconnectionfactory.h
index af4117a9d3..8b274e118c 100644
--- a/talk/app/webrtc/peerconnectionfactory.h
+++ b/talk/app/webrtc/peerconnectionfactory.h
@@ -39,6 +39,11 @@
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread.h"
+namespace rtc {
+class BasicNetworkManager;
+class BasicPacketSocketFactory;
+}
+
namespace webrtc {
typedef rtc::RefCountedObject<DtlsIdentityStoreImpl>
@@ -50,14 +55,12 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
options_ = options;
}
- // webrtc::PeerConnectionFactoryInterface override;
- rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::RTCConfiguration& configuration,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) override;
+ rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+ const PeerConnectionInterface::RTCConfiguration& configuration,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
+ rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
+ PeerConnectionObserver* observer) override;
bool Initialize();
@@ -107,7 +110,6 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
rtc::Thread* signaling_thread_;
rtc::Thread* worker_thread_;
Options options_;
- rtc::scoped_refptr<PortAllocatorFactoryInterface> default_allocator_factory_;
// External Audio device used for audio playback.
rtc::scoped_refptr<AudioDeviceModule> default_adm_;
rtc::scoped_ptr<cricket::ChannelManager> channel_manager_;
@@ -119,6 +121,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface {
// injected any. In that case, video engine will use the internal SW decoder.
rtc::scoped_ptr<cricket::WebRtcVideoDecoderFactory>
video_decoder_factory_;
+ rtc::scoped_ptr<rtc::BasicNetworkManager> default_network_manager_;
+ rtc::scoped_ptr<rtc::BasicPacketSocketFactory> default_socket_factory_;
rtc::scoped_refptr<RefCountedDtlsIdentityStore> dtls_identity_store_;
};
diff --git a/talk/app/webrtc/peerconnectionfactory_unittest.cc b/talk/app/webrtc/peerconnectionfactory_unittest.cc
index f1d5353abd..9fb013b54f 100644
--- a/talk/app/webrtc/peerconnectionfactory_unittest.cc
+++ b/talk/app/webrtc/peerconnectionfactory_unittest.cc
@@ -26,10 +26,13 @@
*/
#include <string>
+#include <utility>
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/peerconnectionfactory.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
#include "talk/app/webrtc/videosourceinterface.h"
@@ -39,6 +42,7 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
using webrtc::DataChannelInterface;
using webrtc::DtlsIdentityStoreInterface;
@@ -47,17 +51,11 @@ using webrtc::MediaStreamInterface;
using webrtc::PeerConnectionFactoryInterface;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionObserver;
-using webrtc::PortAllocatorFactoryInterface;
using webrtc::VideoSourceInterface;
using webrtc::VideoTrackInterface;
namespace {
-typedef std::vector<PortAllocatorFactoryInterface::StunConfiguration>
- StunConfigurations;
-typedef std::vector<PortAllocatorFactoryInterface::TurnConfiguration>
- TurnConfigurations;
-
static const char kStunIceServer[] = "stun:stun.l.google.com:19302";
static const char kTurnIceServer[] = "turn:test%40hello.com@test.com:1234";
static const char kTurnIceServerWithTransport[] =
@@ -103,6 +101,9 @@ class NullPeerConnectionObserver : public PeerConnectionObserver {
class PeerConnectionFactoryTest : public testing::Test {
void SetUp() {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
factory_ = webrtc::CreatePeerConnectionFactory(rtc::Thread::Current(),
rtc::Thread::Current(),
NULL,
@@ -110,57 +111,58 @@ class PeerConnectionFactoryTest : public testing::Test {
NULL);
ASSERT_TRUE(factory_.get() != NULL);
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
+ port_allocator_.reset(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
+ raw_port_allocator_ = port_allocator_.get();
}
protected:
- void VerifyStunConfigurations(StunConfigurations stun_config) {
- webrtc::FakePortAllocatorFactory* allocator =
- static_cast<webrtc::FakePortAllocatorFactory*>(
- allocator_factory_.get());
- ASSERT_TRUE(allocator != NULL);
- EXPECT_EQ(stun_config.size(), allocator->stun_configs().size());
- for (size_t i = 0; i < stun_config.size(); ++i) {
- EXPECT_EQ(stun_config[i].server.ToString(),
- allocator->stun_configs()[i].server.ToString());
- }
+ void VerifyStunServers(cricket::ServerAddresses stun_servers) {
+ EXPECT_EQ(stun_servers, raw_port_allocator_->stun_servers());
}
- void VerifyTurnConfigurations(TurnConfigurations turn_config) {
- webrtc::FakePortAllocatorFactory* allocator =
- static_cast<webrtc::FakePortAllocatorFactory*>(
- allocator_factory_.get());
- ASSERT_TRUE(allocator != NULL);
- EXPECT_EQ(turn_config.size(), allocator->turn_configs().size());
- for (size_t i = 0; i < turn_config.size(); ++i) {
- EXPECT_EQ(turn_config[i].server.ToString(),
- allocator->turn_configs()[i].server.ToString());
- EXPECT_EQ(turn_config[i].username, allocator->turn_configs()[i].username);
- EXPECT_EQ(turn_config[i].password, allocator->turn_configs()[i].password);
- EXPECT_EQ(turn_config[i].transport_type,
- allocator->turn_configs()[i].transport_type);
+ void VerifyTurnServers(std::vector<cricket::RelayServerConfig> turn_servers) {
+ EXPECT_EQ(turn_servers.size(), raw_port_allocator_->turn_servers().size());
+ for (size_t i = 0; i < turn_servers.size(); ++i) {
+ ASSERT_EQ(1u, turn_servers[i].ports.size());
+ EXPECT_EQ(1u, raw_port_allocator_->turn_servers()[i].ports.size());
+ EXPECT_EQ(
+ turn_servers[i].ports[0].address.ToString(),
+ raw_port_allocator_->turn_servers()[i].ports[0].address.ToString());
+ EXPECT_EQ(turn_servers[i].ports[0].proto,
+ raw_port_allocator_->turn_servers()[i].ports[0].proto);
+ EXPECT_EQ(turn_servers[i].credentials.username,
+ raw_port_allocator_->turn_servers()[i].credentials.username);
+ EXPECT_EQ(turn_servers[i].credentials.password,
+ raw_port_allocator_->turn_servers()[i].credentials.password);
}
}
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory_;
NullPeerConnectionObserver observer_;
- rtc::scoped_refptr<PortAllocatorFactoryInterface> allocator_factory_;
+ rtc::scoped_ptr<cricket::FakePortAllocator> port_allocator_;
+ // Since the PC owns the port allocator after it's been initialized,
+ // this should only be used when known to be safe.
+ cricket::FakePortAllocator* raw_port_allocator_;
};
// Verify creation of PeerConnection using internal ADM, video factory and
// internal libjingle threads.
TEST(PeerConnectionFactoryTestInternal, CreatePCUsingInternalModules) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
+
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory(
webrtc::CreatePeerConnectionFactory());
NullPeerConnectionObserver observer;
- webrtc::PeerConnectionInterface::IceServers servers;
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
rtc::scoped_ptr<FakeDtlsIdentityStore> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory->CreatePeerConnection(
- servers, nullptr, nullptr, dtls_identity_store.Pass(), &observer));
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory->CreatePeerConnection(
+ config, nullptr, nullptr, std::move(dtls_identity_store), &observer));
EXPECT_TRUE(pc.get() != nullptr);
}
@@ -180,25 +182,22 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServers) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("stun.l.google.com", 19302);
+ stun_servers.insert(stun1);
+ VerifyStunServers(stun_servers);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("test.com", 1234, "test@hello.com",
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ cricket::RelayServerConfig turn2("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn2);
+ VerifyTurnServers(turn_servers);
}
// This test verifies creation of PeerConnection with valid STUN and TURN
@@ -213,63 +212,22 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServersUrls) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
-}
-
-// This test verifies creation of PeerConnection with valid STUN and TURN
-// configuration. Also verifies the URL's parsed correctly as expected.
-// This version doesn't use RTCConfiguration.
-// TODO(mallinath) - Remove this method after clients start using RTCConfig.
-TEST_F(PeerConnectionFactoryTest, CreatePCUsingIceServersOldSignature) {
- webrtc::PeerConnectionInterface::IceServers ice_servers;
- webrtc::PeerConnectionInterface::IceServer ice_server;
- ice_server.uri = kStunIceServer;
- ice_servers.push_back(ice_server);
- ice_server.uri = kTurnIceServer;
- ice_server.password = kTurnPassword;
- ice_servers.push_back(ice_server);
- ice_server.uri = kTurnIceServerWithTransport;
- ice_server.password = kTurnPassword;
- ice_servers.push_back(ice_server);
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
- new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(ice_servers, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "stun.l.google.com", 19302);
- stun_configs.push_back(stun1);
- VerifyStunConfigurations(stun_configs);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "test.com", 1234, "test@hello.com", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn2);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("stun.l.google.com", 19302);
+ stun_servers.insert(stun1);
+ VerifyStunServers(stun_servers);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("test.com", 1234, "test@hello.com",
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ cricket::RelayServerConfig turn2("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn2);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingNoUsernameInUri) {
@@ -283,17 +241,15 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingNoUsernameInUri) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
- "test.com", 1234, kTurnUsername, kTurnPassword, "udp", false);
- turn_configs.push_back(turn);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn("test.com", 1234, kTurnUsername,
+ kTurnPassword, cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn);
+ VerifyTurnServers(turn_servers);
}
// This test verifies that the PeerConnection is created properly with a TURN url which
@@ -306,17 +262,15 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingTurnUrlWithTransportParam) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn(
- "hello.com", kDefaultStunPort, "test", kTurnPassword, "tcp", false);
- turn_configs.push_back(turn);
- VerifyTurnConfigurations(turn_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn("hello.com", kDefaultStunPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, false);
+ turn_servers.push_back(turn);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingSecureTurnUrl) {
@@ -333,25 +287,23 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingSecureTurnUrl) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "hello.com", kDefaultStunTlsPort, "test", kTurnPassword, "tcp", true);
- turn_configs.push_back(turn1);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("hello.com", kDefaultStunTlsPort, "test",
+ kTurnPassword, cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn1);
// TURNS with transport param should default to tcp.
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn2(
- "hello.com", 443, "test_no_transport", kTurnPassword, "tcp", true);
- turn_configs.push_back(turn2);
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn3(
- "hello.com", kDefaultStunTlsPort, "test_no_transport",
- kTurnPassword, "tcp", true);
- turn_configs.push_back(turn3);
- VerifyTurnConfigurations(turn_configs);
+ cricket::RelayServerConfig turn2("hello.com", 443, "test_no_transport",
+ kTurnPassword, cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn2);
+ cricket::RelayServerConfig turn3("hello.com", kDefaultStunTlsPort,
+ "test_no_transport", kTurnPassword,
+ cricket::PROTO_TCP, true);
+ turn_servers.push_back(turn3);
+ VerifyTurnServers(turn_servers);
}
TEST_F(PeerConnectionFactoryTest, CreatePCUsingIPLiteralAddress) {
@@ -370,32 +322,26 @@ TEST_F(PeerConnectionFactoryTest, CreatePCUsingIPLiteralAddress) {
config.servers.push_back(ice_server);
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store(
new FakeDtlsIdentityStore());
- rtc::scoped_refptr<PeerConnectionInterface> pc(
- factory_->CreatePeerConnection(config, nullptr,
- allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_));
- EXPECT_TRUE(pc.get() != NULL);
- StunConfigurations stun_configs;
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun1(
- "1.2.3.4", 1234);
- stun_configs.push_back(stun1);
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun2(
- "1.2.3.4", 3478);
- stun_configs.push_back(stun2); // Default port
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun3(
- "2401:fa00:4::", 1234);
- stun_configs.push_back(stun3);
- webrtc::PortAllocatorFactoryInterface::StunConfiguration stun4(
- "2401:fa00:4::", 3478);
- stun_configs.push_back(stun4); // Default port
- VerifyStunConfigurations(stun_configs);
+ rtc::scoped_refptr<PeerConnectionInterface> pc(factory_->CreatePeerConnection(
+ config, nullptr, std::move(port_allocator_),
+ std::move(dtls_identity_store), &observer_));
+ ASSERT_TRUE(pc.get() != NULL);
+ cricket::ServerAddresses stun_servers;
+ rtc::SocketAddress stun1("1.2.3.4", 1234);
+ stun_servers.insert(stun1);
+ rtc::SocketAddress stun2("1.2.3.4", 3478);
+ stun_servers.insert(stun2); // Default port
+ rtc::SocketAddress stun3("2401:fa00:4::", 1234);
+ stun_servers.insert(stun3);
+ rtc::SocketAddress stun4("2401:fa00:4::", 3478);
+ stun_servers.insert(stun4); // Default port
+ VerifyStunServers(stun_servers);
- TurnConfigurations turn_configs;
- webrtc::PortAllocatorFactoryInterface::TurnConfiguration turn1(
- "2401:fa00:4::", 1234, "test", kTurnPassword, "udp", false);
- turn_configs.push_back(turn1);
- VerifyTurnConfigurations(turn_configs);
+ std::vector<cricket::RelayServerConfig> turn_servers;
+ cricket::RelayServerConfig turn1("2401:fa00:4::", 1234, "test", kTurnPassword,
+ cricket::PROTO_UDP, false);
+ turn_servers.push_back(turn1);
+ VerifyTurnServers(turn_servers);
}
// This test verifies the captured stream is rendered locally using a
diff --git a/talk/app/webrtc/peerconnectionfactoryproxy.h b/talk/app/webrtc/peerconnectionfactoryproxy.h
index 5e924df3a1..714ce6b7eb 100644
--- a/talk/app/webrtc/peerconnectionfactoryproxy.h
+++ b/talk/app/webrtc/peerconnectionfactoryproxy.h
@@ -29,6 +29,7 @@
#define TALK_APP_WEBRTC_PEERCONNECTIONFACTORYPROXY_H_
#include <string>
+#include <utility>
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/proxy.h"
@@ -38,17 +39,17 @@ namespace webrtc {
BEGIN_PROXY_MAP(PeerConnectionFactory)
PROXY_METHOD1(void, SetOptions, const Options&)
- // Can't use PROXY_METHOD5 because scoped_ptr must be Pass()ed.
+ // Can't use PROXY_METHOD5 because scoped_ptr must be moved.
// TODO(tommi,hbos): Use of templates to support scoped_ptr?
rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& a1,
const MediaConstraintsInterface* a2,
- PortAllocatorFactoryInterface* a3,
+ rtc::scoped_ptr<cricket::PortAllocator> a3,
rtc::scoped_ptr<DtlsIdentityStoreInterface> a4,
PeerConnectionObserver* a5) override {
return owner_thread_->Invoke<rtc::scoped_refptr<PeerConnectionInterface>>(
rtc::Bind(&PeerConnectionFactoryProxy::CreatePeerConnection_ot, this,
- a1, a2, a3, a4.release(), a5));
+ a1, a2, a3.release(), a4.release(), a5));
}
PROXY_METHOD1(rtc::scoped_refptr<MediaStreamInterface>,
CreateLocalMediaStream, const std::string&)
@@ -70,11 +71,13 @@ BEGIN_PROXY_MAP(PeerConnectionFactory)
rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection_ot(
const PeerConnectionInterface::RTCConfiguration& a1,
const MediaConstraintsInterface* a2,
- PortAllocatorFactoryInterface* a3,
+ cricket::PortAllocator* a3,
DtlsIdentityStoreInterface* a4,
PeerConnectionObserver* a5) {
+ rtc::scoped_ptr<cricket::PortAllocator> ptr_a3(a3);
rtc::scoped_ptr<DtlsIdentityStoreInterface> ptr_a4(a4);
- return c_->CreatePeerConnection(a1, a2, a3, ptr_a4.Pass(), a5);
+ return c_->CreatePeerConnection(a1, a2, std::move(ptr_a3),
+ std::move(ptr_a4), a5);
}
END_PROXY()
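The release()/re-wrap dance above exists because rtc::Bind copies its arguments, and a
move-only scoped_ptr cannot be copied across the thread Invoke. A minimal sketch of the
pattern in isolation (hypothetical Proxy/Thing/Dep names; only Invoke, Bind, and
release() are taken from the code above):

    // Hypothetical helper showing the move-only-argument proxy pattern.
    rtc::scoped_refptr<Thing> Proxy::Make(rtc::scoped_ptr<Dep> dep) {
      // 1. Strip ownership to a raw pointer so rtc::Bind can copy it.
      return owner_thread_->Invoke<rtc::scoped_refptr<Thing>>(
          rtc::Bind(&Proxy::Make_ot, this, dep.release()));
    }

    rtc::scoped_refptr<Thing> Proxy::Make_ot(Dep* dep) {
      // 2. Re-adopt ownership on the owner thread right away, so the
      //    dependency is never leaked on an early return.
      rtc::scoped_ptr<Dep> owned(dep);
      return impl_->Make(std::move(owned));
    }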
diff --git a/talk/app/webrtc/peerconnectioninterface.h b/talk/app/webrtc/peerconnectioninterface.h
index 77caa9d78b..b9afbad204 100644
--- a/talk/app/webrtc/peerconnectioninterface.h
+++ b/talk/app/webrtc/peerconnectioninterface.h
@@ -69,6 +69,7 @@
#define TALK_APP_WEBRTC_PEERCONNECTIONINTERFACE_H_
#include <string>
+#include <utility>
#include <vector>
#include "talk/app/webrtc/datachannelinterface.h"
@@ -86,6 +87,7 @@
#include "webrtc/base/rtccertificate.h"
#include "webrtc/base/sslstreamadapter.h"
#include "webrtc/base/socketaddress.h"
+#include "webrtc/p2p/base/portallocator.h"
namespace rtc {
class SSLIdentity;
@@ -93,7 +95,6 @@ class Thread;
}
namespace cricket {
-class PortAllocator;
class WebRtcVideoDecoderFactory;
class WebRtcVideoEncoderFactory;
}
@@ -248,28 +249,27 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
// TODO(pthatcher): Rename this ice_servers, but update Chromium
// at the same time.
IceServers servers;
- // A localhost candidate is signaled whenever a candidate with the any
- // address is allocated.
- bool enable_localhost_ice_candidate;
BundlePolicy bundle_policy;
RtcpMuxPolicy rtcp_mux_policy;
TcpCandidatePolicy tcp_candidate_policy;
int audio_jitter_buffer_max_packets;
bool audio_jitter_buffer_fast_accelerate;
- int ice_connection_receiving_timeout;
+ int ice_connection_receiving_timeout; // ms
+ int ice_backup_candidate_pair_ping_interval; // ms
ContinualGatheringPolicy continual_gathering_policy;
std::vector<rtc::scoped_refptr<rtc::RTCCertificate>> certificates;
-
+ bool disable_prerenderer_smoothing;
RTCConfiguration()
: type(kAll),
- enable_localhost_ice_candidate(false),
bundle_policy(kBundlePolicyBalanced),
rtcp_mux_policy(kRtcpMuxPolicyNegotiate),
tcp_candidate_policy(kTcpCandidatePolicyEnabled),
audio_jitter_buffer_max_packets(kAudioJitterBufferMaxPackets),
audio_jitter_buffer_fast_accelerate(false),
ice_connection_receiving_timeout(kUndefined),
- continual_gathering_policy(GATHER_ONCE) {}
+ ice_backup_candidate_pair_ping_interval(kUndefined),
+ continual_gathering_policy(GATHER_ONCE),
+ disable_prerenderer_smoothing(false) {}
};
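For orientation, the two new knobs are plain data members initialized to the kUndefined
sentinel, which (by assumption here) means "keep the engine default", so callers opt in
field by field. A caller-side sketch using only names visible in this hunk:

    webrtc::PeerConnectionInterface::RTCConfiguration config;
    // Both intervals are in milliseconds; kUndefined keeps the built-in default.
    config.ice_connection_receiving_timeout = 5000;
    config.ice_backup_candidate_pair_ping_interval = 25000;
    // Opt out of render-side smoothing (new field, defaults to false).
    config.disable_prerenderer_smoothing = true;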
struct RTCOfferAnswerOptions {
@@ -337,6 +337,15 @@ class PeerConnectionInterface : public rtc::RefCountInterface {
AudioTrackInterface* track) = 0;
// TODO(deadbeef): Make these pure virtual once all subclasses implement them.
+ // |kind| must be "audio" or "video".
+ // |stream_id| is used to populate the msid attribute; if empty, one will
+ // be generated automatically.
+ virtual rtc::scoped_refptr<RtpSenderInterface> CreateSender(
+ const std::string& kind,
+ const std::string& stream_id) {
+ return rtc::scoped_refptr<RtpSenderInterface>();
+ }
+
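The default implementation above returns a null sender, so only subclasses that
override it support creating a sender before a track exists. A usage sketch, assuming
a pc that overrides it (|audio_track| is a hypothetical local track; SetTrack comes
from RtpSenderInterface):

    rtc::scoped_refptr<webrtc::RtpSenderInterface> sender =
        pc->CreateSender("audio", "stream_1");  // msid will carry "stream_1"
    if (sender) {
      sender->SetTrack(audio_track.get());  // attach the track afterwards
    }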
virtual std::vector<rtc::scoped_refptr<RtpSenderInterface>> GetSenders()
const {
return std::vector<rtc::scoped_refptr<RtpSenderInterface>>();
@@ -480,51 +489,6 @@ class PeerConnectionObserver {
~PeerConnectionObserver() {}
};
-// Factory class used for creating cricket::PortAllocator that is used
-// for ICE negotiation.
-class PortAllocatorFactoryInterface : public rtc::RefCountInterface {
- public:
- struct StunConfiguration {
- StunConfiguration(const std::string& address, int port)
- : server(address, port) {}
- // STUN server address and port.
- rtc::SocketAddress server;
- };
-
- struct TurnConfiguration {
- TurnConfiguration(const std::string& address,
- int port,
- const std::string& username,
- const std::string& password,
- const std::string& transport_type,
- bool secure)
- : server(address, port),
- username(username),
- password(password),
- transport_type(transport_type),
- secure(secure) {}
- rtc::SocketAddress server;
- std::string username;
- std::string password;
- std::string transport_type;
- bool secure;
- };
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun_servers,
- const std::vector<TurnConfiguration>& turn_configurations) = 0;
-
- // TODO(phoglund): Make pure virtual when Chrome's factory implements this.
- // After this method is called, the port allocator should consider loopback
- // network interfaces as well.
- virtual void SetNetworkIgnoreMask(int network_ignore_mask) {
- }
-
- protected:
- PortAllocatorFactoryInterface() {}
- ~PortAllocatorFactoryInterface() {}
-};
-
// PeerConnectionFactoryInterface is the factory interface used for creating
// PeerConnection, MediaStream and media tracks.
// PeerConnectionFactoryInterface will create required libjingle threads,
@@ -532,19 +496,18 @@ class PortAllocatorFactoryInterface : public rtc::RefCountInterface {
// If an application decides to provide its own threads and network
// implementation of these classes, it should use the alternate
// CreatePeerConnectionFactory method, which accepts threads as input, and use the
-// CreatePeerConnection version that takes a PortAllocatorFactoryInterface as
+// CreatePeerConnection version that takes a PortAllocator as an
// argument.
class PeerConnectionFactoryInterface : public rtc::RefCountInterface {
public:
class Options {
public:
- Options() :
- disable_encryption(false),
- disable_sctp_data_channels(false),
- disable_network_monitor(false),
- network_ignore_mask(rtc::kDefaultNetworkIgnoreMask),
- ssl_max_version(rtc::SSL_PROTOCOL_DTLS_10) {
- }
+ Options()
+ : disable_encryption(false),
+ disable_sctp_data_channels(false),
+ disable_network_monitor(false),
+ network_ignore_mask(rtc::kDefaultNetworkIgnoreMask),
+ ssl_max_version(rtc::SSL_PROTOCOL_DTLS_12) {}
bool disable_encryption;
bool disable_sctp_data_channels;
bool disable_network_monitor;
@@ -562,31 +525,12 @@ class PeerConnectionFactoryInterface : public rtc::RefCountInterface {
virtual void SetOptions(const Options& options) = 0;
- virtual rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::RTCConfiguration& configuration,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) = 0;
-
- // TODO(hbos): Remove below version after clients are updated to above method.
- // In latest W3C WebRTC draft, PC constructor will take RTCConfiguration,
- // and not IceServers. RTCConfiguration is made up of ice servers and
- // ice transport type.
- // http://dev.w3.org/2011/webrtc/editor/webrtc.html
- inline rtc::scoped_refptr<PeerConnectionInterface>
- CreatePeerConnection(
- const PeerConnectionInterface::IceServers& servers,
- const MediaConstraintsInterface* constraints,
- PortAllocatorFactoryInterface* allocator_factory,
- rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
- PeerConnectionObserver* observer) {
- PeerConnectionInterface::RTCConfiguration rtc_config;
- rtc_config.servers = servers;
- return CreatePeerConnection(rtc_config, constraints, allocator_factory,
- dtls_identity_store.Pass(), observer);
- }
+ virtual rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+ const PeerConnectionInterface::RTCConfiguration& configuration,
+ const MediaConstraintsInterface* constraints,
+ rtc::scoped_ptr<cricket::PortAllocator> allocator,
+ rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
+ PeerConnectionObserver* observer) = 0;
virtual rtc::scoped_refptr<MediaStreamInterface>
CreateLocalMediaStream(const std::string& label) = 0;
diff --git a/talk/app/webrtc/peerconnectioninterface_unittest.cc b/talk/app/webrtc/peerconnectioninterface_unittest.cc
index 63163fd651..c3789b7dd8 100644
--- a/talk/app/webrtc/peerconnectioninterface_unittest.cc
+++ b/talk/app/webrtc/peerconnectioninterface_unittest.cc
@@ -26,9 +26,9 @@
*/
#include <string>
+#include <utility>
#include "talk/app/webrtc/audiotrack.h"
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/mediastreaminterface.h"
@@ -37,6 +37,9 @@
#include "talk/app/webrtc/rtpreceiverinterface.h"
#include "talk/app/webrtc/rtpsenderinterface.h"
#include "talk/app/webrtc/streamcollection.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/test/fakeconstraints.h"
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
@@ -52,6 +55,7 @@
#include "webrtc/base/sslstreamadapter.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
static const char kStreamLabel1[] = "local_stream_1";
static const char kStreamLabel2[] = "local_stream_2";
@@ -258,7 +262,6 @@ using webrtc::AudioTrackInterface;
using webrtc::DataBuffer;
using webrtc::DataChannelInterface;
using webrtc::FakeConstraints;
-using webrtc::FakePortAllocatorFactory;
using webrtc::IceCandidateInterface;
using webrtc::MediaConstraintsInterface;
using webrtc::MediaStream;
@@ -270,7 +273,6 @@ using webrtc::MockSetSessionDescriptionObserver;
using webrtc::MockStatsObserver;
using webrtc::PeerConnectionInterface;
using webrtc::PeerConnectionObserver;
-using webrtc::PortAllocatorFactoryInterface;
using webrtc::RtpReceiverInterface;
using webrtc::RtpSenderInterface;
using webrtc::SdpParseError;
@@ -515,6 +517,12 @@ class MockPeerConnectionObserver : public PeerConnectionObserver {
class PeerConnectionInterfaceTest : public testing::Test {
protected:
+ PeerConnectionInterfaceTest() {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
+ }
+
virtual void SetUp() {
pc_factory_ = webrtc::CreatePeerConnectionFactory(
rtc::Thread::Current(), rtc::Thread::Current(), NULL, NULL,
@@ -533,15 +541,17 @@ class PeerConnectionInterfaceTest : public testing::Test {
void CreatePeerConnection(const std::string& uri,
const std::string& password,
webrtc::MediaConstraintsInterface* constraints) {
+ PeerConnectionInterface::RTCConfiguration config;
PeerConnectionInterface::IceServer server;
- PeerConnectionInterface::IceServers servers;
if (!uri.empty()) {
server.uri = uri;
server.password = password;
- servers.push_back(server);
+ config.servers.push_back(server);
}
- port_allocator_factory_ = FakePortAllocatorFactory::Create();
+ rtc::scoped_ptr<cricket::FakePortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
+ port_allocator_ = port_allocator.get();
// DTLS does not work in a loopback call, so it is disabled for most of the
// tests in this file. We only create a FakeIdentityService if the test
@@ -562,52 +572,47 @@ class PeerConnectionInterfaceTest : public testing::Test {
nullptr) && dtls) {
dtls_identity_store.reset(new FakeDtlsIdentityStore());
}
- pc_ = pc_factory_->CreatePeerConnection(servers, constraints,
- port_allocator_factory_.get(),
- dtls_identity_store.Pass(),
- &observer_);
+ pc_ = pc_factory_->CreatePeerConnection(
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), &observer_);
ASSERT_TRUE(pc_.get() != NULL);
observer_.SetPeerConnectionInterface(pc_.get());
EXPECT_EQ(PeerConnectionInterface::kStable, observer_.state_);
}
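Note the ownership shape the fixture relies on here: the scoped_ptr is handed to the
PeerConnection, but a non-owning raw pointer is stashed first so later assertions can
still inspect the allocator. A condensed sketch of just that pattern (valid only while
|pc| keeps the allocator alive):

    rtc::scoped_ptr<cricket::FakePortAllocator> owned(
        new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
    cricket::FakePortAllocator* raw = owned.get();  // non-owning view, saved first
    pc = factory->CreatePeerConnection(config, nullptr, std::move(owned),
                                       std::move(dtls_identity_store), &observer);
    // |owned| is now null; |raw| stays usable for expectations such as:
    EXPECT_EQ(1u, raw->stun_servers().size());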
void CreatePeerConnectionExpectFail(const std::string& uri) {
+ PeerConnectionInterface::RTCConfiguration config;
PeerConnectionInterface::IceServer server;
- PeerConnectionInterface::IceServers servers;
server.uri = uri;
- servers.push_back(server);
+ config.servers.push_back(server);
- scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store;
- port_allocator_factory_ = FakePortAllocatorFactory::Create();
scoped_refptr<PeerConnectionInterface> pc;
- pc = pc_factory_->CreatePeerConnection(
- servers, nullptr, port_allocator_factory_.get(),
- dtls_identity_store.Pass(), &observer_);
- ASSERT_EQ(nullptr, pc);
+ pc = pc_factory_->CreatePeerConnection(config, nullptr, nullptr, nullptr,
+ &observer_);
+ EXPECT_EQ(nullptr, pc);
}
void CreatePeerConnectionWithDifferentConfigurations() {
CreatePeerConnection(kStunAddressOnly, "", NULL);
- EXPECT_EQ(1u, port_allocator_factory_->stun_configs().size());
- EXPECT_EQ(0u, port_allocator_factory_->turn_configs().size());
- EXPECT_EQ("address",
- port_allocator_factory_->stun_configs()[0].server.hostname());
+ EXPECT_EQ(1u, port_allocator_->stun_servers().size());
+ EXPECT_EQ(0u, port_allocator_->turn_servers().size());
+ EXPECT_EQ("address", port_allocator_->stun_servers().begin()->hostname());
EXPECT_EQ(kDefaultStunPort,
- port_allocator_factory_->stun_configs()[0].server.port());
+ port_allocator_->stun_servers().begin()->port());
CreatePeerConnectionExpectFail(kStunInvalidPort);
CreatePeerConnectionExpectFail(kStunAddressPortAndMore1);
CreatePeerConnectionExpectFail(kStunAddressPortAndMore2);
CreatePeerConnection(kTurnIceServerUri, kTurnPassword, NULL);
- EXPECT_EQ(0u, port_allocator_factory_->stun_configs().size());
- EXPECT_EQ(1u, port_allocator_factory_->turn_configs().size());
+ EXPECT_EQ(0u, port_allocator_->stun_servers().size());
+ EXPECT_EQ(1u, port_allocator_->turn_servers().size());
EXPECT_EQ(kTurnUsername,
- port_allocator_factory_->turn_configs()[0].username);
+ port_allocator_->turn_servers()[0].credentials.username);
EXPECT_EQ(kTurnPassword,
- port_allocator_factory_->turn_configs()[0].password);
+ port_allocator_->turn_servers()[0].credentials.password);
EXPECT_EQ(kTurnHostname,
- port_allocator_factory_->turn_configs()[0].server.hostname());
+ port_allocator_->turn_servers()[0].ports[0].address.hostname());
}
void ReleasePeerConnection() {
@@ -926,7 +931,7 @@ class PeerConnectionInterfaceTest : public testing::Test {
ASSERT_TRUE(stream->AddTrack(video_track));
}
- scoped_refptr<FakePortAllocatorFactory> port_allocator_factory_;
+ cricket::FakePortAllocator* port_allocator_ = nullptr;
scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory_;
scoped_refptr<PeerConnectionInterface> pc_;
MockPeerConnectionObserver observer_;
@@ -1156,6 +1161,64 @@ TEST_F(PeerConnectionInterfaceTest, SsrcInOfferAnswer) {
EXPECT_NE(audio_ssrc, video_ssrc);
}
+// Test that it's possible to call AddTrack on a MediaStream after adding
+// the stream to a PeerConnection.
+// TODO(deadbeef): Remove this test once this behavior is no longer supported.
+TEST_F(PeerConnectionInterfaceTest, AddTrackAfterAddStream) {
+ CreatePeerConnection();
+ // Create audio stream and add to PeerConnection.
+ AddVoiceStream(kStreamLabel1);
+ MediaStreamInterface* stream = pc_->local_streams()->at(0);
+
+ // Add video track to the audio-only stream.
+ scoped_refptr<VideoTrackInterface> video_track(
+ pc_factory_->CreateVideoTrack("video_label", nullptr));
+ stream->AddTrack(video_track.get());
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ EXPECT_TRUE(video_desc != nullptr);
+}
+
+// Test that it's possible to call RemoveTrack on a MediaStream after adding
+// the stream to a PeerConnection.
+// TODO(deadbeef): Remove this test once this behavior is no longer supported.
+TEST_F(PeerConnectionInterfaceTest, RemoveTrackAfterAddStream) {
+ CreatePeerConnection();
+ // Create audio/video stream and add to PeerConnection.
+ AddAudioVideoStream(kStreamLabel1, "audio_label", "video_label");
+ MediaStreamInterface* stream = pc_->local_streams()->at(0);
+
+ // Remove the video track.
+ stream->RemoveTrack(stream->GetVideoTracks()[0]);
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ EXPECT_TRUE(video_desc == nullptr);
+}
+
+// Test creating a sender with a stream ID, and ensure the ID is populated
+// in the offer.
+TEST_F(PeerConnectionInterfaceTest, CreateSenderWithStream) {
+ CreatePeerConnection();
+ pc_->CreateSender("video", kStreamLabel1);
+
+ scoped_ptr<SessionDescriptionInterface> offer;
+ ASSERT_TRUE(DoCreateOffer(offer.use(), nullptr));
+
+ const cricket::MediaContentDescription* video_desc =
+ cricket::GetFirstVideoContentDescription(offer->description());
+ ASSERT_TRUE(video_desc != nullptr);
+ ASSERT_EQ(1u, video_desc->streams().size());
+ EXPECT_EQ(kStreamLabel1, video_desc->streams()[0].sync_label);
+}
+
// Test that we can specify a certain track that we want statistics about.
TEST_F(PeerConnectionInterfaceTest, GetStatsForSpecificTrack) {
InitiateCall();
@@ -1660,6 +1723,22 @@ TEST_F(PeerConnectionInterfaceTest, CreateSubsequentInactiveOffer) {
ASSERT_EQ(cricket::MD_INACTIVE, audio_desc->direction());
}
+// Test that we can use SetConfiguration to change the ICE servers of the
+// PortAllocator.
+TEST_F(PeerConnectionInterfaceTest, SetConfigurationChangesIceServers) {
+ CreatePeerConnection();
+
+ PeerConnectionInterface::RTCConfiguration config;
+ PeerConnectionInterface::IceServer server;
+ server.uri = "stun:test_hostname";
+ config.servers.push_back(server);
+ EXPECT_TRUE(pc_->SetConfiguration(config));
+
+ EXPECT_EQ(1u, port_allocator_->stun_servers().size());
+ EXPECT_EQ("test_hostname",
+ port_allocator_->stun_servers().begin()->hostname());
+}
+
// Test that PeerConnection::Close changes the states to closed and all remote
// tracks change state to ended.
TEST_F(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) {
@@ -1977,6 +2056,28 @@ TEST_F(PeerConnectionInterfaceTest, SdpWithMsidDontCreatesDefaultStream) {
EXPECT_EQ(0u, observer_.remote_streams()->count());
}
+// This tests that when setting a new description, the old default tracks are
+// not destroyed and recreated.
+// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5250
+TEST_F(PeerConnectionInterfaceTest, DefaultTracksNotDestroyedAndRecreated) {
+ FakeConstraints constraints;
+ constraints.AddMandatory(webrtc::MediaConstraintsInterface::kEnableDtlsSrtp,
+ true);
+ CreatePeerConnection(&constraints);
+ CreateAndSetRemoteOffer(kSdpStringWithoutStreamsAudioOnly);
+
+ ASSERT_EQ(1u, observer_.remote_streams()->count());
+ MediaStreamInterface* remote_stream = observer_.remote_streams()->at(0);
+ ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+
+ // Set the track to "disabled", then set a new description and ensure the
+ // track is still disabled, which ensures it hasn't been recreated.
+ remote_stream->GetAudioTracks()[0]->set_enabled(false);
+ CreateAndSetRemoteOffer(kSdpStringWithoutStreamsAudioOnly);
+ ASSERT_EQ(1u, remote_stream->GetAudioTracks().size());
+ EXPECT_FALSE(remote_stream->GetAudioTracks()[0]->enabled());
+}
+
// This tests that a default MediaStream is not created if a remote session
// description is updated to not have any MediaStreams.
TEST_F(PeerConnectionInterfaceTest, VerifyDefaultStreamIsNotCreated) {
@@ -2020,8 +2121,10 @@ TEST_F(PeerConnectionInterfaceTest, LocalDescriptionChanged) {
EXPECT_TRUE(ContainsSender(senders, kVideoTracks[1]));
// Remove an audio and video track.
+ pc_->RemoveStream(reference_collection_->at(0));
rtc::scoped_ptr<SessionDescriptionInterface> desc_2;
CreateSessionDescriptionAndReference(1, 1, desc_2.accept());
+ pc_->AddStream(reference_collection_->at(0));
EXPECT_TRUE(DoSetLocalDescription(desc_2.release()));
senders = pc_->GetSenders();
EXPECT_EQ(2u, senders.size());
@@ -2220,7 +2323,9 @@ TEST(CreateSessionOptionsTest, GetDefaultMediaSessionOptionsForOffer) {
EXPECT_FALSE(options.has_video());
EXPECT_TRUE(options.bundle_enabled);
EXPECT_TRUE(options.vad_enabled);
- EXPECT_FALSE(options.transport_options.ice_restart);
+ EXPECT_FALSE(options.audio_transport_options.ice_restart);
+ EXPECT_FALSE(options.video_transport_options.ice_restart);
+ EXPECT_FALSE(options.data_transport_options.ice_restart);
}
// Test that a correct MediaSessionOptions is created for an offer if
@@ -2255,18 +2360,22 @@ TEST(CreateSessionOptionsTest,
// Test that a correct MediaSessionOptions is created to restart ice if
// IceRestart is set. It also tests that subsequent MediaSessionOptions don't
-// have |transport_options.ice_restart| set.
+// have |audio_transport_options.ice_restart| etc. set.
TEST(CreateSessionOptionsTest, GetMediaSessionOptionsForOfferWithIceRestart) {
RTCOfferAnswerOptions rtc_options;
rtc_options.ice_restart = true;
cricket::MediaSessionOptions options;
EXPECT_TRUE(ConvertRtcOptionsForOffer(rtc_options, &options));
- EXPECT_TRUE(options.transport_options.ice_restart);
+ EXPECT_TRUE(options.audio_transport_options.ice_restart);
+ EXPECT_TRUE(options.video_transport_options.ice_restart);
+ EXPECT_TRUE(options.data_transport_options.ice_restart);
rtc_options = RTCOfferAnswerOptions();
EXPECT_TRUE(ConvertRtcOptionsForOffer(rtc_options, &options));
- EXPECT_FALSE(options.transport_options.ice_restart);
+ EXPECT_FALSE(options.audio_transport_options.ice_restart);
+ EXPECT_FALSE(options.video_transport_options.ice_restart);
+ EXPECT_FALSE(options.data_transport_options.ice_restart);
}
// Test that the MediaConstraints in an answer don't affect if audio and video
diff --git a/talk/app/webrtc/peerconnectionproxy.h b/talk/app/webrtc/peerconnectionproxy.h
index d207fbbdd8..3c983d73c9 100644
--- a/talk/app/webrtc/peerconnectionproxy.h
+++ b/talk/app/webrtc/peerconnectionproxy.h
@@ -43,6 +43,10 @@ BEGIN_PROXY_MAP(PeerConnection)
PROXY_METHOD1(void, RemoveStream, MediaStreamInterface*)
PROXY_METHOD1(rtc::scoped_refptr<DtmfSenderInterface>,
CreateDtmfSender, AudioTrackInterface*)
+ PROXY_METHOD2(rtc::scoped_refptr<RtpSenderInterface>,
+ CreateSender,
+ const std::string&,
+ const std::string&)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpSenderInterface>>,
GetSenders)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpReceiverInterface>>,
diff --git a/talk/app/webrtc/portallocatorfactory.cc b/talk/app/webrtc/portallocatorfactory.cc
index bd6caccc80..64d714cd50 100644
--- a/talk/app/webrtc/portallocatorfactory.cc
+++ b/talk/app/webrtc/portallocatorfactory.cc
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2004--2011 Google Inc.
+ * Copyright 2011 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -24,69 +24,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+// TODO(deadbeef): Remove this file once chromium build files no longer
+// reference it.
#include "talk/app/webrtc/portallocatorfactory.h"
-
-#include "webrtc/p2p/base/basicpacketsocketfactory.h"
-#include "webrtc/p2p/client/basicportallocator.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/network.h"
-#include "webrtc/base/thread.h"
-
-namespace webrtc {
-
-using rtc::scoped_ptr;
-
-rtc::scoped_refptr<PortAllocatorFactoryInterface>
-PortAllocatorFactory::Create(
- rtc::Thread* worker_thread) {
- rtc::RefCountedObject<PortAllocatorFactory>* allocator =
- new rtc::RefCountedObject<PortAllocatorFactory>(worker_thread);
- return allocator;
-}
-
-PortAllocatorFactory::PortAllocatorFactory(rtc::Thread* worker_thread)
- : network_manager_(new rtc::BasicNetworkManager()),
- socket_factory_(new rtc::BasicPacketSocketFactory(worker_thread)) {
-}
-
-PortAllocatorFactory::~PortAllocatorFactory() {}
-
-void PortAllocatorFactory::SetNetworkIgnoreMask(int network_ignore_mask) {
- network_manager_->set_network_ignore_mask(network_ignore_mask);
-}
-
-cricket::PortAllocator* PortAllocatorFactory::CreatePortAllocator(
- const std::vector<StunConfiguration>& stun,
- const std::vector<TurnConfiguration>& turn) {
- cricket::ServerAddresses stun_hosts;
- typedef std::vector<StunConfiguration>::const_iterator StunIt;
- for (StunIt stun_it = stun.begin(); stun_it != stun.end(); ++stun_it) {
- stun_hosts.insert(stun_it->server);
- }
-
- scoped_ptr<cricket::BasicPortAllocator> allocator(
- new cricket::BasicPortAllocator(
- network_manager_.get(), socket_factory_.get(), stun_hosts));
-
- for (size_t i = 0; i < turn.size(); ++i) {
- cricket::RelayCredentials credentials(turn[i].username, turn[i].password);
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
- cricket::ProtocolType protocol;
- if (cricket::StringToProto(turn[i].transport_type.c_str(), &protocol)) {
- relay_server.ports.push_back(cricket::ProtocolAddress(
- turn[i].server, protocol, turn[i].secure));
- relay_server.credentials = credentials;
- // First in the list gets highest priority.
- relay_server.priority = static_cast<int>(turn.size() - i - 1);
- allocator->AddRelay(relay_server);
- } else {
- LOG(LS_WARNING) << "Ignoring TURN server " << turn[i].server << ". "
- << "Reason= Incorrect " << turn[i].transport_type
- << " transport parameter.";
- }
- }
- return allocator.release();
-}
-
-} // namespace webrtc
diff --git a/talk/app/webrtc/portallocatorfactory.h b/talk/app/webrtc/portallocatorfactory.h
index 83376d0b84..bb6cf4741f 100644
--- a/talk/app/webrtc/portallocatorfactory.h
+++ b/talk/app/webrtc/portallocatorfactory.h
@@ -24,49 +24,10 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
-// This file defines the default implementation of
-// PortAllocatorFactoryInterface.
-// This implementation creates instances of cricket::HTTPPortAllocator and uses
-// the BasicNetworkManager and BasicPacketSocketFactory.
+// TODO(deadbeef): Remove this file once chromium build files no longer
+// reference it.
#ifndef TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
#define TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
-#include "talk/app/webrtc/peerconnectioninterface.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace cricket {
-class PortAllocator;
-}
-
-namespace rtc {
-class BasicNetworkManager;
-class BasicPacketSocketFactory;
-}
-
-namespace webrtc {
-
-class PortAllocatorFactory : public PortAllocatorFactoryInterface {
- public:
- static rtc::scoped_refptr<PortAllocatorFactoryInterface> Create(
- rtc::Thread* worker_thread);
-
- virtual cricket::PortAllocator* CreatePortAllocator(
- const std::vector<StunConfiguration>& stun,
- const std::vector<TurnConfiguration>& turn);
-
- virtual void SetNetworkIgnoreMask(int network_ignore_mask);
-
- protected:
- explicit PortAllocatorFactory(rtc::Thread* worker_thread);
- ~PortAllocatorFactory();
-
- private:
- rtc::scoped_ptr<rtc::BasicNetworkManager> network_manager_;
- rtc::scoped_ptr<rtc::BasicPacketSocketFactory> socket_factory_;
-};
-
-} // namespace webrtc
-
#endif // TALK_APP_WEBRTC_PORTALLOCATORFACTORY_H_
diff --git a/talk/app/webrtc/remoteaudiosource.cc b/talk/app/webrtc/remoteaudiosource.cc
index 41f3d8798a..e904dd9192 100644
--- a/talk/app/webrtc/remoteaudiosource.cc
+++ b/talk/app/webrtc/remoteaudiosource.cc
@@ -29,44 +29,148 @@
#include <algorithm>
#include <functional>
+#include <utility>
+#include "talk/app/webrtc/mediastreamprovider.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
+#include "webrtc/base/thread.h"
namespace webrtc {
-rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create() {
- return new rtc::RefCountedObject<RemoteAudioSource>();
+class RemoteAudioSource::MessageHandler : public rtc::MessageHandler {
+ public:
+ explicit MessageHandler(RemoteAudioSource* source) : source_(source) {}
+
+ private:
+ ~MessageHandler() override {}
+
+ void OnMessage(rtc::Message* msg) override {
+ source_->OnMessage(msg);
+ delete this;
+ }
+
+ const rtc::scoped_refptr<RemoteAudioSource> source_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MessageHandler);
+};
+
+class RemoteAudioSource::Sink : public AudioSinkInterface {
+ public:
+ explicit Sink(RemoteAudioSource* source) : source_(source) {}
+ ~Sink() override { source_->OnAudioProviderGone(); }
+
+ private:
+ void OnData(const AudioSinkInterface::Data& audio) override {
+ if (source_)
+ source_->OnData(audio);
+ }
+
+ const rtc::scoped_refptr<RemoteAudioSource> source_;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Sink);
+};
+
+rtc::scoped_refptr<RemoteAudioSource> RemoteAudioSource::Create(
+ uint32_t ssrc,
+ AudioProviderInterface* provider) {
+ rtc::scoped_refptr<RemoteAudioSource> ret(
+ new rtc::RefCountedObject<RemoteAudioSource>());
+ ret->Initialize(ssrc, provider);
+ return ret;
}
-RemoteAudioSource::RemoteAudioSource() {
+RemoteAudioSource::RemoteAudioSource()
+ : main_thread_(rtc::Thread::Current()),
+ state_(MediaSourceInterface::kLive) {
+ RTC_DCHECK(main_thread_);
}
RemoteAudioSource::~RemoteAudioSource() {
- ASSERT(audio_observers_.empty());
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(audio_observers_.empty());
+ RTC_DCHECK(sinks_.empty());
+}
+
+void RemoteAudioSource::Initialize(uint32_t ssrc,
+ AudioProviderInterface* provider) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ // To make sure we always get notified when the provider goes out of scope,
+ // we register for callbacks here and not on demand in AddSink.
+ if (provider) { // May be null in tests.
+ provider->SetRawAudioSink(
+ ssrc, rtc::scoped_ptr<AudioSinkInterface>(new Sink(this)));
+ }
}
MediaSourceInterface::SourceState RemoteAudioSource::state() const {
- return MediaSourceInterface::kLive;
+ RTC_DCHECK(main_thread_->IsCurrent());
+ return state_;
+}
+
+bool RemoteAudioSource::remote() const {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ return true;
}
void RemoteAudioSource::SetVolume(double volume) {
- ASSERT(volume >= 0 && volume <= 10);
- for (AudioObserverList::iterator it = audio_observers_.begin();
- it != audio_observers_.end(); ++it) {
- (*it)->OnSetVolume(volume);
- }
+ RTC_DCHECK(volume >= 0 && volume <= 10);
+ for (auto* observer : audio_observers_)
+ observer->OnSetVolume(volume);
}
void RemoteAudioSource::RegisterAudioObserver(AudioObserver* observer) {
- ASSERT(observer != NULL);
- ASSERT(std::find(audio_observers_.begin(), audio_observers_.end(),
- observer) == audio_observers_.end());
+ RTC_DCHECK(observer != NULL);
+ RTC_DCHECK(std::find(audio_observers_.begin(), audio_observers_.end(),
+ observer) == audio_observers_.end());
audio_observers_.push_back(observer);
}
void RemoteAudioSource::UnregisterAudioObserver(AudioObserver* observer) {
- ASSERT(observer != NULL);
+ RTC_DCHECK(observer != NULL);
audio_observers_.remove(observer);
}
+void RemoteAudioSource::AddSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(sink);
+
+ if (state_ != MediaSourceInterface::kLive) {
+ LOG(LS_ERROR) << "Can't register sink as the source isn't live.";
+ return;
+ }
+
+ rtc::CritScope lock(&sink_lock_);
+ RTC_DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
+ sinks_.push_back(sink);
+}
+
+void RemoteAudioSource::RemoveSink(AudioTrackSinkInterface* sink) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ RTC_DCHECK(sink);
+
+ rtc::CritScope lock(&sink_lock_);
+ sinks_.remove(sink);
+}
+
+void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) {
+ // Called on the externally-owned audio callback thread by WebRTC.
+ rtc::CritScope lock(&sink_lock_);
+ for (auto* sink : sinks_) {
+ sink->OnData(audio.data, 16, audio.sample_rate, audio.channels,
+ audio.samples_per_channel);
+ }
+}
+
+void RemoteAudioSource::OnAudioProviderGone() {
+ // Called when the data provider is deleted. The call may come from the
+ // libjingle worker thread or from a different worker thread.
+ main_thread_->Post(new MessageHandler(this));
+}
+
+void RemoteAudioSource::OnMessage(rtc::Message* msg) {
+ RTC_DCHECK(main_thread_->IsCurrent());
+ sinks_.clear();
+ state_ = MediaSourceInterface::kEnded;
+ FireOnChanged();
+}
+
} // namespace webrtc
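Two details of the thread hop above are easy to miss: the posted handler deletes
itself after a single dispatch, and it pins the source with a scoped_refptr so the hop
cannot outlive the object. The same pattern in isolation (a sketch; only
rtc::Thread::Post(MessageHandler*) and OnMessage are taken from the code above):

    // One-shot handler: keeps |target| alive across the hop, runs once, self-deletes.
    class OneShotHandler : public rtc::MessageHandler {
     public:
      explicit OneShotHandler(rtc::scoped_refptr<RemoteAudioSource> target)
          : target_(target) {}

     private:
      void OnMessage(rtc::Message* msg) override {
        target_->OnMessage(msg);  // runs on the thread that was Post()ed to
        delete this;              // one dispatch only
      }
      const rtc::scoped_refptr<RemoteAudioSource> target_;
    };

    // From any thread: main_thread->Post(new OneShotHandler(source));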
diff --git a/talk/app/webrtc/remoteaudiosource.h b/talk/app/webrtc/remoteaudiosource.h
index e49aca5684..d648ba4604 100644
--- a/talk/app/webrtc/remoteaudiosource.h
+++ b/talk/app/webrtc/remoteaudiosource.h
@@ -29,36 +29,66 @@
#define TALK_APP_WEBRTC_REMOTEAUDIOSOURCE_H_
#include <list>
+#include <string>
#include "talk/app/webrtc/mediastreaminterface.h"
#include "talk/app/webrtc/notifier.h"
+#include "talk/media/base/audiorenderer.h"
+#include "webrtc/audio/audio_sink.h"
+#include "webrtc/base/criticalsection.h"
+
+namespace rtc {
+struct Message;
+class Thread;
+} // namespace rtc
namespace webrtc {
-using webrtc::AudioSourceInterface;
+class AudioProviderInterface;
// This class implements the audio source used by the remote audio track.
class RemoteAudioSource : public Notifier<AudioSourceInterface> {
public:
// Creates an instance of RemoteAudioSource.
- static rtc::scoped_refptr<RemoteAudioSource> Create();
+ static rtc::scoped_refptr<RemoteAudioSource> Create(
+ uint32_t ssrc,
+ AudioProviderInterface* provider);
+
+ // MediaSourceInterface implementation.
+ MediaSourceInterface::SourceState state() const override;
+ bool remote() const override;
+
+ void AddSink(AudioTrackSinkInterface* sink) override;
+ void RemoveSink(AudioTrackSinkInterface* sink) override;
protected:
RemoteAudioSource();
- virtual ~RemoteAudioSource();
+ ~RemoteAudioSource() override;
+
+ // Post-construction initialization, where we can do things like save a
+ // reference to ourselves (we need to be fully constructed).
+ void Initialize(uint32_t ssrc, AudioProviderInterface* provider);
private:
typedef std::list<AudioObserver*> AudioObserverList;
- // MediaSourceInterface implementation.
- MediaSourceInterface::SourceState state() const override;
-
// AudioSourceInterface implementation.
void SetVolume(double volume) override;
void RegisterAudioObserver(AudioObserver* observer) override;
void UnregisterAudioObserver(AudioObserver* observer) override;
+ class Sink;
+ void OnData(const AudioSinkInterface::Data& audio);
+ void OnAudioProviderGone();
+
+ class MessageHandler;
+ void OnMessage(rtc::Message* msg);
+
AudioObserverList audio_observers_;
+ rtc::CriticalSection sink_lock_;
+ std::list<AudioTrackSinkInterface*> sinks_;
+ rtc::Thread* const main_thread_;
+ SourceState state_;
};
} // namespace webrtc
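The Create()/Initialize() split appears deliberate: Initialize hands out
scoped_refptr<RemoteAudioSource>(this) via the Sink, which is only safe once an
external reference already exists; taking and dropping such a reference inside the
constructor could let the refcount touch zero and destroy a half-built object. A
sketch of the two-phase shape (factory body paraphrased from remoteaudiosource.cc
above):

    rtc::scoped_refptr<RemoteAudioSource> source(
        new rtc::RefCountedObject<RemoteAudioSource>());  // external ref exists now
    source->Initialize(ssrc, provider);  // safe: Sink may AddRef |source| freely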
diff --git a/talk/app/webrtc/mediastreamsignaling.h b/talk/app/webrtc/remoteaudiotrack.cc
index e8c5c110d0..5f0b23e59e 100644
--- a/talk/app/webrtc/mediastreamsignaling.h
+++ b/talk/app/webrtc/remoteaudiotrack.cc
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2012 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,4 +25,4 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// TODO(deadbeef): Remove this file once Chrome build files don't reference it.
+// TODO(tommi): Delete this file when removed from build files in Chromium.
diff --git a/talk/app/webrtc/mediastreamsignaling.cc b/talk/app/webrtc/remoteaudiotrack.h
index b405273902..5f0b23e59e 100644
--- a/talk/app/webrtc/mediastreamsignaling.cc
+++ b/talk/app/webrtc/remoteaudiotrack.h
@@ -1,6 +1,6 @@
/*
* libjingle
- * Copyright 2012 Google Inc.
+ * Copyright 2015 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -25,6 +25,4 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "talk/app/webrtc/mediastreamsignaling.h"
-
-// TODO(deadbeef): Remove this file once Chrome build files don't reference it.
+// TODO(tommi): Delete this file when removed from build files in Chromium.
diff --git a/talk/app/webrtc/rtpreceiver.cc b/talk/app/webrtc/rtpreceiver.cc
index b88554f0ac..9540f36f2f 100644
--- a/talk/app/webrtc/rtpreceiver.cc
+++ b/talk/app/webrtc/rtpreceiver.cc
@@ -39,6 +39,7 @@ AudioRtpReceiver::AudioRtpReceiver(AudioTrackInterface* track,
ssrc_(ssrc),
provider_(provider),
cached_track_enabled_(track->enabled()) {
+ RTC_DCHECK(track_->GetSource()->remote());
track_->RegisterObserver(this);
track_->GetSource()->RegisterAudioObserver(this);
Reconfigure();
@@ -85,6 +86,7 @@ VideoRtpReceiver::VideoRtpReceiver(VideoTrackInterface* track,
uint32_t ssrc,
VideoProviderInterface* provider)
: id_(track->id()), track_(track), ssrc_(ssrc), provider_(provider) {
+ RTC_DCHECK(track_->GetSource()->remote());
provider_->SetVideoPlayout(ssrc_, true, track_->GetSource()->FrameInput());
}
diff --git a/talk/app/webrtc/rtpreceiver.h b/talk/app/webrtc/rtpreceiver.h
index a93ccbcbfe..db021baf68 100644
--- a/talk/app/webrtc/rtpreceiver.h
+++ b/talk/app/webrtc/rtpreceiver.h
@@ -68,10 +68,10 @@ class AudioRtpReceiver : public ObserverInterface,
private:
void Reconfigure();
- std::string id_;
- rtc::scoped_refptr<AudioTrackInterface> track_;
- uint32_t ssrc_;
- AudioProviderInterface* provider_;
+ const std::string id_;
+ const rtc::scoped_refptr<AudioTrackInterface> track_;
+ const uint32_t ssrc_;
+ AudioProviderInterface* provider_; // Set to null in Stop().
bool cached_track_enabled_;
};
diff --git a/talk/app/webrtc/rtpsender.cc b/talk/app/webrtc/rtpsender.cc
index 3a78f4598a..91e484b733 100644
--- a/talk/app/webrtc/rtpsender.cc
+++ b/talk/app/webrtc/rtpsender.cc
@@ -29,6 +29,7 @@
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/videosourceinterface.h"
+#include "webrtc/base/helpers.h"
namespace webrtc {
@@ -43,7 +44,7 @@ LocalAudioSinkAdapter::~LocalAudioSinkAdapter() {
void LocalAudioSinkAdapter::OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) {
rtc::CritScope lock(&lock_);
if (sink_) {
@@ -59,34 +60,49 @@ void LocalAudioSinkAdapter::SetSink(cricket::AudioRenderer::Sink* sink) {
}
AudioRtpSender::AudioRtpSender(AudioTrackInterface* track,
- uint32_t ssrc,
- AudioProviderInterface* provider)
+ const std::string& stream_id,
+ AudioProviderInterface* provider,
+ StatsCollector* stats)
: id_(track->id()),
- track_(track),
- ssrc_(ssrc),
+ stream_id_(stream_id),
provider_(provider),
+ stats_(stats),
+ track_(track),
cached_track_enabled_(track->enabled()),
sink_adapter_(new LocalAudioSinkAdapter()) {
+ RTC_DCHECK(provider != nullptr);
track_->RegisterObserver(this);
track_->AddSink(sink_adapter_.get());
- Reconfigure();
}
+AudioRtpSender::AudioRtpSender(AudioProviderInterface* provider,
+ StatsCollector* stats)
+ : id_(rtc::CreateRandomUuid()),
+ stream_id_(rtc::CreateRandomUuid()),
+ provider_(provider),
+ stats_(stats),
+ sink_adapter_(new LocalAudioSinkAdapter()) {}
+
AudioRtpSender::~AudioRtpSender() {
- track_->RemoveSink(sink_adapter_.get());
- track_->UnregisterObserver(this);
Stop();
}
void AudioRtpSender::OnChanged() {
+ RTC_DCHECK(!stopped_);
if (cached_track_enabled_ != track_->enabled()) {
cached_track_enabled_ = track_->enabled();
- Reconfigure();
+ if (can_send_track()) {
+ SetAudioSend();
+ }
}
}
bool AudioRtpSender::SetTrack(MediaStreamTrackInterface* track) {
- if (track->kind() != "audio") {
+ if (stopped_) {
+ LOG(LS_ERROR) << "SetTrack can't be called on a stopped RtpSender.";
+ return false;
+ }
+ if (track && track->kind() != MediaStreamTrackInterface::kAudioKind) {
LOG(LS_ERROR) << "SetTrack called on audio RtpSender with " << track->kind()
<< " track.";
return false;
@@ -94,36 +110,84 @@ bool AudioRtpSender::SetTrack(MediaStreamTrackInterface* track) {
AudioTrackInterface* audio_track = static_cast<AudioTrackInterface*>(track);
// Detach from old track.
- track_->RemoveSink(sink_adapter_.get());
- track_->UnregisterObserver(this);
+ if (track_) {
+ track_->RemoveSink(sink_adapter_.get());
+ track_->UnregisterObserver(this);
+ }
+
+ if (can_send_track() && stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
// Attach to new track.
+ bool prev_can_send_track = can_send_track();
track_ = audio_track;
- cached_track_enabled_ = track_->enabled();
- track_->RegisterObserver(this);
- track_->AddSink(sink_adapter_.get());
- Reconfigure();
+ if (track_) {
+ cached_track_enabled_ = track_->enabled();
+ track_->RegisterObserver(this);
+ track_->AddSink(sink_adapter_.get());
+ }
+
+ // Update audio provider.
+ if (can_send_track()) {
+ SetAudioSend();
+ if (stats_) {
+ stats_->AddLocalAudioTrack(track_.get(), ssrc_);
+ }
+ } else if (prev_can_send_track) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ }
return true;
}
-void AudioRtpSender::Stop() {
- // TODO(deadbeef): Need to do more here to fully stop sending packets.
- if (!provider_) {
+void AudioRtpSender::SetSsrc(uint32_t ssrc) {
+ if (stopped_ || ssrc == ssrc_) {
return;
}
- cricket::AudioOptions options;
- provider_->SetAudioSend(ssrc_, false, options, nullptr);
- provider_ = nullptr;
+ // If we are already sending with a particular SSRC, stop sending.
+ if (can_send_track()) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ if (stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
+ ssrc_ = ssrc;
+ if (can_send_track()) {
+ SetAudioSend();
+ if (stats_) {
+ stats_->AddLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
}
-void AudioRtpSender::Reconfigure() {
- if (!provider_) {
+void AudioRtpSender::Stop() {
+ // TODO(deadbeef): Need to do more here to fully stop sending packets.
+ if (stopped_) {
return;
}
+ if (track_) {
+ track_->RemoveSink(sink_adapter_.get());
+ track_->UnregisterObserver(this);
+ }
+ if (can_send_track()) {
+ cricket::AudioOptions options;
+ provider_->SetAudioSend(ssrc_, false, options, nullptr);
+ if (stats_) {
+ stats_->RemoveLocalAudioTrack(track_.get(), ssrc_);
+ }
+ }
+ stopped_ = true;
+}
+
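Taken together, SetTrack, SetSsrc, and Stop implement a small gate: the provider is
only driven while can_send_track() holds (a track and a non-zero SSRC), and each
mutator re-checks the gate on both edges. A hypothetical driver sequence (in the real
flow the SSRC is assigned by the PeerConnection when a local description is applied):

    rtc::scoped_refptr<AudioRtpSender> sender(
        new AudioRtpSender(provider, stats));  // random id/stream_id, no track/ssrc
    sender->SetTrack(track.get());  // track only: gate still closed
    sender->SetSsrc(0x1234u);       // track && ssrc: SetAudioSend() fires
    sender->SetTrack(nullptr);      // gate reopens: provider told to stop sending
    sender->Stop();                 // terminal: further SetTrack calls fail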
+void AudioRtpSender::SetAudioSend() {
+ RTC_DCHECK(!stopped_ && can_send_track());
cricket::AudioOptions options;
- if (track_->enabled() && track_->GetSource()) {
+ if (track_->enabled() && track_->GetSource() &&
+ !track_->GetSource()->remote()) {
// TODO(xians): Remove this static_cast since we should be able to connect
- // a remote audio track to peer connection.
+ // a remote audio track to a peer connection.
options = static_cast<LocalAudioSource*>(track_->GetSource())->options();
}
@@ -136,35 +200,42 @@ void AudioRtpSender::Reconfigure() {
}
VideoRtpSender::VideoRtpSender(VideoTrackInterface* track,
- uint32_t ssrc,
+ const std::string& stream_id,
VideoProviderInterface* provider)
: id_(track->id()),
- track_(track),
- ssrc_(ssrc),
+ stream_id_(stream_id),
provider_(provider),
+ track_(track),
cached_track_enabled_(track->enabled()) {
+ RTC_DCHECK(provider != nullptr);
track_->RegisterObserver(this);
- VideoSourceInterface* source = track_->GetSource();
- if (source) {
- provider_->SetCaptureDevice(ssrc_, source->GetVideoCapturer());
- }
- Reconfigure();
}
+VideoRtpSender::VideoRtpSender(VideoProviderInterface* provider)
+ : id_(rtc::CreateRandomUuid()),
+ stream_id_(rtc::CreateRandomUuid()),
+ provider_(provider) {}
+
VideoRtpSender::~VideoRtpSender() {
- track_->UnregisterObserver(this);
Stop();
}
void VideoRtpSender::OnChanged() {
+ RTC_DCHECK(!stopped_);
if (cached_track_enabled_ != track_->enabled()) {
cached_track_enabled_ = track_->enabled();
- Reconfigure();
+ if (can_send_track()) {
+ SetVideoSend();
+ }
}
}
bool VideoRtpSender::SetTrack(MediaStreamTrackInterface* track) {
- if (track->kind() != "video") {
+ if (stopped_) {
+ LOG(LS_ERROR) << "SetTrack can't be called on a stopped RtpSender.";
+ return false;
+ }
+ if (track && track->kind() != MediaStreamTrackInterface::kVideoKind) {
LOG(LS_ERROR) << "SetTrack called on video RtpSender with " << track->kind()
<< " track.";
return false;
@@ -172,30 +243,72 @@ bool VideoRtpSender::SetTrack(MediaStreamTrackInterface* track) {
VideoTrackInterface* video_track = static_cast<VideoTrackInterface*>(track);
// Detach from old track.
- track_->UnregisterObserver(this);
+ if (track_) {
+ track_->UnregisterObserver(this);
+ }
// Attach to new track.
+ bool prev_can_send_track = can_send_track();
track_ = video_track;
- cached_track_enabled_ = track_->enabled();
- track_->RegisterObserver(this);
- Reconfigure();
+ if (track_) {
+ cached_track_enabled_ = track_->enabled();
+ track_->RegisterObserver(this);
+ }
+
+ // Update video provider.
+ if (can_send_track()) {
+ VideoSourceInterface* source = track_->GetSource();
+ // TODO(deadbeef): If SetTrack is called with a disabled track, and the
+ // previous track was enabled, this could cause a frame from the new track
+ // to slip out. Really, what we need is for SetCaptureDevice and SetVideoSend
+ // to be combined into one atomic operation, all the way down to
+ // WebRtcVideoSendStream.
+ provider_->SetCaptureDevice(ssrc_,
+ source ? source->GetVideoCapturer() : nullptr);
+ SetVideoSend();
+ } else if (prev_can_send_track) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
return true;
}
-void VideoRtpSender::Stop() {
- // TODO(deadbeef): Need to do more here to fully stop sending packets.
- if (!provider_) {
+void VideoRtpSender::SetSsrc(uint32_t ssrc) {
+ if (stopped_ || ssrc == ssrc_) {
return;
}
- provider_->SetCaptureDevice(ssrc_, nullptr);
- provider_->SetVideoSend(ssrc_, false, nullptr);
- provider_ = nullptr;
+ // If we are already sending with a particular SSRC, stop sending.
+ if (can_send_track()) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
+ ssrc_ = ssrc;
+ if (can_send_track()) {
+ VideoSourceInterface* source = track_->GetSource();
+ provider_->SetCaptureDevice(ssrc_,
+ source ? source->GetVideoCapturer() : nullptr);
+ SetVideoSend();
+ }
}
-void VideoRtpSender::Reconfigure() {
- if (!provider_) {
+void VideoRtpSender::Stop() {
+ // TODO(deadbeef): Need to do more here to fully stop sending packets.
+ if (stopped_) {
return;
}
+ if (track_) {
+ track_->UnregisterObserver(this);
+ }
+ if (can_send_track()) {
+ provider_->SetCaptureDevice(ssrc_, nullptr);
+ provider_->SetVideoSend(ssrc_, false, nullptr);
+ }
+ stopped_ = true;
+}
+
+void VideoRtpSender::SetVideoSend() {
+ RTC_DCHECK(!stopped_ && can_send_track());
const cricket::VideoOptions* options = nullptr;
VideoSourceInterface* source = track_->GetSource();
if (track_->enabled() && source) {
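
The rtpsender.cc hunk above is truncated by the viewer just as the new SetVideoSend() helper opens. For orientation, here is a minimal sketch of how the helper plausibly completes, inferred from the visible RTC_DCHECK, the options lookup, and the SetVideoSend(ssrc_, enabled, options) calls elsewhere in this diff; the exact body in the tree may differ:

    void VideoRtpSender::SetVideoSend() {
      RTC_DCHECK(!stopped_ && can_send_track());
      const cricket::VideoOptions* options = nullptr;
      VideoSourceInterface* source = track_->GetSource();
      if (track_->enabled() && source) {
        // Options only matter while the track is actually sending frames.
        options = source->options();
      }
      provider_->SetVideoSend(ssrc_, track_->enabled(), options);
    }
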
diff --git a/talk/app/webrtc/rtpsender.h b/talk/app/webrtc/rtpsender.h
index 3741909323..dd846b556c 100644
--- a/talk/app/webrtc/rtpsender.h
+++ b/talk/app/webrtc/rtpsender.h
@@ -36,6 +36,7 @@
#include "talk/app/webrtc/mediastreamprovider.h"
#include "talk/app/webrtc/rtpsenderinterface.h"
+#include "talk/app/webrtc/statscollector.h"
#include "talk/media/base/audiorenderer.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/criticalsection.h"
@@ -56,7 +57,7 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) override;
// cricket::AudioRenderer implementation.
@@ -70,9 +71,15 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface,
class AudioRtpSender : public ObserverInterface,
public rtc::RefCountedObject<RtpSenderInterface> {
public:
+ // StatsCollector provided so that Add/RemoveLocalAudioTrack can be called
+ // at the appropriate times.
AudioRtpSender(AudioTrackInterface* track,
- uint32_t ssrc,
- AudioProviderInterface* provider);
+ const std::string& stream_id,
+ AudioProviderInterface* provider,
+ StatsCollector* stats);
+
+ // Randomly generates id and stream_id.
+ AudioRtpSender(AudioProviderInterface* provider, StatsCollector* stats);
virtual ~AudioRtpSender();
@@ -85,18 +92,37 @@ class AudioRtpSender : public ObserverInterface,
return track_.get();
}
+ void SetSsrc(uint32_t ssrc) override;
+
+ uint32_t ssrc() const override { return ssrc_; }
+
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_AUDIO;
+ }
+
std::string id() const override { return id_; }
+ void set_stream_id(const std::string& stream_id) override {
+ stream_id_ = stream_id;
+ }
+ std::string stream_id() const override { return stream_id_; }
+
void Stop() override;
private:
- void Reconfigure();
+ bool can_send_track() const { return track_ && ssrc_; }
+ // Helper function to construct options for
+ // AudioProviderInterface::SetAudioSend.
+ void SetAudioSend();
std::string id_;
- rtc::scoped_refptr<AudioTrackInterface> track_;
- uint32_t ssrc_;
+ std::string stream_id_;
AudioProviderInterface* provider_;
- bool cached_track_enabled_;
+ StatsCollector* stats_;
+ rtc::scoped_refptr<AudioTrackInterface> track_;
+ uint32_t ssrc_ = 0;
+ bool cached_track_enabled_ = false;
+ bool stopped_ = false;
// Used to pass the data callback from the |track_| to the other end of
// cricket::AudioRenderer.
@@ -107,9 +133,12 @@ class VideoRtpSender : public ObserverInterface,
public rtc::RefCountedObject<RtpSenderInterface> {
public:
VideoRtpSender(VideoTrackInterface* track,
- uint32_t ssrc,
+ const std::string& stream_id,
VideoProviderInterface* provider);
+ // Randomly generates id and stream_id.
+ explicit VideoRtpSender(VideoProviderInterface* provider);
+
virtual ~VideoRtpSender();
// ObserverInterface implementation
@@ -121,18 +150,36 @@ class VideoRtpSender : public ObserverInterface,
return track_.get();
}
+ void SetSsrc(uint32_t ssrc) override;
+
+ uint32_t ssrc() const override { return ssrc_; }
+
+ cricket::MediaType media_type() const override {
+ return cricket::MEDIA_TYPE_VIDEO;
+ }
+
std::string id() const override { return id_; }
+ void set_stream_id(const std::string& stream_id) override {
+ stream_id_ = stream_id;
+ }
+ std::string stream_id() const override { return stream_id_; }
+
void Stop() override;
private:
- void Reconfigure();
+ bool can_send_track() const { return track_ && ssrc_; }
+ // Helper function to construct options for
+ // VideoProviderInterface::SetVideoSend.
+ void SetVideoSend();
std::string id_;
- rtc::scoped_refptr<VideoTrackInterface> track_;
- uint32_t ssrc_;
+ std::string stream_id_;
VideoProviderInterface* provider_;
- bool cached_track_enabled_;
+ rtc::scoped_refptr<VideoTrackInterface> track_;
+ uint32_t ssrc_ = 0;
+ bool cached_track_enabled_ = false;
+ bool stopped_ = false;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/rtpsenderinterface.h b/talk/app/webrtc/rtpsenderinterface.h
index fca98f21db..f54e8ca090 100644
--- a/talk/app/webrtc/rtpsenderinterface.h
+++ b/talk/app/webrtc/rtpsenderinterface.h
@@ -35,6 +35,7 @@
#include "talk/app/webrtc/proxy.h"
#include "talk/app/webrtc/mediastreaminterface.h"
+#include "talk/session/media/mediasession.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
@@ -47,10 +48,24 @@ class RtpSenderInterface : public rtc::RefCountInterface {
virtual bool SetTrack(MediaStreamTrackInterface* track) = 0;
virtual rtc::scoped_refptr<MediaStreamTrackInterface> track() const = 0;
+ // Used to set the SSRC of the sender, once a local description has been set.
+  // If |ssrc| is 0, this indicates that the sender should disconnect from the
+ // underlying transport (this occurs if the sender isn't seen in a local
+ // description).
+ virtual void SetSsrc(uint32_t ssrc) = 0;
+ virtual uint32_t ssrc() const = 0;
+
+ // Audio or video sender?
+ virtual cricket::MediaType media_type() const = 0;
+
// Not to be confused with "mid", this is a field we can temporarily use
  // to uniquely identify a sender until we implement Unified Plan SDP.
virtual std::string id() const = 0;
+ // TODO(deadbeef): Support one sender having multiple stream ids.
+ virtual void set_stream_id(const std::string& stream_id) = 0;
+ virtual std::string stream_id() const = 0;
+
virtual void Stop() = 0;
protected:
@@ -61,7 +76,12 @@ class RtpSenderInterface : public rtc::RefCountInterface {
BEGIN_PROXY_MAP(RtpSender)
PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*)
PROXY_CONSTMETHOD0(rtc::scoped_refptr<MediaStreamTrackInterface>, track)
+PROXY_METHOD1(void, SetSsrc, uint32_t)
+PROXY_CONSTMETHOD0(uint32_t, ssrc)
+PROXY_CONSTMETHOD0(cricket::MediaType, media_type)
PROXY_CONSTMETHOD0(std::string, id)
+PROXY_METHOD1(void, set_stream_id, const std::string&)
+PROXY_CONSTMETHOD0(std::string, stream_id)
PROXY_METHOD0(void, Stop)
END_PROXY()
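
Read together with the proxy map, the new methods define a simple caller-side lifecycle: a track may be attached before any SSRC exists, sending begins only once both are present, and an SSRC of 0 disconnects the sender. A hedged sketch (rtp_sender, local_video_track, and ssrc_from_local_description are illustrative names, not part of this change):

    // Warm-up: attaching a track before an SSRC is known is legal; nothing
    // is sent until both a track and a nonzero SSRC are set.
    rtp_sender->SetTrack(local_video_track);
    // Setting the SSRC once a local description assigns one starts sending.
    rtp_sender->SetSsrc(ssrc_from_local_description);
    // An SSRC of 0 disconnects the sender from the underlying transport.
    rtp_sender->SetSsrc(0);
    // Stop() is permanent; SetTrack() fails on a stopped sender.
    rtp_sender->Stop();
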
diff --git a/talk/app/webrtc/rtpsenderreceiver_unittest.cc b/talk/app/webrtc/rtpsenderreceiver_unittest.cc
index c9d7e008c3..a590e1d01f 100644
--- a/talk/app/webrtc/rtpsenderreceiver_unittest.cc
+++ b/talk/app/webrtc/rtpsenderreceiver_unittest.cc
@@ -26,6 +26,7 @@
*/
#include <string>
+#include <utility>
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/mediastream.h"
@@ -48,14 +49,17 @@ static const char kStreamLabel1[] = "local_stream_1";
static const char kVideoTrackId[] = "video_1";
static const char kAudioTrackId[] = "audio_1";
static const uint32_t kVideoSsrc = 98;
+static const uint32_t kVideoSsrc2 = 100;
static const uint32_t kAudioSsrc = 99;
+static const uint32_t kAudioSsrc2 = 101;
namespace webrtc {
// Helper class to test RtpSender/RtpReceiver.
class MockAudioProvider : public AudioProviderInterface {
public:
- virtual ~MockAudioProvider() {}
+ ~MockAudioProvider() override {}
+
MOCK_METHOD2(SetAudioPlayout,
void(uint32_t ssrc,
bool enable));
@@ -65,6 +69,14 @@ class MockAudioProvider : public AudioProviderInterface {
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer));
MOCK_METHOD2(SetAudioPlayoutVolume, void(uint32_t ssrc, double volume));
+
+ void SetRawAudioSink(uint32_t,
+ rtc::scoped_ptr<AudioSinkInterface> sink) override {
+ sink_ = std::move(sink);
+ }
+
+ private:
+ rtc::scoped_ptr<AudioSinkInterface> sink_;
};
// Helper class to test RtpSender/RtpReceiver.
@@ -85,8 +97,8 @@ class MockVideoProvider : public VideoProviderInterface {
class FakeVideoSource : public Notifier<VideoSourceInterface> {
public:
- static rtc::scoped_refptr<FakeVideoSource> Create() {
- return new rtc::RefCountedObject<FakeVideoSource>();
+ static rtc::scoped_refptr<FakeVideoSource> Create(bool remote) {
+ return new rtc::RefCountedObject<FakeVideoSource>(remote);
}
virtual cricket::VideoCapturer* GetVideoCapturer() { return &fake_capturer_; }
virtual void Stop() {}
@@ -94,16 +106,18 @@ class FakeVideoSource : public Notifier<VideoSourceInterface> {
virtual void AddSink(cricket::VideoRenderer* output) {}
virtual void RemoveSink(cricket::VideoRenderer* output) {}
virtual SourceState state() const { return state_; }
+ virtual bool remote() const { return remote_; }
virtual const cricket::VideoOptions* options() const { return &options_; }
virtual cricket::VideoRenderer* FrameInput() { return NULL; }
protected:
- FakeVideoSource() : state_(kLive) {}
+ explicit FakeVideoSource(bool remote) : state_(kLive), remote_(remote) {}
~FakeVideoSource() {}
private:
cricket::FakeVideoCapturer fake_capturer_;
SourceState state_;
+ bool remote_;
cricket::VideoOptions options_;
};
@@ -111,7 +125,11 @@ class RtpSenderReceiverTest : public testing::Test {
public:
virtual void SetUp() {
stream_ = MediaStream::Create(kStreamLabel1);
- rtc::scoped_refptr<VideoSourceInterface> source(FakeVideoSource::Create());
+ }
+
+ void AddVideoTrack(bool remote) {
+ rtc::scoped_refptr<VideoSourceInterface> source(
+ FakeVideoSource::Create(remote));
video_track_ = VideoTrack::Create(kVideoTrackId, source);
EXPECT_TRUE(stream_->AddTrack(video_track_));
}
@@ -120,17 +138,21 @@ class RtpSenderReceiverTest : public testing::Test {
audio_track_ = AudioTrack::Create(kAudioTrackId, NULL);
EXPECT_TRUE(stream_->AddTrack(audio_track_));
EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
- audio_rtp_sender_ = new AudioRtpSender(stream_->GetAudioTracks()[0],
- kAudioSsrc, &audio_provider_);
+ audio_rtp_sender_ =
+ new AudioRtpSender(stream_->GetAudioTracks()[0], stream_->label(),
+ &audio_provider_, nullptr);
+ audio_rtp_sender_->SetSsrc(kAudioSsrc);
}
void CreateVideoRtpSender() {
+ AddVideoTrack(false);
EXPECT_CALL(video_provider_,
SetCaptureDevice(
kVideoSsrc, video_track_->GetSource()->GetVideoCapturer()));
EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
video_rtp_sender_ = new VideoRtpSender(stream_->GetVideoTracks()[0],
- kVideoSsrc, &video_provider_);
+ stream_->label(), &video_provider_);
+ video_rtp_sender_->SetSsrc(kVideoSsrc);
}
void DestroyAudioRtpSender() {
@@ -146,8 +168,8 @@ class RtpSenderReceiverTest : public testing::Test {
}
void CreateAudioRtpReceiver() {
- audio_track_ =
- AudioTrack::Create(kAudioTrackId, RemoteAudioSource::Create().get());
+ audio_track_ = AudioTrack::Create(
+ kAudioTrackId, RemoteAudioSource::Create(kAudioSsrc, NULL));
EXPECT_TRUE(stream_->AddTrack(audio_track_));
EXPECT_CALL(audio_provider_, SetAudioPlayout(kAudioSsrc, true));
audio_rtp_receiver_ = new AudioRtpReceiver(stream_->GetAudioTracks()[0],
@@ -155,6 +177,7 @@ class RtpSenderReceiverTest : public testing::Test {
}
void CreateVideoRtpReceiver() {
+ AddVideoTrack(true);
EXPECT_CALL(video_provider_,
SetVideoPlayout(kVideoSsrc, true,
video_track_->GetSource()->FrameInput()));
@@ -280,4 +303,212 @@ TEST_F(RtpSenderReceiverTest, RemoteAudioTrackSetVolume) {
DestroyAudioRtpReceiver();
}
+// Test that provider methods aren't called without both a track and an SSRC.
+TEST_F(RtpSenderReceiverTest, AudioSenderWithoutTrackAndSsrc) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_TRUE(sender->SetTrack(track));
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+ sender->SetSsrc(kAudioSsrc);
+ sender->SetSsrc(0);
+ // Just let it get destroyed and make sure it doesn't call any methods on the
+ // provider interface.
+}
+
+// Test that provider methods aren't called without both a track and an SSRC.
+TEST_F(RtpSenderReceiverTest, VideoSenderWithoutTrackAndSsrc) {
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ EXPECT_TRUE(sender->SetTrack(video_track_));
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+ sender->SetSsrc(kVideoSsrc);
+ sender->SetSsrc(0);
+ // Just let it get destroyed and make sure it doesn't call any methods on the
+ // provider interface.
+}
+
+// Test that an audio sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set first.
+TEST_F(RtpSenderReceiverTest, AudioSenderEarlyWarmupSsrcThenTrack) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ sender->SetTrack(track);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+}
+
+// Test that an audio sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set last.
+TEST_F(RtpSenderReceiverTest, AudioSenderEarlyWarmupTrackThenSsrc) {
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(&audio_provider_, nullptr);
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ sender->SetTrack(track);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ sender->SetSsrc(kAudioSsrc);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+}
+
+// Test that a video sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set first.
+TEST_F(RtpSenderReceiverTest, VideoSenderEarlyWarmupSsrcThenTrack) {
+ AddVideoTrack(false);
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ sender->SetTrack(video_track_);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+}
+
+// Test that a video sender calls the expected methods on the provider once
+// it has a track and SSRC, when the SSRC is set last.
+TEST_F(RtpSenderReceiverTest, VideoSenderEarlyWarmupTrackThenSsrc) {
+ AddVideoTrack(false);
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(&video_provider_);
+ sender->SetTrack(video_track_);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ sender->SetSsrc(kVideoSsrc);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+}
+
+// Test that the sender is disconnected from the provider when its SSRC is
+// set to 0.
+TEST_F(RtpSenderReceiverTest, AudioSenderSsrcSetToZero) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ sender->SetSsrc(0);
+
+ // Make sure it's SetSsrc that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(_, _, _, _)).Times(0);
+}
+
+// Test that the sender is disconnected from the provider when its SSRC is
+// set to 0.
+TEST_F(RtpSenderReceiverTest, VideoSenderSsrcSetToZero) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ sender->SetSsrc(0);
+
+ // Make sure it's SetSsrc that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(_, _)).Times(0);
+ EXPECT_CALL(video_provider_, SetVideoSend(_, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, AudioSenderTrackSetToNull) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+
+ // Make sure it's SetTrack that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(_, _, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, VideoSenderTrackSetToNull) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ EXPECT_TRUE(sender->SetTrack(nullptr));
+
+ // Make sure it's SetTrack that called methods on the provider, and not the
+ // destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(_, _)).Times(0);
+ EXPECT_CALL(video_provider_, SetVideoSend(_, _, _)).Times(0);
+}
+
+TEST_F(RtpSenderReceiverTest, AudioSenderSsrcChanged) {
+ rtc::scoped_refptr<AudioTrackInterface> track =
+ AudioTrack::Create(kAudioTrackId, nullptr);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, true, _, _));
+ rtc::scoped_refptr<AudioRtpSender> sender =
+ new AudioRtpSender(track, kStreamLabel1, &audio_provider_, nullptr);
+ sender->SetSsrc(kAudioSsrc);
+
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc, false, _, _)).Times(1);
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc2, true, _, _)).Times(1);
+ sender->SetSsrc(kAudioSsrc2);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(audio_provider_, SetAudioSend(kAudioSsrc2, false, _, _)).Times(1);
+}
+
+TEST_F(RtpSenderReceiverTest, VideoSenderSsrcChanged) {
+ AddVideoTrack(false);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, true, _));
+ rtc::scoped_refptr<VideoRtpSender> sender =
+ new VideoRtpSender(video_track_, kStreamLabel1, &video_provider_);
+ sender->SetSsrc(kVideoSsrc);
+
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc, false, _)).Times(1);
+ EXPECT_CALL(video_provider_,
+ SetCaptureDevice(kVideoSsrc2,
+ video_track_->GetSource()->GetVideoCapturer()));
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc2, true, _));
+ sender->SetSsrc(kVideoSsrc2);
+
+ // Calls expected from destructor.
+ EXPECT_CALL(video_provider_, SetCaptureDevice(kVideoSsrc2, nullptr)).Times(1);
+ EXPECT_CALL(video_provider_, SetVideoSend(kVideoSsrc2, false, _)).Times(1);
+}
+
} // namespace webrtc
diff --git a/talk/app/webrtc/statscollector.cc b/talk/app/webrtc/statscollector.cc
index 347a84640c..b514b42fee 100644
--- a/talk/app/webrtc/statscollector.cc
+++ b/talk/app/webrtc/statscollector.cc
@@ -115,17 +115,17 @@ void ExtractCommonReceiveProperties(const cricket::MediaReceiverInfo& info,
report->AddString(StatsReport::kStatsValueNameCodecName, info.codec_name);
}
-void SetAudioProcessingStats(StatsReport* report, int signal_level,
- bool typing_noise_detected, int echo_return_loss,
- int echo_return_loss_enhancement, int echo_delay_median_ms,
- float aec_quality_min, int echo_delay_std_ms) {
+void SetAudioProcessingStats(StatsReport* report,
+ bool typing_noise_detected,
+ int echo_return_loss,
+ int echo_return_loss_enhancement,
+ int echo_delay_median_ms,
+ float aec_quality_min,
+ int echo_delay_std_ms) {
report->AddBoolean(StatsReport::kStatsValueNameTypingNoiseState,
typing_noise_detected);
report->AddFloat(StatsReport::kStatsValueNameEchoCancellationQualityMin,
aec_quality_min);
- // Don't overwrite the previous signal level if it's not available now.
- if (signal_level >= 0)
- report->AddInt(StatsReport::kStatsValueNameAudioInputLevel, signal_level);
const IntForAdd ints[] = {
{ StatsReport::kStatsValueNameEchoReturnLoss, echo_return_loss },
{ StatsReport::kStatsValueNameEchoReturnLossEnhancement,
@@ -182,11 +182,14 @@ void ExtractStats(const cricket::VoiceReceiverInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VoiceSenderInfo& info, StatsReport* report) {
ExtractCommonSendProperties(info, report);
- SetAudioProcessingStats(report, info.audio_level, info.typing_noise_detected,
- info.echo_return_loss, info.echo_return_loss_enhancement,
- info.echo_delay_median_ms, info.aec_quality_min, info.echo_delay_std_ms);
+ SetAudioProcessingStats(
+ report, info.typing_noise_detected, info.echo_return_loss,
+ info.echo_return_loss_enhancement, info.echo_delay_median_ms,
+ info.aec_quality_min, info.echo_delay_std_ms);
+ RTC_DCHECK_GE(info.audio_level, 0);
const IntForAdd ints[] = {
+      { StatsReport::kStatsValueNameAudioInputLevel, info.audio_level },
{ StatsReport::kStatsValueNameJitterReceived, info.jitter_ms },
{ StatsReport::kStatsValueNamePacketsLost, info.packets_lost },
{ StatsReport::kStatsValueNamePacketsSent, info.packets_sent },
@@ -198,6 +201,8 @@ void ExtractStats(const cricket::VoiceSenderInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
ExtractCommonReceiveProperties(info, report);
+ report->AddString(StatsReport::kStatsValueNameCodecImplementationName,
+ info.decoder_implementation_name);
report->AddInt64(StatsReport::kStatsValueNameBytesReceived,
info.bytes_rcvd);
report->AddInt64(StatsReport::kStatsValueNameCaptureStartNtpTimeMs,
@@ -230,6 +235,8 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, StatsReport* report) {
void ExtractStats(const cricket::VideoSenderInfo& info, StatsReport* report) {
ExtractCommonSendProperties(info, report);
+ report->AddString(StatsReport::kStatsValueNameCodecImplementationName,
+ info.encoder_implementation_name);
report->AddBoolean(StatsReport::kStatsValueNameBandwidthLimitedResolution,
(info.adapt_reason & 0x2) > 0);
report->AddBoolean(StatsReport::kStatsValueNameCpuLimitedResolution,
@@ -730,17 +737,20 @@ void StatsCollector::ExtractSessionInfo() {
channel_report->AddId(StatsReport::kStatsValueNameRemoteCertificateId,
remote_cert_report_id);
}
- const std::string& srtp_cipher = channel_iter.srtp_cipher;
- if (!srtp_cipher.empty()) {
- channel_report->AddString(StatsReport::kStatsValueNameSrtpCipher,
- srtp_cipher);
+ int srtp_crypto_suite = channel_iter.srtp_crypto_suite;
+ if (srtp_crypto_suite != rtc::SRTP_INVALID_CRYPTO_SUITE &&
+ rtc::SrtpCryptoSuiteToName(srtp_crypto_suite).length()) {
+ channel_report->AddString(
+ StatsReport::kStatsValueNameSrtpCipher,
+ rtc::SrtpCryptoSuiteToName(srtp_crypto_suite));
}
- int ssl_cipher = channel_iter.ssl_cipher;
- if (ssl_cipher &&
- rtc::SSLStreamAdapter::GetSslCipherSuiteName(ssl_cipher).length()) {
+ int ssl_cipher_suite = channel_iter.ssl_cipher_suite;
+ if (ssl_cipher_suite != rtc::TLS_NULL_WITH_NULL_NULL &&
+ rtc::SSLStreamAdapter::SslCipherSuiteToName(ssl_cipher_suite)
+ .length()) {
channel_report->AddString(
StatsReport::kStatsValueNameDtlsCipher,
- rtc::SSLStreamAdapter::GetSslCipherSuiteName(ssl_cipher));
+ rtc::SSLStreamAdapter::SslCipherSuiteToName(ssl_cipher_suite));
}
int connection_id = 0;
@@ -888,21 +898,24 @@ void StatsCollector::UpdateReportFromAudioTrack(AudioTrackInterface* track,
RTC_DCHECK(pc_->session()->signaling_thread()->IsCurrent());
RTC_DCHECK(track != NULL);
- int signal_level = 0;
- if (!track->GetSignalLevel(&signal_level))
- signal_level = -1;
+ // Don't overwrite report values if they're not available.
+ int signal_level;
+ if (track->GetSignalLevel(&signal_level)) {
+ RTC_DCHECK_GE(signal_level, 0);
+ report->AddInt(StatsReport::kStatsValueNameAudioInputLevel, signal_level);
+ }
- rtc::scoped_refptr<AudioProcessorInterface> audio_processor(
- track->GetAudioProcessor());
+ auto audio_processor(track->GetAudioProcessor());
- AudioProcessorInterface::AudioProcessorStats stats;
- if (audio_processor.get())
+ if (audio_processor.get()) {
+ AudioProcessorInterface::AudioProcessorStats stats;
audio_processor->GetStats(&stats);
- SetAudioProcessingStats(report, signal_level, stats.typing_noise_detected,
- stats.echo_return_loss, stats.echo_return_loss_enhancement,
- stats.echo_delay_median_ms, stats.aec_quality_min,
- stats.echo_delay_std_ms);
+ SetAudioProcessingStats(
+ report, stats.typing_noise_detected, stats.echo_return_loss,
+ stats.echo_return_loss_enhancement, stats.echo_delay_median_ms,
+ stats.aec_quality_min, stats.echo_delay_std_ms);
+ }
}
bool StatsCollector::GetTrackIdBySsrc(uint32_t ssrc,
diff --git a/talk/app/webrtc/statscollector.h b/talk/app/webrtc/statscollector.h
index 18a345d71d..56db79de20 100644
--- a/talk/app/webrtc/statscollector.h
+++ b/talk/app/webrtc/statscollector.h
@@ -36,7 +36,6 @@
#include <vector>
#include "talk/app/webrtc/mediastreaminterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/statstypes.h"
#include "talk/app/webrtc/webrtcsession.h"
diff --git a/talk/app/webrtc/statscollector_unittest.cc b/talk/app/webrtc/statscollector_unittest.cc
index 9121c691b1..e7ee91190e 100644
--- a/talk/app/webrtc/statscollector_unittest.cc
+++ b/talk/app/webrtc/statscollector_unittest.cc
@@ -35,7 +35,6 @@
#include "talk/app/webrtc/peerconnectionfactory.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/mediastreaminterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/mediastreamtrack.h"
#include "talk/app/webrtc/test/fakedatachannelprovider.h"
#include "talk/app/webrtc/videotrack.h"
@@ -683,8 +682,8 @@ class StatsCollectorTest : public testing::Test {
// Fake stats to process.
cricket::TransportChannelStats channel_stats;
channel_stats.component = 1;
- channel_stats.srtp_cipher = "the-srtp-cipher";
- channel_stats.ssl_cipher = TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA;
+ channel_stats.srtp_crypto_suite = rtc::SRTP_AES128_CM_SHA1_80;
+ channel_stats.ssl_cipher_suite = TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA;
cricket::TransportStats transport_stats;
transport_stats.transport_name = "audio";
@@ -697,8 +696,7 @@ class StatsCollectorTest : public testing::Test {
// Fake certificate to report
rtc::scoped_refptr<rtc::RTCCertificate> local_certificate(
rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::FakeSSLIdentity>(
- new rtc::FakeSSLIdentity(local_cert))
- .Pass()));
+ new rtc::FakeSSLIdentity(local_cert))));
// Configure MockWebRtcSession
EXPECT_CALL(session_,
@@ -747,18 +745,17 @@ class StatsCollectorTest : public testing::Test {
}
// Check negotiated ciphers.
- std::string dtls_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameDtlsCipher);
- EXPECT_EQ(rtc::SSLStreamAdapter::GetSslCipherSuiteName(
+ std::string dtls_cipher_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameDtlsCipher);
+ EXPECT_EQ(rtc::SSLStreamAdapter::SslCipherSuiteToName(
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA),
- dtls_cipher);
- std::string srtp_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameSrtpCipher);
- EXPECT_EQ("the-srtp-cipher", srtp_cipher);
+ dtls_cipher_suite);
+ std::string srtp_crypto_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameSrtpCipher);
+ EXPECT_EQ(rtc::SrtpCryptoSuiteToName(rtc::SRTP_AES128_CM_SHA1_80),
+ srtp_crypto_suite);
}
cricket::FakeMediaEngine* media_engine_;
@@ -1407,16 +1404,14 @@ TEST_F(StatsCollectorTest, NoTransport) {
ASSERT_EQ(kNotFound, remote_certificate_id);
// Check that the negotiated ciphers are absent.
- std::string dtls_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameDtlsCipher);
- ASSERT_EQ(kNotFound, dtls_cipher);
- std::string srtp_cipher = ExtractStatsValue(
- StatsReport::kStatsReportTypeComponent,
- reports,
- StatsReport::kStatsValueNameSrtpCipher);
- ASSERT_EQ(kNotFound, srtp_cipher);
+ std::string dtls_cipher_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameDtlsCipher);
+ ASSERT_EQ(kNotFound, dtls_cipher_suite);
+ std::string srtp_crypto_suite =
+ ExtractStatsValue(StatsReport::kStatsReportTypeComponent, reports,
+ StatsReport::kStatsValueNameSrtpCipher);
+ ASSERT_EQ(kNotFound, srtp_crypto_suite);
}
// This test verifies that the stats are generated correctly when the transport
diff --git a/talk/app/webrtc/statstypes.cc b/talk/app/webrtc/statstypes.cc
index e45833c668..19cb1f5d78 100644
--- a/talk/app/webrtc/statstypes.cc
+++ b/talk/app/webrtc/statstypes.cc
@@ -408,6 +408,8 @@ const char* StatsReport::Value::display_name() const {
return "state";
case kStatsValueNameDataChannelId:
return "datachannelid";
+ case kStatsValueNameCodecImplementationName:
+ return "codecImplementationName";
// 'goog' prefixed constants.
case kStatsValueNameAccelerateRate:
@@ -592,9 +594,6 @@ const char* StatsReport::Value::display_name() const {
return "googViewLimitedResolution";
case kStatsValueNameWritable:
return "googWritable";
- default:
- RTC_DCHECK(false);
- break;
}
return nullptr;
diff --git a/talk/app/webrtc/statstypes.h b/talk/app/webrtc/statstypes.h
index 7fa9f3212d..60439b9bc8 100644
--- a/talk/app/webrtc/statstypes.h
+++ b/talk/app/webrtc/statstypes.h
@@ -120,6 +120,7 @@ class StatsReport {
kStatsValueNameAudioOutputLevel,
kStatsValueNameBytesReceived,
kStatsValueNameBytesSent,
+ kStatsValueNameCodecImplementationName,
kStatsValueNameDataChannelId,
kStatsValueNamePacketsLost,
kStatsValueNamePacketsReceived,
diff --git a/talk/app/webrtc/test/DEPS b/talk/app/webrtc/test/DEPS
new file mode 100644
index 0000000000..a814b152f2
--- /dev/null
+++ b/talk/app/webrtc/test/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+  # Allow inclusion of Chrome's base/android headers, which are needed to
+  # access the JVM and Application context in gtest.
+ "+base/android",
+]
diff --git a/talk/app/webrtc/test/androidtestinitializer.cc b/talk/app/webrtc/test/androidtestinitializer.cc
new file mode 100644
index 0000000000..883c2d8178
--- /dev/null
+++ b/talk/app/webrtc/test/androidtestinitializer.cc
@@ -0,0 +1,74 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+
+#include <pthread.h>
+
+// Note: this dependency is dangerous since it reaches into Chromium's base.
+// There's a risk of e.g. macro clashes. This file may only be used in tests.
+// Since we use Chrome's build system to create the gtest binary, this should
+// be fine.
+#include "base/android/context_utils.h"
+#include "base/android/jni_android.h"
+
+#include "talk/app/webrtc/java/jni/classreferenceholder.h"
+#include "talk/app/webrtc/java/jni/jni_helpers.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/ssladapter.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+
+namespace webrtc {
+
+namespace {
+
+static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
+
+// There can only be one JNI_OnLoad in each binary. So since this is a gtest
+// C++ runner binary, we initialize the same global objects here that
+// JNI_OnLoad would normally set up if this were a Java binary.
+void EnsureInitializedOnce() {
+ RTC_CHECK(::base::android::IsVMInitialized());
+ JNIEnv* jni = ::base::android::AttachCurrentThread();
+ JavaVM* jvm = NULL;
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+ jobject context = ::base::android::GetApplicationContext();
+
+ RTC_CHECK_GE(webrtc_jni::InitGlobalJniVariables(jvm), 0);
+ RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
+ webrtc_jni::LoadGlobalClassReferenceHolder();
+
+ webrtc::VoiceEngine::SetAndroidObjects(jvm, context);
+}
+
+} // anonymous namespace
+
+void InitializeAndroidObjects() {
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+}
+
+} // namespace webrtc
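
A gtest binary has no Java entry point, so tests that reach into the Java layer must call this initializer themselves; the pthread_once guard makes repeated calls cheap and safe. A plausible call site (the fixture name is illustrative):

    #include "talk/app/webrtc/test/androidtestinitializer.h"
    #include "testing/gtest/include/gtest/gtest.h"

    class PeerConnectionAndroidTest : public testing::Test {
     protected:
      void SetUp() override {
        // Idempotent: guarded by pthread_once in androidtestinitializer.cc.
        webrtc::InitializeAndroidObjects();
      }
    };
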
diff --git a/talk/app/webrtc/test/androidtestinitializer.h b/talk/app/webrtc/test/androidtestinitializer.h
new file mode 100644
index 0000000000..e6992825dd
--- /dev/null
+++ b/talk/app/webrtc/test/androidtestinitializer.h
@@ -0,0 +1,37 @@
+/*
+ * libjingle
+ * Copyright 2015 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
+#define TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
+
+namespace webrtc {
+
+void InitializeAndroidObjects();
+
+} // namespace webrtc
+
+#endif // TALK_APP_WEBRTC_TEST_ANDROIDTESTINITIALIZER_H_
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index e2dc12375b..6b675a9395 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -58,7 +58,7 @@ class FakeAdmTest : public testing::Test,
int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -82,7 +82,7 @@ class FakeAdmTest : public testing::Test,
// ADM is pulling data.
int32_t NeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
diff --git a/talk/app/webrtc/test/fakedtlsidentitystore.h b/talk/app/webrtc/test/fakedtlsidentitystore.h
index 0f9bdb9e6c..98074c742a 100644
--- a/talk/app/webrtc/test/fakedtlsidentitystore.h
+++ b/talk/app/webrtc/test/fakedtlsidentitystore.h
@@ -29,41 +29,73 @@
#define TALK_APP_WEBRTC_TEST_FAKEDTLSIDENTITYSERVICE_H_
#include <string>
+#include <utility>
#include "talk/app/webrtc/dtlsidentitystore.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "webrtc/base/rtccertificate.h"
-static const char kRSA_PRIVATE_KEY_PEM[] =
- "-----BEGIN RSA PRIVATE KEY-----\n"
- "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMYRkbhmI7kVA/rM\n"
- "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
- "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
- "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAECgYAvgOs4FJcgvp+TuREx7YtiYVsH\n"
- "mwQPTum2z/8VzWGwR8BBHBvIpVe1MbD/Y4seyI2aco/7UaisatSgJhsU46/9Y4fq\n"
- "2TwXH9QANf4at4d9n/R6rzwpAJOpgwZgKvdQjkfrKTtgLV+/dawvpxUYkRH4JZM1\n"
- "CVGukMfKNrSVH4Ap4QJBAOJmGV1ASPnB4r4nc99at7JuIJmd7fmuVUwUgYi4XgaR\n"
- "WhScBsgYwZ/JoywdyZJgnbcrTDuVcWG56B3vXbhdpMsCQQDf9zeJrjnPZ3Cqm79y\n"
- "kdqANep0uwZciiNiWxsQrCHztywOvbFhdp8iYVFG9EK8DMY41Y5TxUwsHD+67zao\n"
- "ZNqJAkEA1suLUP/GvL8IwuRneQd2tWDqqRQ/Td3qq03hP7e77XtF/buya3Ghclo5\n"
- "54czUR89QyVfJEC6278nzA7n2h1uVQJAcG6mztNL6ja/dKZjYZye2CY44QjSlLo0\n"
- "MTgTSjdfg/28fFn2Jjtqf9Pi/X+50LWI/RcYMC2no606wRk9kyOuIQJBAK6VSAim\n"
- "1pOEjsYQn0X5KEIrz1G3bfCbB848Ime3U2/FWlCHMr6ch8kCZ5d1WUeJD3LbwMNG\n"
- "UCXiYxSsu20QNVw=\n"
- "-----END RSA PRIVATE KEY-----\n";
-
-static const char kCERT_PEM[] =
- "-----BEGIN CERTIFICATE-----\n"
- "MIIBmTCCAQKgAwIBAgIEbzBSAjANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZX\n"
- "ZWJSVEMwHhcNMTQwMTAyMTgyNDQ3WhcNMTQwMjAxMTgyNDQ3WjARMQ8wDQYDVQQD\n"
- "EwZXZWJSVEMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMYRkbhmI7kVA/rM\n"
- "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
- "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
- "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAUflI\n"
- "VUe5Krqf5RVa5C3u/UTAOAUJBiDS3VANTCLBxjuMsvqOG0WvaYWP3HYPgrz0jXK2\n"
- "LJE/mGw3MyFHEqi81jh95J+ypl6xKW6Rm8jKLR87gUvCaVYn/Z4/P3AqcQTB7wOv\n"
- "UD0A8qfhfDM+LK6rPAnCsVN0NRDY3jvd6rzix9M=\n"
- "-----END CERTIFICATE-----\n";
+static const struct {
+ const char* rsa_private_key_pem;
+ const char* cert_pem;
+} kKeysAndCerts[] = {
+ {"-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMYRkbhmI7kVA/rM\n"
+ "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+ "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+ "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAECgYAvgOs4FJcgvp+TuREx7YtiYVsH\n"
+ "mwQPTum2z/8VzWGwR8BBHBvIpVe1MbD/Y4seyI2aco/7UaisatSgJhsU46/9Y4fq\n"
+ "2TwXH9QANf4at4d9n/R6rzwpAJOpgwZgKvdQjkfrKTtgLV+/dawvpxUYkRH4JZM1\n"
+ "CVGukMfKNrSVH4Ap4QJBAOJmGV1ASPnB4r4nc99at7JuIJmd7fmuVUwUgYi4XgaR\n"
+ "WhScBsgYwZ/JoywdyZJgnbcrTDuVcWG56B3vXbhdpMsCQQDf9zeJrjnPZ3Cqm79y\n"
+ "kdqANep0uwZciiNiWxsQrCHztywOvbFhdp8iYVFG9EK8DMY41Y5TxUwsHD+67zao\n"
+ "ZNqJAkEA1suLUP/GvL8IwuRneQd2tWDqqRQ/Td3qq03hP7e77XtF/buya3Ghclo5\n"
+ "54czUR89QyVfJEC6278nzA7n2h1uVQJAcG6mztNL6ja/dKZjYZye2CY44QjSlLo0\n"
+ "MTgTSjdfg/28fFn2Jjtqf9Pi/X+50LWI/RcYMC2no606wRk9kyOuIQJBAK6VSAim\n"
+ "1pOEjsYQn0X5KEIrz1G3bfCbB848Ime3U2/FWlCHMr6ch8kCZ5d1WUeJD3LbwMNG\n"
+ "UCXiYxSsu20QNVw=\n"
+ "-----END RSA PRIVATE KEY-----\n",
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBmTCCAQKgAwIBAgIEbzBSAjANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZX\n"
+ "ZWJSVEMwHhcNMTQwMTAyMTgyNDQ3WhcNMTQwMjAxMTgyNDQ3WjARMQ8wDQYDVQQD\n"
+ "EwZXZWJSVEMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMYRkbhmI7kVA/rM\n"
+ "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+ "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+ "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAUflI\n"
+ "VUe5Krqf5RVa5C3u/UTAOAUJBiDS3VANTCLBxjuMsvqOG0WvaYWP3HYPgrz0jXK2\n"
+ "LJE/mGw3MyFHEqi81jh95J+ypl6xKW6Rm8jKLR87gUvCaVYn/Z4/P3AqcQTB7wOv\n"
+ "UD0A8qfhfDM+LK6rPAnCsVN0NRDY3jvd6rzix9M=\n"
+ "-----END CERTIFICATE-----\n"},
+ {"-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIICXQIBAAKBgQDeYqlyJ1wuiMsi905e3X81/WA/G3ym50PIDZBVtSwZi7JVQPgj\n"
+ "Bl8CPZMvDh9EwB4Ji9ytA8dZZbQ4WbJWPr73zPpJSCvQqz6sOXSlenBRi72acNaQ\n"
+ "sOR/qPvviJx5I6Hqo4qemfnjZhAW85a5BpgrAwKgMLIQTHCTLWwVSyrDrwIDAQAB\n"
+ "AoGARni9eY8/hv+SX+I+05EdXt6MQXNUbQ+cSykBNCfVccLzIFEWUQMT2IHqwl6X\n"
+ "ShIXcq7/n1QzOAEiuzixauM3YHg4xZ1Um2Ha9a7ig5Xg4v6b43bmMkNE6LkoAtYs\n"
+ "qnQdfMh442b1liDud6IMb1Qk0amt3fSrgRMc547TZQVx4QECQQDxUeDm94r3p4ng\n"
+ "5rCLLC1K5/6HSTZsh7jatKPlz7GfP/IZlYV7iE5784/n0wRiCjZOS7hQRy/8m2Gp\n"
+ "pf4aZq+DAkEA6+np4d36FYikydvUrupLT3FkdRHGn/v83qOll/VmeNh+L1xMZlIP\n"
+ "tM26hAXCcQb7O5+J9y3cx2CAQsBS11ZXZQJAfGgTo76WG9p5UEJdXUInD2jOZPwv\n"
+ "XIATolxh6kXKcijLLLlSmT7KB0inNYIpzkkpee+7U1d/u6B3FriGaSHq9QJBAM/J\n"
+ "ICnDdLCgwNvWVraVQC3BpwSB2pswvCFwq7py94V60XFvbw80Ogc6qIv98qvQxVlX\n"
+ "hJIEgA/PjEi+0ng94Q0CQQDm8XSDby35gmjO+6eRmJtAjtB7nguLvrPXM6CPXRmD\n"
+ "sRoBocpHw6j9UdzZ6qYG0FkdXZghezXFY58ro2BYYRR3\n"
+ "-----END RSA PRIVATE KEY-----\n",
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIICWDCCAcGgAwIBAgIJALgDjxMbBOhbMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"
+ "BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"
+ "aWRnaXRzIFB0eSBMdGQwHhcNMTUxMTEzMjIzMjEzWhcNMTYxMTEyMjIzMjEzWjBF\n"
+ "MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"
+ "ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB\n"
+ "gQDeYqlyJ1wuiMsi905e3X81/WA/G3ym50PIDZBVtSwZi7JVQPgjBl8CPZMvDh9E\n"
+ "wB4Ji9ytA8dZZbQ4WbJWPr73zPpJSCvQqz6sOXSlenBRi72acNaQsOR/qPvviJx5\n"
+ "I6Hqo4qemfnjZhAW85a5BpgrAwKgMLIQTHCTLWwVSyrDrwIDAQABo1AwTjAdBgNV\n"
+ "HQ4EFgQUx2tbJdlcSTCepn09UdYORXKuSTAwHwYDVR0jBBgwFoAUx2tbJdlcSTCe\n"
+ "pn09UdYORXKuSTAwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQAmp9Id\n"
+ "E716gHMqeBG4S2FCgVFCr0a0ugkaneQAN/c2L9CbMemEN9W6jvucUIVOtYd90dDW\n"
+ "lXuowWmT/JctPe3D2qt4yvYW3puECHk2tVQmrJOZiZiTRtWm6HxkmoUYHYp/DtaS\n"
+ "1Xe29gSTnZtI5sQCrGMzk3SGRSSs7ejLKiVDBQ==\n"
+ "-----END CERTIFICATE-----\n"}};
class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
public rtc::MessageHandler {
@@ -77,6 +109,9 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
should_fail_ = should_fail;
}
+ void use_original_key() { key_index_ = 0; }
+ void use_alternate_key() { key_index_ = 1; }
+
void RequestIdentity(
rtc::KeyType key_type,
const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>&
@@ -92,8 +127,9 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
static rtc::scoped_refptr<rtc::RTCCertificate> GenerateCertificate() {
std::string cert;
std::string key;
- rtc::SSLIdentity::PemToDer("CERTIFICATE", kCERT_PEM, &cert);
- rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", kRSA_PRIVATE_KEY_PEM, &key);
+ rtc::SSLIdentity::PemToDer("CERTIFICATE", kKeysAndCerts[0].cert_pem, &cert);
+ rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY",
+ kKeysAndCerts[0].rsa_private_key_pem, &key);
std::string pem_cert = rtc::SSLIdentity::DerToPem(
rtc::kPemTypeCertificate,
@@ -106,7 +142,7 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
rtc::scoped_ptr<rtc::SSLIdentity> identity(
rtc::SSLIdentity::FromPEMStrings(pem_key, pem_cert));
- return rtc::RTCCertificate::Create(identity.Pass());
+ return rtc::RTCCertificate::Create(std::move(identity));
}
private:
@@ -115,6 +151,11 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
MSG_FAILURE,
};
+ const char* get_key() {
+ return kKeysAndCerts[key_index_].rsa_private_key_pem;
+ }
+ const char* get_cert() { return kKeysAndCerts[key_index_].cert_pem; }
+
// rtc::MessageHandler implementation.
void OnMessage(rtc::Message* msg) {
MessageData* message_data = static_cast<MessageData*>(msg->pdata);
@@ -124,9 +165,8 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
case MSG_SUCCESS: {
std::string cert;
std::string key;
- rtc::SSLIdentity::PemToDer("CERTIFICATE", kCERT_PEM, &cert);
- rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", kRSA_PRIVATE_KEY_PEM,
- &key);
+ rtc::SSLIdentity::PemToDer("CERTIFICATE", get_cert(), &cert);
+ rtc::SSLIdentity::PemToDer("RSA PRIVATE KEY", get_key(), &key);
observer->OnSuccess(cert, key);
break;
}
@@ -138,6 +178,7 @@ class FakeDtlsIdentityStore : public webrtc::DtlsIdentityStoreInterface,
}
bool should_fail_;
+ int key_index_ = 0;
};
#endif // TALK_APP_WEBRTC_TEST_FAKEDTLSIDENTITYSERVICE_H_
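
The second hard-coded key pair exists so a test can simulate certificate rotation across negotiations. A sketch of the intended use, assuming the store is handed to the PeerConnection as in the surrounding tests:

    rtc::scoped_ptr<FakeDtlsIdentityStore> store(new FakeDtlsIdentityStore());
    store->use_original_key();   // identity requests answered with key pair 0
    // ... run the first offer/answer exchange ...
    store->use_alternate_key();  // later requests return key pair 1, as if
                                 // the certificate had been rotated
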
diff --git a/talk/app/webrtc/test/fakemediastreamsignaling.h b/talk/app/webrtc/test/fakemediastreamsignaling.h
deleted file mode 100644
index 562c4ad306..0000000000
--- a/talk/app/webrtc/test/fakemediastreamsignaling.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * libjingle
- * Copyright 2013 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
-#define TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
-
-#include "talk/app/webrtc/audiotrack.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
-#include "talk/app/webrtc/videotrack.h"
-
-static const char kStream1[] = "stream1";
-static const char kVideoTrack1[] = "video1";
-static const char kAudioTrack1[] = "audio1";
-
-static const char kStream2[] = "stream2";
-static const char kVideoTrack2[] = "video2";
-static const char kAudioTrack2[] = "audio2";
-
-class FakeMediaStreamSignaling : public webrtc::MediaStreamSignaling,
- public webrtc::MediaStreamSignalingObserver {
- public:
- explicit FakeMediaStreamSignaling(cricket::ChannelManager* channel_manager) :
- webrtc::MediaStreamSignaling(rtc::Thread::Current(), this,
- channel_manager) {
- }
-
- void SendAudioVideoStream1() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
- }
-
- void SendAudioVideoStream2() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
- }
-
- void SendAudioVideoStream1And2() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream1, kAudioTrack1, kVideoTrack1));
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, kVideoTrack2));
- }
-
- void SendNothing() {
- ClearLocalStreams();
- }
-
- void UseOptionsAudioOnly() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, kAudioTrack2, ""));
- }
-
- void UseOptionsVideoOnly() {
- ClearLocalStreams();
- AddLocalStream(CreateStream(kStream2, "", kVideoTrack2));
- }
-
- void ClearLocalStreams() {
- while (local_streams()->count() != 0) {
- RemoveLocalStream(local_streams()->at(0));
- }
- }
-
- // Implements MediaStreamSignalingObserver.
- virtual void OnAddRemoteStream(webrtc::MediaStreamInterface* stream) {}
- virtual void OnRemoveRemoteStream(webrtc::MediaStreamInterface* stream) {}
- virtual void OnAddDataChannel(webrtc::DataChannelInterface* data_channel) {}
- virtual void OnAddLocalAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnAddLocalVideoTrack(webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track,
- uint32_t ssrc) {}
- virtual void OnAddRemoteAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnAddRemoteVideoTrack(webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track,
- uint32_t ssrc) {}
- virtual void OnRemoveRemoteAudioTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track) {}
- virtual void OnRemoveRemoteVideoTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track) {}
- virtual void OnRemoveLocalAudioTrack(webrtc::MediaStreamInterface* stream,
- webrtc::AudioTrackInterface* audio_track,
- uint32_t ssrc) {}
- virtual void OnRemoveLocalVideoTrack(
- webrtc::MediaStreamInterface* stream,
- webrtc::VideoTrackInterface* video_track) {}
- virtual void OnRemoveLocalStream(webrtc::MediaStreamInterface* stream) {}
-
- private:
- rtc::scoped_refptr<webrtc::MediaStreamInterface> CreateStream(
- const std::string& stream_label,
- const std::string& audio_track_id,
- const std::string& video_track_id) {
- rtc::scoped_refptr<webrtc::MediaStreamInterface> stream(
- webrtc::MediaStream::Create(stream_label));
-
- if (!audio_track_id.empty()) {
- rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- webrtc::AudioTrack::Create(audio_track_id, NULL));
- stream->AddTrack(audio_track);
- }
-
- if (!video_track_id.empty()) {
- rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
- webrtc::VideoTrack::Create(video_track_id, NULL));
- stream->AddTrack(video_track);
- }
- return stream;
- }
-};
-
-#endif // TALK_APP_WEBRTC_TEST_FAKEMEDIASTREAMSIGNALING_H_
diff --git a/talk/app/webrtc/test/peerconnectiontestwrapper.cc b/talk/app/webrtc/test/peerconnectiontestwrapper.cc
index 2eb24d9700..86b7842517 100644
--- a/talk/app/webrtc/test/peerconnectiontestwrapper.cc
+++ b/talk/app/webrtc/test/peerconnectiontestwrapper.cc
@@ -25,13 +25,15 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "talk/app/webrtc/fakeportallocatorfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/test/fakedtlsidentitystore.h"
#include "talk/app/webrtc/test/fakeperiodicvideocapturer.h"
#include "talk/app/webrtc/test/mockpeerconnectionobservers.h"
#include "talk/app/webrtc/test/peerconnectiontestwrapper.h"
#include "talk/app/webrtc/videosourceinterface.h"
#include "webrtc/base/gunit.h"
+#include "webrtc/p2p/client/fakeportallocator.h"
static const char kStreamLabelBase[] = "stream_label";
static const char kVideoTrackLabelBase[] = "video_track";
@@ -70,10 +72,8 @@ PeerConnectionTestWrapper::~PeerConnectionTestWrapper() {}
bool PeerConnectionTestWrapper::CreatePc(
const MediaConstraintsInterface* constraints) {
- allocator_factory_ = webrtc::FakePortAllocatorFactory::Create();
- if (!allocator_factory_) {
- return false;
- }
+ rtc::scoped_ptr<cricket::PortAllocator> port_allocator(
+ new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
fake_audio_capture_module_ = FakeAudioCaptureModule::Create();
if (fake_audio_capture_module_ == NULL) {
@@ -87,17 +87,17 @@ bool PeerConnectionTestWrapper::CreatePc(
return false;
}
- // CreatePeerConnection with IceServers.
- webrtc::PeerConnectionInterface::IceServers ice_servers;
+ // CreatePeerConnection with RTCConfiguration.
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
webrtc::PeerConnectionInterface::IceServer ice_server;
ice_server.uri = "stun:stun.l.google.com:19302";
- ice_servers.push_back(ice_server);
+ config.servers.push_back(ice_server);
rtc::scoped_ptr<webrtc::DtlsIdentityStoreInterface> dtls_identity_store(
rtc::SSLStreamAdapter::HaveDtlsSrtp() ?
new FakeDtlsIdentityStore() : nullptr);
peer_connection_ = peer_connection_factory_->CreatePeerConnection(
- ice_servers, constraints, allocator_factory_.get(),
- dtls_identity_store.Pass(), this);
+ config, constraints, std::move(port_allocator),
+ std::move(dtls_identity_store), this);
return peer_connection_.get() != NULL;
}
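
The wrapper now constructs the PeerConnection from an RTCConfiguration and an exclusively-owned port allocator, so the allocator is moved into the factory rather than shared through a factory interface. In caller-shaped form (pc and factory are shorthand for the members above):

    // Old: shared allocator factory plus a bare IceServers list.
    //   pc = factory->CreatePeerConnection(ice_servers, constraints,
    //                                      allocator_factory.get(),
    //                                      dtls_identity_store.Pass(), this);
    // New: RTCConfiguration plus a moved-in cricket::PortAllocator.
    rtc::scoped_ptr<cricket::PortAllocator> allocator(
        new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr));
    pc = factory->CreatePeerConnection(config, constraints,
                                       std::move(allocator),
                                       std::move(dtls_identity_store), this);
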
diff --git a/talk/app/webrtc/test/peerconnectiontestwrapper.h b/talk/app/webrtc/test/peerconnectiontestwrapper.h
index b65426326f..883f2f2454 100644
--- a/talk/app/webrtc/test/peerconnectiontestwrapper.h
+++ b/talk/app/webrtc/test/peerconnectiontestwrapper.h
@@ -34,11 +34,6 @@
#include "talk/app/webrtc/test/fakevideotrackrenderer.h"
#include "webrtc/base/sigslot.h"
-namespace webrtc {
-class DtlsIdentityStoreInterface;
-class PortAllocatorFactoryInterface;
-}
-
class PeerConnectionTestWrapper
: public webrtc::PeerConnectionObserver,
public webrtc::CreateSessionDescriptionObserver,
@@ -110,8 +105,6 @@ class PeerConnectionTestWrapper
bool video, const webrtc::FakeConstraints& video_constraints);
std::string name_;
- rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
- allocator_factory_;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
peer_connection_factory_;
diff --git a/talk/app/webrtc/videosource.cc b/talk/app/webrtc/videosource.cc
index b33f5f9e13..4b371e3ed5 100644
--- a/talk/app/webrtc/videosource.cc
+++ b/talk/app/webrtc/videosource.cc
@@ -32,6 +32,7 @@
#include "talk/app/webrtc/mediaconstraintsinterface.h"
#include "talk/session/media/channelmanager.h"
+#include "webrtc/base/arraysize.h"
using cricket::CaptureState;
using webrtc::MediaConstraintsInterface;
@@ -267,11 +268,12 @@ const cricket::VideoFormat& GetBestCaptureFormat(
// Set |option| to the highest-priority value of |key| in the constraints.
// Return false if the key is mandatory, and the value is invalid.
bool ExtractOption(const MediaConstraintsInterface* all_constraints,
- const std::string& key, cricket::Settable<bool>* option) {
+ const std::string& key,
+ rtc::Optional<bool>* option) {
size_t mandatory = 0;
bool value;
if (FindConstraint(all_constraints, key, &value, &mandatory)) {
- option->Set(value);
+ *option = rtc::Optional<bool>(value);
return true;
}
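
ExtractOption is part of the wider cricket::Settable<T> to rtc::Optional<T> migration in this change. Assuming rtc::Optional mirrors std::optional (explicit bool conversion and operator*), which the EXPECT_EQ comparisons in the unittest below rely on, the two patterns compare like this:

    // Old pattern:
    //   cricket::Settable<bool> opt;
    //   opt.Set(true);
    //   bool value;
    //   if (opt.Get(&value)) { /* use value */ }
    // New pattern:
    rtc::Optional<bool> opt;
    opt = rtc::Optional<bool>(true);
    if (opt) {
      bool value = *opt;  // engaged; read through operator*
    }
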
@@ -302,8 +304,6 @@ class FrameInputWrapper : public cricket::VideoRenderer {
virtual ~FrameInputWrapper() {}
// VideoRenderer implementation.
- bool SetSize(int width, int height, int reserved) override { return true; }
-
bool RenderFrame(const cricket::VideoFrame* frame) override {
if (!capturer_->IsRunning()) {
return true;
@@ -329,21 +329,23 @@ namespace webrtc {
rtc::scoped_refptr<VideoSource> VideoSource::Create(
cricket::ChannelManager* channel_manager,
cricket::VideoCapturer* capturer,
- const webrtc::MediaConstraintsInterface* constraints) {
+ const webrtc::MediaConstraintsInterface* constraints,
+ bool remote) {
ASSERT(channel_manager != NULL);
ASSERT(capturer != NULL);
- rtc::scoped_refptr<VideoSource> source(
- new rtc::RefCountedObject<VideoSource>(channel_manager,
- capturer));
+ rtc::scoped_refptr<VideoSource> source(new rtc::RefCountedObject<VideoSource>(
+ channel_manager, capturer, remote));
source->Initialize(constraints);
return source;
}
VideoSource::VideoSource(cricket::ChannelManager* channel_manager,
- cricket::VideoCapturer* capturer)
+ cricket::VideoCapturer* capturer,
+ bool remote)
: channel_manager_(channel_manager),
video_capturer_(capturer),
- state_(kInitializing) {
+ state_(kInitializing),
+ remote_(remote) {
channel_manager_->SignalVideoCaptureStateChange.connect(
this, &VideoSource::OnStateChange);
}
@@ -368,7 +370,7 @@ void VideoSource::Initialize(
} else {
// The VideoCapturer implementation doesn't support capability
// enumeration. We need to guess what the camera supports.
- for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
+ for (int i = 0; i < arraysize(kVideoFormats); ++i) {
formats.push_back(cricket::VideoFormat(kVideoFormats[i]));
}
}
@@ -460,7 +462,9 @@ void VideoSource::OnStateChange(cricket::VideoCapturer* capturer,
}
void VideoSource::SetState(SourceState new_state) {
- if (VERIFY(state_ != new_state)) {
+ // TODO(hbos): Temporarily disabled VERIFY due to webrtc:4776.
+ // if (VERIFY(state_ != new_state)) {
+ if (state_ != new_state) {
state_ = new_state;
FireOnChanged();
}
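The Settable-to-Optional migration in ExtractOption above follows the same mechanical pattern used throughout this change; a hedged summary with illustrative names:

    rtc::Optional<bool> option;             // default-constructed: unset
    option = rtc::Optional<bool>(true);     // was: option.Set(true)
    if (option) {                           // was: option.IsSet()
      bool value = *option;                 // was: option.Get(&value)
    }
    bool fallback = option.value_or(false); // was: GetWithDefaultIfUnset(false)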
diff --git a/talk/app/webrtc/videosource.h b/talk/app/webrtc/videosource.h
index 8253cbac18..98c1e083a3 100644
--- a/talk/app/webrtc/videosource.h
+++ b/talk/app/webrtc/videosource.h
@@ -66,9 +66,12 @@ class VideoSource : public Notifier<VideoSourceInterface>,
static rtc::scoped_refptr<VideoSource> Create(
cricket::ChannelManager* channel_manager,
cricket::VideoCapturer* capturer,
- const webrtc::MediaConstraintsInterface* constraints);
+ const webrtc::MediaConstraintsInterface* constraints,
+ bool remote);
+
+ SourceState state() const override { return state_; }
+ bool remote() const override { return remote_; }
- virtual SourceState state() const { return state_; }
virtual const cricket::VideoOptions* options() const { return &options_; }
virtual cricket::VideoRenderer* FrameInput();
@@ -86,7 +89,8 @@ class VideoSource : public Notifier<VideoSourceInterface>,
protected:
VideoSource(cricket::ChannelManager* channel_manager,
- cricket::VideoCapturer* capturer);
+ cricket::VideoCapturer* capturer,
+ bool remote);
virtual ~VideoSource();
void Initialize(const webrtc::MediaConstraintsInterface* constraints);
@@ -104,6 +108,7 @@ class VideoSource : public Notifier<VideoSourceInterface>,
cricket::VideoFormat format_;
cricket::VideoOptions options_;
SourceState state_;
+ const bool remote_;
};
} // namespace webrtc
diff --git a/talk/app/webrtc/videosource_unittest.cc b/talk/app/webrtc/videosource_unittest.cc
index 2efcc1d84e..6f1df3434e 100644
--- a/talk/app/webrtc/videosource_unittest.cc
+++ b/talk/app/webrtc/videosource_unittest.cc
@@ -144,9 +144,9 @@ class VideoSourceTest : public testing::Test {
void CreateVideoSource(
const webrtc::MediaConstraintsInterface* constraints) {
// VideoSource takes ownership of |capturer_|.
- source_ = VideoSource::Create(channel_manager_.get(),
- capturer_cleanup_.release(),
- constraints);
+ source_ =
+ VideoSource::Create(channel_manager_.get(), capturer_cleanup_.release(),
+ constraints, false);
ASSERT_TRUE(source_.get() != NULL);
EXPECT_EQ(capturer_, source_->GetVideoCapturer());
@@ -210,8 +210,7 @@ TEST_F(VideoSourceTest, StopRestart) {
// RemoteVideoCapturer and takes video frames from FrameInput.
TEST_F(VideoSourceTest, StartStopRemote) {
source_ = VideoSource::Create(channel_manager_.get(),
- new webrtc::RemoteVideoCapturer(),
- NULL);
+ new webrtc::RemoteVideoCapturer(), NULL, true);
ASSERT_TRUE(source_.get() != NULL);
EXPECT_TRUE(NULL != source_->GetVideoCapturer());
@@ -392,16 +391,14 @@ TEST_F(VideoSourceTest, SetValidOptionValues) {
CreateVideoSource(&constraints);
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, OptionNotSet) {
FakeConstraints constraints;
CreateVideoSource(&constraints);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, MandatoryOptionOverridesOptional) {
@@ -413,9 +410,8 @@ TEST_F(VideoSourceTest, MandatoryOptionOverridesOptional) {
CreateVideoSource(&constraints);
- bool value = false;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(true),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionKeyOptional) {
@@ -428,9 +424,8 @@ TEST_F(VideoSourceTest, InvalidOptionKeyOptional) {
EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
kMaxWaitMs);
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionKeyMandatory) {
@@ -443,8 +438,7 @@ TEST_F(VideoSourceTest, InvalidOptionKeyMandatory) {
EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
kMaxWaitMs);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
@@ -456,8 +450,7 @@ TEST_F(VideoSourceTest, InvalidOptionValueOptional) {
EXPECT_EQ_WAIT(MediaSourceInterface::kLive, state_observer_->state(),
kMaxWaitMs);
- bool value = false;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, InvalidOptionValueMandatory) {
@@ -473,8 +466,7 @@ TEST_F(VideoSourceTest, InvalidOptionValueMandatory) {
EXPECT_EQ_WAIT(MediaSourceInterface::kEnded, state_observer_->state(),
kMaxWaitMs);
- bool value;
- EXPECT_FALSE(source_->options()->video_noise_reduction.Get(&value));
+ EXPECT_EQ(rtc::Optional<bool>(), source_->options()->video_noise_reduction);
}
TEST_F(VideoSourceTest, MixedOptionsAndConstraints) {
@@ -497,9 +489,8 @@ TEST_F(VideoSourceTest, MixedOptionsAndConstraints) {
EXPECT_EQ(288, format->height);
EXPECT_EQ(30, format->framerate());
- bool value = true;
- EXPECT_TRUE(source_->options()->video_noise_reduction.Get(&value));
- EXPECT_FALSE(value);
+ EXPECT_EQ(rtc::Optional<bool>(false),
+ source_->options()->video_noise_reduction);
}
// Tests that the source starts video with the default resolution for
diff --git a/talk/app/webrtc/videosourceproxy.h b/talk/app/webrtc/videosourceproxy.h
index 677fa9cf0f..ce96e8e6d1 100644
--- a/talk/app/webrtc/videosourceproxy.h
+++ b/talk/app/webrtc/videosourceproxy.h
@@ -38,6 +38,7 @@ namespace webrtc {
// signaling thread.
BEGIN_PROXY_MAP(VideoSource)
PROXY_CONSTMETHOD0(SourceState, state)
+ PROXY_CONSTMETHOD0(bool, remote)
PROXY_METHOD0(cricket::VideoCapturer*, GetVideoCapturer)
PROXY_METHOD0(void, Stop)
PROXY_METHOD0(void, Restart)
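PROXY_CONSTMETHOD0 marshals the const call onto the signaling thread. Conceptually (a sketch, not the actual macro expansion) the generated override behaves like:

    bool remote() const override {
      // |c_| is the wrapped VideoSource; Invoke blocks until the signaling
      // thread has produced the result.
      return signaling_thread_->Invoke<bool>([this] { return c_->remote(); });
    }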
diff --git a/talk/app/webrtc/videotrack.cc b/talk/app/webrtc/videotrack.cc
index 7c78aea91f..f138240068 100644
--- a/talk/app/webrtc/videotrack.cc
+++ b/talk/app/webrtc/videotrack.cc
@@ -31,7 +31,7 @@
namespace webrtc {
-static const char kVideoTrackKind[] = "video";
+const char MediaStreamTrackInterface::kVideoKind[] = "video";
VideoTrack::VideoTrack(const std::string& label,
VideoSourceInterface* video_source)
@@ -47,7 +47,7 @@ VideoTrack::~VideoTrack() {
}
std::string VideoTrack::kind() const {
- return kVideoTrackKind;
+ return kVideoKind;
}
void VideoTrack::AddRenderer(VideoRendererInterface* renderer) {
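With the kind string now a named constant on MediaStreamTrackInterface, callers can avoid string literals; an illustrative comparison:

    // Illustrative only:
    if (track->kind() == webrtc::MediaStreamTrackInterface::kVideoKind) {
      // |track| is a video track.
    }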
diff --git a/talk/app/webrtc/videotrack_unittest.cc b/talk/app/webrtc/videotrack_unittest.cc
index 609ee80ffc..013d925cd2 100644
--- a/talk/app/webrtc/videotrack_unittest.cc
+++ b/talk/app/webrtc/videotrack_unittest.cc
@@ -62,7 +62,7 @@ class VideoTrackTest : public testing::Test {
video_track_ = VideoTrack::Create(
kVideoTrackId,
VideoSource::Create(channel_manager_.get(),
- new webrtc::RemoteVideoCapturer(), NULL));
+ new webrtc::RemoteVideoCapturer(), NULL, true));
}
protected:
diff --git a/talk/app/webrtc/videotrackrenderers.cc b/talk/app/webrtc/videotrackrenderers.cc
index 3c47c6edab..3f9301b718 100644
--- a/talk/app/webrtc/videotrackrenderers.cc
+++ b/talk/app/webrtc/videotrackrenderers.cc
@@ -54,10 +54,6 @@ void VideoTrackRenderers::SetEnabled(bool enable) {
enabled_ = enable;
}
-bool VideoTrackRenderers::SetSize(int width, int height, int reserved) {
- return true;
-}
-
bool VideoTrackRenderers::RenderFrame(const cricket::VideoFrame* frame) {
rtc::CritScope cs(&critical_section_);
if (!enabled_) {
diff --git a/talk/app/webrtc/videotrackrenderers.h b/talk/app/webrtc/videotrackrenderers.h
index 15274a1530..3262e22dff 100644
--- a/talk/app/webrtc/videotrackrenderers.h
+++ b/talk/app/webrtc/videotrackrenderers.h
@@ -48,7 +48,6 @@ class VideoTrackRenderers : public cricket::VideoRenderer {
~VideoTrackRenderers();
// Implements cricket::VideoRenderer
- virtual bool SetSize(int width, int height, int reserved);
virtual bool RenderFrame(const cricket::VideoFrame* frame);
void AddRenderer(VideoRendererInterface* renderer);
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index 3fa9a7d469..e287e90916 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -45,6 +45,7 @@
#include "webrtc/p2p/base/constants.h"
#include "webrtc/p2p/base/port.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/messagedigest.h"
@@ -121,6 +122,7 @@ static const char kLineTypeAttributes = 'a';
static const char kAttributeGroup[] = "group";
static const char kAttributeMid[] = "mid";
static const char kAttributeRtcpMux[] = "rtcp-mux";
+static const char kAttributeRtcpReducedSize[] = "rtcp-rsize";
static const char kAttributeSsrc[] = "ssrc";
static const char kSsrcAttributeCname[] = "cname";
static const char kAttributeExtmap[] = "extmap";
@@ -138,8 +140,8 @@ static const char kAttributeCandidate[] = "candidate";
static const char kAttributeCandidateTyp[] = "typ";
static const char kAttributeCandidateRaddr[] = "raddr";
static const char kAttributeCandidateRport[] = "rport";
-static const char kAttributeCandidateUsername[] = "username";
-static const char kAttributeCandidatePassword[] = "password";
+static const char kAttributeCandidateUfrag[] = "ufrag";
+static const char kAttributeCandidatePwd[] = "pwd";
static const char kAttributeCandidateGeneration[] = "generation";
static const char kAttributeFingerprint[] = "fingerprint";
static const char kAttributeSetup[] = "setup";
@@ -260,6 +262,7 @@ static void BuildRtpMap(const MediaContentDescription* media_desc,
const MediaType media_type,
std::string* message);
static void BuildCandidate(const std::vector<Candidate>& candidates,
+ bool include_ufrag,
std::string* message);
static void BuildIceOptions(const std::vector<std::string>& transport_options,
std::string* message);
@@ -876,7 +879,7 @@ std::string SdpSerializeCandidate(
std::string message;
std::vector<cricket::Candidate> candidates;
candidates.push_back(candidate.candidate());
- BuildCandidate(candidates, &message);
+ BuildCandidate(candidates, true, &message);
// Per WebRTC draft section 4.8.1.1, the candidate-attribute is serialized
// as just "candidate:<candidate>", not "a=candidate:<...>" followed by CRLF.
ASSERT(message.find("a=") == 0);
@@ -1070,10 +1073,9 @@ bool ParseCandidate(const std::string& message, Candidate* candidate,
}
// Extension
- // Empty string as the candidate username and password.
- // Will be updated later with the ice-ufrag and ice-pwd.
- // TODO: Remove the username/password extension, which is currently
- // kept for backwards compatibility.
+ // Though non-standard, we support signaling the ICE ufrag and pwd on the
+ // candidate itself, to avoid confusion about which generation a candidate
+ // belongs to when multiple generations are trickled at the same time.
std::string username;
std::string password;
uint32_t generation = 0;
@@ -1084,9 +1086,9 @@ bool ParseCandidate(const std::string& message, Candidate* candidate,
if (!GetValueFromString(first_line, fields[++i], &generation, error)) {
return false;
}
- } else if (fields[i] == kAttributeCandidateUsername) {
+ } else if (fields[i] == kAttributeCandidateUfrag) {
username = fields[++i];
- } else if (fields[i] == kAttributeCandidatePassword) {
+ } else if (fields[i] == kAttributeCandidatePwd) {
password = fields[++i];
} else {
// Skip the unknown extension.
@@ -1283,8 +1285,9 @@ void BuildMediaDescription(const ContentInfo* content_info,
}
}
- // Build the a=candidate lines.
- BuildCandidate(candidates, message);
+ // Build the a=candidate lines. We don't include ufrag and pwd in the
+ // candidates in the SDP to avoid redundancy.
+ BuildCandidate(candidates, false, message);
// Use the transport_info to build the media level ice-ufrag and ice-pwd.
if (transport_info) {
@@ -1292,13 +1295,17 @@ void BuildMediaDescription(const ContentInfo* content_info,
// ice-pwd-att = "ice-pwd" ":" password
// ice-ufrag-att = "ice-ufrag" ":" ufrag
// ice-ufrag
- InitAttrLine(kAttributeIceUfrag, &os);
- os << kSdpDelimiterColon << transport_info->description.ice_ufrag;
- AddLine(os.str(), message);
+ if (!transport_info->description.ice_ufrag.empty()) {
+ InitAttrLine(kAttributeIceUfrag, &os);
+ os << kSdpDelimiterColon << transport_info->description.ice_ufrag;
+ AddLine(os.str(), message);
+ }
// ice-pwd
- InitAttrLine(kAttributeIcePwd, &os);
- os << kSdpDelimiterColon << transport_info->description.ice_pwd;
- AddLine(os.str(), message);
+ if (!transport_info->description.ice_pwd.empty()) {
+ InitAttrLine(kAttributeIcePwd, &os);
+ os << kSdpDelimiterColon << transport_info->description.ice_pwd;
+ AddLine(os.str(), message);
+ }
// draft-petithuguenin-mmusic-ice-attributes-level-03
BuildIceOptions(transport_info->description.transport_options, message);
@@ -1399,6 +1406,13 @@ void BuildRtpContentAttributes(
AddLine(os.str(), message);
}
+ // RFC 5506
+ // a=rtcp-rsize
+ if (media_desc->rtcp_reduced_size()) {
+ InitAttrLine(kAttributeRtcpReducedSize, &os);
+ AddLine(os.str(), message);
+ }
+
// RFC 4568
// a=crypto:<tag> <crypto-suite> <key-params> [<session-params>]
for (std::vector<CryptoParams>::const_iterator it =
@@ -1525,7 +1539,7 @@ bool IsFmtpParam(const std::string& name) {
kCodecParamMaxAverageBitrate, kCodecParamMaxPlaybackRate,
kCodecParamAssociatedPayloadType
};
- for (size_t i = 0; i < ARRAY_SIZE(kFmtpParams); ++i) {
+ for (size_t i = 0; i < arraysize(kFmtpParams); ++i) {
if (_stricmp(name.c_str(), kFmtpParams[i]) == 0) {
return true;
}
@@ -1708,6 +1722,7 @@ void BuildRtpMap(const MediaContentDescription* media_desc,
}
void BuildCandidate(const std::vector<Candidate>& candidates,
+ bool include_ufrag,
std::string* message) {
std::ostringstream os;
@@ -1757,6 +1772,9 @@ void BuildCandidate(const std::vector<Candidate>& candidates,
// Extensions
os << kAttributeCandidateGeneration << " " << it->generation();
+ if (include_ufrag && !it->username().empty()) {
+ os << " " << kAttributeCandidateUfrag << " " << it->username();
+ }
AddLine(os.str(), message);
}
@@ -2046,7 +2064,7 @@ static bool ParseDtlsSetup(const std::string& line,
struct StaticPayloadAudioCodec {
const char* name;
int clockrate;
- int channels;
+ size_t channels;
};
static const StaticPayloadAudioCodec kStaticPayloadAudioCodecs[] = {
{ "PCMU", 8000, 1 },
@@ -2082,10 +2100,10 @@ void MaybeCreateStaticPayloadAudioCodecs(
int payload_type = *it;
if (!media_desc->HasCodec(payload_type) &&
payload_type >= 0 &&
- payload_type < ARRAY_SIZE(kStaticPayloadAudioCodecs)) {
+ payload_type < arraysize(kStaticPayloadAudioCodecs)) {
std::string encoding_name = kStaticPayloadAudioCodecs[payload_type].name;
int clock_rate = kStaticPayloadAudioCodecs[payload_type].clockrate;
- int channels = kStaticPayloadAudioCodecs[payload_type].channels;
+ size_t channels = kStaticPayloadAudioCodecs[payload_type].channels;
media_desc->AddCodec(cricket::AudioCodec(payload_type, encoding_name,
clock_rate, 0, channels,
preference));
@@ -2552,6 +2570,8 @@ bool ParseContent(const std::string& message,
//
if (HasAttribute(line, kAttributeRtcpMux)) {
media_desc->set_rtcp_mux(true);
+ } else if (HasAttribute(line, kAttributeRtcpReducedSize)) {
+ media_desc->set_rtcp_reduced_size(true);
} else if (HasAttribute(line, kAttributeSsrcGroup)) {
if (!ParseSsrcGroupAttribute(line, &ssrc_groups, error)) {
return false;
@@ -2666,7 +2686,8 @@ bool ParseContent(const std::string& message,
// Update the candidates with the media level "ice-pwd" and "ice-ufrag".
for (Candidates::iterator it = candidates_orig.begin();
it != candidates_orig.end(); ++it) {
- ASSERT((*it).username().empty());
+ ASSERT((*it).username().empty() ||
+ (*it).username() == transport->ice_ufrag);
(*it).set_username(transport->ice_ufrag);
ASSERT((*it).password().empty());
(*it).set_password(transport->ice_pwd);
@@ -2817,7 +2838,7 @@ bool ParseCryptoAttribute(const std::string& line,
// Updates or creates a new codec entry in the audio description according
// to |name|, |clockrate|, |bitrate|, |channels| and |preference|.
void UpdateCodec(int payload_type, const std::string& name, int clockrate,
- int bitrate, int channels, int preference,
+ int bitrate, size_t channels, int preference,
AudioContentDescription* audio_desc) {
// Codec may already be populated with (only) optional parameters
// (from an fmtp).
@@ -2916,7 +2937,7 @@ bool ParseRtpmapAttribute(const std::string& line,
// of audio channels. This parameter is OPTIONAL and may be
// omitted if the number of channels is one, provided that no
// additional parameters are needed.
- int channels = 1;
+ size_t channels = 1;
if (codec_params.size() == 3) {
if (!GetValueFromString(line, codec_params[2], &channels, error)) {
return false;
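For orientation, roughly what the two new serialized forms produced above look like, modeled on the test constants in the unit test below (values illustrative):

    // Reduced-size RTCP on a media section (RFC 5506):
    "a=rtcp-rsize\r\n"
    // A candidate carrying the non-standard ufrag extension, appended after
    // the generation field:
    "a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host "
    "generation 2 ufrag ABC"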
diff --git a/talk/app/webrtc/webrtcsdp_unittest.cc b/talk/app/webrtc/webrtcsdp_unittest.cc
index cb6a392ab4..15fc8083b4 100644
--- a/talk/app/webrtc/webrtcsdp_unittest.cc
+++ b/talk/app/webrtc/webrtcsdp_unittest.cc
@@ -30,6 +30,9 @@
#include <vector>
#include "talk/app/webrtc/jsepsessiondescription.h"
+#ifdef WEBRTC_ANDROID
+#include "talk/app/webrtc/test/androidtestinitializer.h"
+#endif
#include "talk/app/webrtc/webrtcsdp.h"
#include "talk/media/base/constants.h"
#include "webrtc/p2p/base/constants.h"
@@ -80,11 +83,13 @@ static const char kSessionTime[] = "t=0 0\r\n";
static const uint32_t kCandidatePriority = 2130706432U; // pref = 1.0
static const char kCandidateUfragVoice[] = "ufrag_voice";
static const char kCandidatePwdVoice[] = "pwd_voice";
+static const char kAttributeIceUfragVoice[] = "a=ice-ufrag:ufrag_voice\r\n";
static const char kAttributeIcePwdVoice[] = "a=ice-pwd:pwd_voice\r\n";
static const char kCandidateUfragVideo[] = "ufrag_video";
static const char kCandidatePwdVideo[] = "pwd_video";
static const char kCandidateUfragData[] = "ufrag_data";
static const char kCandidatePwdData[] = "pwd_data";
+static const char kAttributeIceUfragVideo[] = "a=ice-ufrag:ufrag_video\r\n";
static const char kAttributeIcePwdVideo[] = "a=ice-pwd:pwd_video\r\n";
static const uint32_t kCandidateGeneration = 2;
static const char kCandidateFoundation1[] = "a0+B/1";
@@ -153,6 +158,7 @@ static const char kSdpFullString[] =
"a=mid:audio_content_name\r\n"
"a=sendrecv\r\n"
"a=rtcp-mux\r\n"
+ "a=rtcp-rsize\r\n"
"a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
"inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
"dummy_session_params\r\n"
@@ -220,6 +226,7 @@ static const char kSdpString[] =
"a=mid:audio_content_name\r\n"
"a=sendrecv\r\n"
"a=rtcp-mux\r\n"
+ "a=rtcp-rsize\r\n"
"a=crypto:1 AES_CM_128_HMAC_SHA1_32 "
"inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32 "
"dummy_session_params\r\n"
@@ -394,9 +401,9 @@ static const char kRawIPV6Candidate[] =
"abcd::abcd::abcd::abcd::abcd::abcd::abcd::abcd 1234 typ host generation 2";
// One candidate reference string.
-static const char kSdpOneCandidateOldFormat[] =
+static const char kSdpOneCandidateWithUfragPwd[] =
"a=candidate:a0+B/1 1 udp 2130706432 192.168.1.5 1234 typ host network_name"
- " eth0 username user_rtp password password_rtp generation 2\r\n";
+ " eth0 ufrag user_rtp pwd password_rtp generation 2\r\n";
// Session id and version
static const char kSessionId[] = "18446744069414584320";
@@ -523,10 +530,14 @@ static void ReplaceDirection(cricket::MediaContentDirection direction,
static void ReplaceRejected(bool audio_rejected, bool video_rejected,
std::string* message) {
if (audio_rejected) {
- Replace("m=audio 2345", "m=audio 0", message);
+ Replace("m=audio 9", "m=audio 0", message);
+ Replace(kAttributeIceUfragVoice, "", message);
+ Replace(kAttributeIcePwdVoice, "", message);
}
if (video_rejected) {
- Replace("m=video 3457", "m=video 0", message);
+ Replace("m=video 9", "m=video 0", message);
+ Replace(kAttributeIceUfragVideo, "", message);
+ Replace(kAttributeIcePwdVideo, "", message);
}
}
@@ -536,6 +547,9 @@ class WebRtcSdpTest : public testing::Test {
public:
WebRtcSdpTest()
: jdesc_(kDummyString) {
+#ifdef WEBRTC_ANDROID
+ webrtc::InitializeAndroidObjects();
+#endif
// AudioContentDescription
audio_desc_ = CreateAudioContentDescription();
AudioCodec opus(111, "opus", 48000, 0, 2, 3);
@@ -704,6 +718,7 @@ class WebRtcSdpTest : public testing::Test {
AudioContentDescription* CreateAudioContentDescription() {
AudioContentDescription* audio = new AudioContentDescription();
audio->set_rtcp_mux(true);
+ audio->set_rtcp_reduced_size(true);
StreamParams audio_stream1;
audio_stream1.id = kAudioTrackId1;
audio_stream1.cname = kStream1Cname;
@@ -735,6 +750,9 @@ class WebRtcSdpTest : public testing::Test {
// rtcp_mux
EXPECT_EQ(cd1->rtcp_mux(), cd2->rtcp_mux());
+ // rtcp_reduced_size
+ EXPECT_EQ(cd1->rtcp_reduced_size(), cd2->rtcp_reduced_size());
+
// cryptos
EXPECT_EQ(cd1->cryptos().size(), cd2->cryptos().size());
if (cd1->cryptos().size() != cd2->cryptos().size()) {
@@ -979,6 +997,18 @@ class WebRtcSdpTest : public testing::Test {
desc_.AddTransportInfo(transport_info);
}
+ void SetIceUfragPwd(const std::string& content_name,
+ const std::string& ice_ufrag,
+ const std::string& ice_pwd) {
+ ASSERT_TRUE(desc_.GetTransportInfoByName(content_name) != NULL);
+ cricket::TransportInfo transport_info =
+ *(desc_.GetTransportInfoByName(content_name));
+ desc_.RemoveTransportInfoByName(content_name);
+ transport_info.description.ice_ufrag = ice_ufrag;
+ transport_info.description.ice_pwd = ice_pwd;
+ desc_.AddTransportInfo(transport_info);
+ }
+
void AddFingerprint() {
desc_.RemoveTransportInfoByName(kAudioContentName);
desc_.RemoveTransportInfoByName(kVideoContentName);
@@ -1050,15 +1080,22 @@ class WebRtcSdpTest : public testing::Test {
audio_desc_);
desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
video_desc_);
- std::string new_sdp = kSdpFullString;
+ SetIceUfragPwd(kAudioContentName,
+ audio_rejected ? "" : kCandidateUfragVoice,
+ audio_rejected ? "" : kCandidatePwdVoice);
+ SetIceUfragPwd(kVideoContentName,
+ video_rejected ? "" : kCandidateUfragVideo,
+ video_rejected ? "" : kCandidatePwdVideo);
+
+ std::string new_sdp = kSdpString;
ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
- if (!jdesc_.Initialize(desc_.Copy(),
- jdesc_.session_id(),
- jdesc_.session_version())) {
+ JsepSessionDescription jdesc_no_candidates(kDummyString);
+ if (!jdesc_no_candidates.Initialize(desc_.Copy(), kSessionId,
+ kSessionVersion)) {
return false;
}
- std::string message = webrtc::SdpSerialize(jdesc_);
+ std::string message = webrtc::SdpSerialize(jdesc_no_candidates);
EXPECT_EQ(new_sdp, message);
return true;
}
@@ -1121,11 +1158,11 @@ class WebRtcSdpTest : public testing::Test {
}
bool TestDeserializeRejected(bool audio_rejected, bool video_rejected) {
- std::string new_sdp = kSdpFullString;
+ std::string new_sdp = kSdpString;
ReplaceRejected(audio_rejected, video_rejected, &new_sdp);
JsepSessionDescription new_jdesc(JsepSessionDescription::kOffer);
-
EXPECT_TRUE(SdpDeserialize(new_sdp, &new_jdesc));
+
audio_desc_ = static_cast<AudioContentDescription*>(
audio_desc_->Copy());
video_desc_ = static_cast<VideoContentDescription*>(
@@ -1136,12 +1173,18 @@ class WebRtcSdpTest : public testing::Test {
audio_desc_);
desc_.AddContent(kVideoContentName, NS_JINGLE_RTP, video_rejected,
video_desc_);
- if (!jdesc_.Initialize(desc_.Copy(),
- jdesc_.session_id(),
- jdesc_.session_version())) {
+ SetIceUfragPwd(kAudioContentName,
+ audio_rejected ? "" : kCandidateUfragVoice,
+ audio_rejected ? "" : kCandidatePwdVoice);
+ SetIceUfragPwd(kVideoContentName,
+ video_rejected ? "" : kCandidateUfragVideo,
+ video_rejected ? "" : kCandidatePwdVideo);
+ JsepSessionDescription jdesc_no_candidates(kDummyString);
+ if (!jdesc_no_candidates.Initialize(desc_.Copy(), jdesc_.session_id(),
+ jdesc_.session_version())) {
return false;
}
- EXPECT_TRUE(CompareSessionDescription(jdesc_, new_jdesc));
+ EXPECT_TRUE(CompareSessionDescription(jdesc_no_candidates, new_jdesc));
return true;
}
@@ -1540,8 +1583,8 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithFingerprintNoCryptos) {
TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithoutCandidates) {
// JsepSessionDescription with desc but without candidates.
JsepSessionDescription jdesc_no_candidates(kDummyString);
- ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(),
- kSessionId, kSessionVersion));
+ ASSERT_TRUE(jdesc_no_candidates.Initialize(desc_.Copy(), kSessionId,
+ kSessionVersion));
std::string message = webrtc::SdpSerialize(jdesc_no_candidates);
EXPECT_EQ(std::string(kSdpString), message);
}
@@ -1721,6 +1764,13 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmap) {
TEST_F(WebRtcSdpTest, SerializeCandidates) {
std::string message = webrtc::SdpSerializeCandidate(*jcandidate_);
EXPECT_EQ(std::string(kRawCandidate), message);
+
+ Candidate candidate_with_ufrag(candidates_.front());
+ candidate_with_ufrag.set_username("ABC");
+ jcandidate_.reset(new JsepIceCandidate(std::string("audio_content_name"), 0,
+ candidate_with_ufrag));
+ message = webrtc::SdpSerializeCandidate(*jcandidate_);
+ EXPECT_EQ(std::string(kRawCandidate) + " ufrag ABC", message);
}
// TODO(mallinath) : Enable this test once WebRTCSdp is capable of parsing
@@ -2317,9 +2367,10 @@ TEST_F(WebRtcSdpTest, DeserializeCandidateWithDifferentTransport) {
EXPECT_TRUE(jcandidate.candidate().IsEquivalent(jcandidate_->candidate()));
}
-TEST_F(WebRtcSdpTest, DeserializeCandidateOldFormat) {
+TEST_F(WebRtcSdpTest, DeserializeCandidateWithUfragPwd) {
JsepIceCandidate jcandidate(kDummyMid, kDummyIndex);
- EXPECT_TRUE(SdpDeserializeCandidate(kSdpOneCandidateOldFormat,&jcandidate));
+ EXPECT_TRUE(
+ SdpDeserializeCandidate(kSdpOneCandidateWithUfragPwd, &jcandidate));
EXPECT_EQ(kDummyMid, jcandidate.sdp_mid());
EXPECT_EQ(kDummyIndex, jcandidate.sdp_mline_index());
Candidate ref_candidate = jcandidate_->candidate();
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
index 95abeab77a..d8f76379c1 100644
--- a/talk/app/webrtc/webrtcsession.cc
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -30,13 +30,13 @@
#include <limits.h>
#include <algorithm>
-#include <vector>
#include <set>
+#include <utility>
+#include <vector>
#include "talk/app/webrtc/jsepicecandidate.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/mediaconstraintsinterface.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/sctputils.h"
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
@@ -45,6 +45,7 @@
#include "talk/session/media/channel.h"
#include "talk/session/media/channelmanager.h"
#include "talk/session/media/mediasession.h"
+#include "webrtc/audio/audio_sink.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/helpers.h"
@@ -441,10 +442,11 @@ static std::string MakeTdErrorString(const std::string& desc) {
// Set |option| to the highest-priority value of |key| in the optional
// constraints if the key is found and has a valid value.
-template<typename T>
+template <typename T>
static void SetOptionFromOptionalConstraint(
const MediaConstraintsInterface* constraints,
- const std::string& key, cricket::Settable<T>* option) {
+ const std::string& key,
+ rtc::Optional<T>* option) {
if (!constraints) {
return;
}
@@ -452,7 +454,7 @@ static void SetOptionFromOptionalConstraint(
T value;
if (constraints->GetOptional().FindFirst(key, &string_value)) {
if (rtc::FromString(string_value, &value)) {
- option->Set(value);
+ *option = rtc::Optional<T>(value);
}
}
}
@@ -492,9 +494,13 @@ class IceRestartAnswerLatch {
}
}
+ // This method has two purposes: 1. Return whether |new_desc| requests an
+ // ICE restart (i.e., new ufrag/pwd). 2. If it does and |new_desc| is an
+ // offer, remember this in |ice_restart_| so that the next local answer is
+ // created with a new ufrag and pwd.
bool CheckForRemoteIceRestart(const SessionDescriptionInterface* old_desc,
const SessionDescriptionInterface* new_desc) {
- if (!old_desc || new_desc->type() != SessionDescriptionInterface::kOffer) {
+ if (!old_desc) {
return false;
}
const SessionDescription* new_sd = new_desc->description();
@@ -520,7 +526,9 @@ class IceRestartAnswerLatch {
new_transport_desc->ice_ufrag,
new_transport_desc->ice_pwd)) {
LOG(LS_INFO) << "Remote peer request ice restart.";
- ice_restart_ = true;
+ if (new_desc->type() == SessionDescriptionInterface::kOffer) {
+ ice_restart_ = true;
+ }
return true;
}
}
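The restart check above reduces to a credentials comparison; a minimal sketch (the helper name here is assumed for illustration, not part of this diff):

    // Per RFC 5245 section 9.1.1.1, a changed ufrag or pwd on the same
    // transport signals an ICE restart.
    static bool IceCredentialsChangedSketch(const std::string& old_ufrag,
                                            const std::string& old_pwd,
                                            const std::string& new_ufrag,
                                            const std::string& new_pwd) {
      return old_ufrag != new_ufrag || old_pwd != new_pwd;
    }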
@@ -593,6 +601,8 @@ bool WebRtcSession::Initialize(
const PeerConnectionInterface::RTCConfiguration& rtc_configuration) {
bundle_policy_ = rtc_configuration.bundle_policy;
rtcp_mux_policy_ = rtc_configuration.rtcp_mux_policy;
+ video_options_.disable_prerenderer_smoothing =
+ rtc::Optional<bool>(rtc_configuration.disable_prerenderer_smoothing);
transport_controller_->SetSslMaxProtocolVersion(options.ssl_max_version);
// Obtain a certificate from RTCConfiguration if any were provided (optional).
@@ -644,8 +654,8 @@ bool WebRtcSession::Initialize(
constraints,
MediaConstraintsInterface::kEnableDscp,
&value, NULL)) {
- audio_options_.dscp.Set(value);
- video_options_.dscp.Set(value);
+ audio_options_.dscp = rtc::Optional<bool>(value);
+ video_options_.dscp = rtc::Optional<bool>(value);
}
// Find Suspend Below Min Bitrate constraint.
@@ -654,7 +664,7 @@ bool WebRtcSession::Initialize(
MediaConstraintsInterface::kEnableVideoSuspendBelowMinBitrate,
&value,
NULL)) {
- video_options_.suspend_below_min_bitrate.Set(value);
+ video_options_.suspend_below_min_bitrate = rtc::Optional<bool>(value);
}
SetOptionFromOptionalConstraint(constraints,
@@ -684,12 +694,10 @@ bool WebRtcSession::Initialize(
SetOptionFromOptionalConstraint(constraints,
MediaConstraintsInterface::kNumUnsignalledRecvStreams,
&video_options_.unsignalled_recv_stream_limit);
- if (video_options_.unsignalled_recv_stream_limit.IsSet()) {
- int stream_limit;
- video_options_.unsignalled_recv_stream_limit.Get(&stream_limit);
- stream_limit = std::min(kMaxUnsignalledRecvStreams, stream_limit);
- stream_limit = std::max(0, stream_limit);
- video_options_.unsignalled_recv_stream_limit.Set(stream_limit);
+ if (video_options_.unsignalled_recv_stream_limit) {
+ video_options_.unsignalled_recv_stream_limit = rtc::Optional<int>(
+ std::max(0, std::min(kMaxUnsignalledRecvStreams,
+ *video_options_.unsignalled_recv_stream_limit)));
}
SetOptionFromOptionalConstraint(constraints,
@@ -700,22 +708,12 @@ bool WebRtcSession::Initialize(
MediaConstraintsInterface::kCombinedAudioVideoBwe,
&audio_options_.combined_audio_video_bwe);
- audio_options_.audio_jitter_buffer_max_packets.Set(
- rtc_configuration.audio_jitter_buffer_max_packets);
+ audio_options_.audio_jitter_buffer_max_packets =
+ rtc::Optional<int>(rtc_configuration.audio_jitter_buffer_max_packets);
- audio_options_.audio_jitter_buffer_fast_accelerate.Set(
+ audio_options_.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(
rtc_configuration.audio_jitter_buffer_fast_accelerate);
- const cricket::VideoCodec default_codec(
- JsepSessionDescription::kDefaultVideoCodecId,
- JsepSessionDescription::kDefaultVideoCodecName,
- JsepSessionDescription::kMaxVideoCodecWidth,
- JsepSessionDescription::kMaxVideoCodecHeight,
- JsepSessionDescription::kDefaultVideoCodecFramerate,
- JsepSessionDescription::kDefaultVideoCodecPreference);
- channel_manager_->SetDefaultVideoEncoderConfig(
- cricket::VideoEncoderConfig(default_codec));
-
if (!dtls_enabled_) {
// Construct with DTLS disabled.
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
@@ -726,7 +724,7 @@ bool WebRtcSession::Initialize(
// Use the |dtls_identity_store| to generate a certificate.
RTC_DCHECK(dtls_identity_store);
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
- signaling_thread(), channel_manager_, dtls_identity_store.Pass(),
+ signaling_thread(), channel_manager_, std::move(dtls_identity_store),
this, id()));
} else {
// Use the already generated certificate.
@@ -744,12 +742,6 @@ bool WebRtcSession::Initialize(
port_allocator()->set_candidate_filter(
ConvertIceTransportTypeToCandidateFilter(rtc_configuration.type));
- if (rtc_configuration.enable_localhost_ice_candidate) {
- port_allocator()->set_flags(
- port_allocator()->flags() |
- cricket::PORTALLOCATOR_ENABLE_LOCALHOST_CANDIDATE);
- }
-
return true;
}
@@ -769,14 +761,20 @@ cricket::SecurePolicy WebRtcSession::SdesPolicy() const {
return webrtc_session_desc_factory_->SdesPolicy();
}
-bool WebRtcSession::GetSslRole(rtc::SSLRole* role) {
+bool WebRtcSession::GetSslRole(const std::string& transport_name,
+ rtc::SSLRole* role) {
if (!local_desc_ || !remote_desc_) {
LOG(LS_INFO) << "Local and Remote descriptions must be applied to get "
<< "SSL Role of the session.";
return false;
}
- return transport_controller_->GetSslRole(role);
+ return transport_controller_->GetSslRole(transport_name, role);
+}
+
+bool WebRtcSession::GetSslRole(const cricket::BaseChannel* channel,
+ rtc::SSLRole* role) {
+ return channel && GetSslRole(channel->transport_name(), role);
}
void WebRtcSession::CreateOffer(
@@ -978,15 +976,12 @@ bool WebRtcSession::UpdateSessionState(
return BadPranswerSdp(source, GetSessionErrorMsg(), err_desc);
}
} else if (action == kAnswer) {
- if (!PushdownTransportDescription(source, cricket::CA_ANSWER, &td_err)) {
- return BadAnswerSdp(source, MakeTdErrorString(td_err), err_desc);
- }
const cricket::ContentGroup* local_bundle =
local_desc_->description()->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
const cricket::ContentGroup* remote_bundle =
remote_desc_->description()->GetGroupByName(cricket::GROUP_TYPE_BUNDLE);
if (local_bundle && remote_bundle) {
- // The answerer decides the transport to bundle on
+ // The answerer decides the transport to bundle on.
const cricket::ContentGroup* answer_bundle =
(source == cricket::CS_LOCAL ? local_bundle : remote_bundle);
if (!EnableBundle(*answer_bundle)) {
@@ -994,6 +989,11 @@ bool WebRtcSession::UpdateSessionState(
return BadAnswerSdp(source, kEnableBundleFailed, err_desc);
}
}
+ // Only push down the transport description after enabling BUNDLE; we don't
+ // want to push down a description on a transport about to be destroyed.
+ if (!PushdownTransportDescription(source, cricket::CA_ANSWER, &td_err)) {
+ return BadAnswerSdp(source, MakeTdErrorString(td_err), err_desc);
+ }
EnableChannels();
SetState(STATE_INPROGRESS);
if (!PushdownMediaDescription(cricket::CA_ANSWER, source, err_desc)) {
@@ -1250,6 +1250,8 @@ cricket::IceConfig WebRtcSession::ParseIceConfig(
const PeerConnectionInterface::RTCConfiguration& config) const {
cricket::IceConfig ice_config;
ice_config.receiving_timeout_ms = config.ice_connection_receiving_timeout;
+ ice_config.backup_connection_ping_interval =
+ config.ice_backup_candidate_pair_ping_interval;
ice_config.gather_continually = (config.continual_gathering_policy ==
PeerConnectionInterface::GATHER_CONTINUALLY);
return ice_config;
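An illustrative RTCConfiguration showing the fields ParseIceConfig now consumes (values are examples only):

    PeerConnectionInterface::RTCConfiguration config;
    config.ice_connection_receiving_timeout = 1000;         // ms
    config.ice_backup_candidate_pair_ping_interval = 2000;  // ms
    config.continual_gathering_policy =
        PeerConnectionInterface::GATHER_CONTINUALLY;
    cricket::IceConfig ice_config = session->ParseIceConfig(config);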
@@ -1326,6 +1328,15 @@ void WebRtcSession::SetAudioPlayoutVolume(uint32_t ssrc, double volume) {
}
}
+void WebRtcSession::SetRawAudioSink(uint32_t ssrc,
+ rtc::scoped_ptr<AudioSinkInterface> sink) {
+ ASSERT(signaling_thread()->IsCurrent());
+ if (!voice_channel_)
+ return;
+
+ voice_channel_->SetRawAudioSink(ssrc, std::move(sink));
+}
+
bool WebRtcSession::SetCaptureDevice(uint32_t ssrc,
cricket::VideoCapturer* camera) {
ASSERT(signaling_thread()->IsCurrent());
@@ -1409,8 +1420,7 @@ bool WebRtcSession::InsertDtmf(const std::string& track_id,
LOG(LS_ERROR) << "InsertDtmf: Track does not exist: " << track_id;
return false;
}
- if (!voice_channel_->InsertDtmf(send_ssrc, code, duration,
- cricket::DF_SEND)) {
+ if (!voice_channel_->InsertDtmf(send_ssrc, code, duration)) {
LOG(LS_ERROR) << "Failed to insert DTMF to channel.";
return false;
}
@@ -1747,7 +1757,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstVideoContent(desc);
if ((!video_info || video_info->rejected) && video_channel_) {
SignalVideoChannelDestroyed();
- const std::string content_name = video_channel_->content_name();
channel_manager_->DestroyVideoChannel(video_channel_.release());
}
@@ -1755,7 +1764,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstAudioContent(desc);
if ((!voice_info || voice_info->rejected) && voice_channel_) {
SignalVoiceChannelDestroyed();
- const std::string content_name = voice_channel_->content_name();
channel_manager_->DestroyVoiceChannel(voice_channel_.release());
}
@@ -1763,7 +1771,6 @@ void WebRtcSession::RemoveUnusedChannels(const SessionDescription* desc) {
cricket::GetFirstDataContent(desc);
if ((!data_info || data_info->rejected) && data_channel_) {
SignalDataChannelDestroyed();
- const std::string content_name = data_channel_->content_name();
channel_manager_->DestroyDataChannel(data_channel_.release());
}
}
@@ -2164,9 +2171,10 @@ void WebRtcSession::ReportNegotiatedCiphers(
return;
}
- const std::string& srtp_cipher = stats.channel_stats[0].srtp_cipher;
- int ssl_cipher = stats.channel_stats[0].ssl_cipher;
- if (srtp_cipher.empty() && !ssl_cipher) {
+ int srtp_crypto_suite = stats.channel_stats[0].srtp_crypto_suite;
+ int ssl_cipher_suite = stats.channel_stats[0].ssl_cipher_suite;
+ if (srtp_crypto_suite == rtc::SRTP_INVALID_CRYPTO_SUITE &&
+ ssl_cipher_suite == rtc::TLS_NULL_WITH_NULL_NULL) {
return;
}
@@ -2186,12 +2194,13 @@ void WebRtcSession::ReportNegotiatedCiphers(
return;
}
- if (!srtp_cipher.empty()) {
- metrics_observer_->IncrementSparseEnumCounter(
- srtp_counter_type, rtc::GetSrtpCryptoSuiteFromName(srtp_cipher));
+ if (srtp_crypto_suite != rtc::SRTP_INVALID_CRYPTO_SUITE) {
+ metrics_observer_->IncrementSparseEnumCounter(srtp_counter_type,
+ srtp_crypto_suite);
}
- if (ssl_cipher) {
- metrics_observer_->IncrementSparseEnumCounter(ssl_counter_type, ssl_cipher);
+ if (ssl_cipher_suite != rtc::TLS_NULL_WITH_NULL_NULL) {
+ metrics_observer_->IncrementSparseEnumCounter(ssl_counter_type,
+ ssl_cipher_suite);
}
}
diff --git a/talk/app/webrtc/webrtcsession.h b/talk/app/webrtc/webrtcsession.h
index d9c40d1a83..b79e0ec270 100644
--- a/talk/app/webrtc/webrtcsession.h
+++ b/talk/app/webrtc/webrtcsession.h
@@ -38,11 +38,11 @@
#include "talk/app/webrtc/peerconnectioninterface.h"
#include "talk/app/webrtc/statstypes.h"
#include "talk/media/base/mediachannel.h"
-#include "webrtc/p2p/base/transportcontroller.h"
#include "talk/session/media/mediasession.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/base/sslidentity.h"
#include "webrtc/base/thread.h"
+#include "webrtc/p2p/base/transportcontroller.h"
namespace cricket {
@@ -204,7 +204,11 @@ class WebRtcSession : public AudioProviderInterface,
cricket::SecurePolicy SdesPolicy() const;
// Get current ssl role from transport.
- bool GetSslRole(rtc::SSLRole* role);
+ bool GetSslRole(const std::string& transport_name, rtc::SSLRole* role);
+
+ // Get current SSL role for this channel's transport.
+ // If |transport| is null, returns false.
+ bool GetSslRole(const cricket::BaseChannel* channel, rtc::SSLRole* role);
void CreateOffer(
CreateSessionDescriptionObserver* observer,
@@ -250,6 +254,8 @@ class WebRtcSession : public AudioProviderInterface,
const cricket::AudioOptions& options,
cricket::AudioRenderer* renderer) override;
void SetAudioPlayoutVolume(uint32_t ssrc, double volume) override;
+ void SetRawAudioSink(uint32_t ssrc,
+ rtc::scoped_ptr<AudioSinkInterface> sink) override;
// Implements VideoMediaProviderInterface.
bool SetCaptureDevice(uint32_t ssrc, cricket::VideoCapturer* camera) override;
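A hedged usage sketch of the new per-channel overload, mirroring how the session description factory (later in this change) queries the role:

    rtc::SSLRole ssl_role;
    if (session->GetSslRole(session->voice_channel(), &ssl_role)) {
      bool prefer_passive = (ssl_role == rtc::SSL_SERVER);
    }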
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index 3eb46f1d3c..e81b8b5b54 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -25,6 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <utility>
#include <vector>
#include "talk/app/webrtc/audiotrack.h"
@@ -33,7 +34,6 @@
#include "talk/app/webrtc/jsepicecandidate.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
#include "talk/app/webrtc/peerconnection.h"
-#include "talk/app/webrtc/mediastreamsignaling.h"
#include "talk/app/webrtc/sctputils.h"
#include "talk/app/webrtc/streamcollection.h"
#include "talk/app/webrtc/streamcollection.h"
@@ -72,8 +72,6 @@
return; \
}
-using cricket::DF_PLAY;
-using cricket::DF_SEND;
using cricket::FakeVoiceMediaChannel;
using cricket::TransportInfo;
using rtc::SocketAddress;
@@ -173,15 +171,6 @@ static const char kAudioTrack2[] = "audio2";
enum RTCCertificateGenerationMethod { ALREADY_GENERATED, DTLS_IDENTITY_STORE };
-// Add some extra |newlines| to the |message| after |line|.
-static void InjectAfter(const std::string& line,
- const std::string& newlines,
- std::string* message) {
- const std::string tmp = line + newlines;
- rtc::replace_substrs(line.c_str(), line.length(), tmp.c_str(), tmp.length(),
- message);
-}
-
class MockIceObserver : public webrtc::IceObserver {
public:
MockIceObserver()
@@ -428,7 +417,7 @@ class WebRtcSessionTest
observer_.ice_gathering_state_);
EXPECT_TRUE(session_->Initialize(options_, constraints_.get(),
- dtls_identity_store.Pass(),
+ std::move(dtls_identity_store),
rtc_configuration));
session_->set_metrics_observer(metrics_observer_);
}
@@ -479,7 +468,7 @@ class WebRtcSessionTest
} else {
RTC_CHECK(false);
}
- Init(dtls_identity_store.Pass(), configuration);
+ Init(std::move(dtls_identity_store), configuration);
}
// Init with DTLS with a store that will fail to generate a certificate.
@@ -488,7 +477,7 @@ class WebRtcSessionTest
new FakeDtlsIdentityStore());
dtls_identity_store->set_should_fail(true);
PeerConnectionInterface::RTCConfiguration configuration;
- Init(dtls_identity_store.Pass(), configuration);
+ Init(std::move(dtls_identity_store), configuration);
}
void InitWithDtmfCodec() {
@@ -726,9 +715,9 @@ class WebRtcSessionTest
std::string identity_name = "WebRTC" +
rtc::ToString(rtc::CreateRandomId());
// Confirmed to work with KT_RSA and KT_ECDSA.
- tdesc_factory_->set_certificate(rtc::RTCCertificate::Create(
- rtc::scoped_ptr<rtc::SSLIdentity>(rtc::SSLIdentity::Generate(
- identity_name, rtc::KT_DEFAULT)).Pass()));
+ tdesc_factory_->set_certificate(
+ rtc::RTCCertificate::Create(rtc::scoped_ptr<rtc::SSLIdentity>(
+ rtc::SSLIdentity::Generate(identity_name, rtc::KT_DEFAULT))));
tdesc_factory_->set_secure(cricket::SEC_REQUIRED);
}
@@ -789,7 +778,7 @@ class WebRtcSessionTest
ASSERT_TRUE(video_channel_ != NULL);
const cricket::VideoOptions& video_options = video_channel_->options();
EXPECT_EQ(value_expected,
- video_options.unsignalled_recv_stream_limit.GetWithDefaultIfUnset(-1));
+ video_options.unsignalled_recv_stream_limit.value_or(-1));
}
void CompareIceUfragAndPassword(const cricket::SessionDescription* desc1,
@@ -1442,12 +1431,12 @@ class WebRtcSessionTest
}
void ConfigureAllocatorWithTurn() {
- cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
+ cricket::RelayServerConfig turn_server(cricket::RELAY_TURN);
cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
- relay_server.credentials = credentials;
- relay_server.ports.push_back(cricket::ProtocolAddress(
- kTurnUdpIntAddr, cricket::PROTO_UDP, false));
- allocator_->AddRelay(relay_server);
+ turn_server.credentials = credentials;
+ turn_server.ports.push_back(
+ cricket::ProtocolAddress(kTurnUdpIntAddr, cricket::PROTO_UDP, false));
+ allocator_->AddTurnServer(turn_server);
allocator_->set_step_delay(cricket::kMinimumStepDelay);
allocator_->set_flags(cricket::PORTALLOCATOR_DISABLE_TCP);
}
@@ -1968,6 +1957,67 @@ TEST_P(WebRtcSessionTest, TestCreateAnswerReceiveOfferWithoutEncryption) {
SetLocalDescriptionWithoutError(answer);
}
+// Test that we can create and set an answer correctly when different
+// SSL roles have been negotiated for different transports.
+// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=4525
+TEST_P(WebRtcSessionTest, TestCreateAnswerWithDifferentSslRoles) {
+ SendAudioVideoStream1();
+ InitWithDtls(GetParam());
+ SetFactoryDtlsSrtp();
+
+ SessionDescriptionInterface* offer = CreateOffer();
+ SetLocalDescriptionWithoutError(offer);
+
+ cricket::MediaSessionOptions options;
+ options.recv_video = true;
+
+ // First, negotiate different SSL roles.
+ SessionDescriptionInterface* answer =
+ CreateRemoteAnswer(offer, options, cricket::SEC_DISABLED);
+ TransportInfo* audio_transport_info =
+ answer->description()->GetTransportInfoByName("audio");
+ audio_transport_info->description.connection_role =
+ cricket::CONNECTIONROLE_ACTIVE;
+ TransportInfo* video_transport_info =
+ answer->description()->GetTransportInfoByName("video");
+ video_transport_info->description.connection_role =
+ cricket::CONNECTIONROLE_PASSIVE;
+ SetRemoteDescriptionWithoutError(answer);
+
+ // Now create an offer in the reverse direction, and ensure the initial
+ // offerer responds with an answer with correct SSL roles.
+ offer = CreateRemoteOfferWithVersion(options, cricket::SEC_DISABLED,
+ kSessionVersion,
+ session_->remote_description());
+ SetRemoteDescriptionWithoutError(offer);
+
+ answer = CreateAnswer(nullptr);
+ audio_transport_info = answer->description()->GetTransportInfoByName("audio");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ audio_transport_info->description.connection_role);
+ video_transport_info = answer->description()->GetTransportInfoByName("video");
+ EXPECT_EQ(cricket::CONNECTIONROLE_ACTIVE,
+ video_transport_info->description.connection_role);
+ SetLocalDescriptionWithoutError(answer);
+
+ // Lastly, start BUNDLE-ing on "audio", expecting that the "passive" role of
+ // audio is transferred over to video in the answer that completes the BUNDLE
+ // negotiation.
+ options.bundle_enabled = true;
+ offer = CreateRemoteOfferWithVersion(options, cricket::SEC_DISABLED,
+ kSessionVersion,
+ session_->remote_description());
+ SetRemoteDescriptionWithoutError(offer);
+ answer = CreateAnswer(nullptr);
+ audio_transport_info = answer->description()->GetTransportInfoByName("audio");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ audio_transport_info->description.connection_role);
+ video_transport_info = answer->description()->GetTransportInfoByName("video");
+ EXPECT_EQ(cricket::CONNECTIONROLE_PASSIVE,
+ video_transport_info->description.connection_role);
+ SetLocalDescriptionWithoutError(answer);
+}
+
TEST_F(WebRtcSessionTest, TestSetLocalOfferTwice) {
Init();
SendNothing();
@@ -2809,10 +2859,9 @@ TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionInvalidIceCredentials) {
EXPECT_FALSE(session_->SetRemoteDescription(modified_offer, &error));
}
-// Test that if the remote description indicates the peer requested ICE restart
-// (via a new ufrag or pwd), the old ICE candidates are not copied,
-// and vice versa.
-TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionWithIceRestart) {
+// Test that if the remote offer indicates the peer requested ICE restart (via
+// a new ufrag or pwd), the old ICE candidates are not copied, and vice versa.
+TEST_F(WebRtcSessionTest, TestSetRemoteOfferWithIceRestart) {
Init();
scoped_ptr<SessionDescriptionInterface> offer(CreateRemoteOffer());
@@ -2866,6 +2915,64 @@ TEST_F(WebRtcSessionTest, TestSetRemoteDescriptionWithIceRestart) {
EXPECT_EQ(0, session_->remote_description()->candidates(0)->count());
}
+// Test that if the remote answer indicates the peer requested ICE restart (via
+// a new ufrag or pwd), the old ICE candidates are not copied, and vice versa.
+TEST_F(WebRtcSessionTest, TestSetRemoteAnswerWithIceRestart) {
+ Init();
+ SessionDescriptionInterface* offer = CreateOffer();
+ SetLocalDescriptionWithoutError(offer);
+ scoped_ptr<SessionDescriptionInterface> answer(CreateRemoteAnswer(offer));
+
+ // Create the first answer.
+ std::string sdp;
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012345",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer1 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ cricket::Candidate candidate1(1, "udp", rtc::SocketAddress("1.1.1.1", 5000),
+ 0, "", "", "relay", 0, "");
+ JsepIceCandidate ice_candidate1(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer1->AddCandidate(&ice_candidate1));
+ SetRemoteDescriptionWithoutError(answer1);
+ EXPECT_EQ(1, session_->remote_description()->candidates(0)->count());
+
+ // The second answer has the same ufrag and pwd but different address.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012345",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer2 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ candidate1.set_address(rtc::SocketAddress("1.1.1.1", 6000));
+ JsepIceCandidate ice_candidate2(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer2->AddCandidate(&ice_candidate2));
+ SetRemoteDescriptionWithoutError(answer2);
+ EXPECT_EQ(2, session_->remote_description()->candidates(0)->count());
+
+ // The third answer has a different ufrag and different address.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012333",
+ "abcdefghijklmnopqrstuvwx", &sdp);
+ SessionDescriptionInterface* answer3 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ candidate1.set_address(rtc::SocketAddress("1.1.1.1", 7000));
+ JsepIceCandidate ice_candidate3(kMediaContentName0, kMediaContentIndex0,
+ candidate1);
+ EXPECT_TRUE(answer3->AddCandidate(&ice_candidate3));
+ SetRemoteDescriptionWithoutError(answer3);
+ EXPECT_EQ(1, session_->remote_description()->candidates(0)->count());
+
+ // The fourth answer has no candidate but a different ufrag/pwd.
+ sdp.clear();
+ ModifyIceUfragPwdLines(answer.get(), "0123456789012444",
+ "abcdefghijklmnopqrstuvyz", &sdp);
+ SessionDescriptionInterface* offer4 =
+ CreateSessionDescription(JsepSessionDescription::kPrAnswer, sdp, NULL);
+ SetRemoteDescriptionWithoutError(offer4);
+ EXPECT_EQ(0, session_->remote_description()->candidates(0)->count());
+}
+
// Test that candidates sent to the "video" transport do not get pushed down to
// the "audio" transport channel when bundling.
TEST_F(WebRtcSessionTest, TestIgnoreCandidatesForUnusedTransportWhenBundling) {
@@ -3297,20 +3404,18 @@ TEST_F(WebRtcSessionTest, SetAudioSend) {
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
cricket::AudioOptions options;
- options.echo_cancellation.Set(true);
+ options.echo_cancellation = rtc::Optional<bool>(true);
rtc::scoped_ptr<FakeAudioRenderer> renderer(new FakeAudioRenderer());
session_->SetAudioSend(send_ssrc, false, options, renderer.get());
EXPECT_TRUE(channel->IsStreamMuted(send_ssrc));
- EXPECT_FALSE(channel->options().echo_cancellation.IsSet());
+ EXPECT_EQ(rtc::Optional<bool>(), channel->options().echo_cancellation);
EXPECT_TRUE(renderer->sink() != NULL);
// This will trigger SetSink(NULL) to the |renderer|.
session_->SetAudioSend(send_ssrc, true, options, NULL);
EXPECT_FALSE(channel->IsStreamMuted(send_ssrc));
- bool value;
- EXPECT_TRUE(channel->options().echo_cancellation.Get(&value));
- EXPECT_TRUE(value);
+ EXPECT_EQ(rtc::Optional<bool>(true), channel->options().echo_cancellation);
EXPECT_TRUE(renderer->sink() == NULL);
}
@@ -3387,7 +3492,6 @@ TEST_F(WebRtcSessionTest, InsertDtmf) {
EXPECT_EQ(0U, channel->dtmf_info_queue().size());
// Insert DTMF
- const int expected_flags = DF_SEND;
const int expected_duration = 90;
session_->InsertDtmf(kAudioTrack1, 0, expected_duration);
session_->InsertDtmf(kAudioTrack1, 1, expected_duration);
@@ -3397,11 +3501,11 @@ TEST_F(WebRtcSessionTest, InsertDtmf) {
ASSERT_EQ(3U, channel->dtmf_info_queue().size());
const uint32_t send_ssrc = channel->send_streams()[0].first_ssrc();
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[0], send_ssrc, 0,
- expected_duration, expected_flags));
+ expected_duration));
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[1], send_ssrc, 1,
- expected_duration, expected_flags));
+ expected_duration));
EXPECT_TRUE(CompareDtmfInfo(channel->dtmf_info_queue()[2], send_ssrc, 2,
- expected_duration, expected_flags));
+ expected_duration));
}
// This test verifies the |initial_offerer| flag when session initiates the
@@ -3582,7 +3686,9 @@ TEST_F(WebRtcSessionTest, TestCreateAnswerWithNewUfragAndPassword) {
SetLocalDescriptionWithoutError(answer.release());
// Receive an offer with new ufrag and password.
- options.transport_options.ice_restart = true;
+ options.audio_transport_options.ice_restart = true;
+ options.video_transport_options.ice_restart = true;
+ options.data_transport_options.ice_restart = true;
rtc::scoped_ptr<JsepSessionDescription> updated_offer1(
CreateRemoteOffer(options, session_->remote_description()));
SetRemoteDescriptionWithoutError(updated_offer1.release());
@@ -3613,7 +3719,9 @@ TEST_F(WebRtcSessionTest, TestCreateAnswerWithOldUfragAndPassword) {
SetLocalDescriptionWithoutError(answer.release());
// Receive an offer without changed ufrag or password.
- options.transport_options.ice_restart = false;
+ options.audio_transport_options.ice_restart = false;
+ options.video_transport_options.ice_restart = false;
+ options.data_transport_options.ice_restart = false;
rtc::scoped_ptr<JsepSessionDescription> updated_offer2(
CreateRemoteOffer(options, session_->remote_description()));
SetRemoteDescriptionWithoutError(updated_offer2.release());
@@ -3993,10 +4101,8 @@ TEST_F(WebRtcSessionTest, TestDscpConstraint) {
ASSERT_TRUE(voice_channel_ != NULL);
const cricket::AudioOptions& audio_options = voice_channel_->options();
const cricket::VideoOptions& video_options = video_channel_->options();
- EXPECT_TRUE(audio_options.dscp.IsSet());
- EXPECT_TRUE(audio_options.dscp.GetWithDefaultIfUnset(false));
- EXPECT_TRUE(video_options.dscp.IsSet());
- EXPECT_TRUE(video_options.dscp.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), audio_options.dscp);
+ EXPECT_EQ(rtc::Optional<bool>(true), video_options.dscp);
}
TEST_F(WebRtcSessionTest, TestSuspendBelowMinBitrateConstraint) {
@@ -4014,8 +4120,7 @@ TEST_F(WebRtcSessionTest, TestSuspendBelowMinBitrateConstraint) {
ASSERT_TRUE(video_channel_ != NULL);
const cricket::VideoOptions& video_options = video_channel_->options();
- EXPECT_TRUE(
- video_options.suspend_below_min_bitrate.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), video_options.suspend_below_min_bitrate);
}
TEST_F(WebRtcSessionTest, TestNumUnsignalledRecvStreamsConstraint) {
@@ -4042,8 +4147,7 @@ TEST_F(WebRtcSessionTest, TestCombinedAudioVideoBweConstraint) {
ASSERT_TRUE(voice_channel_ != NULL);
const cricket::AudioOptions& audio_options = voice_channel_->options();
- EXPECT_TRUE(
- audio_options.combined_audio_video_bwe.GetWithDefaultIfUnset(false));
+ EXPECT_EQ(rtc::Optional<bool>(true), audio_options.combined_audio_video_bwe);
}
// Tests that we can renegotiate new media content with ICE candidates in the
diff --git a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
index 25965af79d..f08b77eb40 100644
--- a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
+++ b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
@@ -27,6 +27,8 @@
#include "talk/app/webrtc/webrtcsessiondescriptionfactory.h"
+#include <utility>
+
#include "talk/app/webrtc/dtlsidentitystore.h"
#include "talk/app/webrtc/jsep.h"
#include "talk/app/webrtc/jsepsessiondescription.h"
@@ -99,12 +101,12 @@ void WebRtcIdentityRequestObserver::OnSuccess(
der_private_key.length());
rtc::scoped_ptr<rtc::SSLIdentity> identity(
rtc::SSLIdentity::FromPEMStrings(pem_key, pem_cert));
- SignalCertificateReady(rtc::RTCCertificate::Create(identity.Pass()));
+ SignalCertificateReady(rtc::RTCCertificate::Create(std::move(identity)));
}
void WebRtcIdentityRequestObserver::OnSuccess(
rtc::scoped_ptr<rtc::SSLIdentity> identity) {
- SignalCertificateReady(rtc::RTCCertificate::Create(identity.Pass()));
+ SignalCertificateReady(rtc::RTCCertificate::Create(std::move(identity)));
}
// static
@@ -143,7 +145,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory(
// to just use a random number as session id and start version from
// |kInitSessionVersion|.
session_version_(kInitSessionVersion),
- dtls_identity_store_(dtls_identity_store.Pass()),
+ dtls_identity_store_(std::move(dtls_identity_store)),
identity_request_observer_(identity_request_observer),
session_(session),
session_id_(session_id),
@@ -177,7 +179,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory(
: WebRtcSessionDescriptionFactory(
signaling_thread,
channel_manager,
- dtls_identity_store.Pass(),
+ std::move(dtls_identity_store),
new rtc::RefCountedObject<WebRtcIdentityRequestObserver>(),
session,
session_id,
@@ -390,7 +392,9 @@ void WebRtcSessionDescriptionFactory::InternalCreateOffer(
return;
}
if (session_->local_description() &&
- !request.options.transport_options.ice_restart) {
+ !request.options.audio_transport_options.ice_restart &&
+ !request.options.video_transport_options.ice_restart &&
+ !request.options.data_transport_options.ice_restart) {
// Include all local ice candidates in the SessionDescription unless
+ // an ICE restart has been requested.
CopyCandidatesFromSessionDescription(session_->local_description(), offer);
@@ -403,12 +407,25 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
// According to http://tools.ietf.org/html/rfc5245#section-9.2.1.1
// an answer should also contain new ice ufrag and password if an offer has
// been received with new ufrag and password.
- request.options.transport_options.ice_restart = session_->IceRestartPending();
+ request.options.audio_transport_options.ice_restart =
+ session_->IceRestartPending();
+ request.options.video_transport_options.ice_restart =
+ session_->IceRestartPending();
+ request.options.data_transport_options.ice_restart =
+ session_->IceRestartPending();
// We should pass the current SSL role to the transport description factory
// if there is already an ongoing session.
rtc::SSLRole ssl_role;
- if (session_->GetSslRole(&ssl_role)) {
- request.options.transport_options.prefer_passive_role =
+ if (session_->GetSslRole(session_->voice_channel(), &ssl_role)) {
+ request.options.audio_transport_options.prefer_passive_role =
+ (rtc::SSL_SERVER == ssl_role);
+ }
+ if (session_->GetSslRole(session_->video_channel(), &ssl_role)) {
+ request.options.video_transport_options.prefer_passive_role =
+ (rtc::SSL_SERVER == ssl_role);
+ }
+ if (session_->GetSslRole(session_->data_channel(), &ssl_role)) {
+ request.options.data_transport_options.prefer_passive_role =
(rtc::SSL_SERVER == ssl_role);
}
@@ -437,7 +454,9 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
return;
}
if (session_->local_description() &&
- !request.options.transport_options.ice_restart) {
+ !request.options.audio_transport_options.ice_restart &&
+ !request.options.video_transport_options.ice_restart &&
+ !request.options.data_transport_options.ice_restart) {
// Include all local ice candidates in the SessionDescription unless
// the remote peer has requested an ice restart.
CopyCandidatesFromSessionDescription(session_->local_description(), answer);
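The net effect of the transport-options split, in sketch form:

    // Was: one shared set of options for all transports.
    //   options.transport_options.ice_restart = true;
    // Now: per-media-section options, set individually.
    cricket::MediaSessionOptions options;
    options.audio_transport_options.ice_restart = true;
    options.video_transport_options.ice_restart = true;
    options.data_transport_options.ice_restart = true;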