aboutsummaryrefslogtreecommitdiff
path: root/webrtc/modules/video_coding/codecs/h264
diff options
context:
space:
mode:
authorChih-hung Hsieh <chh@google.com>2015-12-01 17:07:48 +0000
committerandroid-build-merger <android-build-merger@google.com>2015-12-01 17:07:48 +0000
commita4acd9d6bc9b3b033d7d274316e75ee067df8d20 (patch)
tree672a185b294789cf991f385c3e395dd63bea9063 /webrtc/modules/video_coding/codecs/h264
parent3681b90ba4fe7a27232dd3e27897d5d7ed9d651c (diff)
parentfe8b4a657979b49e1701bd92f6d5814a99e0b2be (diff)
downloadwebrtc-a4acd9d6bc9b3b033d7d274316e75ee067df8d20.tar.gz
Merge changes I7bbf776e,I1b827825
am: fe8b4a6579 * commit 'fe8b4a657979b49e1701bd92f6d5814a99e0b2be': (7237 commits) WIP: Changes after merge commit 'cb3f9bd' Make the nonlinear beamformer steerable Utilize bitrate above codec max to protect video. Enable VP9 internal resize by default. Filter overlapping RTP header extensions. Make VCMEncodedFrameCallback const. MediaCodecVideoEncoder: Add number of quality resolution downscales to Encoded callback. Remove redudant encoder rate calls. Create isolate files for nonparallel tests. Register header extensions in RtpRtcpObserver to avoid log spam. Make an enum class out of NetEqDecoder, and hide the neteq_decoders_ table ACM: Move NACK functionality inside NetEq Fix chromium-style warnings in webrtc/sound/. Create a 'webrtc_nonparallel_tests' target. Update scalability structure data according to updates in the RTP payload profile. audio_coding: rename interface -> include Rewrote perform_action_on_all_files to be parallell. Update reference indices according to updates in the RTP payload profile. Disable P2PTransport...TestFailoverControlledSide on Memcheck pass clangcl compile options to ignore warnings in gflags.cc ...
Diffstat (limited to 'webrtc/modules/video_coding/codecs/h264')
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264.cc66
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264.gypi63
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_objc.mm33
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc271
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h62
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc439
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h66
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc356
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h100
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc151
-rw-r--r--webrtc/modules/video_coding/codecs/h264/include/h264.h48
11 files changed, 1655 insertions, 0 deletions
diff --git a/webrtc/modules/video_coding/codecs/h264/h264.cc b/webrtc/modules/video_coding/codecs/h264/h264.cc
new file mode 100644
index 0000000000..645ed2cad7
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_IOS)
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h"
+#endif
+
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+// We need this file to be C++ only so it will compile properly for all
+// platforms. In order to write ObjC specific implementations we use private
+// externs. This function is defined in h264_objc.mm.
+#if defined(WEBRTC_IOS)
+extern bool IsH264CodecSupportedObjC();
+#endif
+
+bool IsH264CodecSupported() {
+#if defined(WEBRTC_IOS)
+ return IsH264CodecSupportedObjC();
+#else
+ return false;
+#endif
+}
+
+H264Encoder* H264Encoder::Create() {
+ RTC_DCHECK(H264Encoder::IsSupported());
+#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+ return new H264VideoToolboxEncoder();
+#else
+ RTC_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Encoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+H264Decoder* H264Decoder::Create() {
+ RTC_DCHECK(H264Decoder::IsSupported());
+#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+ return new H264VideoToolboxDecoder();
+#else
+ RTC_NOTREACHED();
+ return nullptr;
+#endif
+}
+
+bool H264Decoder::IsSupported() {
+ return IsH264CodecSupported();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264.gypi b/webrtc/modules/video_coding/codecs/h264/h264.gypi
new file mode 100644
index 0000000000..a20865c3aa
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264.gypi
@@ -0,0 +1,63 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../../../../build/common.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'webrtc_h264',
+ 'type': 'static_library',
+ 'conditions': [
+ ['OS=="ios"', {
+ 'dependencies': [
+ 'webrtc_h264_video_toolbox',
+ ],
+ 'sources': [
+ 'h264_objc.mm',
+ ],
+ }],
+ ],
+ 'sources': [
+ 'h264.cc',
+ 'include/h264.h',
+ ],
+ }, # webrtc_h264
+ ],
+ 'conditions': [
+ ['OS=="ios"', {
+ 'targets': [
+ {
+ 'target_name': 'webrtc_h264_video_toolbox',
+ 'type': 'static_library',
+ 'dependencies': [
+ '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ ],
+ 'link_settings': {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-framework CoreMedia',
+ '-framework CoreVideo',
+ '-framework VideoToolbox',
+ ],
+ },
+ },
+ 'sources': [
+ 'h264_video_toolbox_decoder.cc',
+ 'h264_video_toolbox_decoder.h',
+ 'h264_video_toolbox_encoder.cc',
+ 'h264_video_toolbox_encoder.h',
+ 'h264_video_toolbox_nalu.cc',
+ 'h264_video_toolbox_nalu.h',
+ ],
+ }, # webrtc_h264_video_toolbox
+ ], # targets
+ }], # OS=="ios"
+ ], # conditions
+}
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_objc.mm b/webrtc/modules/video_coding/codecs/h264/h264_objc.mm
new file mode 100644
index 0000000000..b9e0fc0090
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_objc.mm
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_IOS)
+#import <UIKit/UIKit.h>
+#endif
+
+namespace webrtc {
+
+bool IsH264CodecSupportedObjC() {
+#if defined(WEBRTC_OBJC_H264) && \
+ defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED) && \
+ defined(WEBRTC_IOS)
+ // Supported on iOS8+.
+ return [[[UIDevice currentDevice] systemVersion] doubleValue] >= 8.0;
+#else
+ // TODO(tkchin): Support OS/X once we stop mixing libstdc++ and libc++ on
+ // OSX 10.9.
+ return false;
+#endif
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
new file mode 100644
index 0000000000..61ef80bbf1
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include "libyuv/convert.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+#include "webrtc/video_frame.h"
+
+namespace internal {
+
+// Convenience function for creating a dictionary.
+inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
+ CFTypeRef* values,
+ size_t size) {
+ return CFDictionaryCreate(nullptr, keys, values, size,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
+
+// Struct that we pass to the decoder per frame to decode. We receive it again
+// in the decoder callback.
+struct FrameDecodeParams {
+ FrameDecodeParams(webrtc::DecodedImageCallback* cb, int64_t ts)
+ : callback(cb), timestamp(ts) {}
+ webrtc::DecodedImageCallback* callback;
+ int64_t timestamp;
+};
+
+// On decode we receive a CVPixelBuffer, which we need to convert to a frame
+// buffer for use in the rest of WebRTC. Unfortunately this involves a frame
+// copy.
+// TODO(tkchin): Stuff CVPixelBuffer into a TextureBuffer and pass that along
+// instead once the pipeline supports it.
+rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
+ CVPixelBufferRef pixel_buffer) {
+ RTC_DCHECK(pixel_buffer);
+ RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+ kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+ size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0);
+ size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0);
+ // TODO(tkchin): Use a frame buffer pool.
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ new rtc::RefCountedObject<webrtc::I420Buffer>(width, height);
+ CVPixelBufferLockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
+ const uint8_t* src_y = reinterpret_cast<const uint8_t*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
+ int src_y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
+ const uint8_t* src_uv = reinterpret_cast<const uint8_t*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
+ int src_uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
+ int ret = libyuv::NV12ToI420(
+ src_y, src_y_stride, src_uv, src_uv_stride,
+ buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
+ buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
+ buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
+ width, height);
+ CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
+ if (ret) {
+ LOG(LS_ERROR) << "Error converting NV12 to I420: " << ret;
+ return nullptr;
+ }
+ return buffer;
+}
+
+// This is the callback function that VideoToolbox calls when decode is
+// complete.
+void VTDecompressionOutputCallback(void* decoder,
+ void* params,
+ OSStatus status,
+ VTDecodeInfoFlags info_flags,
+ CVImageBufferRef image_buffer,
+ CMTime timestamp,
+ CMTime duration) {
+ rtc::scoped_ptr<FrameDecodeParams> decode_params(
+ reinterpret_cast<FrameDecodeParams*>(params));
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+ return;
+ }
+ // TODO(tkchin): Handle CVO properly.
+ rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+ VideoFrameBufferForPixelBuffer(image_buffer);
+ webrtc::VideoFrame decoded_frame(buffer, decode_params->timestamp, 0,
+ webrtc::kVideoRotation_0);
+ decode_params->callback->Decoded(decoded_frame);
+}
+
+} // namespace internal
+
+namespace webrtc {
+
+H264VideoToolboxDecoder::H264VideoToolboxDecoder()
+ : callback_(nullptr),
+ video_format_(nullptr),
+ decompression_session_(nullptr) {
+}
+
+H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
+ DestroyDecompressionSession();
+ SetVideoFormat(nullptr);
+}
+
+int H264VideoToolboxDecoder::InitDecode(const VideoCodec* video_codec,
+ int number_of_cores) {
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Decode(
+ const EncodedImage& input_image,
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t render_time_ms) {
+ RTC_DCHECK(input_image._buffer);
+
+ CMSampleBufferRef sample_buffer = nullptr;
+ if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
+ input_image._length,
+ video_format_,
+ &sample_buffer)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(sample_buffer);
+ // Check if the video format has changed, and reinitialize decoder if needed.
+ CMVideoFormatDescriptionRef description =
+ CMSampleBufferGetFormatDescription(sample_buffer);
+ if (!CMFormatDescriptionEqual(description, video_format_)) {
+ SetVideoFormat(description);
+ ResetDecompressionSession();
+ }
+ VTDecodeFrameFlags decode_flags =
+ kVTDecodeFrame_EnableAsynchronousDecompression;
+ rtc::scoped_ptr<internal::FrameDecodeParams> frame_decode_params;
+ frame_decode_params.reset(
+ new internal::FrameDecodeParams(callback_, input_image._timeStamp));
+ OSStatus status = VTDecompressionSessionDecodeFrame(
+ decompression_session_, sample_buffer, decode_flags,
+ frame_decode_params.release(), nullptr);
+ CFRelease(sample_buffer);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
+ DecodedImageCallback* callback) {
+ RTC_DCHECK(!callback_);
+ callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Release() {
+ callback_ = nullptr;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::Reset() {
+ ResetDecompressionSession();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxDecoder::ResetDecompressionSession() {
+ DestroyDecompressionSession();
+
+ // Need to wait for the first SPS to initialize decoder.
+ if (!video_format_) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ // Set keys for OpenGL and IOSurface compatibility, which makes the decoder
+ // create pixel buffers with GPU backed memory. The intent here is to pass
+ // the pixel buffers directly so we avoid a texture upload later during
+ // rendering. This currently is moot because we are converting back to an
+ // I420 frame after decode, but eventually we will be able to plumb
+ // CVPixelBuffers directly to the renderer.
+ // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
+ // we can pass CVPixelBuffers as native handles in decoder output.
+ static size_t const attributes_size = 3;
+ CFTypeRef keys[attributes_size] = {
+#if defined(WEBRTC_IOS)
+ kCVPixelBufferOpenGLESCompatibilityKey,
+#elif defined(WEBRTC_MAC)
+ kCVPixelBufferOpenGLCompatibilityKey,
+#endif
+ kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey
+ };
+ CFDictionaryRef io_surface_value =
+ internal::CreateCFDictionary(nullptr, nullptr, 0);
+ int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+ CFNumberRef pixel_format =
+ CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
+ CFTypeRef values[attributes_size] = {
+ kCFBooleanTrue,
+ io_surface_value,
+ pixel_format
+ };
+ CFDictionaryRef attributes =
+ internal::CreateCFDictionary(keys, values, attributes_size);
+ if (io_surface_value) {
+ CFRelease(io_surface_value);
+ io_surface_value = nullptr;
+ }
+ if (pixel_format) {
+ CFRelease(pixel_format);
+ pixel_format = nullptr;
+ }
+ VTDecompressionOutputCallbackRecord record = {
+ internal::VTDecompressionOutputCallback, this,
+ };
+ OSStatus status =
+ VTDecompressionSessionCreate(nullptr, video_format_, nullptr, attributes,
+ &record, &decompression_session_);
+ CFRelease(attributes);
+ if (status != noErr) {
+ DestroyDecompressionSession();
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ ConfigureDecompressionSession();
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
+ RTC_DCHECK(decompression_session_);
+#if defined(WEBRTC_IOS)
+ VTSessionSetProperty(decompression_session_,
+ kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
+#endif
+}
+
+void H264VideoToolboxDecoder::DestroyDecompressionSession() {
+ if (decompression_session_) {
+ VTDecompressionSessionInvalidate(decompression_session_);
+ decompression_session_ = nullptr;
+ }
+}
+
+void H264VideoToolboxDecoder::SetVideoFormat(
+ CMVideoFormatDescriptionRef video_format) {
+ if (video_format_ == video_format) {
+ return;
+ }
+ if (video_format_) {
+ CFRelease(video_format_);
+ }
+ video_format_ = video_format;
+ if (video_format_) {
+ CFRetain(video_format_);
+ }
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
new file mode 100644
index 0000000000..f54ddb9efd
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <VideoToolbox/VideoToolbox.h>
+
+// This file provides a H264 decoder implementation using the VideoToolbox
+// APIs. Since documentation is almost non-existent, this is largely based on
+// the information in the VideoToolbox header files, a talk from WWDC 2014 and
+// experimentation.
+
+namespace webrtc {
+
+class H264VideoToolboxDecoder : public H264Decoder {
+ public:
+ H264VideoToolboxDecoder();
+
+ ~H264VideoToolboxDecoder() override;
+
+ int InitDecode(const VideoCodec* video_codec, int number_of_cores) override;
+
+ int Decode(const EncodedImage& input_image,
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t render_time_ms) override;
+
+ int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
+
+ int Release() override;
+
+ int Reset() override;
+
+ private:
+ int ResetDecompressionSession();
+ void ConfigureDecompressionSession();
+ void DestroyDecompressionSession();
+ void SetVideoFormat(CMVideoFormatDescriptionRef video_format);
+
+ DecodedImageCallback* callback_;
+ CMVideoFormatDescriptionRef video_format_;
+ VTDecompressionSessionRef decompression_session_;
+}; // H264VideoToolboxDecoder
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_DECODER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
new file mode 100644
index 0000000000..d677f8b812
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <string>
+#include <vector>
+
+#include "libyuv/convert_from.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+namespace internal {
+
+// Convenience function for creating a dictionary.
+inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
+ CFTypeRef* values,
+ size_t size) {
+ return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
+
+// Copies characters from a CFStringRef into a std::string.
+std::string CFStringToString(const CFStringRef cf_string) {
+ RTC_DCHECK(cf_string);
+ std::string std_string;
+ // Get the size needed for UTF8 plus terminating character.
+ size_t buffer_size =
+ CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
+ kCFStringEncodingUTF8) +
+ 1;
+ rtc::scoped_ptr<char[]> buffer(new char[buffer_size]);
+ if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
+ kCFStringEncodingUTF8)) {
+ // Copy over the characters.
+ std_string.assign(buffer.get());
+ }
+ return std_string;
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ int32_t value) {
+ CFNumberRef cfNum =
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
+ OSStatus status = VTSessionSetProperty(session, key, cfNum);
+ CFRelease(cfNum);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
+ CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
+ OSStatus status = VTSessionSetProperty(session, key, cf_bool);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << value << ": " << status;
+ }
+}
+
+// Convenience function for setting a VT property.
+void SetVTSessionProperty(VTSessionRef session,
+ CFStringRef key,
+ CFStringRef value) {
+ OSStatus status = VTSessionSetProperty(session, key, value);
+ if (status != noErr) {
+ std::string key_string = CFStringToString(key);
+ std::string val_string = CFStringToString(value);
+ LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
+ << " to " << val_string << ": " << status;
+ }
+}
+
+// Struct that we pass to the encoder per frame to encode. We receive it again
+// in the encoder callback.
+struct FrameEncodeParams {
+ FrameEncodeParams(webrtc::EncodedImageCallback* cb,
+ const webrtc::CodecSpecificInfo* csi,
+ int32_t w,
+ int32_t h,
+ int64_t rtms,
+ uint32_t ts)
+ : callback(cb),
+ width(w),
+ height(h),
+ render_time_ms(rtms),
+ timestamp(ts) {
+ if (csi) {
+ codec_specific_info = *csi;
+ } else {
+ codec_specific_info.codecType = webrtc::kVideoCodecH264;
+ }
+ }
+ webrtc::EncodedImageCallback* callback;
+ webrtc::CodecSpecificInfo codec_specific_info;
+ int32_t width;
+ int32_t height;
+ int64_t render_time_ms;
+ uint32_t timestamp;
+};
+
+// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
+// encoder. This performs the copy and format conversion.
+// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
+bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
+ CVPixelBufferRef pixel_buffer) {
+ RTC_DCHECK(pixel_buffer);
+ RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+ kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+ RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
+ static_cast<size_t>(frame.height()));
+ RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
+ static_cast<size_t>(frame.width()));
+
+ CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
+ if (cvRet != kCVReturnSuccess) {
+ LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
+ return false;
+ }
+ uint8_t* dst_y = reinterpret_cast<uint8_t*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
+ int dst_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
+ uint8_t* dst_uv = reinterpret_cast<uint8_t*>(
+ CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
+ int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
+ // Convert I420 to NV12.
+ int ret = libyuv::I420ToNV12(
+ frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
+ frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
+ dst_y, dst_stride_y, dst_uv, dst_stride_uv,
+ frame.width(), frame.height());
+ CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
+ if (ret) {
+ LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
+ return false;
+ }
+ return true;
+}
+
+// This is the callback function that VideoToolbox calls when encode is
+// complete.
+void VTCompressionOutputCallback(void* encoder,
+ void* params,
+ OSStatus status,
+ VTEncodeInfoFlags info_flags,
+ CMSampleBufferRef sample_buffer) {
+ rtc::scoped_ptr<FrameEncodeParams> encode_params(
+ reinterpret_cast<FrameEncodeParams*>(params));
+ if (status != noErr) {
+ LOG(LS_ERROR) << "H264 encoding failed.";
+ return;
+ }
+ if (info_flags & kVTEncodeInfo_FrameDropped) {
+ LOG(LS_INFO) << "H264 encode dropped frame.";
+ }
+
+ bool is_keyframe = false;
+ CFArrayRef attachments =
+ CMSampleBufferGetSampleAttachmentsArray(sample_buffer, 0);
+ if (attachments != nullptr && CFArrayGetCount(attachments)) {
+ CFDictionaryRef attachment =
+ static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
+ is_keyframe =
+ !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
+ }
+
+ // Convert the sample buffer into a buffer suitable for RTP packetization.
+ // TODO(tkchin): Allocate buffers through a pool.
+ rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
+ rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
+ if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
+ is_keyframe,
+ buffer.get(),
+ header.accept())) {
+ return;
+ }
+ webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
+ frame._encodedWidth = encode_params->width;
+ frame._encodedHeight = encode_params->height;
+ frame._completeFrame = true;
+ frame._frameType =
+ is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
+ frame.capture_time_ms_ = encode_params->render_time_ms;
+ frame._timeStamp = encode_params->timestamp;
+
+ int result = encode_params->callback->Encoded(
+ frame, &(encode_params->codec_specific_info), header.get());
+ if (result != 0) {
+ LOG(LS_ERROR) << "Encoded callback failed: " << result;
+ }
+}
+
+} // namespace internal
+
+namespace webrtc {
+
+H264VideoToolboxEncoder::H264VideoToolboxEncoder()
+ : callback_(nullptr), compression_session_(nullptr) {
+}
+
+H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
+ DestroyCompressionSession();
+}
+
+int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
+ int number_of_cores,
+ size_t max_payload_size) {
+ RTC_DCHECK(codec_settings);
+ RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
+ // TODO(tkchin): We may need to enforce width/height dimension restrictions
+ // to match what the encoder supports.
+ width_ = codec_settings->width;
+ height_ = codec_settings->height;
+ // We can only set average bitrate on the HW encoder.
+ bitrate_ = codec_settings->startBitrate * 1000;
+
+ // TODO(tkchin): Try setting payload size via
+ // kVTCompressionPropertyKey_MaxH264SliceBytes.
+
+ return ResetCompressionSession();
+}
+
+int H264VideoToolboxEncoder::Encode(
+ const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) {
+ if (input_image.IsZeroSize()) {
+ // It's possible to get zero sizes as a signal to produce keyframes (this
+ // happens for internal sources). But this shouldn't happen in
+ // webrtcvideoengine2.
+ RTC_NOTREACHED();
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ if (!callback_ || !compression_session_) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ // Get a pixel buffer from the pool and copy frame data over.
+ CVPixelBufferPoolRef pixel_buffer_pool =
+ VTCompressionSessionGetPixelBufferPool(compression_session_);
+ CVPixelBufferRef pixel_buffer = nullptr;
+ CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool,
+ &pixel_buffer);
+ if (ret != kCVReturnSuccess) {
+ LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
+ // We probably want to drop frames here, since failure probably means
+ // that the pool is empty.
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ RTC_DCHECK(pixel_buffer);
+ if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
+ LOG(LS_ERROR) << "Failed to copy frame data.";
+ CVBufferRelease(pixel_buffer);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Check if we need a keyframe.
+ bool is_keyframe_required = false;
+ if (frame_types) {
+ for (auto frame_type : *frame_types) {
+ if (frame_type == kVideoFrameKey) {
+ is_keyframe_required = true;
+ break;
+ }
+ }
+ }
+
+ CMTime presentation_time_stamp =
+ CMTimeMake(input_image.render_time_ms(), 1000);
+ CFDictionaryRef frame_properties = nullptr;
+ if (is_keyframe_required) {
+ CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
+ CFTypeRef values[] = { kCFBooleanTrue };
+ frame_properties = internal::CreateCFDictionary(keys, values, 1);
+ }
+ rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
+ encode_params.reset(new internal::FrameEncodeParams(
+ callback_, codec_specific_info, width_, height_,
+ input_image.render_time_ms(), input_image.timestamp()));
+ VTCompressionSessionEncodeFrame(
+ compression_session_, pixel_buffer, presentation_time_stamp,
+ kCMTimeInvalid, frame_properties, encode_params.release(), nullptr);
+ if (frame_properties) {
+ CFRelease(frame_properties);
+ }
+ if (pixel_buffer) {
+ CVBufferRelease(pixel_buffer);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxEncoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
+ callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss,
+ int64_t rtt) {
+ // Encoder doesn't know anything about packet loss or rtt so just return.
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxEncoder::SetRates(uint32_t new_bitrate_kbit,
+ uint32_t frame_rate) {
+ bitrate_ = new_bitrate_kbit * 1000;
+ if (compression_session_) {
+ internal::SetVTSessionProperty(compression_session_,
+ kVTCompressionPropertyKey_AverageBitRate,
+ bitrate_);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int H264VideoToolboxEncoder::Release() {
+ callback_ = nullptr;
+ // Need to reset so that the session is invalidated and won't use the
+ // callback anymore.
+ return ResetCompressionSession();
+}
+
+int H264VideoToolboxEncoder::ResetCompressionSession() {
+ DestroyCompressionSession();
+
+ // Set source image buffer attributes. These attributes will be present on
+ // buffers retrieved from the encoder's pixel buffer pool.
+ const size_t attributes_size = 3;
+ CFTypeRef keys[attributes_size] = {
+#if defined(WEBRTC_IOS)
+ kCVPixelBufferOpenGLESCompatibilityKey,
+#elif defined(WEBRTC_MAC)
+ kCVPixelBufferOpenGLCompatibilityKey,
+#endif
+ kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey
+ };
+ CFDictionaryRef io_surface_value =
+ internal::CreateCFDictionary(nullptr, nullptr, 0);
+ int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
+ CFNumberRef pixel_format =
+ CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
+ CFTypeRef values[attributes_size] = {
+ kCFBooleanTrue,
+ io_surface_value,
+ pixel_format
+ };
+ CFDictionaryRef source_attributes =
+ internal::CreateCFDictionary(keys, values, attributes_size);
+ if (io_surface_value) {
+ CFRelease(io_surface_value);
+ io_surface_value = nullptr;
+ }
+ if (pixel_format) {
+ CFRelease(pixel_format);
+ pixel_format = nullptr;
+ }
+ OSStatus status = VTCompressionSessionCreate(
+ nullptr, // use default allocator
+ width_,
+ height_,
+ kCMVideoCodecType_H264,
+ nullptr, // use default encoder
+ source_attributes,
+ nullptr, // use default compressed data allocator
+ internal::VTCompressionOutputCallback,
+ this,
+ &compression_session_);
+ if (source_attributes) {
+ CFRelease(source_attributes);
+ source_attributes = nullptr;
+ }
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to create compression session: " << status;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ ConfigureCompressionSession();
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void H264VideoToolboxEncoder::ConfigureCompressionSession() {
+ RTC_DCHECK(compression_session_);
+ internal::SetVTSessionProperty(compression_session_,
+ kVTCompressionPropertyKey_RealTime, true);
+ internal::SetVTSessionProperty(compression_session_,
+ kVTCompressionPropertyKey_ProfileLevel,
+ kVTProfileLevel_H264_Baseline_AutoLevel);
+ internal::SetVTSessionProperty(
+ compression_session_, kVTCompressionPropertyKey_AverageBitRate, bitrate_);
+ internal::SetVTSessionProperty(compression_session_,
+ kVTCompressionPropertyKey_AllowFrameReordering,
+ false);
+ // TODO(tkchin): Look at entropy mode and colorspace matrices.
+ // TODO(tkchin): Investigate to see if there's any way to make this work.
+ // May need it to interop with Android. Currently this call just fails.
+ // On inspecting encoder output on iOS8, this value is set to 6.
+ // internal::SetVTSessionProperty(compression_session_,
+ // kVTCompressionPropertyKey_MaxFrameDelayCount,
+ // 1);
+ // TODO(tkchin): See if enforcing keyframe frequency is beneficial in any
+ // way.
+ // internal::SetVTSessionProperty(
+ // compression_session_,
+ // kVTCompressionPropertyKey_MaxKeyFrameInterval, 240);
+ // internal::SetVTSessionProperty(
+ // compression_session_,
+ // kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
+}
+
+void H264VideoToolboxEncoder::DestroyCompressionSession() {
+ if (compression_session_) {
+ VTCompressionSessionInvalidate(compression_session_);
+ CFRelease(compression_session_);
+ compression_session_ = nullptr;
+ }
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
new file mode 100644
index 0000000000..f4fb86fa04
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <VideoToolbox/VideoToolbox.h>
+#include <vector>
+
+// This file provides a H264 encoder implementation using the VideoToolbox
+// APIs. Since documentation is almost non-existent, this is largely based on
+// the information in the VideoToolbox header files, a talk from WWDC 2014 and
+// experimentation.
+
+namespace webrtc {
+
+class H264VideoToolboxEncoder : public H264Encoder {
+ public:
+ H264VideoToolboxEncoder();
+
+ ~H264VideoToolboxEncoder() override;
+
+ int InitEncode(const VideoCodec* codec_settings,
+ int number_of_cores,
+ size_t max_payload_size) override;
+
+ int Encode(const VideoFrame& input_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const std::vector<FrameType>* frame_types) override;
+
+ int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
+
+ int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
+
+ int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
+
+ int Release() override;
+
+ private:
+ int ResetCompressionSession();
+ void ConfigureCompressionSession();
+ void DestroyCompressionSession();
+
+ webrtc::EncodedImageCallback* callback_;
+ VTCompressionSessionRef compression_session_;
+ int32_t bitrate_; // Bitrate in bits per second.
+ int32_t width_;
+ int32_t height_;
+}; // H264VideoToolboxEncoder
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
new file mode 100644
index 0000000000..caca96d3d8
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <vector>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1};
+const size_t kAvccHeaderByteSize = sizeof(uint32_t);
+
+bool H264CMSampleBufferToAnnexBBuffer(
+ CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer,
+ webrtc::RTPFragmentationHeader** out_header) {
+ RTC_DCHECK(avcc_sample_buffer);
+ RTC_DCHECK(out_header);
+ *out_header = nullptr;
+
+ // Get format description from the sample buffer.
+ CMVideoFormatDescriptionRef description =
+ CMSampleBufferGetFormatDescription(avcc_sample_buffer);
+ if (description == nullptr) {
+ LOG(LS_ERROR) << "Failed to get sample buffer's description.";
+ return false;
+ }
+
+ // Get parameter set information.
+ int nalu_header_size = 0;
+ size_t param_set_count = 0;
+ OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, 0, nullptr, nullptr, &param_set_count, &nalu_header_size);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ // TODO(tkchin): handle other potential sizes.
+ RTC_DCHECK_EQ(nalu_header_size, 4);
+ RTC_DCHECK_EQ(param_set_count, 2u);
+
+ // Truncate any previous data in the buffer without changing its capacity.
+ annexb_buffer->SetSize(0);
+
+ size_t nalu_offset = 0;
+ std::vector<size_t> frag_offsets;
+ std::vector<size_t> frag_lengths;
+
+ // Place all parameter sets at the front of buffer.
+ if (is_keyframe) {
+ size_t param_set_size = 0;
+ const uint8_t* param_set = nullptr;
+ for (size_t i = 0; i < param_set_count; ++i) {
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
+ description, i, &param_set, &param_set_size, nullptr, nullptr);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to get parameter set.";
+ return false;
+ }
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(reinterpret_cast<const char*>(param_set),
+ param_set_size);
+ // Update fragmentation.
+ frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
+ frag_lengths.push_back(param_set_size);
+ nalu_offset += sizeof(kAnnexBHeaderBytes) + param_set_size;
+ }
+ }
+
+ // Get block buffer from the sample buffer.
+ CMBlockBufferRef block_buffer =
+ CMSampleBufferGetDataBuffer(avcc_sample_buffer);
+ if (block_buffer == nullptr) {
+ LOG(LS_ERROR) << "Failed to get sample buffer's block buffer.";
+ return false;
+ }
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ // Make sure block buffer is contiguous.
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(
+ nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ // Retain to make cleanup easier.
+ CFRetain(contiguous_buffer);
+ block_buffer = nullptr;
+ }
+
+ // Now copy the actual data.
+ char* data_ptr = nullptr;
+ size_t block_buffer_size = CMBlockBufferGetDataLength(contiguous_buffer);
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr, nullptr,
+ &data_ptr);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to get block buffer data.";
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ size_t bytes_remaining = block_buffer_size;
+ while (bytes_remaining > 0) {
+ // The size type here must match |nalu_header_size|, we expect 4 bytes.
+ // Read the length of the next packet of data. Must convert from big endian
+ // to host endian.
+ RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+ uint32_t* uint32_data_ptr = reinterpret_cast<uint32_t*>(data_ptr);
+ uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
+ // Update buffer.
+ annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
+ annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size);
+ // Update fragmentation.
+ frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
+ frag_lengths.push_back(packet_size);
+ nalu_offset += sizeof(kAnnexBHeaderBytes) + packet_size;
+
+ size_t bytes_written = packet_size + nalu_header_size;
+ bytes_remaining -= bytes_written;
+ data_ptr += bytes_written;
+ }
+ RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
+
+ rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
+ header.reset(new webrtc::RTPFragmentationHeader());
+ header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
+ RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
+ for (size_t i = 0; i < frag_offsets.size(); ++i) {
+ header->fragmentationOffset[i] = frag_offsets[i];
+ header->fragmentationLength[i] = frag_lengths[i];
+ header->fragmentationPlType[i] = 0;
+ header->fragmentationTimeDiff[i] = 0;
+ }
+ *out_header = header.release();
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+bool H264AnnexBBufferToCMSampleBuffer(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer) {
+ RTC_DCHECK(annexb_buffer);
+ RTC_DCHECK(out_sample_buffer);
+ *out_sample_buffer = nullptr;
+
+  // The buffer we receive via RTP has 00 00 00 01 start code artificially
+ // embedded by the RTP depacketizer. Extract NALU information.
+ // TODO(tkchin): handle potential case where sps and pps are delivered
+ // separately.
+ uint8_t first_nalu_type = annexb_buffer[4] & 0x1f;
+ bool is_first_nalu_type_sps = first_nalu_type == 0x7;
+
+ AnnexBBufferReader reader(annexb_buffer, annexb_buffer_size);
+ CMVideoFormatDescriptionRef description = nullptr;
+ OSStatus status = noErr;
+ if (is_first_nalu_type_sps) {
+ // Parse the SPS and PPS into a CMVideoFormatDescription.
+ const uint8_t* param_set_ptrs[2] = {};
+ size_t param_set_sizes[2] = {};
+ if (!reader.ReadNalu(&param_set_ptrs[0], &param_set_sizes[0])) {
+ LOG(LS_ERROR) << "Failed to read SPS";
+ return false;
+ }
+ if (!reader.ReadNalu(&param_set_ptrs[1], &param_set_sizes[1])) {
+ LOG(LS_ERROR) << "Failed to read PPS";
+ return false;
+ }
+ status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
+ kCFAllocatorDefault, 2, param_set_ptrs, param_set_sizes, 4,
+ &description);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to create video format description.";
+ return false;
+ }
+ } else {
+ RTC_DCHECK(video_format);
+ description = video_format;
+ // We don't need to retain, but it makes logic easier since we are creating
+ // in the other block.
+ CFRetain(description);
+ }
+
+ // Allocate memory as a block buffer.
+ // TODO(tkchin): figure out how to use a pool.
+ CMBlockBufferRef block_buffer = nullptr;
+ status = CMBlockBufferCreateWithMemoryBlock(
+ nullptr, nullptr, reader.BytesRemaining(), nullptr, nullptr, 0,
+ reader.BytesRemaining(), kCMBlockBufferAssureMemoryNowFlag,
+ &block_buffer);
+ if (status != kCMBlockBufferNoErr) {
+ LOG(LS_ERROR) << "Failed to create block buffer.";
+ CFRelease(description);
+ return false;
+ }
+
+ // Make sure block buffer is contiguous.
+ CMBlockBufferRef contiguous_buffer = nullptr;
+ if (!CMBlockBufferIsRangeContiguous(block_buffer, 0, 0)) {
+ status = CMBlockBufferCreateContiguous(
+ nullptr, block_buffer, nullptr, nullptr, 0, 0, 0, &contiguous_buffer);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to flatten non-contiguous block buffer: "
+ << status;
+ CFRelease(description);
+ CFRelease(block_buffer);
+ return false;
+ }
+ } else {
+ contiguous_buffer = block_buffer;
+ block_buffer = nullptr;
+ }
+
+ // Get a raw pointer into allocated memory.
+ size_t block_buffer_size = 0;
+ char* data_ptr = nullptr;
+ status = CMBlockBufferGetDataPointer(contiguous_buffer, 0, nullptr,
+ &block_buffer_size, &data_ptr);
+ if (status != kCMBlockBufferNoErr) {
+ LOG(LS_ERROR) << "Failed to get block buffer data pointer.";
+ CFRelease(description);
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
+
+ // Write Avcc NALUs into block buffer memory.
+ AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
+ block_buffer_size);
+ while (reader.BytesRemaining() > 0) {
+ const uint8_t* nalu_data_ptr = nullptr;
+ size_t nalu_data_size = 0;
+ if (reader.ReadNalu(&nalu_data_ptr, &nalu_data_size)) {
+ writer.WriteNalu(nalu_data_ptr, nalu_data_size);
+ }
+ }
+
+ // Create sample buffer.
+ status = CMSampleBufferCreate(nullptr, contiguous_buffer, true, nullptr,
+ nullptr, description, 1, 0, nullptr, 0, nullptr,
+ out_sample_buffer);
+ if (status != noErr) {
+ LOG(LS_ERROR) << "Failed to create sample buffer.";
+ CFRelease(description);
+ CFRelease(contiguous_buffer);
+ return false;
+ }
+ CFRelease(description);
+ CFRelease(contiguous_buffer);
+ return true;
+}
+
+AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
+ size_t length)
+ : start_(annexb_buffer), offset_(0), next_offset_(0), length_(length) {
+ RTC_DCHECK(annexb_buffer);
+ offset_ = FindNextNaluHeader(start_, length_, 0);
+ next_offset_ =
+ FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
+}
+
+bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
+ size_t* out_length) {
+ RTC_DCHECK(out_nalu);
+ RTC_DCHECK(out_length);
+ *out_nalu = nullptr;
+ *out_length = 0;
+
+ size_t data_offset = offset_ + sizeof(kAnnexBHeaderBytes);
+ if (data_offset > length_) {
+ return false;
+ }
+ *out_nalu = start_ + data_offset;
+ *out_length = next_offset_ - data_offset;
+ offset_ = next_offset_;
+ next_offset_ =
+ FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
+ return true;
+}
+
+size_t AnnexBBufferReader::BytesRemaining() const {
+ return length_ - offset_;
+}
+
+size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
+ size_t length,
+ size_t offset) const {
+ RTC_DCHECK(start);
+ if (offset + sizeof(kAnnexBHeaderBytes) > length) {
+ return length;
+ }
+ // NALUs are separated by an 00 00 00 01 header. Scan the byte stream
+ // starting from the offset for the next such sequence.
+ const uint8_t* current = start + offset;
+ // The loop reads sizeof(kAnnexBHeaderBytes) at a time, so stop when there
+ // aren't enough bytes remaining.
+ const uint8_t* const end = start + length - sizeof(kAnnexBHeaderBytes);
+ while (current < end) {
+ if (current[3] > 1) {
+ current += 4;
+ } else if (current[3] == 1 && current[2] == 0 && current[1] == 0 &&
+ current[0] == 0) {
+ return current - start;
+ } else {
+ ++current;
+ }
+ }
+ return length;
+}
+
+AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
+ : start_(avcc_buffer), offset_(0), length_(length) {
+ RTC_DCHECK(avcc_buffer);
+}
+
+bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
+ // Check if we can write this length of data.
+ if (data_size + kAvccHeaderByteSize > BytesRemaining()) {
+ return false;
+ }
+ // Write length header, which needs to be big endian.
+ uint32_t big_endian_length = CFSwapInt32HostToBig(data_size);
+ memcpy(start_ + offset_, &big_endian_length, sizeof(big_endian_length));
+ offset_ += sizeof(big_endian_length);
+ // Write data.
+ memcpy(start_ + offset_, data, data_size);
+ offset_ += data_size;
+ return true;
+}
+
+size_t AvccBufferWriter::BytesRemaining() const {
+ return length_ - offset_;
+}
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
new file mode 100644
index 0000000000..230dea94a0
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#if defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+
+#include <CoreMedia/CoreMedia.h>
+
+#include "webrtc/base/buffer.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+namespace webrtc {
+
+// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
+// suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
+// needs to be in Annex B format. Data is written directly to |annexb_buffer|
+// and a new RTPFragmentationHeader is returned in |out_header|.
+bool H264CMSampleBufferToAnnexBBuffer(
+ CMSampleBufferRef avcc_sample_buffer,
+ bool is_keyframe,
+ rtc::Buffer* annexb_buffer,
+ webrtc::RTPFragmentationHeader** out_header);
+
+// Converts a buffer received from RTP into a sample buffer suitable for the
+// VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample
+// buffer is in avcc format.
+// If |is_keyframe| is true then |video_format| is ignored since the format will
+// be read from the buffer. Otherwise |video_format| must be provided.
+// Caller is responsible for releasing the created sample buffer.
+bool H264AnnexBBufferToCMSampleBuffer(
+ const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer);
+
+// Helper class for reading NALUs from an RTP Annex B buffer.
+class AnnexBBufferReader final {
+ public:
+ AnnexBBufferReader(const uint8_t* annexb_buffer, size_t length);
+ ~AnnexBBufferReader() {}
+ AnnexBBufferReader(const AnnexBBufferReader& other) = delete;
+ void operator=(const AnnexBBufferReader& other) = delete;
+
+ // Returns a pointer to the beginning of the next NALU slice without the
+ // header bytes and its length. Returns false if no more slices remain.
+ bool ReadNalu(const uint8_t** out_nalu, size_t* out_length);
+
+ // Returns the number of unread NALU bytes, including the size of the header.
+ // If the buffer has no remaining NALUs this will return zero.
+ size_t BytesRemaining() const;
+
+ private:
+  // Returns the next offset that contains NALU data.
+ size_t FindNextNaluHeader(const uint8_t* start,
+ size_t length,
+ size_t offset) const;
+
+ const uint8_t* const start_;
+ size_t offset_;
+ size_t next_offset_;
+ const size_t length_;
+};
+
+// Helper class for writing NALUs using avcc format into a buffer.
+class AvccBufferWriter final {
+ public:
+ AvccBufferWriter(uint8_t* const avcc_buffer, size_t length);
+ ~AvccBufferWriter() {}
+ AvccBufferWriter(const AvccBufferWriter& other) = delete;
+ void operator=(const AvccBufferWriter& other) = delete;
+
+ // Writes the data slice into the buffer. Returns false if there isn't
+ // enough space left.
+ bool WriteNalu(const uint8_t* data, size_t data_size);
+
+ // Returns the unused bytes in the buffer.
+ size_t BytesRemaining() const;
+
+ private:
+ uint8_t* const start_;
+ size_t offset_;
+ const size_t length_;
+};
+
+} // namespace webrtc
+
+#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc
new file mode 100644
index 0000000000..36946f1f8e
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/base/arraysize.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
+
+namespace webrtc {
+
+static const uint8_t NALU_TEST_DATA_0[] = {0xAA, 0xBB, 0xCC};
+static const uint8_t NALU_TEST_DATA_1[] = {0xDE, 0xAD, 0xBE, 0xEF};
+
+TEST(AnnexBBufferReaderTest, TestReadEmptyInput) {
+ const uint8_t annex_b_test_data[] = {0x00};
+ AnnexBBufferReader reader(annex_b_test_data, 0);
+ const uint8_t* nalu = nullptr;
+ size_t nalu_length = 0;
+ EXPECT_EQ(0u, reader.BytesRemaining());
+ EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(nullptr, nalu);
+ EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadSingleNalu) {
+ const uint8_t annex_b_test_data[] = {0x00, 0x00, 0x00, 0x01, 0xAA};
+ AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+ const uint8_t* nalu = nullptr;
+ size_t nalu_length = 0;
+ EXPECT_EQ(arraysize(annex_b_test_data), reader.BytesRemaining());
+ EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(annex_b_test_data + 4, nalu);
+ EXPECT_EQ(1u, nalu_length);
+ EXPECT_EQ(0u, reader.BytesRemaining());
+ EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(nullptr, nalu);
+ EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadMissingNalu) {
+ // clang-format off
+ const uint8_t annex_b_test_data[] = {0x01,
+ 0x00, 0x01,
+ 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0xFF};
+ // clang-format on
+ AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+ const uint8_t* nalu = nullptr;
+ size_t nalu_length = 0;
+ EXPECT_EQ(0u, reader.BytesRemaining());
+ EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(nullptr, nalu);
+ EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AnnexBBufferReaderTest, TestReadMultipleNalus) {
+ // clang-format off
+ const uint8_t annex_b_test_data[] = {0x00, 0x00, 0x00, 0x01, 0xFF,
+ 0x01,
+ 0x00, 0x01,
+ 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0xFF,
+ 0x00, 0x00, 0x00, 0x01, 0xAA, 0xBB};
+ // clang-format on
+ AnnexBBufferReader reader(annex_b_test_data, arraysize(annex_b_test_data));
+ const uint8_t* nalu = nullptr;
+ size_t nalu_length = 0;
+ EXPECT_EQ(arraysize(annex_b_test_data), reader.BytesRemaining());
+ EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(annex_b_test_data + 4, nalu);
+ EXPECT_EQ(11u, nalu_length);
+ EXPECT_EQ(6u, reader.BytesRemaining());
+ EXPECT_TRUE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(annex_b_test_data + 19, nalu);
+ EXPECT_EQ(2u, nalu_length);
+ EXPECT_EQ(0u, reader.BytesRemaining());
+ EXPECT_FALSE(reader.ReadNalu(&nalu, &nalu_length));
+ EXPECT_EQ(nullptr, nalu);
+ EXPECT_EQ(0u, nalu_length);
+}
+
+TEST(AvccBufferWriterTest, TestEmptyOutputBuffer) {
+ const uint8_t expected_buffer[] = {0x00};
+ const size_t buffer_size = 1;
+ rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ memset(buffer.get(), 0, buffer_size);
+ AvccBufferWriter writer(buffer.get(), 0);
+ EXPECT_EQ(0u, writer.BytesRemaining());
+ EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+ EXPECT_EQ(0,
+ memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestWriteSingleNalu) {
+ const uint8_t expected_buffer[] = {
+ 0x00, 0x00, 0x00, 0x03, 0xAA, 0xBB, 0xCC,
+ };
+ const size_t buffer_size = arraysize(NALU_TEST_DATA_0) + 4;
+ rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ AvccBufferWriter writer(buffer.get(), buffer_size);
+ EXPECT_EQ(buffer_size, writer.BytesRemaining());
+ EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+ EXPECT_EQ(0u, writer.BytesRemaining());
+ EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_1, arraysize(NALU_TEST_DATA_1)));
+ EXPECT_EQ(0,
+ memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestWriteMultipleNalus) {
+ // clang-format off
+ const uint8_t expected_buffer[] = {
+ 0x00, 0x00, 0x00, 0x03, 0xAA, 0xBB, 0xCC,
+ 0x00, 0x00, 0x00, 0x04, 0xDE, 0xAD, 0xBE, 0xEF
+ };
+ // clang-format on
+ const size_t buffer_size =
+ arraysize(NALU_TEST_DATA_0) + arraysize(NALU_TEST_DATA_1) + 8;
+ rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ AvccBufferWriter writer(buffer.get(), buffer_size);
+ EXPECT_EQ(buffer_size, writer.BytesRemaining());
+ EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+ EXPECT_EQ(buffer_size - (arraysize(NALU_TEST_DATA_0) + 4),
+ writer.BytesRemaining());
+ EXPECT_TRUE(writer.WriteNalu(NALU_TEST_DATA_1, arraysize(NALU_TEST_DATA_1)));
+ EXPECT_EQ(0u, writer.BytesRemaining());
+ EXPECT_EQ(0,
+ memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+TEST(AvccBufferWriterTest, TestOverflow) {
+ const uint8_t expected_buffer[] = {0x00, 0x00, 0x00};
+ const size_t buffer_size = arraysize(NALU_TEST_DATA_0);
+ rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ memset(buffer.get(), 0, buffer_size);
+ AvccBufferWriter writer(buffer.get(), buffer_size);
+ EXPECT_EQ(buffer_size, writer.BytesRemaining());
+ EXPECT_FALSE(writer.WriteNalu(NALU_TEST_DATA_0, arraysize(NALU_TEST_DATA_0)));
+ EXPECT_EQ(buffer_size, writer.BytesRemaining());
+ EXPECT_EQ(0,
+ memcmp(expected_buffer, buffer.get(), arraysize(expected_buffer)));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/h264/include/h264.h b/webrtc/modules/video_coding/codecs/h264/include/h264.h
new file mode 100644
index 0000000000..3f52839a6c
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+
+#include <Availability.h>
+#if (defined(__IPHONE_8_0) && \
+ __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0) || \
+ (defined(__MAC_10_8) && __MAC_OS_X_VERSION_MAX_ALLOWED >= __MAC_10_8)
+#define WEBRTC_VIDEO_TOOLBOX_SUPPORTED 1
+#endif
+
+#endif // defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+
+namespace webrtc {
+
+class H264Encoder : public VideoEncoder {
+ public:
+ static H264Encoder* Create();
+ static bool IsSupported();
+
+ ~H264Encoder() override {}
+};
+
+class H264Decoder : public VideoDecoder {
+ public:
+ static H264Decoder* Create();
+ static bool IsSupported();
+
+ ~H264Decoder() override {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_