author     Guilhem IMBERTON <guilhem.imberton@intel.com>  2014-08-06 20:47:04 +0200
committer  Patrick Tjin <pattjin@google.com>  2014-08-07 14:31:21 -0700
commit     82b428e49a70ddc051a36d2b3a25d90db79770dc (patch)
tree       3c7387e0ff0d1a4dfebec762a9b0a80f09724ef1 /videodecoder
parent     4d358311bdb7a2e02671ecf499effeb0262e1fc3 (diff)
download   libmix-82b428e49a70ddc051a36d2b3a25d90db79770dc.tar.gz
Initial libmix commit
Change-Id: I7a0b9afdc83a3274189cef0788c7296a871a3d98
Signed-off-by: Guilhem IMBERTON <guilhem.imberton@intel.com>
Diffstat (limited to 'videodecoder')
-rw-r--r--  videodecoder/Android.mk  79
-rw-r--r--  videodecoder/VideoDecoderAVC.cpp  992
-rwxr-xr-x  videodecoder/VideoDecoderAVC.h  84
-rw-r--r--  videodecoder/VideoDecoderBase.cpp  1514
-rwxr-xr-x  videodecoder/VideoDecoderBase.h  187
-rw-r--r--  videodecoder/VideoDecoderDefs.h  263
-rw-r--r--  videodecoder/VideoDecoderHost.cpp  85
-rw-r--r--  videodecoder/VideoDecoderHost.h  29
-rw-r--r--  videodecoder/VideoDecoderInterface.h  40
-rw-r--r--  videodecoder/VideoDecoderMPEG4.cpp  645
-rw-r--r--  videodecoder/VideoDecoderMPEG4.h  70
-rw-r--r--  videodecoder/VideoDecoderTrace.cpp  37
-rwxr-xr-x  videodecoder/VideoDecoderTrace.h  96
-rw-r--r--  videodecoder/VideoDecoderVP8.cpp  449
-rw-r--r--  videodecoder/VideoDecoderVP8.h  91
-rw-r--r--  videodecoder/VideoDecoderWMV.cpp  568
-rw-r--r--  videodecoder/VideoDecoderWMV.h  66
-rw-r--r--  videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp  367
-rw-r--r--  videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h  44
-rw-r--r--  videodecoder/securevideo/baytrail/secvideoparser.h  150
-rw-r--r--  videodecoder/securevideo/baytrail/va_private.h  64
-rw-r--r--  videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp  351
-rw-r--r--  videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h  44
-rw-r--r--  videodecoder/securevideo/cherrytrail/secvideoparser.h  150
-rw-r--r--  videodecoder/securevideo/cherrytrail/va_private.h  63
-rw-r--r--  videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp  507
-rw-r--r--  videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h  75
-rwxr-xr-x  videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp  858
-rwxr-xr-x  videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h  69
-rwxr-xr-x  videodecoder/securevideo/merrifield/VideoFrameInfo.h  36
-rw-r--r--  videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp  510
-rw-r--r--  videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h  75
-rw-r--r--  videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp  861
-rw-r--r--  videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h  69
-rwxr-xr-x  videodecoder/securevideo/moorefield/VideoFrameInfo.h  36
-rw-r--r--  videodecoder/use_util_sse4.h  93
36 files changed, 9717 insertions, 0 deletions
diff --git a/videodecoder/Android.mk b/videodecoder/Android.mk
new file mode 100644
index 0000000..885b325
--- /dev/null
+++ b/videodecoder/Android.mk
@@ -0,0 +1,79 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+ifeq ($(TARGET_HAS_VPP),true)
+LOCAL_CFLAGS += -DTARGET_HAS_VPP
+endif
+
+LOCAL_SRC_FILES := \
+ VideoDecoderHost.cpp \
+ VideoDecoderBase.cpp \
+ VideoDecoderWMV.cpp \
+ VideoDecoderMPEG4.cpp \
+ VideoDecoderAVC.cpp \
+ VideoDecoderTrace.cpp
+
+LOCAL_C_INCLUDES := \
+ $(TARGET_OUT_HEADERS)/libva \
+ $(TARGET_OUT_HEADERS)/libmixvbp
+
+ifeq ($(USE_INTEL_SECURE_AVC),true)
+LOCAL_CFLAGS += -DUSE_INTEL_SECURE_AVC
+LOCAL_SRC_FILES += securevideo/$(TARGET_BOARD_PLATFORM)/VideoDecoderAVCSecure.cpp
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/securevideo/$(TARGET_BOARD_PLATFORM)
+endif
+
+PLATFORM_USE_GEN_HW := \
+ baytrail \
+ cherrytrail
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_USE_GEN_HW)),)
+ LOCAL_CFLAGS += -DUSE_AVC_SHORT_FORMAT -DUSE_GEN_HW
+endif
+
+
+PLATFORM_USE_HYBRID_DRIVER := \
+ baytrail
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_USE_HYBRID_DRIVER)),)
+ LOCAL_CFLAGS += -DUSE_HYBRID_DRIVER
+endif
+
+PLATFORM_SUPPORT_SLICE_HEADER_PARSER := \
+ merrifield \
+ moorefield
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_SLICE_HEADER_PARSER)),)
+ LOCAL_CFLAGS += -DUSE_SLICE_HEADER_PARSING
+endif
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libva \
+ libva-android \
+ libva-tpi \
+ libdl
+
+LOCAL_COPY_HEADERS_TO := libmix_videodecoder
+
+LOCAL_COPY_HEADERS := \
+ VideoDecoderHost.h \
+ VideoDecoderInterface.h \
+ VideoDecoderDefs.h
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_SLICE_HEADER_PARSER)),)
+ LOCAL_COPY_HEADERS += securevideo/$(TARGET_BOARD_PLATFORM)/VideoFrameInfo.h
+endif
+
+LOCAL_CFLAGS += -Werror
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := libva_videodecoder
+
+ifeq ($(USE_HW_VP8),true)
+LOCAL_SRC_FILES += VideoDecoderVP8.cpp
+LOCAL_CFLAGS += -DUSE_HW_VP8
+endif
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/videodecoder/VideoDecoderAVC.cpp b/videodecoder/VideoDecoderAVC.cpp
new file mode 100644
index 0000000..8ed91f9
--- /dev/null
+++ b/videodecoder/VideoDecoderAVC.cpp
@@ -0,0 +1,992 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+#include <cutils/properties.h>
+
+// Macros for actual buffer needed calculation
+#define WIDI_CONSUMED 6
+#define HDMI_CONSUMED 2
+#define NW_CONSUMED 2
+#define POC_DEFAULT 0x7FFFFFFF
+
+VideoDecoderAVC::VideoDecoderAVC(const char *mimeType)
+ : VideoDecoderBase(mimeType, VBP_H264),
+ mToggleDPB(0),
+ mErrorConcealment(false){
+
+ invalidateDPB(0);
+ invalidateDPB(1);
+ mLastPictureFlags = VA_PICTURE_H264_INVALID;
+}
+
+VideoDecoderAVC::~VideoDecoderAVC() {
+ stop();
+}
+
+Decode_Status VideoDecoderAVC::start(VideoConfigBuffer *buffer) {
+ Decode_Status status;
+
+ status = VideoDecoderBase::start(buffer);
+ CHECK_STATUS("VideoDecoderBase::start");
+
+ // We don't want base class to manage reference.
+ VideoDecoderBase::ManageReference(false);
+ // output by picture order count
+ VideoDecoderBase::setOutputMethod(OUTPUT_BY_POC);
+
+ mErrorConcealment = buffer->flag & WANT_ERROR_CONCEALMENT;
+ if (buffer->data == NULL || buffer->size == 0) {
+ WTRACE("No config data to start VA.");
+ if ((buffer->flag & HAS_SURFACE_NUMBER) && (buffer->flag & HAS_VA_PROFILE)) {
+ ITRACE("Used client supplied profile and surface to start VA.");
+ return VideoDecoderBase::setupVA(buffer->surfaceNumber, buffer->profile);
+ }
+ return DECODE_SUCCESS;
+ }
+
+ vbp_data_h264 *data = NULL;
+ status = VideoDecoderBase::parseBuffer(buffer->data, buffer->size, true, (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ status = startVA(data);
+ return status;
+}
+
+void VideoDecoderAVC::stop(void) {
+ // drop the last frame and ignore return value
+ endDecodingFrame(true);
+ VideoDecoderBase::stop();
+ invalidateDPB(0);
+ invalidateDPB(1);
+ mToggleDPB = 0;
+ mErrorConcealment = false;
+ mLastPictureFlags = VA_PICTURE_H264_INVALID;
+}
+
+void VideoDecoderAVC::flush(void) {
+ // drop the frame and ignore return value
+ VideoDecoderBase::flush();
+ invalidateDPB(0);
+ invalidateDPB(1);
+ mToggleDPB = 0;
+ mLastPictureFlags = VA_PICTURE_H264_INVALID;
+}
+
+Decode_Status VideoDecoderAVC::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ vbp_data_h264 *data = NULL;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+ status = VideoDecoderBase::parseBuffer(
+ buffer->data,
+ buffer->size,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+
+ VideoDecoderBase::setRotationDegrees(buffer->rotationDegrees);
+
+ status = decodeFrame(buffer, data);
+ if (status == DECODE_MULTIPLE_FRAME) {
+ buffer->ext = &mExtensionBuffer;
+ mExtensionBuffer.extType = PACKED_FRAME_TYPE;
+ mExtensionBuffer.extSize = sizeof(mPackedFrame);
+ mExtensionBuffer.extData = (uint8_t*)&mPackedFrame;
+ }
+ return status;
+}
+
+Decode_Status VideoDecoderAVC::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ Decode_Status status;
+ if (data->has_sps == 0 || data->has_pps == 0) {
+ return DECODE_NO_CONFIG;
+ }
+
+ mVideoFormatInfo.flags = 0;
+ uint32_t fieldFlags = 0;
+ for (unsigned int i = 0; i < data->num_pictures; i++) {
+ VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+ fieldFlags |= pic.flags;
+ // Don't remove the following code; it can be enabled for debugging the DPB.
+#if 0
+ VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d, reference = %d",
+ i,
+ buffer->timeStamp/1E6,
+ pic.TopFieldOrderCnt,
+ pic.BottomFieldOrderCnt,
+ pic.flags,
+ (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+#endif
+ }
+ int32_t topField = fieldFlags & VA_PICTURE_H264_TOP_FIELD;
+ int32_t botField = fieldFlags & VA_PICTURE_H264_BOTTOM_FIELD;
+ if ((topField == 0 && botField != 0) || (topField != 0 && botField == 0)) {
+ mVideoFormatInfo.flags |= IS_SINGLE_FIELD;
+ }
+
+ if (data->new_sps || data->new_pps) {
+ status = handleNewSequence(data);
+ CHECK_STATUS("handleNewSequence");
+ }
+
+ if (isWiDiStatusChanged()) {
+ mSizeChanged = false;
+ flushSurfaceBuffers();
+ return DECODE_FORMAT_CHANGE;
+ }
+
+ // first pic_data always exists, check if any slice is parsed
+ if (data->pic_data[0].num_slices == 0) {
+ ITRACE("No slice available for decoding.");
+ status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+ mSizeChanged = false;
+ return status;
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+ //if (lastPTS != mCurrentPTS) {
+ if (isNewFrame(data, lastPTS == mCurrentPTS)) {
+ if (mLowDelay) {
+ // start decoding a new frame
+ status = beginDecodingFrame(data);
+ if (status != DECODE_SUCCESS) {
+ Decode_Status st = status;
+ // finish decoding the last frame if
+ // encounter error when decode the new frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+ return st;
+ }
+ }
+
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ if (!mLowDelay) {
+ // start decoding a new frame
+ status = beginDecodingFrame(data);
+ CHECK_STATUS("beginDecodingFrame");
+ }
+ } else {
+ status = continueDecodingFrame(data);
+ CHECK_STATUS("continueDecodingFrame");
+ }
+
+ // HAS_COMPLETE_FRAME is not reliable as it may indicate end of a field
+#if 0
+ if (buffer->flag & HAS_COMPLETE_FRAME) {
+ // finish decoding current frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+ }
+#endif
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVC::beginDecodingFrame(vbp_data_h264 *data) {
+ Decode_Status status;
+
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+ VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+ if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ mAcquiredBuffer->referenceFrame = true;
+ } else {
+ mAcquiredBuffer->referenceFrame = false;
+ }
+ // set asReference in updateDPB
+
+ if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+
+ // TODO: Set the discontinuity flag
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ mSizeChanged = false;
+ }
+
+ status = continueDecodingFrame(data);
+ // surface buffer is released if decode fails
+ return status;
+}
+
+
+Decode_Status VideoDecoderAVC::continueDecodingFrame(vbp_data_h264 *data) {
+ Decode_Status status;
+ vbp_picture_data_h264 *picData = data->pic_data;
+
+ // TODO: remove this debugging code
+ if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+ for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+ // sanity check
+ if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ if (picIndex > 0 &&
+ (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+ // it is a packed frame buffer
+ vbp_picture_data_h264 *lastPic = &data->pic_data[picIndex - 1];
+ vbp_slice_data_h264 *sliceData = &(lastPic->slc_data[lastPic->num_slices - 1]);
+ mPackedFrame.offSet = sliceData->slice_size + sliceData->slice_offset;
+ mPackedFrame.timestamp = mCurrentPTS; // use the current time stamp for the packed frame
+ ITRACE("slice data offset= %d, size = %d", sliceData->slice_offset, sliceData->slice_size);
+ return DECODE_MULTIPLE_FRAME;
+ }
+
+ for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+ status = decodeSlice(data, picIndex, sliceIndex);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ // TODO: this is new code
+ // remove current frame from DPB as it can't be decoded.
+ removeReferenceFromDPB(picData->pic_parms);
+ return status;
+ }
+ }
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVC::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ int32_t poc = getPOC(&(picParam->CurrPic));
+ if (poc < mAcquiredBuffer->pictureOrder) {
+ mAcquiredBuffer->pictureOrder = poc;
+ }
+ }
+
+ // Check there is no reference frame loss before decoding a frame
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+#ifndef USE_AVC_SHORT_FORMAT
+ // We have to provide a hacked DPB rather than the complete DPB to libva as a workaround
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+#endif
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+#ifndef USE_AVC_SHORT_FORMAT
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+#else
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264Base),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+#endif
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
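
decodeSlice() above follows the standard libva buffer-submission order. A minimal sketch of that order, with error checking and the USE_AVC_SHORT_FORMAT variant omitted; submitSlice() is a hypothetical helper, not part of this patch:

    #include <va/va.h>

    // Sketch of the libva call order in decodeSlice(): per-frame buffers are
    // created once after vaBeginPicture(), per-slice buffers for every slice,
    // and vaRenderPicture() queues them all for the hardware.
    static VAStatus submitSlice(VADisplay dpy, VAContextID ctx, VASurfaceID target,
            VAPictureParameterBufferH264 *picParam, VAIQMatrixBufferH264 *iqMatrix,
            VASliceParameterBufferH264 *sliceParam, uint8_t *sliceData,
            uint32_t sliceSize, bool firstSliceOfFrame) {
        VABufferID ids[4];
        int n = 0;
        if (firstSliceOfFrame) {
            vaBeginPicture(dpy, ctx, target);
            vaCreateBuffer(dpy, ctx, VAPictureParameterBufferType,
                    sizeof(*picParam), 1, picParam, &ids[n++]);
            vaCreateBuffer(dpy, ctx, VAIQMatrixBufferType,
                    sizeof(*iqMatrix), 1, iqMatrix, &ids[n++]);
        }
        vaCreateBuffer(dpy, ctx, VASliceParameterBufferType,
                sizeof(*sliceParam), 1, sliceParam, &ids[n++]);
        vaCreateBuffer(dpy, ctx, VASliceDataBufferType,
                sliceSize, 1, sliceData, &ids[n++]);
        // vaEndPicture() is issued separately once the whole frame is submitted
        return vaRenderPicture(dpy, ctx, ids, n);
    }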
+
+Decode_Status VideoDecoderAVC::setReference(VASliceParameterBufferH264 *sliceParam) {
+ int32_t numList = 1;
+ // TODO: set numList to 0 if it is an I slice
+ if (sliceParam->slice_type == 1 || sliceParam->slice_type == 6) {
+ // B slice
+ numList = 2;
+ }
+
+ int32_t activeMinus1 = sliceParam->num_ref_idx_l0_active_minus1;
+ VAPictureH264 *ref = sliceParam->RefPicList0;
+
+ for (int32_t i = 0; i < numList; i++) {
+ if (activeMinus1 >= REF_LIST_SIZE) {
+ ETRACE("Invalid activeMinus1 (%d)", activeMinus1);
+ return DECODE_PARSER_FAIL;
+ }
+ for (int32_t j = 0; j <= activeMinus1; j++, ref++) {
+ if (!(ref->flags & VA_PICTURE_H264_INVALID)) {
+ ref->picture_id = findSurface(ref);
+ if (ref->picture_id == VA_INVALID_SURFACE) {
+ // Error DecodeRefMissing is counted once even if there are multiple missing references
+ mAcquiredBuffer->renderBuffer.errBuf.errorNumber = 1;
+ mAcquiredBuffer->renderBuffer.errBuf.errorArray[0].type = DecodeRefMissing;
+
+ if (mLastReference) {
+ WTRACE("Reference frame %d is missing. Use last reference", getPOC(ref));
+ ref->picture_id = mLastReference->renderBuffer.surface;
+ } else {
+ ETRACE("Reference frame %d is missing. Stop decoding.", getPOC(ref));
+ return DECODE_NO_REFERENCE;
+ }
+ }
+ }
+ }
+ activeMinus1 = sliceParam->num_ref_idx_l1_active_minus1;
+ ref = sliceParam->RefPicList1;
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVC::updateDPB(VAPictureParameterBufferH264 *picParam) {
+ clearAsReference(mToggleDPB);
+ // pointer to toggled DPB (new)
+ DecodedPictureBuffer *dpb = mDPBs[!mToggleDPB];
+ VAPictureH264 *ref = picParam->ReferenceFrames;
+
+ // update current picture ID
+ picParam->CurrPic.picture_id = mAcquiredBuffer->renderBuffer.surface;
+
+ // build new DPB
+ for (int32_t i = 0; i < MAX_REF_NUMBER; i++, ref++) {
+ if (ref->flags & VA_PICTURE_H264_INVALID) {
+ continue;
+ }
+#ifdef USE_AVC_SHORT_FORMAT
+ ref->picture_id = findSurface(ref);
+#endif
+ dpb->poc = getPOC(ref);
+ // look for the latest ref frame in the DPB with the specified POC, in case frames share the same POC
+ dpb->surfaceBuffer = findRefSurfaceBuffer(ref);
+ if (dpb->surfaceBuffer == NULL) {
+ ETRACE("Reference frame %d is missing for current frame %d", dpb->poc, getPOC(&(picParam->CurrPic)));
+ // Error DecodeRefMissing is counted once even if there are multiple missing references
+ mAcquiredBuffer->renderBuffer.errBuf.errorNumber = 1;
+ mAcquiredBuffer->renderBuffer.errBuf.errorArray[0].type = DecodeRefMissing;
+ if (dpb->poc == getPOC(&(picParam->CurrPic))) {
+ WTRACE("updateDPB: Using the current picture for missing reference.");
+ dpb->surfaceBuffer = mAcquiredBuffer;
+ } else if (mLastReference) {
+ WTRACE("updateDPB: Use last reference frame %d for missing reference.", mLastReference->pictureOrder);
+ // TODO: this is new code for error resilience
+ dpb->surfaceBuffer = mLastReference;
+ } else {
+ WTRACE("updateDPB: Unable to recover the missing reference frame.");
+ // continue building the DPB without updating the dpb pointer,
+ // as this reference may not actually be used; this typically
+ // happens after seeking to a non-IDR I frame.
+ continue;
+ //return DECODE_NO_REFERENCE;
+ }
+ }
+ if (dpb->surfaceBuffer) {
+ // this surface is used as reference
+ dpb->surfaceBuffer->asReferernce = true;
+ }
+ dpb++;
+ }
+
+ // add current frame to DPB if it is a reference frame
+ if ((picParam->CurrPic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picParam->CurrPic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ dpb->poc = getPOC(&(picParam->CurrPic));
+ dpb->surfaceBuffer = mAcquiredBuffer;
+ dpb->surfaceBuffer->asReferernce = true;
+ }
+ // invalidate the currently used DPB
+ invalidateDPB(mToggleDPB);
+ mToggleDPB = !mToggleDPB;
+ return DECODE_SUCCESS;
+}
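
updateDPB() above rebuilds the reference list into the inactive copy of a two-copy ping-pong DPB and only flips mToggleDPB once the new copy is complete, so lookups for the frame still being decoded keep reading the old copy. A minimal illustration of the pattern, assuming the DecodedPictureBuffer struct and DPB_SIZE constant declared in VideoDecoderAVC.h:

    // Illustration only: the double-buffered DPB flip used by updateDPB().
    struct PingPongDPB {
        DecodedPictureBuffer buffers[2][DPB_SIZE];
        uint8_t toggle;                 // mToggleDPB in the decoder, 0 or 1
        DecodedPictureBuffer* current()  { return buffers[toggle]; }  // read side
        DecodedPictureBuffer* building() { return buffers[!toggle]; } // write side
        void flip() { toggle = !toggle; } // called after the new DPB is built
    };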
+
+Decode_Status VideoDecoderAVC::updateReferenceFrames(vbp_picture_data_h264 *picData) {
+ bool found = false;
+ uint32_t flags = 0;
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = NULL;
+ uint8_t activeMinus1 = 0;
+ VAPictureH264 *refList = NULL;
+ VAPictureH264 *dpb = picParam->ReferenceFrames;
+ VAPictureH264 *refFrame = NULL;
+
+ // invalidate DPB in the picture buffer
+ memset(picParam->ReferenceFrames, 0xFF, sizeof(picParam->ReferenceFrames));
+ picParam->num_ref_frames = 0;
+
+ // update DPB from the reference list in each slice.
+ for (uint32_t slice = 0; slice < picData->num_slices; slice++) {
+ sliceParam = &(picData->slc_data[slice].slc_parms);
+
+ for (int32_t list = 0; list < 2; list++) {
+ refList = (list == 0) ? sliceParam->RefPicList0 :
+ sliceParam->RefPicList1;
+ activeMinus1 = (list == 0) ? sliceParam->num_ref_idx_l0_active_minus1 :
+ sliceParam->num_ref_idx_l1_active_minus1;
+ if (activeMinus1 >= REF_LIST_SIZE) {
+ return DECODE_PARSER_FAIL;
+ }
+ for (uint8_t item = 0; item < (uint8_t)(activeMinus1 + 1); item++, refList++) {
+ if (refList->flags & VA_PICTURE_H264_INVALID) {
+ break;
+ }
+ found = false;
+ refFrame = picParam->ReferenceFrames;
+ for (uint8_t frame = 0; frame < picParam->num_ref_frames; frame++, refFrame++) {
+ if (refFrame->TopFieldOrderCnt == refList->TopFieldOrderCnt) {
+ ///check for complementary field
+ flags = refFrame->flags | refList->flags;
+ //If both TOP and BOTTOM are set, we'll clear those flags
+ if ((flags & VA_PICTURE_H264_TOP_FIELD) &&
+ (flags & VA_PICTURE_H264_BOTTOM_FIELD)) {
+ refFrame->flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+ }
+ found = true; //already in the DPB; will not add this one
+ break;
+ }
+ }
+ if (found == false) {
+ // add a new reference to the DPB
+ dpb->picture_id = findSurface(refList);
+ if (dpb->picture_id == VA_INVALID_SURFACE) {
+ if (mLastReference != NULL) {
+ dpb->picture_id = mLastReference->renderBuffer.surface;
+ } else {
+ ETRACE("Reference frame %d is missing. Stop updating references frames.", getPOC(refList));
+ return DECODE_NO_REFERENCE;
+ }
+ }
+ dpb->flags = refList->flags;
+ // if a bottom field is in the DPB, its top field must be there as well,
+ // so clear the bottom flag, or the VED will be confused when addressing the top field
+ if (dpb->flags & VA_PICTURE_H264_BOTTOM_FIELD)
+ dpb->flags &= (~VA_PICTURE_H264_BOTTOM_FIELD);
+ dpb->frame_idx = refList->frame_idx;
+ dpb->TopFieldOrderCnt = refList->TopFieldOrderCnt;
+ dpb->BottomFieldOrderCnt = refList->BottomFieldOrderCnt;
+ dpb++;
+ picParam->num_ref_frames++;
+ }
+ }
+ }
+ }
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderAVC::removeReferenceFromDPB(VAPictureParameterBufferH264 *picParam) {
+ // remove the current frame from DPB as it can't be decoded.
+ if ((picParam->CurrPic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picParam->CurrPic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ DecodedPictureBuffer *dpb = mDPBs[mToggleDPB];
+ int32_t poc = getPOC(&(picParam->CurrPic));
+ for (int32_t i = 0; i < DPB_SIZE; i++, dpb++) {
+ if (poc == dpb->poc) {
+ dpb->poc = (int32_t)POC_DEFAULT;
+ if (dpb->surfaceBuffer) {
+ dpb->surfaceBuffer->asReferernce = false;
+ }
+ dpb->surfaceBuffer = NULL;
+ break;
+ }
+ }
+ }
+}
+
+int32_t VideoDecoderAVC::getPOC(VAPictureH264 *pic) {
+ if (pic->flags & VA_PICTURE_H264_BOTTOM_FIELD) {
+ return pic->BottomFieldOrderCnt;
+ }
+ return pic->TopFieldOrderCnt;
+}
+
+VASurfaceID VideoDecoderAVC::findSurface(VAPictureH264 *pic) {
+ VideoSurfaceBuffer *p = findSurfaceBuffer(pic);
+ if (p == NULL) {
+ ETRACE("Could not find surface for poc %d", getPOC(pic));
+ return VA_INVALID_SURFACE;
+ }
+ return p->renderBuffer.surface;
+}
+
+VideoSurfaceBuffer* VideoDecoderAVC::findSurfaceBuffer(VAPictureH264 *pic) {
+ DecodedPictureBuffer *dpb = mDPBs[mToggleDPB];
+ for (int32_t i = 0; i < DPB_SIZE; i++, dpb++) {
+ if (dpb->poc == pic->BottomFieldOrderCnt ||
+ dpb->poc == pic->TopFieldOrderCnt) {
+ // TODO: remove this debugging code
+ if (dpb->surfaceBuffer == NULL) {
+ ETRACE("Invalid surface buffer in the DPB for poc %d.", getPOC(pic));
+ }
+ return dpb->surfaceBuffer;
+ }
+ }
+ // ETRACE("Unable to find surface for poc %d", getPOC(pic));
+ return NULL;
+}
+
+VideoSurfaceBuffer* VideoDecoderAVC::findRefSurfaceBuffer(VAPictureH264 *pic) {
+ DecodedPictureBuffer *dpb = mDPBs[mToggleDPB];
+ // always looking for the latest one in the DPB, in case ref frames have same POC
+ dpb += (DPB_SIZE - 1);
+ for (int32_t i = DPB_SIZE; i > 0; i--, dpb--) {
+ if (dpb->poc == pic->BottomFieldOrderCnt ||
+ dpb->poc == pic->TopFieldOrderCnt) {
+ // TODO: remove this debugging code
+ if (dpb->surfaceBuffer == NULL) {
+ ETRACE("Invalid surface buffer in the DPB for poc %d.", getPOC(pic));
+ }
+ return dpb->surfaceBuffer;
+ }
+ }
+ ETRACE("Unable to find surface for poc %d", getPOC(pic));
+ return NULL;
+}
+
+void VideoDecoderAVC::invalidateDPB(int toggle) {
+ DecodedPictureBuffer* p = mDPBs[toggle];
+ for (int i = 0; i < DPB_SIZE; i++) {
+ p->poc = (int32_t) POC_DEFAULT;
+ p->surfaceBuffer = NULL;
+ p++;
+ }
+}
+
+void VideoDecoderAVC::clearAsReference(int toggle) {
+ DecodedPictureBuffer* p = mDPBs[toggle];
+ for (int i = 0; i < DPB_SIZE; i++) {
+ if (p->surfaceBuffer) {
+ p->surfaceBuffer->asReferernce = false;
+ }
+ p++;
+ }
+}
+
+Decode_Status VideoDecoderAVC::startVA(vbp_data_h264 *data) {
+ int32_t DPBSize = getDPBSize(data);
+
+ // Use the high profile for all H.264 profiles (baseline, main, and high) except constrained baseline
+ VAProfile vaProfile = VAProfileH264High;
+
+ // TODO: determine when to use VAProfileH264ConstrainedBaseline, set only if we are told to do so
+ if ((data->codec_data->profile_idc == 66 || data->codec_data->constraint_set0_flag == 1) &&
+ data->codec_data->constraint_set1_flag == 1) {
+ if (mErrorConcealment) {
+ vaProfile = VAProfileH264ConstrainedBaseline;
+ }
+ }
+
+ VideoDecoderBase::setOutputWindowSize(mConfigBuffer.flag & WANT_ADAPTIVE_PLAYBACK ? OUTPUT_WINDOW_SIZE : DPBSize);
+ updateFormatInfo(data);
+
+ // for 1080p, limit the total surface count to 19, per the hardware limitation
+ // (an earlier 19->10 workaround for memory shortage has been removed)
+ if (mVideoFormatInfo.height == 1088 && DPBSize + AVC_EXTRA_SURFACE_NUMBER > 19) {
+ DPBSize = 19 - AVC_EXTRA_SURFACE_NUMBER;
+ }
+
+ if (mConfigBuffer.flag & WANT_ADAPTIVE_PLAYBACK) {
+ // When Adaptive playback is enabled, turn off low delay mode.
+ // Otherwise there may be a 240ms stuttering if the output mode is changed from LowDelay to Delay.
+ enableLowDelayMode(false);
+ } else {
+ // for baseline profile, enable low delay mode automatically
+ enableLowDelayMode(data->codec_data->profile_idc == 66);
+ }
+
+ return VideoDecoderBase::setupVA(DPBSize + AVC_EXTRA_SURFACE_NUMBER, vaProfile);
+}
+
+void VideoDecoderAVC::updateFormatInfo(vbp_data_h264 *data) {
+ // new video size
+ uint32_t width = (data->pic_data[0].pic_parms->picture_width_in_mbs_minus1 + 1) * 16;
+ uint32_t height = (data->pic_data[0].pic_parms->picture_height_in_mbs_minus1 + 1) * 16;
+ ITRACE("updateFormatInfo: current size: %d x %d, new size: %d x %d",
+ mVideoFormatInfo.width, mVideoFormatInfo.height, width, height);
+
+ if ((mVideoFormatInfo.width != width ||
+ mVideoFormatInfo.height != height) &&
+ width && height) {
+ if (VideoDecoderBase::alignMB(mVideoFormatInfo.width) != width ||
+ VideoDecoderBase::alignMB(mVideoFormatInfo.height) != height) {
+ mSizeChanged = true;
+ ITRACE("Video size is changed.");
+ }
+ mVideoFormatInfo.width = width;
+ mVideoFormatInfo.height = height;
+ }
+
+ // video_range has default value of 0.
+ mVideoFormatInfo.videoRange = data->codec_data->video_full_range_flag;
+
+ switch (data->codec_data->matrix_coefficients) {
+ case 1:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT709;
+ break;
+
+ // ITU-R Recommendation BT.470-6 System B, G (MP4), same as
+ // SMPTE 170M/BT601
+ case 5:
+ case 6:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT601;
+ break;
+
+ default:
+ // unknown color matrix, set to 0 so color space flag will not be set.
+ mVideoFormatInfo.colorMatrix = 0;
+ break;
+ }
+ mVideoFormatInfo.aspectX = data->codec_data->sar_width;
+ mVideoFormatInfo.aspectY = data->codec_data->sar_height;
+ mVideoFormatInfo.bitrate = data->codec_data->bit_rate;
+ mVideoFormatInfo.cropLeft = data->codec_data->crop_left;
+ mVideoFormatInfo.cropRight = data->codec_data->crop_right;
+ mVideoFormatInfo.cropTop = data->codec_data->crop_top;
+ mVideoFormatInfo.cropBottom = data->codec_data->crop_bottom;
+
+ ITRACE("Cropping: left = %d, top = %d, right = %d, bottom = %d",
+ data->codec_data->crop_left,
+ data->codec_data->crop_top,
+ data->codec_data->crop_right,
+ data->codec_data->crop_bottom);
+
+ if (mConfigBuffer.flag & WANT_SURFACE_PROTECTION) {
+ mVideoFormatInfo.actualBufferNeeded = mConfigBuffer.surfaceNumber;
+ } else {
+ // The actual number of buffers needed is
+ // outputQueue + nativewindow_owned + num_ref_frames + widi_need_max + 1 (available buffer),
+ // where outputQueue = (DPB < 8) ? DPB : 8
+ mVideoFormatInfo.actualBufferNeeded = mOutputWindowSize + NW_CONSUMED /* Owned by native window */
+ + data->codec_data->num_ref_frames
+#ifndef USE_GEN_HW
+ + HDMI_CONSUMED /* Two extra buffers are needed for native window buffer cycling */
+ + (mWiDiOn ? WIDI_CONSUMED : 0) /* WiDi maximum needs */
+#endif
+ + 1;
+ }
+
+ ITRACE("actualBufferNeeded =%d", mVideoFormatInfo.actualBufferNeeded);
+
+ mVideoFormatInfo.valid = true;
+
+ setRenderRect();
+}
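
The buffer budget above is straightforward arithmetic; a worked example with assumed stream values (WiDi off, non-GEN build):

    // Worked example of actualBufferNeeded (all stream values assumed):
    //   mOutputWindowSize = 4      outputQueue, = (DPB < 8) ? DPB : 8
    //   NW_CONSUMED       = 2      buffers owned by the native window
    //   num_ref_frames    = 4      from the SPS
    //   HDMI_CONSUMED     = 2      native window buffer cycling
    //   + 1                        one buffer kept available for decoding
    // actualBufferNeeded = 4 + 2 + 4 + 2 + 1 = 13 surfaces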
+
+bool VideoDecoderAVC::isWiDiStatusChanged() {
+#ifndef USE_GEN_HW
+ if (mWiDiOn)
+ return false;
+
+ if (mConfigBuffer.flag & WANT_SURFACE_PROTECTION)
+ return false;
+
+ if (!(mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER))
+ return false;
+
+ char prop[PROPERTY_VALUE_MAX];
+ bool widi_on = (property_get("media.widi.enabled", prop, NULL) > 0) &&
+ (!strcmp(prop, "1") || !strcasecmp(prop, "true"));
+ if (widi_on) {
+ mVideoFormatInfo.actualBufferNeeded += WIDI_CONSUMED;
+ mWiDiOn = true;
+ ITRACE("WiDi is enabled, actual buffer needed is %d", mVideoFormatInfo.actualBufferNeeded);
+ return true;
+ }
+ return false;
+#else
+ return false;
+#endif
+}
+
+Decode_Status VideoDecoderAVC::handleNewSequence(vbp_data_h264 *data) {
+ updateFormatInfo(data);
+ bool needFlush = false;
+ bool rawDataMode = !(mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER);
+
+ if (!rawDataMode) {
+ needFlush = (mVideoFormatInfo.width > mVideoFormatInfo.surfaceWidth)
+ || (mVideoFormatInfo.height > mVideoFormatInfo.surfaceHeight)
+ || isWiDiStatusChanged()
+ || (mVideoFormatInfo.actualBufferNeeded > mConfigBuffer.surfaceNumber);
+ }
+
+ if (needFlush || (rawDataMode && mSizeChanged)) {
+ mSizeChanged = false;
+ flushSurfaceBuffers();
+ return DECODE_FORMAT_CHANGE;
+ } else
+ return DECODE_SUCCESS;
+}
+
+bool VideoDecoderAVC::isNewFrame(vbp_data_h264 *data, bool equalPTS) {
+ if (data->num_pictures == 0) {
+ ETRACE("num_pictures == 0");
+ return true;
+ }
+
+ vbp_picture_data_h264* picData = data->pic_data;
+ if (picData->num_slices == 0) {
+ ETRACE("num_slices == 0");
+ return true;
+ }
+
+ bool newFrame = false;
+ uint32_t fieldFlags = VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD;
+
+ if (picData->slc_data[0].slc_parms.first_mb_in_slice != 0) {
+ // not the first slice; assume it is a continuation of a partial frame
+ // TODO: check if it is new frame boundary as the first slice may get lost in streaming case.
+ WTRACE("first_mb_in_slice != 0");
+ if (!equalPTS) {
+ // return true if different timestamp, it is a workaround here for a streaming case
+ WTRACE("different PTS, treat it as a new frame");
+ return true;
+ }
+ } else {
+ if ((picData->pic_parms->CurrPic.flags & fieldFlags) == fieldFlags) {
+ ETRACE("Current picture has both odd field and even field.");
+ }
+ // the current picture is a field or a frame and the buffer contains the first slice; check whether
+ // the current picture and the last picture form an opposite field pair
+ if (((mLastPictureFlags | picData->pic_parms->CurrPic.flags) & fieldFlags) == fieldFlags) {
+ // opposite field
+ newFrame = false;
+ WTRACE("current picture is not at frame boundary.");
+ mLastPictureFlags = 0;
+ } else {
+ newFrame = true;
+ mLastPictureFlags = 0;
+ for (uint32_t i = 0; i < data->num_pictures; i++) {
+ mLastPictureFlags |= data->pic_data[i].pic_parms->CurrPic.flags;
+ }
+ if ((mLastPictureFlags & fieldFlags) == fieldFlags) {
+ // current buffer contains both odd field and even field.
+ mLastPictureFlags = 0;
+ }
+ }
+ }
+
+ return newFrame;
+}
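
The pairing test above ORs the field flags of the last and the current picture: only when the union contains both TOP and BOTTOM do the two pictures form opposite fields of one frame. Two assumed cases spell it out:

    // Assumed cases for the isNewFrame() field-pairing test:
    //   mLastPictureFlags = TOP_FIELD, CurrPic.flags = BOTTOM_FIELD
    //     -> union has TOP | BOTTOM: opposite fields of one frame, newFrame = false
    //   mLastPictureFlags = TOP_FIELD, CurrPic.flags = TOP_FIELD
    //     -> union lacks BOTTOM: a new frame starts, newFrame = true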
+
+int32_t VideoDecoderAVC::getDPBSize(vbp_data_h264 *data) {
+ // MaxDpbFrames = min(1024 * MaxDPB / (PicWidthInMbs * FrameHeightInMbs * 384), 16)
+ struct DPBTable {
+ int32_t level;
+ float maxDPB;
+ } dpbTable[] = {
+ {9, 148.5},
+ {10, 148.5},
+ {11, 337.5},
+ {12, 891.0},
+ {13, 891.0},
+ {20, 891.0},
+ {21, 1782.0},
+ {22, 3037.5},
+ {30, 3037.5},
+ {31, 6750.0},
+ {32, 7680.0},
+ {40, 12288.0},
+ {41, 12288.0},
+ {42, 13056.0},
+ {50, 41400.0},
+ {51, 69120.0}
+ };
+
+ int32_t count = sizeof(dpbTable)/sizeof(DPBTable);
+ float maxDPB = 0;
+ for (int32_t i = 0; i < count; i++)
+ {
+ if (dpbTable[i].level == data->codec_data->level_idc) {
+ maxDPB = dpbTable[i].maxDPB;
+ break;
+ }
+ }
+
+ int32_t maxDPBSize = maxDPB * 1024 / (
+ (data->pic_data[0].pic_parms->picture_width_in_mbs_minus1 + 1) *
+ (data->pic_data[0].pic_parms->picture_height_in_mbs_minus1 + 1) *
+ 384);
+
+ if (maxDPBSize > 16) {
+ maxDPBSize = 16;
+ } else if (maxDPBSize == 0) {
+ maxDPBSize = 3;
+ }
+ if (maxDPBSize < data->codec_data->num_ref_frames) {
+ maxDPBSize = data->codec_data->num_ref_frames;
+ }
+
+ // add one extra frame for the current frame.
+ maxDPBSize += 1;
+ ITRACE("maxDPBSize = %d, num_ref_frame = %d", maxDPBSize, data->codec_data->num_ref_frames);
+ return maxDPBSize;
+}
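
A worked example of the calculation above, assuming a 1920x1088 stream at level 4.1 with num_ref_frames = 4:

    // getDPBSize() worked example (assumed stream):
    //   maxDPB = 12288.0 KB (dpbTable), PicWidthInMbs = 120, FrameHeightInMbs = 68
    //   maxDPBSize = 12288 * 1024 / (120 * 68 * 384) = 4  (truncated from ~4.02)
    //   4 <= 16 and 4 >= num_ref_frames, so no clamping applies
    //   + 1 for the current frame -> getDPBSize() returns 5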
+
+Decode_Status VideoDecoderAVC::checkHardwareCapability() {
+#ifndef USE_GEN_HW
+ VAStatus vaStatus;
+ VAConfigAttrib cfgAttribs[2];
+ cfgAttribs[0].type = VAConfigAttribMaxPictureWidth;
+ cfgAttribs[1].type = VAConfigAttribMaxPictureHeight;
+ vaStatus = vaGetConfigAttributes(mVADisplay, VAProfileH264High,
+ VAEntrypointVLD, cfgAttribs, 2);
+ CHECK_VA_STATUS("vaGetConfigAttributes");
+ if (cfgAttribs[0].value * cfgAttribs[1].value < (uint32_t)mVideoFormatInfo.width * (uint32_t)mVideoFormatInfo.height) {
+ ETRACE("hardware supports resolution %d * %d smaller than the clip resolution %d * %d",
+ cfgAttribs[0].value, cfgAttribs[1].value, mVideoFormatInfo.width, mVideoFormatInfo.height);
+ return DECODE_DRIVER_FAIL;
+ }
+#endif
+ return DECODE_SUCCESS;
+}
+
+#ifdef USE_AVC_SHORT_FORMAT
+Decode_Status VideoDecoderAVC::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+
+ vaStatus = vaGetConfigAttributes(mVADisplay, profile, VAEntrypointVLD, &attrib[1], 1);
+
+ if (attrib[1].value & VA_DEC_SLICE_MODE_BASE) {
+ ITRACE("AVC short format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+ } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+ ITRACE("AVC long format ssed");
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ } else {
+ ETRACE("Unsupported Decode Slice Mode!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
+#endif
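
getCodecSpecificConfigs() above negotiates the slice mode before creating the VA config: the driver reports a bitmask of supported modes and exactly one is selected. The core of that handshake reduced to a sketch; dpy and configId are assumed to be a valid display and an output variable:

    // Reduced form of the slice-mode negotiation above.
    VAConfigAttrib attrib = { VAConfigAttribDecSliceMode, 0 };
    vaGetConfigAttributes(dpy, profile, VAEntrypointVLD, &attrib, 1);
    attrib.value = (attrib.value & VA_DEC_SLICE_MODE_BASE)
            ? VA_DEC_SLICE_MODE_BASE      // short format supported
            : VA_DEC_SLICE_MODE_NORMAL;   // fall back to long format
    vaCreateConfig(dpy, profile, VAEntrypointVLD, &attrib, 1, &configId);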
diff --git a/videodecoder/VideoDecoderAVC.h b/videodecoder/VideoDecoderAVC.h
new file mode 100755
index 0000000..6129703
--- /dev/null
+++ b/videodecoder/VideoDecoderAVC.h
@@ -0,0 +1,84 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_H_
+#define VIDEO_DECODER_AVC_H_
+
+#include "VideoDecoderBase.h"
+
+
+class VideoDecoderAVC : public VideoDecoderBase {
+public:
+ VideoDecoderAVC(const char *mimeType);
+ virtual ~VideoDecoderAVC();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual void flush(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+ Decode_Status setReference(VASliceParameterBufferH264 *sliceParam);
+ Decode_Status updateDPB(VAPictureParameterBufferH264 *picParam);
+ Decode_Status updateReferenceFrames(vbp_picture_data_h264 *picData);
+ void removeReferenceFromDPB(VAPictureParameterBufferH264 *picParam);
+ int32_t getPOC(VAPictureH264 *pic); // Picture Order Count
+ inline VASurfaceID findSurface(VAPictureH264 *pic);
+ inline VideoSurfaceBuffer* findSurfaceBuffer(VAPictureH264 *pic);
+ inline VideoSurfaceBuffer* findRefSurfaceBuffer(VAPictureH264 *pic);
+ inline void invalidateDPB(int toggle);
+ inline void clearAsReference(int toggle);
+ Decode_Status startVA(vbp_data_h264 *data);
+ void updateFormatInfo(vbp_data_h264 *data);
+ Decode_Status handleNewSequence(vbp_data_h264 *data);
+ bool isNewFrame(vbp_data_h264 *data, bool equalPTS);
+ int32_t getDPBSize(vbp_data_h264 *data);
+ virtual Decode_Status checkHardwareCapability();
+#ifdef USE_AVC_SHORT_FORMAT
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
+#endif
+ bool isWiDiStatusChanged();
+
+private:
+ struct DecodedPictureBuffer {
+ VideoSurfaceBuffer *surfaceBuffer;
+ int32_t poc; // Picture Order Count
+ };
+
+ enum {
+ AVC_EXTRA_SURFACE_NUMBER = 11,
+ // maximum DPB (Decoded Picture Buffer) size
+ MAX_REF_NUMBER = 16,
+ DPB_SIZE = 17, // DPB_SIZE = MAX_REF_NUMBER + 1,
+ REF_LIST_SIZE = 32,
+ };
+
+ // maintain 2 ping-pong decoded picture buffers
+ DecodedPictureBuffer mDPBs[2][DPB_SIZE];
+ uint8_t mToggleDPB; // 0 or 1
+ bool mErrorConcealment;
+ uint32_t mLastPictureFlags;
+ VideoExtensionBuffer mExtensionBuffer;
+ PackedFrameData mPackedFrame;
+};
+
+
+
+#endif /* VIDEO_DECODER_AVC_H_ */
diff --git a/videodecoder/VideoDecoderBase.cpp b/videodecoder/VideoDecoderBase.cpp
new file mode 100644
index 0000000..1065cd4
--- /dev/null
+++ b/videodecoder/VideoDecoderBase.cpp
@@ -0,0 +1,1514 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderBase.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+#include <va/va_android.h>
+#include <va/va_tpi.h>
+#ifdef __SSE4_1__
+#include "use_util_sse4.h"
+#endif
+
+#define INVALID_PTS ((uint64_t)-1)
+#define MAXIMUM_POC 0x7FFFFFFF
+#define MINIMUM_POC 0x80000000
+#define ANDROID_DISPLAY_HANDLE 0x18C34078
+
+VideoDecoderBase::VideoDecoderBase(const char *mimeType, _vbp_parser_type type)
+ : mInitialized(false),
+ mLowDelay(false),
+ mDisplay(NULL),
+ mVADisplay(NULL),
+ mVAContext(VA_INVALID_ID),
+ mVAConfig(VA_INVALID_ID),
+ mVAStarted(false),
+ mCurrentPTS(INVALID_PTS),
+ mAcquiredBuffer(NULL),
+ mLastReference(NULL),
+ mForwardReference(NULL),
+ mDecodingFrame(false),
+ mSizeChanged(false),
+ mShowFrame(true),
+ mOutputWindowSize(OUTPUT_WINDOW_SIZE),
+ mRotationDegrees(0),
+ mErrReportEnabled(false),
+ mWiDiOn(false),
+ mRawOutput(false),
+ mManageReference(true),
+ mOutputMethod(OUTPUT_BY_PCT),
+ mNumSurfaces(0),
+ mSurfaceBuffers(NULL),
+ mOutputHead(NULL),
+ mOutputTail(NULL),
+ mSurfaces(NULL),
+ mVASurfaceAttrib(NULL),
+ mSurfaceUserPtr(NULL),
+ mSurfaceAcquirePos(0),
+ mNextOutputPOC(MINIMUM_POC),
+ mParserType(type),
+ mParserHandle(NULL),
+ mSignalBufferSize(0) {
+
+ memset(&mVideoFormatInfo, 0, sizeof(VideoFormatInfo));
+ memset(&mConfigBuffer, 0, sizeof(mConfigBuffer));
+ for (int i = 0; i < MAX_GRAPHIC_BUFFER_NUM; i++) {
+ mSignalBufferPre[i] = NULL;
+ }
+ pthread_mutex_init(&mLock, NULL);
+ mVideoFormatInfo.mimeType = strdup(mimeType);
+ mUseGEN = false;
+ mLibHandle = NULL;
+ mParserOpen = NULL;
+ mParserClose = NULL;
+ mParserParse = NULL;
+ mParserQuery = NULL;
+ mParserFlush = NULL;
+ mParserUpdate = NULL;
+}
+
+VideoDecoderBase::~VideoDecoderBase() {
+ pthread_mutex_destroy(&mLock);
+ stop();
+ free(mVideoFormatInfo.mimeType);
+}
+
+Decode_Status VideoDecoderBase::start(VideoConfigBuffer *buffer) {
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+ if (mParserHandle != NULL) {
+ WTRACE("Decoder has already started.");
+ return DECODE_SUCCESS;
+ }
+ mLibHandle = dlopen("libmixvbp.so", RTLD_NOW);
+ if (mLibHandle == NULL) {
+ return DECODE_NO_PARSER;
+ }
+ mParserOpen = (OpenFunc)dlsym(mLibHandle, "vbp_open");
+ mParserClose = (CloseFunc)dlsym(mLibHandle, "vbp_close");
+ mParserParse = (ParseFunc)dlsym(mLibHandle, "vbp_parse");
+ mParserQuery = (QueryFunc)dlsym(mLibHandle, "vbp_query");
+ mParserFlush = (FlushFunc)dlsym(mLibHandle, "vbp_flush");
+ if (mParserOpen == NULL || mParserClose == NULL || mParserParse == NULL
+ || mParserQuery == NULL || mParserFlush == NULL) {
+ return DECODE_NO_PARSER;
+ }
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
+ mParserUpdate = (UpdateFunc)dlsym(mLibHandle, "vbp_update");
+ if (mParserUpdate == NULL) {
+ return DECODE_NO_PARSER;
+ }
+#endif
+ if ((int32_t)mParserType != VBP_INVALID) {
+ ITRACE("mParserType = %d", mParserType);
+ if (mParserOpen(mParserType, &mParserHandle) != VBP_OK) {
+ ETRACE("Failed to open VBP parser.");
+ return DECODE_NO_PARSER;
+ }
+ }
+ // keep a copy of the config buffer (metadata only); it can be used to override VA setup parameters.
+ mConfigBuffer = *buffer;
+ mConfigBuffer.data = NULL;
+ mConfigBuffer.size = 0;
+
+ mVideoFormatInfo.width = buffer->width;
+ mVideoFormatInfo.height = buffer->height;
+ if (buffer->flag & USE_NATIVE_GRAPHIC_BUFFER) {
+ mVideoFormatInfo.surfaceWidth = buffer->graphicBufferWidth;
+ mVideoFormatInfo.surfaceHeight = buffer->graphicBufferHeight;
+ }
+ mLowDelay = buffer->flag & WANT_LOW_DELAY;
+ mRawOutput = buffer->flag & WANT_RAW_OUTPUT;
+ if (mRawOutput) {
+ WTRACE("Output is raw data.");
+ }
+
+ return DECODE_SUCCESS;
+}
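
start() above binds libmixvbp at runtime through dlopen()/dlsym() rather than linking it in, which keeps the parser library optional on the device. A minimal sketch of the same pattern; the vbp_open signature is an assumption based on its use here:

    #include <dlfcn.h>
    #include <stdint.h>

    // Assumed to match the decoder's OpenFunc typedef.
    typedef uint32_t (*OpenFunc)(uint32_t parserType, void **parserHandle);

    static bool loadParser(void **libOut, OpenFunc *openOut) {
        void *lib = dlopen("libmixvbp.so", RTLD_NOW);
        if (lib == NULL)
            return false;                    // decoder returns DECODE_NO_PARSER
        OpenFunc open = (OpenFunc)dlsym(lib, "vbp_open");
        if (open == NULL) {                  // every resolved symbol is checked
            dlclose(lib);
            return false;
        }
        *libOut = lib;
        *openOut = open;
        return true;
    }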
+
+
+Decode_Status VideoDecoderBase::reset(VideoConfigBuffer *buffer) {
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+ // if VA is already started, terminate VA as graphic buffers are reallocated by omxcodec
+ terminateVA();
+
+ // reset mConfigBuffer so it can be passed to startVA.
+ mConfigBuffer = *buffer;
+ mConfigBuffer.data = NULL;
+ mConfigBuffer.size = 0;
+
+ mVideoFormatInfo.width = buffer->width;
+ mVideoFormatInfo.height = buffer->height;
+ if (buffer->flag & USE_NATIVE_GRAPHIC_BUFFER) {
+ mVideoFormatInfo.surfaceWidth = buffer->graphicBufferWidth;
+ mVideoFormatInfo.surfaceHeight = buffer->graphicBufferHeight;
+ }
+ mVideoFormatInfo.actualBufferNeeded = mConfigBuffer.surfaceNumber;
+ mLowDelay = buffer->flag & WANT_LOW_DELAY;
+ mRawOutput = buffer->flag & WANT_RAW_OUTPUT;
+ if (mRawOutput) {
+ WTRACE("Output is raw data.");
+ }
+ return DECODE_SUCCESS;
+}
+
+
+
+void VideoDecoderBase::stop(void) {
+ terminateVA();
+
+ mCurrentPTS = INVALID_PTS;
+ mAcquiredBuffer = NULL;
+ mLastReference = NULL;
+ mForwardReference = NULL;
+ mDecodingFrame = false;
+ mSizeChanged = false;
+
+ // private variables
+ mLowDelay = false;
+ mRawOutput = false;
+ mNumSurfaces = 0;
+ mSurfaceAcquirePos = 0;
+ mNextOutputPOC = MINIMUM_POC;
+ mVideoFormatInfo.valid = false;
+ if (mParserHandle){
+ mParserClose(mParserHandle);
+ mParserHandle = NULL;
+ }
+ if (mLibHandle) {
+ dlclose(mLibHandle);
+ mLibHandle = NULL;
+ }
+}
+
+void VideoDecoderBase::flush(void) {
+ if (mVAStarted == false) {
+ // nothing to flush at this stage
+ return;
+ }
+
+ endDecodingFrame(true);
+
+ VideoSurfaceBuffer *p = mOutputHead;
+ // check if there's buffer with DRC flag in the output queue
+ while (p) {
+ if (p->renderBuffer.flag & IS_RESOLUTION_CHANGE) {
+ mSizeChanged = true;
+ break;
+ }
+ p = p->next;
+ }
+ // avoid setting mSurfaceAcquirePos to 0 as it may cause tearing
+ // (surface is still being rendered)
+ mSurfaceAcquirePos = (mSurfaceAcquirePos + 1) % mNumSurfaces;
+ mNextOutputPOC = MINIMUM_POC;
+ mCurrentPTS = INVALID_PTS;
+ mAcquiredBuffer = NULL;
+ mLastReference = NULL;
+ mForwardReference = NULL;
+ mOutputHead = NULL;
+ mOutputTail = NULL;
+ mDecodingFrame = false;
+
+ // flush vbp parser
+ if (mParserHandle && (mParserFlush(mParserHandle) != VBP_OK)) {
+ WTRACE("Failed to flush parser. Continue");
+ }
+
+ // initialize surface buffer without resetting mapped/raw data
+ initSurfaceBuffer(false);
+
+}
+
+void VideoDecoderBase::freeSurfaceBuffers(void) {
+ if (mVAStarted == false) {
+ // no surface buffers to free at this stage
+ return;
+ }
+
+ pthread_mutex_lock(&mLock);
+
+ endDecodingFrame(true);
+
+ // if VA is already started, terminate VA as graphic buffers are reallocated by omxcodec
+ terminateVA();
+
+ pthread_mutex_unlock(&mLock);
+}
+
+const VideoFormatInfo* VideoDecoderBase::getFormatInfo(void) {
+ return &mVideoFormatInfo;
+}
+
+const VideoRenderBuffer* VideoDecoderBase::getOutput(bool draining, VideoErrorBuffer *outErrBuf) {
+ VAStatus vaStatus;
+ if (mVAStarted == false) {
+ return NULL;
+ }
+ bool useGraphicBuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+
+ if (draining) {
+ // complete decoding the last frame and ignore return
+ endDecodingFrame(false);
+ }
+
+ if (mOutputHead == NULL) {
+ return NULL;
+ }
+
+ // output by position (the first buffer)
+ VideoSurfaceBuffer *outputByPos = mOutputHead;
+
+ if (mLowDelay) {
+ mOutputHead = mOutputHead->next;
+ if (mOutputHead == NULL) {
+ mOutputTail = NULL;
+ }
+ vaStatus = vaSetTimestampForSurface(mVADisplay, outputByPos->renderBuffer.surface, outputByPos->renderBuffer.timeStamp);
+ if (useGraphicBuffer && !mUseGEN) {
+ vaSyncSurface(mVADisplay, outputByPos->renderBuffer.surface);
+ fillDecodingErrors(&(outputByPos->renderBuffer));
+ }
+ if (draining && mOutputTail == NULL) {
+ outputByPos->renderBuffer.flag |= IS_EOS;
+ }
+ drainDecodingErrors(outErrBuf, &(outputByPos->renderBuffer));
+
+ return &(outputByPos->renderBuffer);
+ }
+
+ // output by presentation time stamp (the smallest pts)
+ VideoSurfaceBuffer *outputByPts = findOutputByPts();
+
+ VideoSurfaceBuffer *output = NULL;
+ if (mOutputMethod == OUTPUT_BY_POC) {
+ output = findOutputByPoc(draining);
+ } else if (mOutputMethod == OUTPUT_BY_PCT) {
+ output = findOutputByPct(draining);
+ } else {
+ ETRACE("Invalid output method.");
+ return NULL;
+ }
+
+ if (output == NULL) {
+ return NULL;
+ }
+
+ if (output != outputByPts) {
+ // swap time stamp
+ uint64_t ts = output->renderBuffer.timeStamp;
+ output->renderBuffer.timeStamp = outputByPts->renderBuffer.timeStamp;
+ outputByPts->renderBuffer.timeStamp = ts;
+ }
+
+ if (output != outputByPos) {
+ // remove this output from middle or end of the list
+ VideoSurfaceBuffer *p = outputByPos;
+ while (p->next != output) {
+ p = p->next;
+ }
+ p->next = output->next;
+ if (mOutputTail == output) {
+ mOutputTail = p;
+ }
+ } else {
+ // remove this output from head of the list
+ mOutputHead = mOutputHead->next;
+ if (mOutputHead == NULL) {
+ mOutputTail = NULL;
+ }
+ }
+ //VTRACE("Output POC %d for display (pts = %.2f)", output->pictureOrder, output->renderBuffer.timeStamp/1E6);
+ vaStatus = vaSetTimestampForSurface(mVADisplay, output->renderBuffer.surface, output->renderBuffer.timeStamp);
+
+ if (useGraphicBuffer && !mUseGEN) {
+ vaSyncSurface(mVADisplay, output->renderBuffer.surface);
+ fillDecodingErrors(&(output->renderBuffer));
+ }
+
+ if (draining && mOutputTail == NULL) {
+ output->renderBuffer.flag |= IS_EOS;
+ }
+
+ drainDecodingErrors(outErrBuf, &(output->renderBuffer));
+
+ return &(output->renderBuffer);
+}
+
+VideoSurfaceBuffer* VideoDecoderBase::findOutputByPts() {
+ // output by presentation time stamp - buffer with the smallest time stamp is output
+ VideoSurfaceBuffer *p = mOutputHead;
+ VideoSurfaceBuffer *outputByPts = NULL;
+ uint64_t pts = INVALID_PTS;
+ do {
+ if ((uint64_t)(p->renderBuffer.timeStamp) <= pts) {
+ // find buffer with the smallest PTS
+ pts = p->renderBuffer.timeStamp;
+ outputByPts = p;
+ }
+ p = p->next;
+ } while (p != NULL);
+
+ return outputByPts;
+}
+
+VideoSurfaceBuffer* VideoDecoderBase::findOutputByPct(bool draining) {
+ // output by picture coding type (PCT)
+ // if there is more than one reference frame, the first reference frame is output; otherwise,
+ // output a non-reference frame if there is any.
+
+ VideoSurfaceBuffer *p = mOutputHead;
+ VideoSurfaceBuffer *outputByPct = NULL;
+ int32_t reference = 0;
+ do {
+ if (p->referenceFrame) {
+ reference++;
+ if (reference > 1) {
+ // mOutputHead must be a reference frame
+ outputByPct = mOutputHead;
+ break;
+ }
+ } else {
+ // first non-reference frame
+ outputByPct = p;
+ break;
+ }
+ p = p->next;
+ } while (p != NULL);
+
+ if (outputByPct == NULL && draining) {
+ outputByPct = mOutputHead;
+ }
+ return outputByPct;
+}
+
+#if 0
+VideoSurfaceBuffer* VideoDecoderBase::findOutputByPoc(bool draining) {
+ // output by picture order count (POC)
+ // Output criteria:
+ // if there is IDR frame (POC == 0), all the frames before IDR must be output;
+ // Otherwise, if draining flag is set or list is full, frame with the least POC is output;
+ // Otherwise, NOTHING is output
+
+ int32_t dpbFullness = 0;
+ for (int32_t i = 0; i < mNumSurfaces; i++) {
+ // count num of reference frames
+ if (mSurfaceBuffers[i].asReferernce) {
+ dpbFullness++;
+ }
+ }
+
+ if (mAcquiredBuffer && mAcquiredBuffer->asReferernce) {
+ // frame is being decoded and is not ready for output yet
+ dpbFullness--;
+ }
+
+ VideoSurfaceBuffer *p = mOutputHead;
+ while (p != NULL) {
+ // count dpbFullness with non-reference frame in the output queue
+ if (p->asReferernce == false) {
+ dpbFullness++;
+ }
+ p = p->next;
+ }
+
+Retry:
+ p = mOutputHead;
+ VideoSurfaceBuffer *outputByPoc = NULL;
+ int32_t count = 0;
+ int32_t poc = MAXIMUM_POC;
+
+ do {
+ if (p->pictureOrder == 0) {
+ // output picture with the least POC before IDR
+ if (outputByPoc != NULL) {
+ mNextOutputPOC = outputByPoc->pictureOrder + 1;
+ return outputByPoc;
+ } else {
+ mNextOutputPOC = MINIMUM_POC;
+ }
+ }
+
+ // POC of the output candidate must not be less than mNextOutputPOC
+ if (p->pictureOrder < mNextOutputPOC) {
+ break;
+ }
+
+ if (p->pictureOrder < poc) {
+ // update the least POC.
+ poc = p->pictureOrder;
+ outputByPoc = p;
+ }
+ count++;
+ p = p->next;
+ } while (p != NULL && count < mOutputWindowSize);
+
+ if (draining == false && dpbFullness < mOutputWindowSize) {
+ // list is not full and we are not in draining state
+ // if DPB is already full, one frame must be output
+ return NULL;
+ }
+
+ if (outputByPoc == NULL) {
+ mNextOutputPOC = MINIMUM_POC;
+ goto Retry;
+ }
+
+ // for debugging purpose
+ if (outputByPoc->pictureOrder != 0 && outputByPoc->pictureOrder < mNextOutputPOC) {
+ ETRACE("Output POC is not incremental, expected %d, actual %d", mNextOutputPOC, outputByPoc->pictureOrder);
+ //gaps_in_frame_num_value_allowed_flag is not currently supported
+ }
+
+ mNextOutputPOC = outputByPoc->pictureOrder + 1;
+
+ return outputByPoc;
+}
+#else
+VideoSurfaceBuffer* VideoDecoderBase::findOutputByPoc(bool draining) {
+ VideoSurfaceBuffer *output = NULL;
+ VideoSurfaceBuffer *p = mOutputHead;
+ int32_t count = 0;
+ int32_t poc = MAXIMUM_POC;
+ VideoSurfaceBuffer *outputleastpoc = mOutputHead;
+ do {
+ count++;
+ if (p->pictureOrder == 0) {
+ // any picture before this POC (new IDR) must be output
+ if (output == NULL) {
+ mNextOutputPOC = MINIMUM_POC;
+ // looking for any POC with negative value
+ } else {
+ mNextOutputPOC = output->pictureOrder + 1;
+ break;
+ }
+ }
+ if (p->pictureOrder < poc && p->pictureOrder >= mNextOutputPOC) {
+ // this POC meets the output criteria.
+ poc = p->pictureOrder;
+ output = p;
+ outputleastpoc = p;
+ }
+ if (poc == mNextOutputPOC || count == mOutputWindowSize) {
+ if (output != NULL) {
+ // this indicates two cases:
+ // 1) the next output POC is found.
+ // 2) output queue is full and there is at least one buffer meeting the output criteria.
+ mNextOutputPOC = output->pictureOrder + 1;
+ break;
+ } else {
+ // this indicates output queue is full and no buffer in the queue meets the output criteria
+ // restart processing as queue is FULL and output criteria is changed. (next output POC is 0)
+ mNextOutputPOC = MINIMUM_POC;
+ count = 0;
+ poc = MAXIMUM_POC;
+ p = mOutputHead;
+ continue;
+ }
+ }
+ if (p->next == NULL) {
+ output = NULL;
+ }
+
+ p = p->next;
+ } while (p != NULL);
+
+ if (draining == true && output == NULL) {
+ output = outputleastpoc;
+ }
+
+ return output;
+}
+#endif
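
A short trace makes the output criteria of the active findOutputByPoc() concrete; the queue contents and window size below are assumed:

    // Assumed trace, mOutputWindowSize = 4, queue POCs: 2, 4, 0, 6 (0 = new IDR):
    //   call 1: POC 2 is the least candidate; hitting the IDR (POC 0) with a
    //           candidate pending forces it out -> return 2, mNextOutputPOC = 3
    //   call 2: same logic -> return 4, mNextOutputPOC = 5
    //   call 3: queue is now {0, 6}; the scan reaches the tail without meeting
    //           any output criterion, so nothing is output until the window
    //           fills or draining begins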
+
+bool VideoDecoderBase::checkBufferAvail(void) {
+ if (!mInitialized) {
+ if ((mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER) == 0) {
+ return true;
+ }
+ for (int i = 0; i < MAX_GRAPHIC_BUFFER_NUM; i++) {
+ if (mSignalBufferPre[i] != NULL) {
+ return true;
+ }
+ }
+ return false;
+ }
+ // check whether there is buffer available for decoding
+ // TODO: check frame being referenced for frame skipping
+ VideoSurfaceBuffer *buffer = NULL;
+ for (int32_t i = 0; i < mNumSurfaces; i++) {
+ buffer = mSurfaceBuffers + i;
+
+ if (buffer->asReferernce == false &&
+ buffer->renderBuffer.renderDone == true) {
+ querySurfaceRenderStatus(buffer);
+ if (buffer->renderBuffer.driverRenderDone == true)
+ return true;
+ }
+ }
+ return false;
+}
+
+Decode_Status VideoDecoderBase::acquireSurfaceBuffer(void) {
+ if (mVAStarted == false) {
+ return DECODE_FAIL;
+ }
+
+ if (mAcquiredBuffer != NULL) {
+ ETRACE("mAcquiredBuffer is not NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+
+ int nextAcquire = mSurfaceAcquirePos;
+ VideoSurfaceBuffer *acquiredBuffer = NULL;
+ bool acquired = false;
+
+ while (acquired == false) {
+ acquiredBuffer = mSurfaceBuffers + nextAcquire;
+
+ querySurfaceRenderStatus(acquiredBuffer);
+
+ if (acquiredBuffer->asReferernce == false && acquiredBuffer->renderBuffer.renderDone == true && acquiredBuffer->renderBuffer.driverRenderDone == true) {
+ // this is a potential buffer for acquisition; check if it is referenced by another surface for frame skipping
+ VideoSurfaceBuffer *temp;
+ acquired = true;
+ for (int i = 0; i < mNumSurfaces; i++) {
+ if (i == nextAcquire) {
+ continue;
+ }
+ temp = mSurfaceBuffers + i;
+ // use mSurfaces[nextAcquire] instead of acquiredBuffer->renderBuffer.surface as it's the actual surface to use.
+ if (temp->renderBuffer.surface == mSurfaces[nextAcquire] &&
+ temp->renderBuffer.renderDone == false) {
+ ITRACE("Surface is referenced by other surface buffer.");
+ acquired = false;
+ break;
+ }
+ }
+ }
+ if (acquired) {
+ break;
+ }
+ nextAcquire++;
+ if (nextAcquire == mNumSurfaces) {
+ nextAcquire = 0;
+ }
+ if (nextAcquire == mSurfaceAcquirePos) {
+ return DECODE_NO_SURFACE;
+ }
+ }
+
+ if (acquired == false) {
+ return DECODE_NO_SURFACE;
+ }
+
+ mAcquiredBuffer = acquiredBuffer;
+ mSurfaceAcquirePos = nextAcquire;
+
+    // set the surface again as it may have been reset by a skipped frame.
+    // a skipped frame is a "non-coded frame" and the decoder duplicates the previous reference frame as the output.
+ mAcquiredBuffer->renderBuffer.surface = mSurfaces[mSurfaceAcquirePos];
+ if (mSurfaceUserPtr && mAcquiredBuffer->mappedData) {
+ mAcquiredBuffer->mappedData->data = mSurfaceUserPtr[mSurfaceAcquirePos];
+ }
+ mAcquiredBuffer->renderBuffer.timeStamp = INVALID_PTS;
+ mAcquiredBuffer->renderBuffer.display = mVADisplay;
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.renderDone = false;
+ mAcquiredBuffer->asReferernce = false;
+ mAcquiredBuffer->renderBuffer.errBuf.errorNumber = 0;
+ mAcquiredBuffer->renderBuffer.errBuf.timeStamp = INVALID_PTS;
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::outputSurfaceBuffer(void) {
+ Decode_Status status;
+ if (mAcquiredBuffer == NULL) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+
+ if (mRawOutput) {
+ status = getRawDataFromSurface();
+ CHECK_STATUS();
+ }
+
+    // the frame is successfully decoded to the current surface; it is ready for output
+ if (mShowFrame) {
+ mAcquiredBuffer->renderBuffer.renderDone = false;
+ } else {
+ mAcquiredBuffer->renderBuffer.renderDone = true;
+ }
+
+    // the decoder must set the "asReferernce" and "referenceFrame" flags properly
+
+ // update reference frames
+ if (mAcquiredBuffer->referenceFrame) {
+ if (mManageReference) {
+            // managing references for MPEG4/H.263/WMV.
+            // AVC manages reference frames differently (via the DPB)
+ if (mForwardReference != NULL) {
+                // this forward reference is no longer needed
+ mForwardReference->asReferernce = false;
+ }
+            // forward reference for either P or B frame prediction
+ mForwardReference = mLastReference;
+ mAcquiredBuffer->asReferernce = true;
+ }
+
+ // the last reference frame.
+ mLastReference = mAcquiredBuffer;
+ }
+ // add to the output list
+ if (mShowFrame) {
+ if (mOutputHead == NULL) {
+ mOutputHead = mAcquiredBuffer;
+ } else {
+ mOutputTail->next = mAcquiredBuffer;
+ }
+ mOutputTail = mAcquiredBuffer;
+ mOutputTail->next = NULL;
+ }
+
+ //VTRACE("Pushing POC %d to queue (pts = %.2f)", mAcquiredBuffer->pictureOrder, mAcquiredBuffer->renderBuffer.timeStamp/1E6);
+
+ mAcquiredBuffer = NULL;
+    mSurfaceAcquirePos = (mSurfaceAcquirePos + 1) % mNumSurfaces;
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::releaseSurfaceBuffer(void) {
+ if (mAcquiredBuffer == NULL) {
+        // this is a harmless error
+ return DECODE_SUCCESS;
+ }
+
+    // the frame was not decoded to the acquired buffer; the current surface is invalid and can't be output.
+ mAcquiredBuffer->asReferernce = false;
+ mAcquiredBuffer->renderBuffer.renderDone = true;
+ mAcquiredBuffer = NULL;
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderBase::flushSurfaceBuffers(void) {
+ endDecodingFrame(true);
+ VideoSurfaceBuffer *p = NULL;
+ while (mOutputHead) {
+ mOutputHead->renderBuffer.renderDone = true;
+ p = mOutputHead;
+ mOutputHead = mOutputHead->next;
+ p->next = NULL;
+ }
+ mOutputHead = NULL;
+ mOutputTail = NULL;
+}
+
+Decode_Status VideoDecoderBase::endDecodingFrame(bool dropFrame) {
+ Decode_Status status = DECODE_SUCCESS;
+ VAStatus vaStatus;
+
+ if (mDecodingFrame == false) {
+ if (mAcquiredBuffer != NULL) {
+ //ETRACE("mAcquiredBuffer is not NULL. Implementation bug.");
+ releaseSurfaceBuffer();
+ status = DECODE_FAIL;
+ }
+ return status;
+ }
+ // return through exit label to reset mDecodingFrame
+ if (mAcquiredBuffer == NULL) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ status = DECODE_FAIL;
+ goto exit;
+ }
+
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ if (vaStatus != VA_STATUS_SUCCESS) {
+ releaseSurfaceBuffer();
+ ETRACE("vaEndPicture failed. vaStatus = %d", vaStatus);
+ status = DECODE_DRIVER_FAIL;
+ goto exit;
+ }
+
+ if (dropFrame) {
+ // we are asked to drop this decoded picture
+ VTRACE("Frame dropped in endDecodingFrame");
+ vaStatus = vaSyncSurface(mVADisplay, mAcquiredBuffer->renderBuffer.surface);
+ releaseSurfaceBuffer();
+ goto exit;
+ }
+ status = outputSurfaceBuffer();
+ // fall through
+exit:
+ mDecodingFrame = false;
+ return status;
+}
+
+
+Decode_Status VideoDecoderBase::setupVA(uint32_t numSurface, VAProfile profile, uint32_t numExtraSurface) {
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ Decode_Status status;
+ VAConfigAttrib attrib;
+
+ if (mVAStarted) {
+ return DECODE_SUCCESS;
+ }
+
+ mRotationDegrees = 0;
+ if (mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER){
+#ifdef TARGET_HAS_VPP
+ if (mVideoFormatInfo.actualBufferNeeded > mConfigBuffer.surfaceNumber - mConfigBuffer.vppBufferNum)
+#else
+ if (mVideoFormatInfo.actualBufferNeeded > mConfigBuffer.surfaceNumber)
+#endif
+ return DECODE_FORMAT_CHANGE;
+
+ numSurface = mConfigBuffer.surfaceNumber;
+        // if the format has changed in USE_NATIVE_GRAPHIC_BUFFER mode,
+        // we cannot set up VA here when the graphic buffer resolution is smaller than the resolution the decoder really needs
+ if (mSizeChanged) {
+ if (mVideoFormatInfo.surfaceWidth < mVideoFormatInfo.width || mVideoFormatInfo.surfaceHeight < mVideoFormatInfo.height) {
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ }
+ }
+ }
+
+ // TODO: validate profile
+ if (numSurface == 0) {
+ return DECODE_FAIL;
+ }
+
+ if (mConfigBuffer.flag & HAS_MINIMUM_SURFACE_NUMBER) {
+ if (numSurface < mConfigBuffer.surfaceNumber) {
+            WTRACE("number of surfaces to allocate %d is less than the minimum required %d",
+ numSurface, mConfigBuffer.surfaceNumber);
+ numSurface = mConfigBuffer.surfaceNumber;
+ }
+ }
+
+ if (mVADisplay != NULL) {
+ ETRACE("VA is partially started.");
+ return DECODE_FAIL;
+ }
+
+ // Display is defined as "unsigned int"
+#ifndef USE_HYBRID_DRIVER
+ mDisplay = new Display;
+ *mDisplay = ANDROID_DISPLAY_HANDLE;
+#else
+ if (profile >= VAProfileH264Baseline && profile <= VAProfileVC1Advanced) {
+ ITRACE("Using GEN driver");
+ mDisplay = "libva_driver_name=i965";
+ mUseGEN = true;
+ } else {
+ ITRACE("Using PVR driver");
+ mDisplay = "libva_driver_name=pvr";
+ mUseGEN = false;
+ }
+
+#endif
+ mVADisplay = vaGetDisplay(mDisplay);
+ if (mVADisplay == NULL) {
+ ETRACE("vaGetDisplay failed.");
+ return DECODE_DRIVER_FAIL;
+ }
+
+ int majorVersion, minorVersion;
+ vaStatus = vaInitialize(mVADisplay, &majorVersion, &minorVersion);
+ CHECK_VA_STATUS("vaInitialize");
+
+ if ((int32_t)profile != VAProfileSoftwareDecoding) {
+
+ status = checkHardwareCapability();
+ CHECK_STATUS("checkHardwareCapability");
+
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
+ status = getCodecSpecificConfigs(profile, &mVAConfig);
+ CHECK_STATUS("getCodecSpecificAttributes");
+#else
+        // we are requesting RT attributes
+ attrib.type = VAConfigAttribRTFormat;
+ attrib.value = VA_RT_FORMAT_YUV420;
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib,
+ 1,
+ &mVAConfig);
+ CHECK_VA_STATUS("vaCreateConfig");
+#endif
+ }
+
+ mNumSurfaces = numSurface;
+ mNumExtraSurfaces = numExtraSurface;
+ mSurfaces = new VASurfaceID [mNumSurfaces + mNumExtraSurfaces];
+ mExtraSurfaces = mSurfaces + mNumSurfaces;
+ if (mSurfaces == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+
+ setRenderRect();
+
+ int32_t format = VA_RT_FORMAT_YUV420;
+ if (mConfigBuffer.flag & WANT_SURFACE_PROTECTION) {
+#ifndef USE_AVC_SHORT_FORMAT
+ format |= VA_RT_FORMAT_PROTECTED;
+ WTRACE("Surface is protected.");
+#endif
+ }
+ if (mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER) {
+ VASurfaceAttrib attribs[2];
+ mVASurfaceAttrib = new VASurfaceAttribExternalBuffers;
+ if (mVASurfaceAttrib == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mVASurfaceAttrib->buffers= (unsigned long *)malloc(sizeof(unsigned long)*mNumSurfaces);
+ if (mVASurfaceAttrib->buffers == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
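+        // NV12 layout assumed here: a full-resolution Y plane followed by an
+        // interleaved half-height UV plane, both using the gralloc stride as
+        // pitch, so the total size is stride * height * 3 / 2.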
+ mVASurfaceAttrib->num_buffers = mNumSurfaces;
+ mVASurfaceAttrib->pixel_format = VA_FOURCC_NV12;
+ mVASurfaceAttrib->width = mVideoFormatInfo.surfaceWidth;
+ mVASurfaceAttrib->height = mVideoFormatInfo.surfaceHeight;
+ mVASurfaceAttrib->data_size = mConfigBuffer.graphicBufferStride * mVideoFormatInfo.surfaceHeight * 1.5;
+ mVASurfaceAttrib->num_planes = 2;
+ mVASurfaceAttrib->pitches[0] = mConfigBuffer.graphicBufferStride;
+ mVASurfaceAttrib->pitches[1] = mConfigBuffer.graphicBufferStride;
+ mVASurfaceAttrib->pitches[2] = 0;
+ mVASurfaceAttrib->pitches[3] = 0;
+ mVASurfaceAttrib->offsets[0] = 0;
+ mVASurfaceAttrib->offsets[1] = mConfigBuffer.graphicBufferStride * mVideoFormatInfo.surfaceHeight;
+ mVASurfaceAttrib->offsets[2] = 0;
+ mVASurfaceAttrib->offsets[3] = 0;
+ mVASurfaceAttrib->private_data = (void *)mConfigBuffer.nativeWindow;
+ mVASurfaceAttrib->flags = VA_SURFACE_ATTRIB_MEM_TYPE_ANDROID_GRALLOC;
+ if (mConfigBuffer.flag & USE_TILING_MEMORY)
+ mVASurfaceAttrib->flags |= VA_SURFACE_EXTBUF_DESC_ENABLE_TILING;
+
+ for (int i = 0; i < mNumSurfaces; i++) {
+ mVASurfaceAttrib->buffers[i] = (unsigned long)mConfigBuffer.graphicBufferHandler[i];
+ }
+
+ attribs[0].type = (VASurfaceAttribType)VASurfaceAttribMemoryType;
+ attribs[0].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[0].value.type = VAGenericValueTypeInteger;
+ attribs[0].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_ANDROID_GRALLOC;
+
+ attribs[1].type = (VASurfaceAttribType)VASurfaceAttribExternalBufferDescriptor;
+ attribs[1].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[1].value.type = VAGenericValueTypePointer;
+ attribs[1].value.value.p = (void *)mVASurfaceAttrib;
+
+ vaStatus = vaCreateSurfaces(
+ mVADisplay,
+ format,
+ mVideoFormatInfo.surfaceWidth,
+ mVideoFormatInfo.surfaceHeight,
+ mSurfaces,
+ mNumSurfaces,
+ attribs,
+ 2);
+
+ } else {
+ vaStatus = vaCreateSurfaces(
+ mVADisplay,
+ format,
+ mVideoFormatInfo.width,
+ mVideoFormatInfo.height,
+ mSurfaces,
+ mNumSurfaces,
+ NULL,
+ 0);
+ mVideoFormatInfo.surfaceWidth = mVideoFormatInfo.width;
+ mVideoFormatInfo.surfaceHeight = mVideoFormatInfo.height;
+ }
+ CHECK_VA_STATUS("vaCreateSurfaces");
+
+ if (mNumExtraSurfaces != 0) {
+ vaStatus = vaCreateSurfaces(
+ mVADisplay,
+ format,
+ mVideoFormatInfo.surfaceWidth,
+ mVideoFormatInfo.surfaceHeight,
+ mExtraSurfaces,
+ mNumExtraSurfaces,
+ NULL,
+ 0);
+ CHECK_VA_STATUS("vaCreateSurfaces");
+ }
+
+ mVideoFormatInfo.surfaceNumber = mNumSurfaces;
+ mVideoFormatInfo.ctxSurfaces = mSurfaces;
+
+ if ((int32_t)profile != VAProfileSoftwareDecoding) {
+ vaStatus = vaCreateContext(
+ mVADisplay,
+ mVAConfig,
+ mVideoFormatInfo.surfaceWidth,
+ mVideoFormatInfo.surfaceHeight,
+ 0,
+ mSurfaces,
+ mNumSurfaces + mNumExtraSurfaces,
+ &mVAContext);
+ CHECK_VA_STATUS("vaCreateContext");
+ }
+
+ mSurfaceBuffers = new VideoSurfaceBuffer [mNumSurfaces];
+ if (mSurfaceBuffers == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+ initSurfaceBuffer(true);
+
+ if ((int32_t)profile == VAProfileSoftwareDecoding) {
+ // derive user pointer from surface for direct access
+ status = mapSurface();
+ CHECK_STATUS("mapSurface")
+ }
+
+ setRotationDegrees(mConfigBuffer.rotationDegrees);
+
+ mVAStarted = true;
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::terminateVA(void) {
+ mSignalBufferSize = 0;
+ for (int i = 0; i < MAX_GRAPHIC_BUFFER_NUM; i++) {
+ mSignalBufferPre[i] = NULL;
+ }
+
+ if (mVAStarted == false) {
+ // VA hasn't been started yet
+ return DECODE_SUCCESS;
+ }
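+    // Tear down in roughly the reverse order of setupVA: surface buffers and
+    // their raw/mapped data first, then external buffer attributes, surfaces,
+    // context, config, and finally the display.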
+
+ if (mSurfaceBuffers) {
+ for (int32_t i = 0; i < mNumSurfaces; i++) {
+ if (mSurfaceBuffers[i].renderBuffer.rawData) {
+ if (mSurfaceBuffers[i].renderBuffer.rawData->data) {
+ delete [] mSurfaceBuffers[i].renderBuffer.rawData->data;
+ }
+ delete mSurfaceBuffers[i].renderBuffer.rawData;
+ }
+ if (mSurfaceBuffers[i].mappedData) {
+ // don't delete data pointer as it is mapped from surface
+ delete mSurfaceBuffers[i].mappedData;
+ }
+ }
+ delete [] mSurfaceBuffers;
+ mSurfaceBuffers = NULL;
+ }
+
+ if (mVASurfaceAttrib) {
+ if (mVASurfaceAttrib->buffers) free(mVASurfaceAttrib->buffers);
+ delete mVASurfaceAttrib;
+ mVASurfaceAttrib = NULL;
+ }
+
+
+ if (mSurfaceUserPtr) {
+ delete [] mSurfaceUserPtr;
+ mSurfaceUserPtr = NULL;
+ }
+
+    if (mSurfaces) {
+ vaDestroySurfaces(mVADisplay, mSurfaces, mNumSurfaces + mNumExtraSurfaces);
+ delete [] mSurfaces;
+ mSurfaces = NULL;
+ }
+
+ if (mVAContext != VA_INVALID_ID) {
+ vaDestroyContext(mVADisplay, mVAContext);
+ mVAContext = VA_INVALID_ID;
+ }
+
+ if (mVAConfig != VA_INVALID_ID) {
+ vaDestroyConfig(mVADisplay, mVAConfig);
+ mVAConfig = VA_INVALID_ID;
+ }
+
+ if (mVADisplay) {
+ vaTerminate(mVADisplay);
+ mVADisplay = NULL;
+ }
+
+ if (mDisplay) {
+#ifndef USE_HYBRID_DRIVER
+ delete mDisplay;
+#endif
+ mDisplay = NULL;
+ }
+
+ mVAStarted = false;
+ mInitialized = false;
+ mErrReportEnabled = false;
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::parseBuffer(uint8_t *buffer, int32_t size, bool config, void** vbpData) {
+ // DON'T check if mVAStarted == true
+ if (mParserHandle == NULL) {
+ return DECODE_NO_PARSER;
+ }
+
+ uint32_t vbpStatus;
+ if (buffer == NULL || size <= 0) {
+ return DECODE_INVALID_DATA;
+ }
+
+ uint8_t configFlag = config ? 1 : 0;
+ vbpStatus = mParserParse(mParserHandle, buffer, size, configFlag);
+ CHECK_VBP_STATUS("vbp_parse");
+
+ vbpStatus = mParserQuery(mParserHandle, vbpData);
+ CHECK_VBP_STATUS("vbp_query");
+
+ return DECODE_SUCCESS;
+}
+
+
+
+Decode_Status VideoDecoderBase::mapSurface(void) {
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ VAImage image;
+ uint8_t *userPtr;
+ mSurfaceUserPtr = new uint8_t* [mNumSurfaces];
+ if (mSurfaceUserPtr == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+
+ for (int32_t i = 0; i< mNumSurfaces; i++) {
+ vaStatus = vaDeriveImage(mVADisplay, mSurfaces[i], &image);
+ CHECK_VA_STATUS("vaDeriveImage");
+ vaStatus = vaMapBuffer(mVADisplay, image.buf, (void**)&userPtr);
+ CHECK_VA_STATUS("vaMapBuffer");
+ mSurfaceUserPtr[i] = userPtr;
+ mSurfaceBuffers[i].mappedData = new VideoFrameRawData;
+ if (mSurfaceBuffers[i].mappedData == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+ mSurfaceBuffers[i].mappedData->own = false; // derived from surface so can't be released
+ mSurfaceBuffers[i].mappedData->data = NULL; // specified during acquireSurfaceBuffer
+ mSurfaceBuffers[i].mappedData->fourcc = image.format.fourcc;
+ mSurfaceBuffers[i].mappedData->width = mVideoFormatInfo.width;
+ mSurfaceBuffers[i].mappedData->height = mVideoFormatInfo.height;
+ mSurfaceBuffers[i].mappedData->size = image.data_size;
+ for (int pi = 0; pi < 3; pi++) {
+ mSurfaceBuffers[i].mappedData->pitch[pi] = image.pitches[pi];
+ mSurfaceBuffers[i].mappedData->offset[pi] = image.offsets[pi];
+ }
+ // debug information
+ if (image.pitches[0] != image.pitches[1] ||
+ image.width != mVideoFormatInfo.width ||
+ image.height != mVideoFormatInfo.height ||
+ image.offsets[0] != 0) {
+ WTRACE("Unexpected VAImage format, w = %d, h = %d, offset = %d", image.width, image.height, image.offsets[0]);
+ }
+ // TODO: do we need to unmap buffer?
+ //vaStatus = vaUnmapBuffer(mVADisplay, image.buf);
+        //CHECK_VA_STATUS("vaUnmapBuffer");
+        vaStatus = vaDestroyImage(mVADisplay, image.image_id);
+ CHECK_VA_STATUS("vaDestroyImage");
+
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::getRawDataFromSurface(VideoRenderBuffer *renderBuffer, uint8_t *pRawData, uint32_t *pSize, bool internal) {
+ if (internal) {
+ if (mAcquiredBuffer == NULL) {
+ return DECODE_FAIL;
+ }
+ renderBuffer = &(mAcquiredBuffer->renderBuffer);
+ }
+
+ VAStatus vaStatus;
+ VAImageFormat imageFormat;
+ VAImage vaImage;
+ vaStatus = vaSyncSurface(renderBuffer->display, renderBuffer->surface);
+ CHECK_VA_STATUS("vaSyncSurface");
+
+ vaStatus = vaDeriveImage(renderBuffer->display, renderBuffer->surface, &vaImage);
+ CHECK_VA_STATUS("vaDeriveImage");
+
+ void *pBuf = NULL;
+ vaStatus = vaMapBuffer(renderBuffer->display, vaImage.buf, &pBuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+
+ // size in NV12 format
+ uint32_t cropWidth = mVideoFormatInfo.width - (mVideoFormatInfo.cropLeft + mVideoFormatInfo.cropRight);
+ uint32_t cropHeight = mVideoFormatInfo.height - (mVideoFormatInfo.cropBottom + mVideoFormatInfo.cropTop);
+ int32_t size = cropWidth * cropHeight * 3 / 2;
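+    // e.g. a 1920x1088 surface cropped to 1920x1080 yields
+    // 1920 * 1080 * 3 / 2 = 3,110,400 bytes of NV12 data.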
+
+ if (internal) {
+ VideoFrameRawData *rawData = NULL;
+ if (renderBuffer->rawData == NULL) {
+ rawData = new VideoFrameRawData;
+ if (rawData == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+ memset(rawData, 0, sizeof(VideoFrameRawData));
+ renderBuffer->rawData = rawData;
+ } else {
+ rawData = renderBuffer->rawData;
+ }
+
+ if (rawData->data != NULL && rawData->size != size) {
+ delete [] rawData->data;
+ rawData->data = NULL;
+ rawData->size = 0;
+ }
+ if (rawData->data == NULL) {
+ rawData->data = new uint8_t [size];
+ if (rawData->data == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+ }
+
+ rawData->own = true; // allocated by this library
+ rawData->width = cropWidth;
+ rawData->height = cropHeight;
+ rawData->pitch[0] = cropWidth;
+ rawData->pitch[1] = cropWidth;
+ rawData->pitch[2] = 0; // interleaved U/V, two planes
+ rawData->offset[0] = 0;
+ rawData->offset[1] = cropWidth * cropHeight;
+ rawData->offset[2] = cropWidth * cropHeight * 3 / 2;
+ rawData->size = size;
+ rawData->fourcc = 'NV12';
+
+ pRawData = rawData->data;
+ } else {
+ *pSize = size;
+ }
+
+ if (size == (int32_t)vaImage.data_size) {
+#ifdef __SSE4_1__
+ stream_memcpy(pRawData, pBuf, size);
+#else
+ memcpy(pRawData, pBuf, size);
+#endif
+ } else {
+ // copy Y data
+ uint8_t *src = (uint8_t*)pBuf;
+ uint8_t *dst = pRawData;
+ uint32_t row = 0;
+ for (row = 0; row < cropHeight; row++) {
+#ifdef __SSE4_1__
+ stream_memcpy(dst, src, cropWidth);
+#else
+ memcpy(dst, src, cropWidth);
+#endif
+ dst += cropWidth;
+ src += vaImage.pitches[0];
+ }
+ // copy interleaved V and U data
+ src = (uint8_t*)pBuf + vaImage.offsets[1];
+ for (row = 0; row < cropHeight / 2; row++) {
+#ifdef __SSE4_1__
+ stream_memcpy(dst, src, cropWidth);
+#else
+ memcpy(dst, src, cropWidth);
+#endif
+ dst += cropWidth;
+ src += vaImage.pitches[1];
+ }
+ }
+
+ vaStatus = vaUnmapBuffer(renderBuffer->display, vaImage.buf);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ vaStatus = vaDestroyImage(renderBuffer->display, vaImage.image_id);
+ CHECK_VA_STATUS("vaDestroyImage");
+
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderBase::initSurfaceBuffer(bool reset) {
+ bool useGraphicBuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+ if (useGraphicBuffer && reset) {
+ pthread_mutex_lock(&mLock);
+ }
+ for (int32_t i = 0; i < mNumSurfaces; i++) {
+ mSurfaceBuffers[i].renderBuffer.display = mVADisplay;
+ mSurfaceBuffers[i].renderBuffer.surface = VA_INVALID_SURFACE; // set in acquireSurfaceBuffer
+ mSurfaceBuffers[i].renderBuffer.flag = 0;
+ mSurfaceBuffers[i].renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ mSurfaceBuffers[i].renderBuffer.timeStamp = 0;
+ mSurfaceBuffers[i].referenceFrame = false;
+ mSurfaceBuffers[i].asReferernce= false;
+ mSurfaceBuffers[i].pictureOrder = 0;
+ mSurfaceBuffers[i].next = NULL;
+ if (reset == true) {
+ mSurfaceBuffers[i].renderBuffer.rawData = NULL;
+ mSurfaceBuffers[i].mappedData = NULL;
+ }
+ if (useGraphicBuffer) {
+ if (reset) {
+ mSurfaceBuffers[i].renderBuffer.graphicBufferHandle = mConfigBuffer.graphicBufferHandler[i];
+ mSurfaceBuffers[i].renderBuffer.renderDone = false; //default false
+ for (uint32_t j = 0; j < mSignalBufferSize; j++) {
+                if (mSignalBufferPre[j] != NULL && mSignalBufferPre[j] == mSurfaceBuffers[i].renderBuffer.graphicBufferHandle) {
+ mSurfaceBuffers[i].renderBuffer.renderDone = true;
+ VTRACE("initSurfaceBuffer set renderDone = true index = %d", i);
+ mSignalBufferPre[j] = NULL;
+ break;
+ }
+ }
+ } else {
+ mSurfaceBuffers[i].renderBuffer.renderDone = false;
+ }
+ } else {
+ mSurfaceBuffers[i].renderBuffer.graphicBufferHandle = NULL;
+ mSurfaceBuffers[i].renderBuffer.renderDone = true;
+ }
+ mSurfaceBuffers[i].renderBuffer.graphicBufferIndex = i;
+ }
+
+ if (useGraphicBuffer && reset) {
+ mInitialized = true;
+ mSignalBufferSize = 0;
+ pthread_mutex_unlock(&mLock);
+ }
+}
+
+Decode_Status VideoDecoderBase::signalRenderDone(void * graphichandler) {
+ if (graphichandler == NULL) {
+ return DECODE_SUCCESS;
+ }
+ pthread_mutex_lock(&mLock);
+ int i = 0;
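+    // Before the surface buffers are initialized, handles are queued in
+    // mSignalBufferPre and consumed later by initSurfaceBuffer(); afterwards
+    // the matching surface is flagged renderDone directly.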
+ if (!mInitialized) {
+ if (mSignalBufferSize >= MAX_GRAPHIC_BUFFER_NUM) {
+ pthread_mutex_unlock(&mLock);
+ return DECODE_INVALID_DATA;
+ }
+ mSignalBufferPre[mSignalBufferSize++] = graphichandler;
+ VTRACE("SignalRenderDoneFlag mInitialized = false graphichandler = %p, mSignalBufferSize = %d", graphichandler, mSignalBufferSize);
+ } else {
+ if (!(mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER)) {
+ pthread_mutex_unlock(&mLock);
+ return DECODE_SUCCESS;
+ }
+ for (i = 0; i < mNumSurfaces; i++) {
+ if (mSurfaceBuffers[i].renderBuffer.graphicBufferHandle == graphichandler) {
+ mSurfaceBuffers[i].renderBuffer.renderDone = true;
+ VTRACE("SignalRenderDoneFlag mInitialized = true index = %d", i);
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&mLock);
+
+ return DECODE_SUCCESS;
+
+}
+
+void VideoDecoderBase::querySurfaceRenderStatus(VideoSurfaceBuffer* surface) {
+ VASurfaceStatus surfStat = VASurfaceReady;
+ VAStatus vaStat = VA_STATUS_SUCCESS;
+
+ if (!surface) {
+ LOGW("SurfaceBuffer not ready yet");
+ return;
+ }
+ surface->renderBuffer.driverRenderDone = true;
+
+#ifndef USE_GEN_HW
+ if (surface->renderBuffer.surface != VA_INVALID_SURFACE &&
+ (mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER)) {
+
+ vaStat = vaQuerySurfaceStatus(mVADisplay, surface->renderBuffer.surface, &surfStat);
+
+ if ((vaStat == VA_STATUS_SUCCESS) && (surfStat != VASurfaceReady))
+ surface->renderBuffer.driverRenderDone = false;
+
+ }
+#endif
+
+}
+
+// This function should be called before start() to load different types of parsers
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
+Decode_Status VideoDecoderBase::setParserType(_vbp_parser_type type) {
+ if ((int32_t)type != VBP_INVALID) {
+ ITRACE("Parser Type = %d", (int32_t)type);
+ mParserType = type;
+ return DECODE_SUCCESS;
+ } else {
+ ETRACE("Invalid parser type = %d", (int32_t)type);
+ return DECODE_NO_PARSER;
+ }
+}
+
+Decode_Status VideoDecoderBase::updateBuffer(uint8_t *buffer, int32_t size, void** vbpData) {
+ if (mParserHandle == NULL) {
+ return DECODE_NO_PARSER;
+ }
+
+ uint32_t vbpStatus;
+ if (buffer == NULL || size <= 0) {
+ return DECODE_INVALID_DATA;
+ }
+
+ vbpStatus = mParserUpdate(mParserHandle, buffer, size, vbpData);
+ CHECK_VBP_STATUS("vbp_update");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::queryBuffer(void** vbpData) {
+ if (mParserHandle == NULL) {
+ return DECODE_NO_PARSER;
+ }
+
+ uint32_t vbpStatus;
+ vbpStatus = mParserQuery(mParserHandle, vbpData);
+ CHECK_VBP_STATUS("vbp_query");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderBase::getCodecSpecificConfigs(VAProfile profile, VAConfigID *config) {
+ VAStatus vaStatus;
+ VAConfigAttrib attrib;
+ attrib.type = VAConfigAttribRTFormat;
+ attrib.value = VA_RT_FORMAT_YUV420;
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib,
+ 1,
+ config);
+
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
+#endif
+Decode_Status VideoDecoderBase::checkHardwareCapability() {
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderBase::drainDecodingErrors(VideoErrorBuffer *outErrBuf, VideoRenderBuffer *currentSurface) {
+ if (mErrReportEnabled && outErrBuf && currentSurface) {
+ memcpy(outErrBuf, &(currentSurface->errBuf), sizeof(VideoErrorBuffer));
+
+ currentSurface->errBuf.errorNumber = 0;
+ currentSurface->errBuf.timeStamp = INVALID_PTS;
+ }
+ if (outErrBuf)
+ VTRACE("%s: error number is %d", __FUNCTION__, outErrBuf->errorNumber);
+}
+
+void VideoDecoderBase::fillDecodingErrors(VideoRenderBuffer *currentSurface) {
+ VAStatus ret;
+
+ if (mErrReportEnabled) {
+ currentSurface->errBuf.timeStamp = currentSurface->timeStamp;
+ // TODO: is 10 a suitable number?
+ VASurfaceDecodeMBErrors *err_drv_output = NULL;
+ ret = vaQuerySurfaceError(mVADisplay, currentSurface->surface, VA_STATUS_ERROR_DECODING_ERROR, (void **)&err_drv_output);
+ if (ret || !err_drv_output) {
+ WTRACE("vaQuerySurfaceError failed.");
+ return;
+ }
+
+ int offset = 0x1 & currentSurface->errBuf.errorNumber;// offset is either 0 or 1
+ for (int i = 0; i < MAX_ERR_NUM - offset; i++) {
+ if (err_drv_output[i].status != -1) {
+ currentSurface->errBuf.errorNumber++;
+ currentSurface->errBuf.errorArray[i + offset].type = DecodeMBError;
+ currentSurface->errBuf.errorArray[i + offset].error_data.mb_pos.start_mb = err_drv_output[i].start_mb;
+ currentSurface->errBuf.errorArray[i + offset].error_data.mb_pos.end_mb = err_drv_output[i].end_mb;
+ currentSurface->errBuf.errorArray[i + offset].num_mbs = err_drv_output[i].end_mb - err_drv_output[i].start_mb + 1;
+ ITRACE("Error Index[%d]: type = %d, start_mb = %d, end_mb = %d",
+ currentSurface->errBuf.errorNumber - 1,
+ currentSurface->errBuf.errorArray[i + offset].type,
+ currentSurface->errBuf.errorArray[i + offset].error_data.mb_pos.start_mb,
+ currentSurface->errBuf.errorArray[i + offset].error_data.mb_pos.end_mb);
+ } else break;
+ }
+ ITRACE("%s: error number of current surface is %d, timestamp @%llu",
+ __FUNCTION__, currentSurface->errBuf.errorNumber, currentSurface->timeStamp);
+ }
+}
+
+void VideoDecoderBase::setRotationDegrees(int32_t rotationDegrees) {
+ if (mRotationDegrees == rotationDegrees) {
+ return;
+ }
+
+ ITRACE("set new rotation degree: %d", rotationDegrees);
+ VADisplayAttribute rotate;
+ rotate.type = VADisplayAttribRotation;
+ rotate.value = VA_ROTATION_NONE;
+ if (rotationDegrees == 0)
+ rotate.value = VA_ROTATION_NONE;
+ else if (rotationDegrees == 90)
+ rotate.value = VA_ROTATION_90;
+ else if (rotationDegrees == 180)
+ rotate.value = VA_ROTATION_180;
+ else if (rotationDegrees == 270)
+ rotate.value = VA_ROTATION_270;
+
+ VAStatus ret = vaSetDisplayAttributes(mVADisplay, &rotate, 1);
+ if (ret) {
+ ETRACE("Failed to set rotation degree.");
+ }
+ mRotationDegrees = rotationDegrees;
+}
+
+void VideoDecoderBase::setRenderRect() {
+
+ if (!mVADisplay)
+ return;
+
+ VAStatus ret;
+ VARectangle rect;
+ rect.x = mVideoFormatInfo.cropLeft;
+ rect.y = mVideoFormatInfo.cropTop;
+ rect.width = mVideoFormatInfo.width - (mVideoFormatInfo.cropLeft + mVideoFormatInfo.cropRight);
+ rect.height = mVideoFormatInfo.height - (mVideoFormatInfo.cropBottom + mVideoFormatInfo.cropTop);
+
+ VADisplayAttribute render_rect;
+ render_rect.type = VADisplayAttribRenderRect;
+ render_rect.value = (long)&rect;
+
+ ret = vaSetDisplayAttributes(mVADisplay, &render_rect, 1);
+ if (ret) {
+        ETRACE("Failed to set render rect.");
+ }
+}
diff --git a/videodecoder/VideoDecoderBase.h b/videodecoder/VideoDecoderBase.h
new file mode 100755
index 0000000..9cf09e8
--- /dev/null
+++ b/videodecoder/VideoDecoderBase.h
@@ -0,0 +1,187 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_BASE_H_
+#define VIDEO_DECODER_BASE_H_
+
+#include <va/va.h>
+#include <va/va_tpi.h>
+#include "VideoDecoderDefs.h"
+#include "VideoDecoderInterface.h"
+#include <pthread.h>
+#include <dlfcn.h>
+
+extern "C" {
+#include "vbp_loader.h"
+}
+
+#ifndef Display
+#ifdef USE_GEN_HW
+typedef char Display;
+#else
+typedef unsigned int Display;
+#endif
+#endif
+
+// TODO: check what is the best number. Must be at least 2 to support one backward reference frame.
+// Currently set to 8 to support 7 backward reference frames. This value is used for AVC frame reordering only.
+// e.g:
+// POC: 4P, 8P, 10P, 6B and mNextOutputPOC = 5
+#define OUTPUT_WINDOW_SIZE 8
+
+class VideoDecoderBase : public IVideoDecoder {
+public:
+ VideoDecoderBase(const char *mimeType, _vbp_parser_type type);
+ virtual ~VideoDecoderBase();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+    virtual Decode_Status reset(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ //virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+ virtual void flush(void);
+ virtual void freeSurfaceBuffers(void);
+ virtual const VideoRenderBuffer* getOutput(bool draining = false, VideoErrorBuffer *output_buf = NULL);
+ virtual Decode_Status signalRenderDone(void * graphichandler);
+ virtual const VideoFormatInfo* getFormatInfo(void);
+ virtual bool checkBufferAvail();
+    virtual void enableErrorReport(bool enabled = false) { mErrReportEnabled = enabled; }
+
+protected:
+ // each acquireSurfaceBuffer must be followed by a corresponding outputSurfaceBuffer or releaseSurfaceBuffer.
+ // Only one surface buffer can be acquired at any given time
+ virtual Decode_Status acquireSurfaceBuffer(void);
+ // frame is successfully decoded to the acquired surface buffer and surface is ready for output
+ virtual Decode_Status outputSurfaceBuffer(void);
+ // acquired surface buffer is not used
+ virtual Decode_Status releaseSurfaceBuffer(void);
+ // flush all decoded but not rendered buffers
+ virtual void flushSurfaceBuffers(void);
+ virtual Decode_Status endDecodingFrame(bool dropFrame);
+ virtual VideoSurfaceBuffer* findOutputByPoc(bool draining = false);
+ virtual VideoSurfaceBuffer* findOutputByPct(bool draining = false);
+ virtual VideoSurfaceBuffer* findOutputByPts();
+ virtual Decode_Status setupVA(uint32_t numSurface, VAProfile profile, uint32_t numExtraSurface = 0);
+ virtual Decode_Status terminateVA(void);
+ virtual Decode_Status parseBuffer(uint8_t *buffer, int32_t size, bool config, void** vbpData);
+
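+    // round up to a 16-pixel macroblock boundary, e.g. alignMB(1080) == 1088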
+ static inline uint32_t alignMB(uint32_t a) {
+ return ((a + 15) & (~15));
+ }
+
+ virtual Decode_Status getRawDataFromSurface(VideoRenderBuffer *renderBuffer = NULL, uint8_t *pRawData = NULL, uint32_t *pSize = NULL, bool internal = true);
+
+#if (defined USE_AVC_SHORT_FORMAT) || (defined USE_SLICE_HEADER_PARSING)
+ Decode_Status updateBuffer(uint8_t *buffer, int32_t size, void** vbpData);
+ Decode_Status queryBuffer(void **vbpData);
+ Decode_Status setParserType(_vbp_parser_type type);
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
+#endif
+ virtual Decode_Status checkHardwareCapability();
+private:
+ Decode_Status mapSurface(void);
+ void initSurfaceBuffer(bool reset);
+ void drainDecodingErrors(VideoErrorBuffer *outErrBuf, VideoRenderBuffer *currentSurface);
+ void fillDecodingErrors(VideoRenderBuffer *currentSurface);
+
+ bool mInitialized;
+ pthread_mutex_t mLock;
+
+protected:
+ bool mLowDelay; // when true, decoded frame is immediately output for rendering
+ VideoFormatInfo mVideoFormatInfo;
+ Display *mDisplay;
+ VADisplay mVADisplay;
+ VAContextID mVAContext;
+ VAConfigID mVAConfig;
+ VASurfaceID *mExtraSurfaces; // extra surfaces array
+ int32_t mNumExtraSurfaces;
+ bool mVAStarted;
+    uint64_t mCurrentPTS; // current presentation time stamp (unit depends on the framework: GStreamer uses 100 ns, Android uses microseconds)
+ // the following three member variables should be set using
+ // acquireSurfaceBuffer/outputSurfaceBuffer/releaseSurfaceBuffer
+ VideoSurfaceBuffer *mAcquiredBuffer;
+ VideoSurfaceBuffer *mLastReference;
+ VideoSurfaceBuffer *mForwardReference;
+ VideoConfigBuffer mConfigBuffer; // only store configure meta data.
+ bool mDecodingFrame; // indicate whether a frame is being decoded
+ bool mSizeChanged; // indicate whether video size is changed.
+ bool mShowFrame; // indicate whether the decoded frame is for display
+
+ int32_t mOutputWindowSize; // indicate limit of number of outstanding frames for output
+ int32_t mRotationDegrees;
+
+ bool mErrReportEnabled;
+ bool mWiDiOn;
+ typedef uint32_t (*OpenFunc)(uint32_t, void **);
+ typedef uint32_t (*CloseFunc)(void *);
+ typedef uint32_t (*ParseFunc)(void *, uint8_t *, uint32_t, uint8_t);
+ typedef uint32_t (*QueryFunc)(void *, void **);
+ typedef uint32_t (*FlushFunc)(void *);
+ typedef uint32_t (*UpdateFunc)(void *, void *, uint32_t, void **);
+ void *mLibHandle;
+ OpenFunc mParserOpen;
+ CloseFunc mParserClose;
+ ParseFunc mParserParse;
+ QueryFunc mParserQuery;
+ FlushFunc mParserFlush;
+ UpdateFunc mParserUpdate;
+ enum {
+ // TODO: move this to vbp_loader.h
+ VBP_INVALID = 0xFF,
+ // TODO: move this to va.h
+ VAProfileSoftwareDecoding = 0xFF,
+ };
+
+ enum OUTPUT_METHOD {
+ // output by Picture Coding Type (I, P, B)
+ OUTPUT_BY_PCT,
+ // output by Picture Order Count (for AVC only)
+ OUTPUT_BY_POC,
+ //OUTPUT_BY_POS,
+ //OUTPUT_BY_PTS,
+ };
+
+private:
+ bool mRawOutput; // whether to output NV12 raw data
+ bool mManageReference; // this should stay true for VC1/MP4 decoder, and stay false for AVC decoder. AVC handles reference frame using DPB
+ OUTPUT_METHOD mOutputMethod;
+
+ int32_t mNumSurfaces;
+ VideoSurfaceBuffer *mSurfaceBuffers;
+ VideoSurfaceBuffer *mOutputHead; // head of output buffer list
+ VideoSurfaceBuffer *mOutputTail; // tail of output buffer list
+ VASurfaceID *mSurfaces; // surfaces array
+ VASurfaceAttribExternalBuffers *mVASurfaceAttrib;
+ uint8_t **mSurfaceUserPtr; // mapped user space pointer
+ int32_t mSurfaceAcquirePos; // position of surface to start acquiring
+ int32_t mNextOutputPOC; // Picture order count of next output
+ _vbp_parser_type mParserType;
+ void *mParserHandle;
+ void *mSignalBufferPre[MAX_GRAPHIC_BUFFER_NUM];
+ uint32 mSignalBufferSize;
+ bool mUseGEN;
+protected:
+ void ManageReference(bool enable) {mManageReference = enable;}
+ void setOutputMethod(OUTPUT_METHOD method) {mOutputMethod = method;}
+ void setOutputWindowSize(int32_t size) {mOutputWindowSize = (size < OUTPUT_WINDOW_SIZE) ? size : OUTPUT_WINDOW_SIZE;}
+ void querySurfaceRenderStatus(VideoSurfaceBuffer* surface);
+ void enableLowDelayMode(bool enable) {mLowDelay = enable;}
+ void setRotationDegrees(int32_t rotationDegrees);
+ void setRenderRect(void);
+};
+
+
+#endif // VIDEO_DECODER_BASE_H_
diff --git a/videodecoder/VideoDecoderDefs.h b/videodecoder/VideoDecoderDefs.h
new file mode 100644
index 0000000..c9b5d30
--- /dev/null
+++ b/videodecoder/VideoDecoderDefs.h
@@ -0,0 +1,263 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_DEFS_H_
+#define VIDEO_DECODER_DEFS_H_
+
+#include <va/va.h>
+#include <stdint.h>
+
+// format specific data, for future extension.
+struct VideoExtensionBuffer {
+ int32_t extType;
+ int32_t extSize;
+ uint8_t *extData;
+};
+
+typedef enum {
+ PACKED_FRAME_TYPE,
+} VIDEO_EXTENSION_TYPE;
+
+struct VideoFrameRawData {
+ int32_t width;
+ int32_t height;
+ int32_t pitch[3];
+ int32_t offset[3];
+ uint32_t fourcc; //NV12
+ int32_t size;
+ uint8_t *data;
+    bool own; // own data or derived from surface. If true, the library will release the memory during cleanup
+};
+
+struct PackedFrameData {
+ int64_t timestamp;
+ int32_t offSet;
+};
+
+// flags for VideoDecodeBuffer, VideoConfigBuffer and VideoRenderBuffer
+typedef enum {
+    // indicates if the sample has a discontinuity in its time stamp (usually happens after seeking)
+ HAS_DISCONTINUITY = 0x01,
+
+    // indicates whether the sample contains a complete frame or the end of a frame.
+ HAS_COMPLETE_FRAME = 0x02,
+
+ // indicate whether surfaceNumber field in the VideoConfigBuffer is valid
+ HAS_SURFACE_NUMBER = 0x04,
+
+ // indicate whether profile field in the VideoConfigBuffer is valid
+ HAS_VA_PROFILE = 0x08,
+
+    // indicate whether output order will be the same as decoding order
+ WANT_LOW_DELAY = 0x10, // make display order same as decoding order
+
+ // indicates whether error concealment algorithm should be enabled to automatically conceal error.
+ WANT_ERROR_CONCEALMENT = 0x20,
+
+    // indicate whether raw data should be output.
+ WANT_RAW_OUTPUT = 0x40,
+
+ // indicate sample is decoded but should not be displayed.
+ WANT_DECODE_ONLY = 0x80,
+
+ // indicate surfaceNumber field is valid and it contains minimum surface number to allocate.
+ HAS_MINIMUM_SURFACE_NUMBER = 0x100,
+
+ // indicates surface created will be protected
+ WANT_SURFACE_PROTECTION = 0x400,
+
+ // indicates if extra data is appended at end of buffer
+ HAS_EXTRADATA = 0x800,
+
+ // indicates if buffer contains codec data
+ HAS_CODECDATA = 0x1000,
+
+    // indicate whether to use graphic buffers.
+ USE_NATIVE_GRAPHIC_BUFFER = 0x2000,
+
+ // indicate whether it is a sync frame in container
+ IS_SYNC_FRAME = 0x4000,
+
+ // indicate whether video decoder buffer contains secure data
+ IS_SECURE_DATA = 0x8000,
+
+ // indicate it's the last output frame of the sequence
+ IS_EOS = 0x10000,
+
+    // indicate that tiling surfaces should be allocated
+ USE_TILING_MEMORY = 0x20000,
+
+ // indicate the frame has resolution change
+ IS_RESOLUTION_CHANGE = 0x40000,
+
+ // indicate whether video decoder buffer contains only one field
+ IS_SINGLE_FIELD = 0x80000,
+
+ // indicate adaptive playback mode
+ WANT_ADAPTIVE_PLAYBACK = 0x100000,
+
+ // indicate the modular drm type
+ IS_SUBSAMPLE_ENCRYPTION = 0x200000,
+
+} VIDEO_BUFFER_FLAG;
+
+typedef enum
+{
+ DecodeHeaderError = 0,
+ DecodeMBError = 1,
+ DecodeSliceMissing = 2,
+ DecodeRefMissing = 3,
+} VideoDecodeErrorType;
+
+#define MAX_ERR_NUM 10
+
+struct VideoDecodeBuffer {
+ uint8_t *data;
+ int32_t size;
+ int64_t timeStamp;
+ uint32_t flag;
+ uint32_t rotationDegrees;
+ VideoExtensionBuffer *ext;
+};
+
+
+//#define MAX_GRAPHIC_BUFFER_NUM (16 + 1 + 11) // max DPB + 1 + AVC_EXTRA_NUM
+#define MAX_GRAPHIC_BUFFER_NUM 64 // extended for VPP
+
+struct VideoConfigBuffer {
+ uint8_t *data;
+ int32_t size;
+ int32_t width;
+ int32_t height;
+ uint32_t surfaceNumber;
+ VAProfile profile;
+ uint32_t flag;
+ void *graphicBufferHandler[MAX_GRAPHIC_BUFFER_NUM];
+ uint32_t graphicBufferStride;
+ uint32_t graphicBufferColorFormat;
+ uint32_t graphicBufferWidth;
+ uint32_t graphicBufferHeight;
+ VideoExtensionBuffer *ext;
+ void* nativeWindow;
+ uint32_t rotationDegrees;
+#ifdef TARGET_HAS_VPP
+ uint32_t vppBufferNum;
+#endif
+};
+
+struct VideoErrorInfo {
+ VideoDecodeErrorType type;
+ uint32_t num_mbs;
+ union {
+ struct {uint32_t start_mb; uint32_t end_mb;} mb_pos;
+ } error_data;
+};
+
+struct VideoErrorBuffer {
+ uint32_t errorNumber; // Error number should be no more than MAX_ERR_NUM
+ int64_t timeStamp; // presentation time stamp
+ VideoErrorInfo errorArray[MAX_ERR_NUM];
+};
+
+struct VideoRenderBuffer {
+ VASurfaceID surface;
+ VADisplay display;
+ int32_t scanFormat; //progressive, top-field first, or bottom-field first
+ int64_t timeStamp; // presentation time stamp
+    mutable volatile bool renderDone; // indicates whether the frame has been rendered; this must be set to true by the client of this library once
+    // the surface is rendered. Not setting this flag will lead to a DECODE_NO_SURFACE error.
+ void * graphicBufferHandle;
+ int32_t graphicBufferIndex; //the index in graphichandle array
+ uint32_t flag;
+ mutable volatile bool driverRenderDone;
+ VideoFrameRawData *rawData;
+
+ VideoErrorBuffer errBuf;
+};
+
+struct VideoSurfaceBuffer {
+ VideoRenderBuffer renderBuffer;
+ int32_t pictureOrder; // picture order count, valid only for AVC format
+    bool referenceFrame; // indicates whether the frame associated with this surface is a reference I/P frame
+    bool asReferernce; // indicates whether the frame is used as a reference (as a result the surface cannot be used for decoding)
+ VideoFrameRawData *mappedData;
+ VideoSurfaceBuffer *next;
+};
+
+struct VideoFormatInfo {
+ bool valid; // indicates whether format info is valid. MimeType is always valid.
+ char *mimeType;
+ uint32_t width;
+ uint32_t height;
+ uint32_t surfaceWidth;
+ uint32_t surfaceHeight;
+ uint32_t surfaceNumber;
+ VASurfaceID *ctxSurfaces;
+ int32_t aspectX;
+ int32_t aspectY;
+ int32_t cropLeft;
+ int32_t cropRight;
+ int32_t cropTop;
+ int32_t cropBottom;
+ int32_t colorMatrix;
+ int32_t videoRange;
+ int32_t bitrate;
+ int32_t framerateNom;
+ int32_t framerateDenom;
+ uint32_t actualBufferNeeded;
+ int32_t flags; // indicate whether current picture is field or frame
+ VideoExtensionBuffer *ext;
+};
+
+// TODO: categorize the following errors as fatal and non-fatal.
+typedef enum {
+ DECODE_NOT_STARTED = -10,
+ DECODE_NEED_RESTART = -9,
+ DECODE_NO_CONFIG = -8,
+ DECODE_NO_SURFACE = -7,
+ DECODE_NO_REFERENCE = -6,
+ DECODE_NO_PARSER = -5,
+ DECODE_INVALID_DATA = -4,
+ DECODE_DRIVER_FAIL = -3,
+ DECODE_PARSER_FAIL = -2,
+ DECODE_MEMORY_FAIL = -1,
+ DECODE_FAIL = 0,
+ DECODE_SUCCESS = 1,
+ DECODE_FORMAT_CHANGE = 2,
+ DECODE_FRAME_DROPPED = 3,
+ DECODE_MULTIPLE_FRAME = 4,
+} VIDEO_DECODE_STATUS;
+
+typedef int32_t Decode_Status;
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+inline bool checkFatalDecoderError(Decode_Status status) {
+ if (status == DECODE_NOT_STARTED ||
+ status == DECODE_NEED_RESTART ||
+ status == DECODE_NO_PARSER ||
+ status == DECODE_INVALID_DATA ||
+ status == DECODE_MEMORY_FAIL ||
+ status == DECODE_FAIL) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+#endif // VIDEO_DECODER_DEFS_H_
diff --git a/videodecoder/VideoDecoderHost.cpp b/videodecoder/VideoDecoderHost.cpp
new file mode 100644
index 0000000..56f55d7
--- /dev/null
+++ b/videodecoder/VideoDecoderHost.cpp
@@ -0,0 +1,85 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderWMV.h"
+#include "VideoDecoderMPEG4.h"
+#include "VideoDecoderAVC.h"
+
+#ifdef USE_INTEL_SECURE_AVC
+#include "VideoDecoderAVCSecure.h"
+#endif
+
+#ifdef USE_HW_VP8
+#include "VideoDecoderVP8.h"
+#endif
+#include "VideoDecoderHost.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+IVideoDecoder* createVideoDecoder(const char* mimeType) {
+ if (mimeType == NULL) {
+ ETRACE("NULL mime type.");
+ return NULL;
+ }
+
+ if (strcasecmp(mimeType, "video/wmv") == 0 ||
+ strcasecmp(mimeType, "video/vc1") == 0 ||
+ strcasecmp(mimeType, "video/x-ms-wmv") == 0) {
+ VideoDecoderWMV *p = new VideoDecoderWMV(mimeType);
+ return (IVideoDecoder *)p;
+ } else if (strcasecmp(mimeType, "video/avc") == 0 ||
+ strcasecmp(mimeType, "video/h264") == 0) {
+ VideoDecoderAVC *p = new VideoDecoderAVC(mimeType);
+ return (IVideoDecoder *)p;
+ } else if (strcasecmp(mimeType, "video/mp4v-es") == 0 ||
+ strcasecmp(mimeType, "video/mpeg4") == 0 ||
+ strcasecmp(mimeType, "video/h263") == 0 ||
+ strcasecmp(mimeType, "video/3gpp") == 0) {
+ VideoDecoderMPEG4 *p = new VideoDecoderMPEG4(mimeType);
+ return (IVideoDecoder *)p;
+ }
+#ifdef USE_INTEL_SECURE_AVC
+ else if (strcasecmp(mimeType, "video/avc-secure") == 0) {
+ VideoDecoderAVC *p = new VideoDecoderAVCSecure(mimeType);
+ return (IVideoDecoder *)p;
+ }
+#endif
+
+#ifdef USE_HW_VP8
+ else if (strcasecmp(mimeType, "video/vp8") == 0 ||
+ strcasecmp(mimeType, "video/x-vnd.on2.vp8") == 0) {
+ VideoDecoderVP8 *p = new VideoDecoderVP8(mimeType);
+ return (IVideoDecoder *)p;
+ }
+#endif
+
+ else {
+ ETRACE("Unknown mime type: %s", mimeType);
+ }
+ return NULL;
+}
+
+void releaseVideoDecoder(IVideoDecoder* p) {
+ if (p) {
+ const VideoFormatInfo *info = p->getFormatInfo();
+ if (info && info->mimeType) {
+ ITRACE("Deleting decoder for %s", info->mimeType);
+ }
+ }
+ delete p;
+}
+
+
diff --git a/videodecoder/VideoDecoderHost.h b/videodecoder/VideoDecoderHost.h
new file mode 100644
index 0000000..1f053b6
--- /dev/null
+++ b/videodecoder/VideoDecoderHost.h
@@ -0,0 +1,29 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+#ifndef VIDEO_DECODER_HOST_H_
+#define VIDEO_DECODER_HOST_H_
+
+
+#include "VideoDecoderInterface.h"
+
+IVideoDecoder* createVideoDecoder(const char* mimeType);
+void releaseVideoDecoder(IVideoDecoder *p);
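+
+// Typical usage (sketch, error handling omitted): create a decoder by MIME
+// type, start it with a VideoConfigBuffer, feed compressed units through
+// decode(), drain frames with getOutput(), then stop and release it:
+//
+//   IVideoDecoder *decoder = createVideoDecoder("video/avc");
+//   decoder->start(&configBuffer);
+//   decoder->decode(&decodeBuffer);
+//   const VideoRenderBuffer *frame = decoder->getOutput();
+//   decoder->stop();
+//   releaseVideoDecoder(decoder);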
+
+
+
+#endif /* VIDEO_DECODER_HOST_H_ */
diff --git a/videodecoder/VideoDecoderInterface.h b/videodecoder/VideoDecoderInterface.h
new file mode 100644
index 0000000..fdc2c12
--- /dev/null
+++ b/videodecoder/VideoDecoderInterface.h
@@ -0,0 +1,40 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+#ifndef VIDEO_DECODER_INTERFACE_H_
+#define VIDEO_DECODER_INTERFACE_H_
+
+#include "VideoDecoderDefs.h"
+
+class IVideoDecoder {
+public:
+ virtual ~IVideoDecoder() {}
+ virtual Decode_Status start(VideoConfigBuffer *buffer) = 0;
+ virtual Decode_Status reset(VideoConfigBuffer *buffer) = 0;
+ virtual void stop(void) = 0;
+ virtual void flush() = 0;
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer) = 0;
+ virtual void freeSurfaceBuffers(void) = 0;
+ virtual const VideoRenderBuffer* getOutput(bool draining = false, VideoErrorBuffer *output_buf = NULL) = 0;
+ virtual const VideoFormatInfo* getFormatInfo(void) = 0;
+ virtual Decode_Status signalRenderDone(void * graphichandler) = 0;
+ virtual bool checkBufferAvail() = 0;
+ virtual Decode_Status getRawDataFromSurface(VideoRenderBuffer *renderBuffer = NULL, uint8_t *pRawData = NULL, uint32_t *pSize = NULL, bool internal = true) = 0;
+ virtual void enableErrorReport(bool enabled) = 0;
+};
+
+#endif /* VIDEO_DECODER_INTERFACE_H_ */
diff --git a/videodecoder/VideoDecoderMPEG4.cpp b/videodecoder/VideoDecoderMPEG4.cpp
new file mode 100644
index 0000000..b54afa9
--- /dev/null
+++ b/videodecoder/VideoDecoderMPEG4.cpp
@@ -0,0 +1,645 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderMPEG4.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+VideoDecoderMPEG4::VideoDecoderMPEG4(const char *mimeType)
+ : VideoDecoderBase(mimeType, VBP_MPEG4),
+ mLastVOPTimeIncrement(0),
+ mExpectingNVOP(false),
+ mSendIQMatrixBuf(false),
+ mLastVOPCodingType(MP4_VOP_TYPE_I),
+ mIsShortHeader(false) {
+}
+
+VideoDecoderMPEG4::~VideoDecoderMPEG4() {
+ stop();
+}
+
+Decode_Status VideoDecoderMPEG4::start(VideoConfigBuffer *buffer) {
+ Decode_Status status;
+
+ status = VideoDecoderBase::start(buffer);
+ CHECK_STATUS("VideoDecoderBase::start");
+
+ if (buffer->data == NULL || buffer->size == 0) {
+ WTRACE("No config data to start VA.");
+ return DECODE_SUCCESS;
+ }
+
+ vbp_data_mp42 *data = NULL;
+ status = VideoDecoderBase::parseBuffer(buffer->data, buffer->size, true, (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ status = startVA(data);
+ return status;
+}
+
+void VideoDecoderMPEG4::stop(void) {
+ // drop the last frame and ignore return value
+ endDecodingFrame(true);
+ VideoDecoderBase::stop();
+
+ mLastVOPTimeIncrement = 0;
+ mExpectingNVOP = false;
+ mLastVOPCodingType = MP4_VOP_TYPE_I;
+}
+
+Decode_Status VideoDecoderMPEG4::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ vbp_data_mp42 *data = NULL;
+ bool useGraphicbuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+ if (buffer->flag & IS_SYNC_FRAME) {
+ mIsSyncFrame = true;
+ } else {
+ mIsSyncFrame = false;
+ }
+ buffer->ext = NULL;
+ status = VideoDecoderBase::parseBuffer(
+ buffer->data,
+ buffer->size,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ if (!mVAStarted) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ }
+
+ if (mSizeChanged && !useGraphicbuffer) {
+        // some containers have an incorrect width/height;
+ // send the format change to OMX to update the crop info.
+ mSizeChanged = false;
+ ITRACE("Video size is changed during startVA");
+ return DECODE_FORMAT_CHANGE;
+ }
+
+ if ((mVideoFormatInfo.width != (uint32_t)data->codec_data.video_object_layer_width ||
+ mVideoFormatInfo.height != (uint32_t)data->codec_data.video_object_layer_height) &&
+ data->codec_data.video_object_layer_width &&
+ data->codec_data.video_object_layer_height) {
+ // update encoded image size
+        ITRACE("Video size changed from %dx%d to %dx%d\n", mVideoFormatInfo.width, mVideoFormatInfo.height,
+            data->codec_data.video_object_layer_width, data->codec_data.video_object_layer_height);
+ bool noNeedFlush = false;
+ mVideoFormatInfo.width = data->codec_data.video_object_layer_width;
+ mVideoFormatInfo.height = data->codec_data.video_object_layer_height;
+ if (useGraphicbuffer) {
+ noNeedFlush = (mVideoFormatInfo.width <= mVideoFormatInfo.surfaceWidth)
+ && (mVideoFormatInfo.height <= mVideoFormatInfo.surfaceHeight);
+ }
+ if (!noNeedFlush) {
+ flushSurfaceBuffers();
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ } else {
+ mSizeChanged = true;
+ }
+
+ setRenderRect();
+ }
+
+ status = decodeFrame(buffer, data);
+ CHECK_STATUS("decodeFrame");
+
+ return status;
+}
+
+void VideoDecoderMPEG4::flush(void) {
+ VideoDecoderBase::flush();
+
+ mExpectingNVOP = false;
+ mLastVOPTimeIncrement = 0;
+ mLastVOPCodingType = MP4_VOP_TYPE_I;
+}
+
+Decode_Status VideoDecoderMPEG4::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_mp42 *data) {
+ Decode_Status status;
+    // check if any slice is parsed; we may have received only configuration data
+ if (data->number_picture_data == 0) {
+ WTRACE("number_picture_data == 0");
+ return DECODE_SUCCESS;
+ }
+
+    // If the MPEG4 parser receives invalid parameters, check them here
+    // and return an error to OMX to avoid a mediaserver crash.
+ if (data->picture_data && (data->picture_data->picture_param.vop_width == 0
+ || data->picture_data->picture_param.vop_height == 0)) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+
+ if (lastPTS != mCurrentPTS) {
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ // start decoding a new frame
+ status = beginDecodingFrame(data);
+ if (status == DECODE_MULTIPLE_FRAME) {
+ buffer->ext = &mExtensionBuffer;
+ mExtensionBuffer.extType = PACKED_FRAME_TYPE;
+ mExtensionBuffer.extSize = sizeof(mPackedFrame);
+ mExtensionBuffer.extData = (uint8_t*)&mPackedFrame;
+ } else if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ }
+ CHECK_STATUS("beginDecodingFrame");
+ } else {
+ status = continueDecodingFrame(data);
+ if (status == DECODE_MULTIPLE_FRAME) {
+ buffer->ext = &mExtensionBuffer;
+ mExtensionBuffer.extType = PACKED_FRAME_TYPE;
+ mExtensionBuffer.extSize = sizeof(mPackedFrame);
+ mExtensionBuffer.extData = (uint8_t*)&mPackedFrame;
+ } else if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ }
+ CHECK_STATUS("continueDecodingFrame");
+ }
+
+ if (buffer->flag & HAS_COMPLETE_FRAME) {
+ // finish decoding current frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+ }
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderMPEG4::beginDecodingFrame(vbp_data_mp42 *data) {
+
+ Decode_Status status = DECODE_SUCCESS;
+ vbp_picture_data_mp42 *picData = data->picture_data;
+ VAPictureParameterBufferMPEG4 *picParam = &(picData->picture_param);
+ int codingType = picParam->vop_fields.bits.vop_coding_type;
+
+ // start sanity checking
+ if (mExpectingNVOP) {
+        // if we are waiting for an N-VOP of a packed frame and the new frame is coded, the coding type
+ // of this frame must be B
+ // for example: {PB} B N P B B P...
+ if (picData->vop_coded == 1 && codingType != MP4_VOP_TYPE_B) {
+ WTRACE("Invalid coding type while waiting for n-vop for packed frame.");
+ mExpectingNVOP = false;
+ }
+ }
+
+    // handle N-VOP picture; it could be a skipped frame or a simple placeholder for a packed frame
+ if (picData->vop_coded == 0) {
+ if (mLastReference == NULL) {
+            WTRACE("The last reference is unavailable to construct a skipped frame.");
+ flush();
+ mExpectingNVOP = false;
+ // TODO: handle this case
+ return DECODE_SUCCESS;
+ }
+
+ if (mExpectingNVOP) {
+ // P frame is already in queue, just need to update time stamp.
+ mLastReference->renderBuffer.timeStamp = mCurrentPTS;
+ mExpectingNVOP = false;
+ }
+ else {
+            // Do nothing for a skipped frame as the last frame will be rendered again natively.
+            // No need to handle the reference frame either.
+#if 0
+ // this is skipped frame, use the last reference frame as output
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.scanFormat = mLastReference->renderBuffer.scanFormat;
+ mAcquiredBuffer->renderBuffer.surface = mLastReference->renderBuffer.surface;
+ // No need to update mappedData for HW decoding
+ //mAcquiredBuffer->mappedData.data = mLastReference->mappedData.data;
+ mAcquiredBuffer->referenceFrame = true;
+ status = outputSurfaceBuffer();
+ CHECK_STATUS("outputSurfaceBuffer");
+#endif
+ }
+
+ if (data->number_picture_data > 1) {
+ WTRACE("Unexpected to have more picture data following a non-coded VOP.");
+            // picture data is thrown away. No issue if the picture data is for an N-VOP;
+            // if it is for a coded picture, a frame is lost.
+ // TODO: handle this case
+ // return DECODE_FAIL;
+ }
+ return DECODE_SUCCESS;
+ }
+ else {
+ // Check if we have reference frame(s) for decoding
+ if (codingType == MP4_VOP_TYPE_B) {
+ if (mForwardReference == NULL ||
+ mLastReference == NULL) {
+ if (mIsShortHeader) {
+ status = DECODE_SUCCESS;
+ VTRACE("%s: No reference frame but keep decoding", __FUNCTION__);
+ } else
+ return DECODE_NO_REFERENCE;
+ }
+ } else if (codingType == MP4_VOP_TYPE_P || codingType == MP4_VOP_TYPE_S) {
+ if (mLastReference == NULL && mIsSyncFrame == false) {
+ if (mIsShortHeader) {
+ status = DECODE_SUCCESS;
+ VTRACE("%s: No reference frame but keep decoding", __FUNCTION__);
+ } else
+ return DECODE_NO_REFERENCE;
+ }
+ }
+ // all sanity checks pass, continue decoding through continueDecodingFrame
+ status = continueDecodingFrame(data);
+ }
+ return status;
+}
+
+Decode_Status VideoDecoderMPEG4::continueDecodingFrame(vbp_data_mp42 *data) {
+ Decode_Status status = DECODE_SUCCESS;
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ bool useGraphicBuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+
+ /*
+ Packed Frame Assumption:
+
+ 1. In one packed frame, there's only one P or I frame and only one B frame.
+ 2. In packed frame, there's no skipped frame (vop_coded = 0)
+ 3. For one packed frame, there will be one N-VOP frame to follow the packed frame (may not immediately).
+ 4. N-VOP frame is the frame with vop_coded = 0.
+ 5. The timestamp of N-VOP frame will be used for P or I frame in the packed frame
+
+
+ I, P, {P, B}, B, N, P, N, I, ...
+ I, P, {P, B}, N, P, N, I, ...
+
+ The first N is placeholder for P frame in the packed frame
+ The second N is a skipped frame
+ */
+
+ vbp_picture_data_mp42 *picData = data->picture_data;
+ for (uint32_t i = 0; i < data->number_picture_data; i++, picData = picData->next_picture_data) {
+ // each slice has its own picture data, video_packet_header following resync_marker may reset picture header, see MP4 spec
+ VAPictureParameterBufferMPEG4 *picParam = &(picData->picture_param);
+ int codingType = picParam->vop_fields.bits.vop_coding_type;
+ if (codingType == MP4_VOP_TYPE_S && picParam->no_of_sprite_warping_points > 1) {
+ WTRACE("Hardware only supports up to one warping point (stationary or translation)");
+ }
+
+ if (picData->vop_coded == 0) {
+ ETRACE("Unexpected to have non-coded VOP.");
+ return DECODE_FAIL;
+ }
+ if (picData->new_picture_flag == 1 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (picData->new_picture_flag == 0) {
+ WTRACE("First slice of picture is lost!");
+ // TODO: handle this case
+ }
+ if (mDecodingFrame) {
+ if (codingType == MP4_VOP_TYPE_B){
+ // this indicates the start of a new frame in the packed frame
+ // Update timestamp for P frame in the packed frame as timestamp here is for the B frame!
+ if (picParam->vop_time_increment_resolution){
+ uint64_t increment = mLastVOPTimeIncrement - picData->vop_time_increment +
+ picParam->vop_time_increment_resolution;
+ increment = increment % picParam->vop_time_increment_resolution;
+ // convert to micro-second
+                        // TODO: the unit of the time stamp varies across frameworks
+ increment = increment * 1e6 / picParam->vop_time_increment_resolution;
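+                        // Worked example (hypothetical values): with resolution = 30,
+                        // mLastVOPTimeIncrement = 10 and vop_time_increment = 8, the tick
+                        // distance is (10 - 8 + 30) % 30 = 2, i.e. 2 * 1e6 / 30 ~= 66667 us
+                        // added to the P frame's timestamp.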
+ mAcquiredBuffer->renderBuffer.timeStamp += increment;
+ if (useGraphicBuffer){
+ mPackedFrame.timestamp = mCurrentPTS;
+ mCurrentPTS = mAcquiredBuffer->renderBuffer.timeStamp;
+ }
+ }
+ } else {
+                    // this indicates the start of a new frame in the packed frame, with no B frame in the packet
+                    // Update the timestamp according to the increment
+ if (picParam->vop_time_increment_resolution){
+ int64_t increment = picData->vop_time_increment - mLastVOPTimeIncrement + picParam->vop_time_increment_resolution;
+ increment = increment % picParam->vop_time_increment_resolution;
+ //convert to micro-second
+ increment = increment * 1e6 / picParam->vop_time_increment_resolution;
+ if (useGraphicBuffer) {
+ mPackedFrame.timestamp = mCurrentPTS + increment;
+ }
+ else {
+ mCurrentPTS += increment;
+ }
+
+ } else {
+ if (useGraphicBuffer) {
+ mPackedFrame.timestamp = mCurrentPTS + 30000;
+ }
+ else {
+ mCurrentPTS += 30000;
+ }
+ }
+ }
+ endDecodingFrame(false);
+ mExpectingNVOP = true;
+ if (codingType != MP4_VOP_TYPE_B) {
+ mExpectingNVOP = false;
+ }
+ if (useGraphicBuffer) {
+ int32_t count = i - 1;
+ if (count < 0) {
+ WTRACE("Shuld not be here!");
+ return DECODE_SUCCESS;
+ }
+ vbp_picture_data_mp42 *lastpic = data->picture_data;
+ for(int k = 0; k < count; k++ ) {
+ lastpic = lastpic->next_picture_data;
+ }
+ mPackedFrame.offSet = lastpic->slice_data.slice_offset + lastpic->slice_data.slice_size;
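+                    // offSet now points one byte past the end of the first frame's
+                    // slice data so the caller can resubmit the rest of the packed buffer.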
+ VTRACE("Report OMX to handle for Multiple frame offset=%d time=%lld",mPackedFrame.offSet,mPackedFrame.timestamp);
+ return DECODE_MULTIPLE_FRAME;
+ }
+ }
+
+ // acquire a new surface buffer
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+            // sprite is treated as a P frame in the display order, so only B frames are not used as "reference"
+ mAcquiredBuffer->referenceFrame = (codingType != MP4_VOP_TYPE_B);
+ if (picData->picture_param.vol_fields.bits.interlaced) {
+                // only the MPEG-4 studio profile can have field coding. All other profiles
+                // use frame coding only, i.e., there is no field VOP (see vop_structure in the MP4 spec)
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+ // TODO: set discontinuity flag
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ mSizeChanged = false;
+ }
+ if (codingType != MP4_VOP_TYPE_B) {
+ mLastVOPCodingType = codingType;
+ mLastVOPTimeIncrement = picData->vop_time_increment;
+ }
+
+ // start decoding a frame
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ mDecodingFrame = true;
+ mSendIQMatrixBuf = true;
+ }
+
+ status = decodeSlice(data, picData);
+ CHECK_STATUS("decodeSlice");
+ }
+
+ return DECODE_SUCCESS;
+}
+
+
+Decode_Status VideoDecoderMPEG4::decodeSlice(vbp_data_mp42 *data, vbp_picture_data_mp42 *picData) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ VAPictureParameterBufferMPEG4 *picParam = &(picData->picture_param);
+ vbp_slice_data_mp42 *sliceData = &(picData->slice_data);
+ VASliceParameterBufferMPEG4 *sliceParam = &(sliceData->slice_param);
+
+    // send picture parameter for each slice
+ status = setReference(picParam);
+ CHECK_STATUS("setReference");
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferMPEG4),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+
+ bufferIDCount++;
+ if (picParam->vol_fields.bits.quant_type && mSendIQMatrixBuf)
+ {
+ // only send IQ matrix for the first slice in the picture
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferMPEG4),
+ 1,
+ &(data->iq_matrix_buffer),
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+
+ mSendIQMatrixBuf = false;
+ bufferIDCount++;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferMPEG4),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+
+ bufferIDCount++;
+
+    // slice data buffer pointer
+    // Note that this is the original data buffer pointer;
+    // the offset to the actual slice data is provided in
+    // slice_data_offset in VASliceParameterBufferMPEG4
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderMPEG4::setReference(VAPictureParameterBufferMPEG4 *picParam) {
+ switch (picParam->vop_fields.bits.vop_coding_type) {
+ case MP4_VOP_TYPE_I:
+ picParam->forward_reference_picture = VA_INVALID_SURFACE;
+ picParam->backward_reference_picture = VA_INVALID_SURFACE;
+ break;
+ case MP4_VOP_TYPE_P:
+ if (mLastReference == NULL && mIsSyncFrame == false && !mIsShortHeader) {
+ return DECODE_NO_REFERENCE;
+ }
+ if (mLastReference != NULL) {
+ picParam->forward_reference_picture = mLastReference->renderBuffer.surface;
+ } else {
+ VTRACE("%s: no reference frame, but keep decoding", __FUNCTION__);
+ picParam->forward_reference_picture = VA_INVALID_SURFACE;
+ }
+ picParam->backward_reference_picture = VA_INVALID_SURFACE;
+ break;
+ case MP4_VOP_TYPE_B:
+ picParam->vop_fields.bits.backward_reference_vop_coding_type = mLastVOPCodingType;
+        // TODO: revisit this reference assignment; it looks suspicious
+ if (mIsShortHeader) {
+ if (mLastReference != NULL) {
+ picParam->forward_reference_picture = mLastReference->renderBuffer.surface;
+ } else {
+ VTRACE("%s: no forward reference frame, but keep decoding", __FUNCTION__);
+ picParam->forward_reference_picture = VA_INVALID_SURFACE;
+ }
+ if (mForwardReference != NULL) {
+ picParam->backward_reference_picture = mForwardReference->renderBuffer.surface;
+ } else {
+ VTRACE("%s: no backward reference frame, but keep decoding", __FUNCTION__);
+ picParam->backward_reference_picture = VA_INVALID_SURFACE;
+ }
+ } else if (mLastReference == NULL || mForwardReference == NULL) {
+ return DECODE_NO_REFERENCE;
+ } else {
+ picParam->forward_reference_picture = mLastReference->renderBuffer.surface;
+ picParam->backward_reference_picture = mForwardReference->renderBuffer.surface;
+ }
+ break;
+ case MP4_VOP_TYPE_S:
+        // TODO: revisit; this previously used mForwardReference instead of mLastReference
+ if (mLastReference == NULL) {
+ return DECODE_NO_REFERENCE;
+ }
+ picParam->forward_reference_picture = mLastReference->renderBuffer.surface;
+ picParam->backward_reference_picture = VA_INVALID_SURFACE;
+ break;
+
+ default:
+        // Should never reach here
+ return DECODE_PARSER_FAIL;
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderMPEG4::startVA(vbp_data_mp42 *data) {
+ updateFormatInfo(data);
+
+ VAProfile vaProfile;
+
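+    // profile_and_level_indication values 0xF0-0xF7 (mask 0xF8) select the
+    // Advanced Simple profile family; anything else falls back to Simple profile.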
+ if ((data->codec_data.profile_and_level_indication & 0xF8) == 0xF0) {
+ vaProfile = VAProfileMPEG4AdvancedSimple;
+ } else {
+ vaProfile = VAProfileMPEG4Simple;
+ }
+
+ mIsShortHeader = data->codec_data.short_video_header;
+
+ return VideoDecoderBase::setupVA(MP4_SURFACE_NUMBER, vaProfile);
+}
+
+void VideoDecoderMPEG4::updateFormatInfo(vbp_data_mp42 *data) {
+ ITRACE("updateFormatInfo: current size: %d x %d, new size: %d x %d",
+ mVideoFormatInfo.width, mVideoFormatInfo.height,
+ data->codec_data.video_object_layer_width,
+ data->codec_data.video_object_layer_height);
+
+ mVideoFormatInfo.cropBottom = data->codec_data.video_object_layer_height > mVideoFormatInfo.height ?
+ data->codec_data.video_object_layer_height - mVideoFormatInfo.height : 0;
+ mVideoFormatInfo.cropRight = data->codec_data.video_object_layer_width > mVideoFormatInfo.width ?
+ data->codec_data.video_object_layer_width - mVideoFormatInfo.width : 0;
+
+ if ((mVideoFormatInfo.width != (uint32_t)data->codec_data.video_object_layer_width ||
+ mVideoFormatInfo.height != (uint32_t)data->codec_data.video_object_layer_height) &&
+ data->codec_data.video_object_layer_width &&
+ data->codec_data.video_object_layer_height) {
+ // update encoded image size
+ mVideoFormatInfo.width = data->codec_data.video_object_layer_width;
+ mVideoFormatInfo.height = data->codec_data.video_object_layer_height;
+ mSizeChanged = true;
+ ITRACE("Video size is changed.");
+ }
+
+ // video_range has default value of 0. Y ranges from 16 to 235.
+ mVideoFormatInfo.videoRange = data->codec_data.video_range;
+
+ switch (data->codec_data.matrix_coefficients) {
+ case 1:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT709;
+ break;
+
+ // ITU-R Recommendation BT.470-6 System B, G (MP4), same as
+ // SMPTE 170M/BT601
+ case 5:
+ case 6:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT601;
+ break;
+
+ default:
+ // unknown color matrix, set to 0 so color space flag will not be set.
+ mVideoFormatInfo.colorMatrix = 0;
+ break;
+ }
+
+ mVideoFormatInfo.aspectX = data->codec_data.par_width;
+ mVideoFormatInfo.aspectY = data->codec_data.par_height;
+ //mVideoFormatInfo.bitrate = data->codec_data.bit_rate;
+ mVideoFormatInfo.valid = true;
+
+ setRenderRect();
+}
+
+Decode_Status VideoDecoderMPEG4::checkHardwareCapability() {
+ VAStatus vaStatus;
+ VAConfigAttrib cfgAttribs[2];
+ cfgAttribs[0].type = VAConfigAttribMaxPictureWidth;
+ cfgAttribs[1].type = VAConfigAttribMaxPictureHeight;
+ vaStatus = vaGetConfigAttributes(mVADisplay,
+ mIsShortHeader ? VAProfileH263Baseline : VAProfileMPEG4AdvancedSimple,
+ VAEntrypointVLD, cfgAttribs, 2);
+ CHECK_VA_STATUS("vaGetConfigAttributes");
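+    // Note: this compares the total pixel budget (max width * max height) against
+    // the clip area rather than checking each dimension separately.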
+ if (cfgAttribs[0].value * cfgAttribs[1].value < (uint32_t)mVideoFormatInfo.width * (uint32_t)mVideoFormatInfo.height) {
+ ETRACE("hardware supports resolution %d * %d smaller than the clip resolution %d * %d",
+ cfgAttribs[0].value, cfgAttribs[1].value, mVideoFormatInfo.width, mVideoFormatInfo.height);
+ return DECODE_DRIVER_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/VideoDecoderMPEG4.h b/videodecoder/VideoDecoderMPEG4.h
new file mode 100644
index 0000000..8fa319e
--- /dev/null
+++ b/videodecoder/VideoDecoderMPEG4.h
@@ -0,0 +1,70 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_MPEG4_H_
+#define VIDEO_DECODER_MPEG4_H_
+
+#include "VideoDecoderBase.h"
+
+
+class VideoDecoderMPEG4 : public VideoDecoderBase {
+public:
+ VideoDecoderMPEG4(const char *mimeType);
+ virtual ~VideoDecoderMPEG4();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual void flush(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status checkHardwareCapability();
+
+private:
+ Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_mp42 *data);
+ Decode_Status beginDecodingFrame(vbp_data_mp42 *data);
+ Decode_Status continueDecodingFrame(vbp_data_mp42 *data);
+ Decode_Status decodeSlice(vbp_data_mp42 *data, vbp_picture_data_mp42 *picData);
+ Decode_Status setReference(VAPictureParameterBufferMPEG4 *picParam);
+ Decode_Status startVA(vbp_data_mp42 *data);
+ void updateFormatInfo(vbp_data_mp42 *data);
+
+private:
+ // Value of VOP type defined here follows MP4 spec
+ enum {
+ MP4_VOP_TYPE_I = 0,
+ MP4_VOP_TYPE_P = 1,
+ MP4_VOP_TYPE_B = 2,
+ MP4_VOP_TYPE_S = 3,
+ };
+
+ enum {
+ MP4_SURFACE_NUMBER = 10,
+ };
+
+ uint64_t mLastVOPTimeIncrement;
+ bool mExpectingNVOP; // indicate if future n-vop is a placeholder of a packed frame
+ bool mSendIQMatrixBuf; // indicate if iq_matrix_buffer is sent to driver
+ int32_t mLastVOPCodingType;
+ bool mIsSyncFrame; // indicate if it is SyncFrame in container
+ bool mIsShortHeader; // indicate if it is short header format
+ VideoExtensionBuffer mExtensionBuffer;
+ PackedFrameData mPackedFrame;
+};
+
+
+
+#endif /* VIDEO_DECODER_MPEG4_H_ */
diff --git a/videodecoder/VideoDecoderTrace.cpp b/videodecoder/VideoDecoderTrace.cpp
new file mode 100644
index 0000000..1075419
--- /dev/null
+++ b/videodecoder/VideoDecoderTrace.cpp
@@ -0,0 +1,37 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+
+#include "VideoDecoderTrace.h"
+
+#ifdef ENABLE_VIDEO_DECODER_TRACE
+
+void TraceVideoDecoder(const char* cat, const char* fun, int line, const char* format, ...)
+{
+ if (NULL == cat || NULL == fun || NULL == format)
+ return;
+
+ printf("%s %s(#%d): ", cat, fun, line);
+ va_list args;
+ va_start(args, format);
+ vprintf(format, args);
+ va_end(args);
+ printf("\n");
+}
+
+#endif
+
diff --git a/videodecoder/VideoDecoderTrace.h b/videodecoder/VideoDecoderTrace.h
new file mode 100755
index 0000000..c4c1001
--- /dev/null
+++ b/videodecoder/VideoDecoderTrace.h
@@ -0,0 +1,96 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+#ifndef VIDEO_DECODER_TRACE_H_
+#define VIDEO_DECODER_TRACE_H_
+
+
+#define ENABLE_VIDEO_DECODER_TRACE
+//#define ANDROID
+
+
+#ifdef ENABLE_VIDEO_DECODER_TRACE
+
+#ifndef ANDROID
+
+#include <stdio.h>
+#include <stdarg.h>
+
+extern void TraceVideoDecoder(const char* cat, const char* fun, int line, const char* format, ...);
+#define VIDEO_DECODER_TRACE(cat, format, ...) \
+TraceVideoDecoder(cat, __FUNCTION__, __LINE__, format, ##__VA_ARGS__)
+
+#define ETRACE(format, ...) VIDEO_DECODER_TRACE("ERROR: ", format, ##__VA_ARGS__)
+#define WTRACE(format, ...) VIDEO_DECODER_TRACE("WARNING: ", format, ##__VA_ARGS__)
+#define ITRACE(format, ...) VIDEO_DECODER_TRACE("INFO: ", format, ##__VA_ARGS__)
+#define VTRACE(format, ...) VIDEO_DECODER_TRACE("VERBOSE: ", format, ##__VA_ARGS__)
+
+#else
+// for Android OS
+
+//#define LOG_NDEBUG 0
+
+#define LOG_TAG "VideoDecoder"
+
+#include <wrs_omxil_core/log.h>
+#define ETRACE(...) LOGE(__VA_ARGS__)
+#define WTRACE(...) LOGW(__VA_ARGS__)
+#define ITRACE(...) LOGI(__VA_ARGS__)
+#define VTRACE(...) LOGV(__VA_ARGS__)
+
+#endif
+
+
+#else
+
+#define ETRACE(format, ...)
+#define WTRACE(format, ...)
+#define ITRACE(format, ...)
+#define VTRACE(format, ...)
+
+
+#endif /* ENABLE_VIDEO_DECODER_TRACE*/
+
+
+#define CHECK_STATUS(FUNC)\
+ if (status != DECODE_SUCCESS) {\
+ if (status > DECODE_SUCCESS) {\
+ WTRACE(FUNC" failed. status = %d", status);\
+ } else {\
+ ETRACE(FUNC" failed. status = %d", status);\
+ }\
+ return status;\
+ }
+
+#define CHECK_VA_STATUS(FUNC)\
+ if (vaStatus != VA_STATUS_SUCCESS) {\
+ ETRACE(FUNC" failed. vaStatus = 0x%x", vaStatus);\
+ return DECODE_DRIVER_FAIL;\
+ }
+
+#define CHECK_VBP_STATUS(FUNC)\
+ if (vbpStatus != VBP_OK) {\
+ ETRACE(FUNC" failed. vbpStatus = %d", (int)vbpStatus);\
+ if (vbpStatus == VBP_ERROR) {\
+ return DECODE_FAIL;\
+ }\
+ return DECODE_PARSER_FAIL;\
+ }
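+
+// Usage sketch: each macro assumes the enclosing function declares a local with the
+// matching name ("status", "vaStatus" or "vbpStatus") and returns Decode_Status, e.g.:
+//     Decode_Status status = acquireSurfaceBuffer();
+//     CHECK_STATUS("acquireSurfaceBuffer");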
+
+#endif /*VIDEO_DECODER_TRACE_H_*/
+
+
diff --git a/videodecoder/VideoDecoderVP8.cpp b/videodecoder/VideoDecoderVP8.cpp
new file mode 100644
index 0000000..87249b4
--- /dev/null
+++ b/videodecoder/VideoDecoderVP8.cpp
@@ -0,0 +1,449 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderVP8.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+VideoDecoderVP8::VideoDecoderVP8(const char *mimeType)
+ : VideoDecoderBase(mimeType, VBP_VP8) {
+ invalidateReferenceFrames(0);
+ invalidateReferenceFrames(1);
+}
+
+VideoDecoderVP8::~VideoDecoderVP8() {
+ stop();
+}
+
+void VideoDecoderVP8::invalidateReferenceFrames(int toggle) {
+ ReferenceFrameBuffer *p = mRFBs[toggle];
+ for (int i = 0; i < VP8_REF_SIZE; i++) {
+ p->index = (uint32_t) -1;
+ p->surfaceBuffer = NULL;
+ p++;
+ }
+}
+
+void VideoDecoderVP8::clearAsReference(int toggle, int ref_type) {
+ ReferenceFrameBuffer ref = mRFBs[toggle][ref_type];
+ if (ref.surfaceBuffer) {
+ ref.surfaceBuffer->asReferernce = false;
+ }
+}
+
+void VideoDecoderVP8::updateFormatInfo(vbp_data_vp8 *data) {
+ uint32_t width = data->codec_data->frame_width;
+ uint32_t height = data->codec_data->frame_height;
+ ITRACE("updateFormatInfo: current size: %d x %d, new size: %d x %d",
+ mVideoFormatInfo.width, mVideoFormatInfo.height, width, height);
+
+ if ((mVideoFormatInfo.width != width ||
+ mVideoFormatInfo.height != height) &&
+ width && height) {
+ if ((VideoDecoderBase::alignMB(mVideoFormatInfo.width) != width) ||
+ (VideoDecoderBase::alignMB(mVideoFormatInfo.height) != height)) {
+ mSizeChanged = true;
+ ITRACE("Video size is changed.");
+ }
+ mVideoFormatInfo.width = width;
+ mVideoFormatInfo.height = height;
+ }
+
+ mVideoFormatInfo.cropLeft = data->codec_data->crop_left;
+ mVideoFormatInfo.cropRight = data->codec_data->crop_right;
+ mVideoFormatInfo.cropTop = data->codec_data->crop_top;
+ mVideoFormatInfo.cropBottom = data->codec_data->crop_bottom;
+ ITRACE("Cropping: left = %d, top = %d, right = %d, bottom = %d", data->codec_data->crop_left, data->codec_data->crop_top, data->codec_data->crop_right, data->codec_data->crop_bottom);
+
+ mVideoFormatInfo.valid = true;
+
+ setRenderRect();
+}
+
+Decode_Status VideoDecoderVP8::startVA(vbp_data_vp8 *data) {
+ updateFormatInfo(data);
+
+ VAProfile vaProfile = VAProfileVP8Version0_3;
+ if (data->codec_data->version_num > 3) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ enableLowDelayMode(true);
+
+ return VideoDecoderBase::setupVA(VP8_SURFACE_NUMBER + VP8_REF_SIZE, vaProfile);
+}
+
+Decode_Status VideoDecoderVP8::start(VideoConfigBuffer *buffer) {
+ Decode_Status status;
+
+ status = VideoDecoderBase::start(buffer);
+ CHECK_STATUS("VideoDecoderBase::start");
+
+    // We don't want the base class to manage references.
+ VideoDecoderBase::ManageReference(false);
+
+ if (buffer->data == NULL || buffer->size == 0) {
+ WTRACE("No config data to start VA.");
+ return DECODE_SUCCESS;
+ }
+
+ vbp_data_vp8 *data = NULL;
+ status = VideoDecoderBase::parseBuffer(buffer->data, buffer->size, true, (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ status = startVA(data);
+ return status;
+}
+
+void VideoDecoderVP8::stop(void) {
+ VideoDecoderBase::stop();
+
+ invalidateReferenceFrames(0);
+ invalidateReferenceFrames(1);
+}
+
+void VideoDecoderVP8::flush(void) {
+ VideoDecoderBase::flush();
+
+ invalidateReferenceFrames(0);
+ invalidateReferenceFrames(1);
+}
+
+Decode_Status VideoDecoderVP8::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ vbp_data_vp8 *data = NULL;
+ if (buffer == NULL) {
+ ETRACE("VideoDecodeBuffer is NULL.");
+ return DECODE_INVALID_DATA;
+ }
+
+ status = VideoDecoderBase::parseBuffer(
+ buffer->data,
+ buffer->size,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ mShowFrame = data->codec_data->show_frame;
+
+ if (!mVAStarted) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ }
+
+ VideoDecoderBase::setRotationDegrees(buffer->rotationDegrees);
+
+ status = decodeFrame(buffer, data);
+
+ return status;
+}
+
+Decode_Status VideoDecoderVP8::decodeFrame(VideoDecodeBuffer* buffer, vbp_data_vp8 *data) {
+ Decode_Status status;
+ bool useGraphicbuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+ mCurrentPTS = buffer->timeStamp;
+ if (0 == data->num_pictures || NULL == data->pic_data) {
+ WTRACE("Number of pictures is 0.");
+ return DECODE_SUCCESS;
+ }
+
+ if (VP8_KEY_FRAME == data->codec_data->frame_type) {
+ if (mSizeChanged && !useGraphicbuffer){
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ } else {
+ updateFormatInfo(data);
+ bool noNeedFlush = false;
+ if (useGraphicbuffer) {
+ noNeedFlush = (mVideoFormatInfo.width <= mVideoFormatInfo.surfaceWidth)
+ && (mVideoFormatInfo.height <= mVideoFormatInfo.surfaceHeight);
+ }
+ if (mSizeChanged == true && !noNeedFlush) {
+ flushSurfaceBuffers();
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ }
+ }
+ }
+
+ if (data->codec_data->frame_type == VP8_SKIPPED_FRAME) {
+        // Do nothing for a skipped frame as the last frame will be rendered again natively
+ return DECODE_SUCCESS;
+ }
+
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+    // set referenceFrame to true if the decoded frame is a key or inter frame, false otherwise.
+ int frameType = data->codec_data->frame_type;
+ mAcquiredBuffer->referenceFrame = (frameType == VP8_KEY_FRAME || frameType == VP8_INTER_FRAME);
+    // assume it is a frame picture.
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ mAcquiredBuffer->renderBuffer.timeStamp = buffer->timeStamp;
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ if (buffer->flag & WANT_DECODE_ONLY) {
+ mAcquiredBuffer->renderBuffer.flag |= WANT_DECODE_ONLY;
+ }
+ if (mSizeChanged) {
+ mSizeChanged = false;
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ }
+
+ // Here data->num_pictures is always equal to 1
+ for (uint32_t index = 0; index < data->num_pictures; index++) {
+ status = decodePicture(data, index);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ return status;
+ }
+ }
+
+ if (frameType != VP8_SKIPPED_FRAME) {
+ updateReferenceFrames(data);
+ }
+
+    // if the sample is successfully decoded, call outputSurfaceBuffer(); otherwise
+    // call releaseSurfaceBuffer();
+ status = outputSurfaceBuffer();
+ return status;
+}
+
+Decode_Status VideoDecoderVP8::decodePicture(vbp_data_vp8 *data, int32_t picIndex) {
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ Decode_Status status;
+ uint32_t bufferIDCount = 0;
+ VABufferID bufferIDs[5];
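+    // up to 5 buffers to render a picture: picture parameter, probability data,
+    // IQ matrix, slice parameter and slice data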
+
+ vbp_picture_data_vp8 *picData = &(data->pic_data[picIndex]);
+ VAPictureParameterBufferVP8 *picParams = picData->pic_parms;
+
+ status = setReference(picParams);
+ CHECK_STATUS("setReference");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+ // setting mDecodingFrame to true so vaEndPicture will be invoked to end the picture decoding.
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferVP8),
+ 1,
+ picParams,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAProbabilityBufferType,
+ sizeof(VAProbabilityDataBufferVP8),
+ 1,
+ data->prob_data,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateProbabilityBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferVP8),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+
+ /* Here picData->num_slices is always equal to 1 */
+ for (uint32_t i = 0; i < picData->num_slices; i++) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferVP8),
+ 1,
+ &(picData->slc_data[i].slc_parms),
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ picData->slc_data[i].slice_size, //size
+ 1, //num_elements
+ picData->slc_data[i].buffer_addr + picData->slc_data[i].slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ mDecodingFrame = false;
+ CHECK_VA_STATUS("vaEndPicture");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderVP8::setReference(VAPictureParameterBufferVP8 *picParam) {
+ int frameType = picParam->pic_fields.bits.key_frame;
+ switch (frameType) {
+ case VP8_KEY_FRAME:
+ picParam->last_ref_frame = VA_INVALID_SURFACE;
+ picParam->alt_ref_frame = VA_INVALID_SURFACE;
+ picParam->golden_ref_frame = VA_INVALID_SURFACE;
+ break;
+ case VP8_INTER_FRAME:
+ if (mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer == NULL ||
+ mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer == NULL ||
+ mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer == NULL) {
+ mAcquiredBuffer->renderBuffer.errBuf.errorNumber = 1;
+ mAcquiredBuffer->renderBuffer.errBuf.errorArray[0].type = DecodeRefMissing;
+ return DECODE_NO_REFERENCE;
+ }
+ //mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer = mLastReference;
+ picParam->last_ref_frame = mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer->renderBuffer.surface;
+ picParam->alt_ref_frame = mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer->renderBuffer.surface;
+ picParam->golden_ref_frame = mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer->renderBuffer.surface;
+ break;
+ case VP8_SKIPPED_FRAME:
+ // will never happen here
+ break;
+ default:
+ return DECODE_PARSER_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderVP8::updateReferenceFrames(vbp_data_vp8 *data) {
+ /* Refresh last frame reference buffer using the currently reconstructed frame */
+ refreshLastReference(data);
+
+ /* Refresh golden frame reference buffer using the currently reconstructed frame */
+ refreshGoldenReference(data);
+
+ /* Refresh alternative frame reference buffer using the currently reconstructed frame */
+ refreshAltReference(data);
+
+    /* Clear the reference flag on previous-generation buffers that are no longer
+       referenced by the current set */
+ for (int i = 0; i < VP8_REF_SIZE; i++) {
+ VideoSurfaceBuffer *p = mRFBs[1][i].surfaceBuffer;
+ int j;
+ for (j = 0; j < VP8_REF_SIZE; j++) {
+ if (p == mRFBs[0][j].surfaceBuffer) {
+ break;
+ }
+ }
+ if (j == VP8_REF_SIZE) {
+ clearAsReference(1, i);
+ }
+ }
+}
+
+void VideoDecoderVP8::refreshLastReference(vbp_data_vp8 *data) {
+ /* Save previous last reference */
+ mRFBs[1][VP8_LAST_REF_PIC].surfaceBuffer = mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer;
+ mRFBs[1][VP8_LAST_REF_PIC].index = mRFBs[0][VP8_LAST_REF_PIC].index;
+
+ /* For key frame, this is always true */
+ if (data->codec_data->refresh_last_frame) {
+ mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer = mAcquiredBuffer;
+ mRFBs[0][VP8_LAST_REF_PIC].index = mAcquiredBuffer->renderBuffer.surface;
+ mRFBs[0][VP8_LAST_REF_PIC].surfaceBuffer->asReferernce = true;
+ }
+}
+
+void VideoDecoderVP8::refreshGoldenReference(vbp_data_vp8 *data) {
+ /* Save previous golden reference */
+ mRFBs[1][VP8_GOLDEN_REF_PIC].surfaceBuffer = mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer;
+ mRFBs[1][VP8_GOLDEN_REF_PIC].index = mRFBs[0][VP8_GOLDEN_REF_PIC].index;
+
+ if (data->codec_data->golden_copied != BufferCopied_NoneToGolden) {
+ if (data->codec_data->golden_copied == BufferCopied_LastToGolden) {
+ /* LastFrame is copied to GoldenFrame */
+ mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer = mRFBs[1][VP8_LAST_REF_PIC].surfaceBuffer;
+ mRFBs[0][VP8_GOLDEN_REF_PIC].index = mRFBs[1][VP8_LAST_REF_PIC].index;
+ } else if (data->codec_data->golden_copied == BufferCopied_AltRefToGolden) {
+ /* AltRefFrame is copied to GoldenFrame */
+ mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer = mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer;
+ mRFBs[0][VP8_GOLDEN_REF_PIC].index = mRFBs[0][VP8_ALT_REF_PIC].index;
+ }
+ }
+
+ /* For key frame, this is always true */
+ if (data->codec_data->refresh_golden_frame) {
+ mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer = mAcquiredBuffer;
+ mRFBs[0][VP8_GOLDEN_REF_PIC].index = mAcquiredBuffer->renderBuffer.surface;
+ mRFBs[0][VP8_GOLDEN_REF_PIC].surfaceBuffer->asReferernce = true;
+ }
+}
+
+void VideoDecoderVP8::refreshAltReference(vbp_data_vp8 *data) {
+ /* Save previous alternative reference */
+ mRFBs[1][VP8_ALT_REF_PIC].surfaceBuffer = mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer;
+ mRFBs[1][VP8_ALT_REF_PIC].index = mRFBs[0][VP8_ALT_REF_PIC].index;
+
+ if (data->codec_data->altref_copied != BufferCopied_NoneToAltRef) {
+ if (data->codec_data->altref_copied == BufferCopied_LastToAltRef) {
+ /* LastFrame is copied to AltRefFrame */
+ mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer = mRFBs[1][VP8_LAST_REF_PIC].surfaceBuffer;
+ mRFBs[0][VP8_ALT_REF_PIC].index = mRFBs[1][VP8_LAST_REF_PIC].index;
+ } else if (data->codec_data->altref_copied == BufferCopied_GoldenToAltRef) {
+ /* GoldenFrame is copied to AltRefFrame */
+ mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer = mRFBs[1][VP8_GOLDEN_REF_PIC].surfaceBuffer;
+ mRFBs[0][VP8_ALT_REF_PIC].index = mRFBs[1][VP8_GOLDEN_REF_PIC].index;
+ }
+ }
+
+ /* For key frame, this is always true */
+ if (data->codec_data->refresh_alt_frame) {
+ mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer = mAcquiredBuffer;
+ mRFBs[0][VP8_ALT_REF_PIC].index = mAcquiredBuffer->renderBuffer.surface;
+ mRFBs[0][VP8_ALT_REF_PIC].surfaceBuffer->asReferernce = true;
+ }
+}
+
+
+Decode_Status VideoDecoderVP8::checkHardwareCapability() {
+ VAStatus vaStatus;
+ VAConfigAttrib cfgAttribs[2];
+ cfgAttribs[0].type = VAConfigAttribMaxPictureWidth;
+ cfgAttribs[1].type = VAConfigAttribMaxPictureHeight;
+ vaStatus = vaGetConfigAttributes(mVADisplay, VAProfileVP8Version0_3,
+ VAEntrypointVLD, cfgAttribs, 2);
+ CHECK_VA_STATUS("vaGetConfigAttributes");
+ if (cfgAttribs[0].value * cfgAttribs[1].value < (uint32_t)mVideoFormatInfo.width * (uint32_t)mVideoFormatInfo.height) {
+ ETRACE("hardware supports resolution %d * %d smaller than the clip resolution %d * %d",
+ cfgAttribs[0].value, cfgAttribs[1].value, mVideoFormatInfo.width, mVideoFormatInfo.height);
+ return DECODE_DRIVER_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
diff --git a/videodecoder/VideoDecoderVP8.h b/videodecoder/VideoDecoderVP8.h
new file mode 100644
index 0000000..1daecaf
--- /dev/null
+++ b/videodecoder/VideoDecoderVP8.h
@@ -0,0 +1,91 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_VP8_H_
+#define VIDEO_DECODER_VP8_H_
+
+#include "VideoDecoderBase.h"
+
+
+class VideoDecoderVP8 : public VideoDecoderBase {
+public:
+ VideoDecoderVP8(const char *mimeType);
+ virtual ~VideoDecoderVP8();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual void flush(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status checkHardwareCapability();
+
+private:
+ Decode_Status decodeFrame(VideoDecodeBuffer* buffer, vbp_data_vp8 *data);
+ Decode_Status decodePicture(vbp_data_vp8 *data, int32_t picIndex);
+ Decode_Status setReference(VAPictureParameterBufferVP8 *picParam);
+ Decode_Status startVA(vbp_data_vp8 *data);
+ void updateReferenceFrames(vbp_data_vp8 *data);
+ void refreshLastReference(vbp_data_vp8 *data);
+ void refreshGoldenReference(vbp_data_vp8 *data);
+ void refreshAltReference(vbp_data_vp8 *data);
+ void updateFormatInfo(vbp_data_vp8 *data);
+ void invalidateReferenceFrames(int toggle);
+ void clearAsReference(int toggle, int ref_type);
+
+private:
+ enum {
+ VP8_SURFACE_NUMBER = 9,
+ VP8_REF_SIZE = 3,
+ };
+
+ enum {
+ VP8_KEY_FRAME = 0,
+ VP8_INTER_FRAME,
+ VP8_SKIPPED_FRAME,
+ };
+
+ enum {
+ VP8_LAST_REF_PIC = 0,
+ VP8_GOLDEN_REF_PIC,
+ VP8_ALT_REF_PIC,
+ };
+
+ enum {
+ BufferCopied_NoneToGolden = 0,
+ BufferCopied_LastToGolden = 1,
+ BufferCopied_AltRefToGolden = 2
+ };
+
+ enum {
+ BufferCopied_NoneToAltRef = 0,
+ BufferCopied_LastToAltRef = 1,
+ BufferCopied_GoldenToAltRef = 2
+ };
+
+ struct ReferenceFrameBuffer {
+ VideoSurfaceBuffer *surfaceBuffer;
+ int32_t index;
+ };
+
+    // [2]: [0 for the current set of reference frames, 1 for the previous set]
+    // [VP8_REF_SIZE]: [0 for last ref pic, 1 for golden ref pic, 2 for alt ref pic]
+ ReferenceFrameBuffer mRFBs[2][VP8_REF_SIZE];
+};
+
+
+
+#endif /* VIDEO_DECODER_VP8_H_ */
diff --git a/videodecoder/VideoDecoderWMV.cpp b/videodecoder/VideoDecoderWMV.cpp
new file mode 100644
index 0000000..16c307a
--- /dev/null
+++ b/videodecoder/VideoDecoderWMV.cpp
@@ -0,0 +1,568 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderWMV.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+VideoDecoderWMV::VideoDecoderWMV(const char *mimeType)
+ : VideoDecoderBase(mimeType, VBP_VC1),
+ mBufferIDs(NULL),
+ mNumBufferIDs(0),
+ mConfigDataParsed(false),
+ mRangeMapped(false),
+ mDeblockedCurrPicIndex(0),
+ mDeblockedLastPicIndex(1),
+ mDeblockedForwardPicIndex(2) {
+}
+
+
+VideoDecoderWMV::~VideoDecoderWMV() {
+ stop();
+}
+
+Decode_Status VideoDecoderWMV::start(VideoConfigBuffer *buffer) {
+ Decode_Status status;
+
+ status = VideoDecoderBase::start(buffer);
+ CHECK_STATUS("VideoDecoderBase::start");
+
+ if (buffer->data == NULL || buffer->size == 0) {
+ WTRACE("No config data to start VA.");
+ return DECODE_SUCCESS;
+ }
+
+ vbp_data_vc1 *data = NULL;
+ status = parseBuffer(buffer->data, buffer->size, &data);
+ CHECK_STATUS("parseBuffer");
+
+ status = startVA(data);
+ return status;
+}
+
+void VideoDecoderWMV::stop(void) {
+ if (mBufferIDs) {
+ delete [] mBufferIDs;
+ mBufferIDs = NULL;
+ }
+ mNumBufferIDs = 0;
+ mConfigDataParsed = false;
+ mRangeMapped = false;
+
+ mDeblockedCurrPicIndex = 0;
+ mDeblockedLastPicIndex = 1;
+ mDeblockedForwardPicIndex = 2;
+
+ VideoDecoderBase::stop();
+}
+
+void VideoDecoderWMV::flush(void) {
+ VideoDecoderBase::flush();
+
+ mRangeMapped = false;
+ mDeblockedCurrPicIndex = 0;
+ mDeblockedLastPicIndex = 1;
+ mDeblockedForwardPicIndex = 2;
+}
+
+Decode_Status VideoDecoderWMV::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ vbp_data_vc1 *data = NULL;
+ bool useGraphicbuffer = mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+ status = parseBuffer(buffer->data, buffer->size, &data);
+ CHECK_STATUS("parseBuffer");
+
+ if (!mVAStarted) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ }
+
+ if (mSizeChanged && !useGraphicbuffer) {
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ }
+
+ if ((mVideoFormatInfo.width != data->se_data->CODED_WIDTH ||
+ mVideoFormatInfo.height != data->se_data->CODED_HEIGHT) &&
+ data->se_data->CODED_WIDTH &&
+ data->se_data->CODED_HEIGHT) {
+ ITRACE("video size is changed from %dx%d to %dx%d", mVideoFormatInfo.width, mVideoFormatInfo.height,
+ data->se_data->CODED_WIDTH, data->se_data->CODED_HEIGHT);
+ mVideoFormatInfo.width = data->se_data->CODED_WIDTH;
+ mVideoFormatInfo.height = data->se_data->CODED_HEIGHT;
+ bool noNeedFlush = false;
+ if (useGraphicbuffer) {
+ noNeedFlush = (mVideoFormatInfo.width <= mVideoFormatInfo.surfaceWidth)
+ && (mVideoFormatInfo.height <= mVideoFormatInfo.surfaceHeight);
+ }
+
+ setRenderRect();
+
+ if (noNeedFlush) {
+ mSizeChanged = true;
+ } else {
+ flushSurfaceBuffers();
+ mSizeChanged = false;
+ return DECODE_FORMAT_CHANGE;
+ }
+ }
+
+ status = decodeFrame(buffer, data);
+ CHECK_STATUS("decodeFrame");
+ return status;
+}
+
+Decode_Status VideoDecoderWMV::decodeFrame(VideoDecodeBuffer* buffer, vbp_data_vc1 *data) {
+ Decode_Status status;
+ mCurrentPTS = buffer->timeStamp;
+ if (0 == data->num_pictures || NULL == data->pic_data) {
+ WTRACE("Number of pictures is 0, buffer contains configuration data only?");
+ return DECODE_SUCCESS;
+ }
+
+ if (data->pic_data[0].picture_is_skipped == VC1_PTYPE_SKIPPED) {
+
+        // Do nothing for a skipped frame as the last frame will be rendered again natively.
+        // No need to handle the reference frame either.
+ return DECODE_SUCCESS;
+#if 0
+ //use the last P or I frame surface for skipped frame and treat it as P frame
+ if (mLastReference == NULL) {
+ // TODO: handle this case
+ WTRACE("The last reference is unavailable to construct skipped frame.");
+ return DECODE_SUCCESS;
+ }
+
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.scanFormat = mLastReference->renderBuffer.scanFormat;
+ mAcquiredBuffer->renderBuffer.surface = mLastReference->renderBuffer.surface;
+ // No need to update mappedData for HW decoding
+ //mAcquiredBuffer->mappedData.data = mLastReference->mappedData.data;
+ mAcquiredBuffer->referenceFrame = true;
+ // let outputSurfaceBuffer handle "asReference" for VC1
+ status = outputSurfaceBuffer();
+ return status;
+#endif
+ }
+
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+ mAcquiredBuffer->renderBuffer.timeStamp = buffer->timeStamp;
+ if (buffer->flag & HAS_DISCONTINUITY) {
+ mAcquiredBuffer->renderBuffer.flag |= HAS_DISCONTINUITY;
+ }
+ if (buffer->flag & WANT_DECODE_ONLY) {
+ mAcquiredBuffer->renderBuffer.flag |= WANT_DECODE_ONLY;
+ }
+ if (mSizeChanged) {
+ mSizeChanged = false;
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ }
+
+ if (data->num_pictures > 1) {
+ if (data->pic_data[0].pic_parms->picture_fields.bits.is_first_field) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD;
+ }
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+
+ mRangeMapped = (data->se_data->RANGE_MAPY_FLAG || data->se_data->RANGE_MAPUV_FLAG || data->se_data->RANGERED);
+
+ int frameType = data->pic_data[0].pic_parms->picture_fields.bits.picture_type;
+ mAcquiredBuffer->referenceFrame = (frameType == VC1_PTYPE_I || frameType == VC1_PTYPE_P);
+
+ // TODO: handle multiple frames parsed from a sample buffer
+ int numPictures = (data->num_pictures > 1) ? 2 : 1;
+
+ for (int index = 0; index < numPictures; index++) {
+ status = decodePicture(data, index);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ return status;
+ }
+ }
+
+ if (mRangeMapped) {
+ updateDeblockedPicIndexes(frameType);
+ }
+
+ // let outputSurfaceBuffer handle "asReference" for VC1
+ status = outputSurfaceBuffer();
+ return status;
+}
+
+
+Decode_Status VideoDecoderWMV::decodePicture(vbp_data_vc1 *data, int32_t picIndex) {
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ Decode_Status status;
+ int32_t bufferIDCount = 0;
+ vbp_picture_data_vc1 *picData = &(data->pic_data[picIndex]);
+ VAPictureParameterBufferVC1 *picParams = picData->pic_parms;
+
+ if (picParams == NULL) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ status = allocateVABufferIDs(picData->num_slices * 2 + 2);
+ CHECK_STATUS("allocateVABufferIDs");
+
+ status = setReference(picParams, picIndex, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_STATUS("setReference");
+
+ if (mRangeMapped) {
+ // keep the destination surface for the picture after decoding and in-loop filtering
+ picParams->inloop_decoded_picture = mExtraSurfaces[mDeblockedCurrPicIndex];
+ } else {
+ picParams->inloop_decoded_picture = VA_INVALID_SURFACE;
+ }
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+ // setting mDecodingFrame to true so vaEndPicture will be invoked to end the picture decoding.
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferVC1),
+ 1,
+ picParams,
+ &mBufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ if (picParams->bitplane_present.value) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VABitPlaneBufferType,
+ picData->size_bitplanes,
+ 1,
+ picData->packed_bitplanes,
+ &mBufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateBitPlaneBuffer");
+ bufferIDCount++;
+ }
+
+ for (uint32_t i = 0; i < picData->num_slices; i++) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferVC1),
+ 1,
+ &(picData->slc_data[i].slc_parms),
+ &mBufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ //size
+ picData->slc_data[i].slice_size,
+ //num_elements
+ 1,
+ //slice data buffer pointer
+ //Note that this is the original data buffer ptr;
+ // offset to the actual slice data is provided in
+ // slice_data_offset in VASliceParameterBufferVC1
+ picData->slc_data[i].buffer_addr + picData->slc_data[i].slice_offset,
+ &mBufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ mBufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ mDecodingFrame = false;
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+
+Decode_Status VideoDecoderWMV::setReference(
+ VAPictureParameterBufferVC1 *params,
+ int32_t picIndex,
+ VASurfaceID current) {
+ int frameType = params->picture_fields.bits.picture_type;
+ switch (frameType) {
+ case VC1_PTYPE_I:
+ params->forward_reference_picture = current;
+ params->backward_reference_picture = current;
+ break;
+ case VC1_PTYPE_P:
+ // check REFDIST in the picture parameter buffer
+ if (0 != params->reference_fields.bits.reference_distance_flag &&
+ 0 != params->reference_fields.bits.reference_distance) {
+ /* The previous decoded frame (distance is up to 16 but not 0) is used
+ for reference. Not supported here.
+ */
+ return DECODE_NO_REFERENCE;
+ }
+ if (1 == picIndex) {
+ // handle interlace field coding case
+ if (1 == params->reference_fields.bits.num_reference_pictures ||
+ 1 == params->reference_fields.bits.reference_field_pic_indicator) {
+ /*
+ two reference fields or the second closest I/P field is used for
+ prediction. Set forward reference picture to INVALID so it will be
+ updated to a valid previous reconstructed reference frame later.
+ */
+ params->forward_reference_picture = VA_INVALID_SURFACE;
+ } else {
+ /* the closest I/P is used for reference so it must be the
+ complementary field in the same surface.
+ */
+ params->forward_reference_picture = current;
+ }
+ }
+ if (VA_INVALID_SURFACE == params->forward_reference_picture) {
+ if (mLastReference == NULL) {
+ return DECODE_NO_REFERENCE;
+ }
+ params->forward_reference_picture = mLastReference->renderBuffer.surface;
+ }
+ params->backward_reference_picture = VA_INVALID_SURFACE;
+ break;
+ case VC1_PTYPE_B:
+ if (mForwardReference == NULL || mLastReference == NULL) {
+ return DECODE_NO_REFERENCE;
+ }
+ params->forward_reference_picture = mForwardReference->renderBuffer.surface;
+ params->backward_reference_picture = mLastReference->renderBuffer.surface;
+ break;
+ case VC1_PTYPE_BI:
+ params->forward_reference_picture = VA_INVALID_SURFACE;
+ params->backward_reference_picture = VA_INVALID_SURFACE;
+ break;
+ case VC1_PTYPE_SKIPPED:
+ //Will never happen here
+ break;
+ default:
+ break;
+ }
+ return DECODE_SUCCESS;
+}
+
+void VideoDecoderWMV::updateDeblockedPicIndexes(int frameType) {
+ int32_t curPicIndex = mDeblockedCurrPicIndex;
+
+ /* Out Loop (range map) buffers */
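+    /* Rotate the three extra surfaces used for range mapping: an I/P frame swaps
+       the current and last indexes; a B frame swaps the current and forward indexes. */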
+ if (frameType != VC1_PTYPE_SKIPPED) {
+ if ((frameType == VC1_PTYPE_I) || (frameType == VC1_PTYPE_P)) {
+ mDeblockedCurrPicIndex = mDeblockedLastPicIndex;
+ mDeblockedLastPicIndex = curPicIndex;
+ } else {
+ mDeblockedCurrPicIndex = mDeblockedForwardPicIndex;
+ mDeblockedForwardPicIndex = curPicIndex;
+ }
+ }
+}
+
+Decode_Status VideoDecoderWMV::updateConfigData(
+ uint8_t *configData,
+ int32_t configDataLen,
+ uint8_t **newConfigData,
+ int32_t* newConfigDataLen) {
+ int32_t i = 0;
+ uint8_t *p = configData;
+
+    /* Check for start codes. If one exists, then this is VC-1 and not WMV. */
+ while (i < configDataLen - 2) {
+ if ((p[i] == 0) &&
+ (p[i + 1] == 0) &&
+ (p[i + 2] == 1)) {
+ *newConfigData = NULL;
+ *newConfigDataLen = 0;
+ return DECODE_SUCCESS;
+ }
+ i++;
+ }
+
+ *newConfigDataLen = configDataLen + 9;
+ p = *newConfigData = new uint8_t [*newConfigDataLen];
+ if (!p) {
+ return DECODE_MEMORY_FAIL;
+ }
+
+ /* If we get here we have 4+ bytes of codec data that must be formatted */
+ /* to pass through as an RCV sequence header. */
+ p[0] = 0;
+ p[1] = 0;
+ p[2] = 1;
+ p[3] = 0x0f; /* Start code. */
+ p[4] = (mVideoFormatInfo.width >> 8) & 0x0ff;
+ p[5] = mVideoFormatInfo.width & 0x0ff;
+ p[6] = (mVideoFormatInfo.height >> 8) & 0x0ff;
+ p[7] = mVideoFormatInfo.height & 0x0ff;
+
+ memcpy(p + 8, configData, configDataLen);
+ *(p + configDataLen + 8) = 0x80;
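+    /* Resulting buffer layout: 00 00 01 0F | coded width (2 bytes) | coded height
+       (2 bytes) | original codec data | 0x80 terminator, configDataLen + 9 bytes in total. */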
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderWMV::startVA(vbp_data_vc1 *data) {
+ updateFormatInfo(data);
+
+ VAProfile vaProfile;
+ switch (data->se_data->PROFILE) {
+ case 0:
+ vaProfile = VAProfileVC1Simple;
+ break;
+ case 1:
+ vaProfile = VAProfileVC1Main;
+ break;
+ default:
+ vaProfile = VAProfileVC1Advanced;
+ break;
+ }
+
+ return VideoDecoderBase::setupVA(VC1_SURFACE_NUMBER, vaProfile, VC1_EXTRA_SURFACE_NUMBER);
+}
+
+void VideoDecoderWMV::updateFormatInfo(vbp_data_vc1 *data) {
+ ITRACE("updateFormatInfo: current size: %d x %d, new size: %d x %d",
+ mVideoFormatInfo.width, mVideoFormatInfo.height,
+ data->se_data->CODED_WIDTH, data->se_data->CODED_HEIGHT);
+
+ mVideoFormatInfo.cropBottom = data->se_data->CODED_HEIGHT > mVideoFormatInfo.height ?
+ data->se_data->CODED_HEIGHT - mVideoFormatInfo.height : 0;
+ mVideoFormatInfo.cropRight = data->se_data->CODED_WIDTH > mVideoFormatInfo.width ?
+ data->se_data->CODED_WIDTH - mVideoFormatInfo.width : 0;
+
+ if ((mVideoFormatInfo.width != data->se_data->CODED_WIDTH ||
+ mVideoFormatInfo.height != data->se_data->CODED_HEIGHT) &&
+ data->se_data->CODED_WIDTH &&
+ data->se_data->CODED_HEIGHT) {
+ // encoded image size
+ mVideoFormatInfo.width = data->se_data->CODED_WIDTH;
+ mVideoFormatInfo.height = data->se_data->CODED_HEIGHT;
+ mSizeChanged = true;
+ ITRACE("Video size is changed.");
+ }
+
+ // scaling has been performed on the decoded image.
+ mVideoFormatInfo.videoRange = 1;
+
+ switch (data->se_data->MATRIX_COEF) {
+ case 1:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT709;
+ break;
+ // ITU-R BT.1700, ITU-R BT.601-5, and SMPTE 293M-1996.
+ case 6:
+ mVideoFormatInfo.colorMatrix = VA_SRC_BT601;
+ break;
+ default:
+ // unknown color matrix, set to 0 so color space flag will not be set.
+ mVideoFormatInfo.colorMatrix = 0;
+ break;
+ }
+
+ mVideoFormatInfo.aspectX = data->se_data->ASPECT_HORIZ_SIZE;
+ mVideoFormatInfo.aspectY = data->se_data->ASPECT_VERT_SIZE;
+ mVideoFormatInfo.bitrate = 0; //data->se_data->bitrate;
+ mVideoFormatInfo.valid = true;
+
+ setRenderRect();
+}
+
+Decode_Status VideoDecoderWMV::allocateVABufferIDs(int32_t number) {
+    if (mNumBufferIDs >= number) {
+ return DECODE_SUCCESS;
+ }
+ if (mBufferIDs) {
+ delete [] mBufferIDs;
+ }
+ mBufferIDs = NULL;
+ mNumBufferIDs = 0;
+ mBufferIDs = new VABufferID [number];
+ if (mBufferIDs == NULL) {
+ return DECODE_MEMORY_FAIL;
+ }
+ mNumBufferIDs = number;
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderWMV::parseBuffer(uint8_t *data, int32_t size, vbp_data_vc1 **vbpData) {
+ Decode_Status status;
+
+ if (data == NULL || size == 0) {
+ return DECODE_INVALID_DATA;
+ }
+
+ if (mConfigDataParsed) {
+ status = VideoDecoderBase::parseBuffer(data, size, false, (void**)vbpData);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ uint8_t *newData = NULL;
+ int32_t newSize = 0;
+ status = updateConfigData(data, size, &newData, &newSize);
+ CHECK_STATUS("updateConfigData");
+
+ if (newSize) {
+ status = VideoDecoderBase::parseBuffer(newData, newSize, true, (void**)vbpData);
+ delete [] newData;
+ } else {
+ status = VideoDecoderBase::parseBuffer(data, size, true, (void**)vbpData);
+ }
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ mConfigDataParsed = true;
+ }
+ return DECODE_SUCCESS;
+}
+
+
+Decode_Status VideoDecoderWMV::checkHardwareCapability() {
+#ifndef USE_GEN_HW
+ VAStatus vaStatus;
+ VAConfigAttrib cfgAttribs[2];
+ cfgAttribs[0].type = VAConfigAttribMaxPictureWidth;
+ cfgAttribs[1].type = VAConfigAttribMaxPictureHeight;
+ vaStatus = vaGetConfigAttributes(mVADisplay, VAProfileVC1Advanced,
+ VAEntrypointVLD, cfgAttribs, 2);
+ CHECK_VA_STATUS("vaGetConfigAttributes");
+ if (cfgAttribs[0].value * cfgAttribs[1].value < (uint32_t)mVideoFormatInfo.width * (uint32_t)mVideoFormatInfo.height) {
+ ETRACE("hardware supports resolution %d * %d smaller than the clip resolution %d * %d",
+ cfgAttribs[0].value, cfgAttribs[1].value, mVideoFormatInfo.width, mVideoFormatInfo.height);
+ return DECODE_DRIVER_FAIL;
+ }
+#endif
+ return DECODE_SUCCESS;
+}
+
+
diff --git a/videodecoder/VideoDecoderWMV.h b/videodecoder/VideoDecoderWMV.h
new file mode 100644
index 0000000..40e4a5c
--- /dev/null
+++ b/videodecoder/VideoDecoderWMV.h
@@ -0,0 +1,66 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_WMV_H_
+#define VIDEO_DECODER_WMV_H_
+
+#include "VideoDecoderBase.h"
+
+
+class VideoDecoderWMV : public VideoDecoderBase {
+public:
+ VideoDecoderWMV(const char *mimeType);
+ virtual ~VideoDecoderWMV();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual void flush(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status checkHardwareCapability();
+
+
+private:
+ Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_vc1 *data);
+ Decode_Status decodePicture(vbp_data_vc1 *data, int32_t picIndex);
+ Decode_Status setReference(VAPictureParameterBufferVC1 *params, int32_t picIndex, VASurfaceID current);
+ void updateDeblockedPicIndexes(int frameType);
+ Decode_Status updateConfigData(uint8_t *configData, int32_t configDataLen, uint8_t **newConfigData, int32_t *newConfigDataLen);
+ Decode_Status startVA(vbp_data_vc1 *data);
+ void updateFormatInfo(vbp_data_vc1 *data);
+ inline Decode_Status allocateVABufferIDs(int32_t number);
+ Decode_Status parseBuffer(uint8_t *data, int32_t size, vbp_data_vc1 **vbpData);
+
+private:
+ enum {
+ VC1_SURFACE_NUMBER = 10,
+ VC1_EXTRA_SURFACE_NUMBER = 3,
+ };
+
+ VABufferID *mBufferIDs;
+ int32_t mNumBufferIDs;
+ bool mConfigDataParsed;
+ bool mRangeMapped;
+
+ int32_t mDeblockedCurrPicIndex;
+ int32_t mDeblockedLastPicIndex;
+ int32_t mDeblockedForwardPicIndex;
+};
+
+
+
+#endif /* VIDEO_DECODER_WMV_H_ */
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..52a5285
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,367 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "va_private.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mSliceHeaderBuffer(NULL) {
+ setParserType(VBP_H264SECURE);
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mNaluHeaderBuffer");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mSliceHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mSliceHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mSliceHeaderBuffer");
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+
+ if (mSliceHeaderBuffer) {
+ delete [] mSliceHeaderBuffer;
+ mSliceHeaderBuffer = NULL;
+ }
+
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sliceHeaderSize = 0;
+ int32_t sizeLeft = 0;
+ int32_t sliceIdx = 0;
+ uint8_t naluType;
+ frame_info_t* pFrameInfo;
+
+ mFrameSize = 0;
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ return VideoDecoderAVC::decode(buffer);
+ }
+
+ if (buffer->size != sizeof(frame_info_t)) {
+ ETRACE("Not enough data to read frame_info_t!");
+ return DECODE_INVALID_DATA;
+ }
+ pFrameInfo = (frame_info_t*) buffer->data;
+
+ mFrameSize = pFrameInfo->length;
+ VTRACE("mFrameSize = %d", mFrameSize);
+
+ memcpy(&mEncParam, pFrameInfo->pavp, sizeof(pavp_info_t));
+ for (int32_t i = 0; i < pFrameInfo->num_nalus; i++) {
+ naluType = pFrameInfo->nalus[i].type & NALU_TYPE_MASK;
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &sliceIdx,
+ sizeof(int32_t));
+ sliceHeaderSize += 4;
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->data,
+ sizeof(uint8_t*));
+ sliceHeaderSize += sizeof(uint8_t*);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].offset,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].length,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ pFrameInfo->nalus[i].slice_header,
+ sizeof(slice_header_t));
+ sliceHeaderSize += sizeof(slice_header_t);
+ if (pFrameInfo->nalus[i].type & 0x60) {
+ memcpy(mSliceHeaderBuffer+sliceHeaderSize, pFrameInfo->dec_ref_pic_marking, sizeof(dec_ref_pic_marking_t));
+ } else {
+ memset(mSliceHeaderBuffer+sliceHeaderSize, 0, sizeof(dec_ref_pic_marking_t));
+ }
+ sliceHeaderSize += sizeof(dec_ref_pic_marking_t);
+ sliceIdx++;
+ } else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ pFrameInfo->nalus[i].data,
+ pFrameInfo->nalus[i].length);
+ sizeAccumulated += pFrameInfo->nalus[i].length;
+ } else {
+ WTRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+
+ vbp_data_h264 *data = NULL;
+ int new_sequence_to_handle = 0;
+
+ if (sizeAccumulated > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ mNaluHeaderBuffer,
+ sizeAccumulated,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+        // [Fix for DRC zoom issue] If one buffer contains more than one NALU
+        // (for example SPS+PPS+IDR), the new_sps/new_pps flags set by parseBuffer
+        // are flushed by the following updateBuffer call, so handleNewSequence
+        // would not be triggered in decodeFrame().
+ if (data->new_sps || data->new_pps) {
+ new_sequence_to_handle = 1;
+ }
+ }
+
+ if (sliceHeaderSize > 0) {
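+        // Append a 0xFFFFFFFF sentinel after the last record; since real
+        // slice indexes start from 0, this presumably marks end-of-records
+        // for the parser.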
+ memset(mSliceHeaderBuffer + sliceHeaderSize, 0xFF, 4);
+ sliceHeaderSize += 4;
+ status = VideoDecoderBase::updateBuffer(
+ mSliceHeaderBuffer,
+ sliceHeaderSize,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::updateBuffer");
+
+        // Restore the flags if they were flushed but a new sequence still needs to be handled.
+ if ((1 == new_sequence_to_handle) &&
+ ((data->new_sps == 0) || (data->new_pps == 0))) {
+ data->new_sps = 1;
+ data->new_pps = 1;
+ }
+ }
+
+ if (data == NULL) {
+ ETRACE("Invalid data returned by parser!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+ status = decodeFrame(buffer, data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+    // maximum 5 buffers to render a slice: picture parameter, IQMatrix, encryption parameter, slice parameter, slice data
+ VABufferID bufferIDs[5];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ VAEncryptionParameterBuffer encryptParam;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+
+ if (mIsEncryptData) {
+ memset(&encryptParam, 0, sizeof(VAEncryptionParameterBuffer));
+ encryptParam.pavpCounterMode = 4;
+ encryptParam.pavpEncryptionType = 2;
+ encryptParam.hostEncryptMode = 2;
+ encryptParam.pavpHasBeenEnabled = 1;
+ encryptParam.app_id = 0;
+ memcpy(encryptParam.pavpAesCounter, mEncParam.iv, 16);
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ (VABufferType)VAEncryptionParameterBufferType,
+ sizeof(VAEncryptionParameterBuffer),
+ 1,
+ &encryptParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateEncryptionParameterBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264Base),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+
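+    // Query which slice mode the driver supports: per the trace messages
+    // below, VA_DEC_SLICE_MODE_BASE corresponds to the AVC "short" format
+    // and VA_DEC_SLICE_MODE_NORMAL to the "long" format.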
+    vaStatus = vaGetConfigAttributes(mVADisplay, profile, VAEntrypointVLD, &attrib[1], 1);
+
+ if (attrib[1].value & VA_DEC_SLICE_MODE_BASE)
+ {
+ ITRACE("AVC short format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+ } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+ ITRACE("AVC long format ssed");
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ } else {
+ ETRACE("Unsupported Decode Slice Mode!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..2214075
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+#include "secvideoparser.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+
+private:
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ pavp_info_t mEncParam;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mSliceHeaderBuffer;
+ uint32_t mIsEncryptData;
+ uint32_t mFrameSize;
+};
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/baytrail/secvideoparser.h b/videodecoder/securevideo/baytrail/secvideoparser.h
new file mode 100644
index 0000000..f27580a
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/secvideoparser.h
@@ -0,0 +1,150 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef SEC_VIDEO_PARSER_H_
+#define SEC_VIDEO_PARSER_H_
+
+#include <stdint.h>
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+#define MAX_OP 16
+
+enum dec_ref_pic_marking_flags {
+ IDR_PIC_FLAG = 0,
+ NO_OUTPUT_OF_PRIOR_PICS_FLAG,
+ LONG_TERM_REFERENCE_FLAG,
+ ADAPTIVE_REF_PIC_MARKING_MODE_FLAG
+};
+
+typedef struct _dec_ref_pic_marking_t {
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t idr_pic_flag:1;
+ uint8_t no_output_of_prior_pics_flag:1;
+ uint8_t long_term_reference_flag:1;
+ uint8_t adaptive_ref_pic_marking_mode_flag:1;
+ };
+ };
+ struct {
+ uint8_t memory_management_control_operation;
+ union {
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ } op1;
+ struct {
+ uint8_t long_term_pic_num;
+ } op2;
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ uint8_t long_term_frame_idx;
+ } op3;
+ struct {
+ uint8_t max_long_term_frame_idx_plus1;
+ } op4;
+ struct {
+ uint8_t long_term_frame_idx;
+ } op6;
+ };
+ } op[MAX_OP];
+} dec_ref_pic_marking_t;
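+// The opN members mirror the payloads of memory_management_control_operation
+// values 1-6 from the H.264 spec (clause 7.4.3.3); there is no op5 entry
+// because MMCO 5 carries no arguments.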
+
+enum slice_header_flags {
+ FIELD_PIC_FLAG = 0,
+ BOTTOM_FIELD_FLAG
+};
+
+typedef struct _slice_header_t {
+ uint8_t nal_unit_type;
+ uint8_t pps_id;
+    uint8_t padding; // TODO: padding needed because the flags field in the secfw implementation is a big-endian uint16_t
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t field_pic_flag:1;
+ uint8_t bottom_field_flag:1;
+ };
+ };
+ uint32_t first_mb_in_slice;
+ uint32_t frame_num;
+ uint16_t idr_pic_id;
+ uint16_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt[2];
+ int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+typedef struct {
+ uint8_t type;
+ uint32_t offset;
+ uint8_t* data;
+ uint32_t length;
+ slice_header_t* slice_header;
+} nalu_info_t;
+
+typedef struct {
+ uint32_t iv[4];
+ uint32_t mode;
+ uint32_t app_id;
+} pavp_info_t;
+
+#define MAX_NUM_NALUS 20
+
+typedef struct {
+ uint8_t* data;
+ uint32_t length;
+ pavp_info_t* pavp;
+ dec_ref_pic_marking_t* dec_ref_pic_marking;
+ uint32_t num_nalus;
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+int parser_init(void);
+int parse_frame(uint8_t* frame, uint32_t frame_size, uint8_t* nalu_data, uint32_t* nalu_data_size);
+
+// DEBUG PRINTING
+void print_slice_header(slice_header_t* slice_header);
+void print_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void print_data_bytes(uint8_t* data, uint32_t count);
+void print_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+// BYTESWAPPING
+uint16_t byteswap_16(uint16_t word);
+uint32_t byteswap_32(uint32_t dword);
+void byteswap_slice_header(slice_header_t* slice_header);
+void byteswap_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void byteswap_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+#endif /* SEC_VIDEO_PARSER_H_ */
diff --git a/videodecoder/securevideo/baytrail/va_private.h b/videodecoder/securevideo/baytrail/va_private.h
new file mode 100644
index 0000000..34a4e1b
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/va_private.h
@@ -0,0 +1,64 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+#ifndef __VA_PRIVATE_H__
+#define __VA_PRIVATE_H__
+#include <va/va.h>
+#define ENABLE_PAVP_LINUX 1
+// Misc parameter for encoder
+#define VAEncMiscParameterTypePrivate -2
+// encryption parameters for PAVP
+#define VAEncryptionParameterBufferType -3
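+// These private types use negative values to stay clear of the standard
+// VABufferType/VAEncMiscParameterType enums; callers cast them back, e.g.
+// (VABufferType)VAEncryptionParameterBufferType in decodeSlice().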
+
+typedef struct _VAEncMiscParameterPrivate
+{
+ unsigned int target_usage; // Valid values 1-7 for AVC & MPEG2.
+ unsigned int reserved[7]; // Reserved for future use.
+} VAEncMiscParameterPrivate;
+
+/* VAEncryptionParameterBuffer */
+typedef struct _VAEncryptionParameterBuffer
+{
+ //Not used currently
+ unsigned int encryptionSupport;
+ //Not used currently
+ unsigned int hostEncryptMode;
+ // For IV, Counter input
+ unsigned int pavpAesCounter[2][4];
+ // not used currently
+ unsigned int pavpIndex;
+    // PAVP mode: CTR, CBC, DEDE, etc.
+ unsigned int pavpCounterMode;
+ unsigned int pavpEncryptionType;
+ // not used currently
+ unsigned int pavpInputSize[2];
+ // not used currently
+ unsigned int pavpBufferSize[2];
+ // not used currently
+ VABufferID pvap_buf;
+ // set to TRUE if protected media
+ unsigned int pavpHasBeenEnabled;
+ // not used currently
+ unsigned int IntermmediatedBufReq;
+ // not used currently
+ unsigned int uiCounterIncrement;
+    // AppId: PAVP session index from the application
+ unsigned int app_id;
+
+} VAEncryptionParameterBuffer;
+
+#endif
diff --git a/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..18c87b9
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,351 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "va_private.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mSliceHeaderBuffer(NULL) {
+ setParserType(VBP_H264SECURE);
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mNaluHeaderBuffer");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mSliceHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mSliceHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mSliceHeaderBuffer");
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+
+ if (mSliceHeaderBuffer) {
+ delete [] mSliceHeaderBuffer;
+ mSliceHeaderBuffer = NULL;
+ }
+
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sliceHeaderSize = 0;
+ int32_t sizeLeft = 0;
+ int32_t sliceIdx = 0;
+ uint8_t naluType;
+ frame_info_t* pFrameInfo;
+
+ mFrameSize = 0;
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ return VideoDecoderAVC::decode(buffer);
+ }
+
+ if (buffer->size != sizeof(frame_info_t)) {
+ ETRACE("Not enough data to read frame_info_t!");
+ return DECODE_INVALID_DATA;
+ }
+ pFrameInfo = (frame_info_t*) buffer->data;
+
+ mFrameSize = pFrameInfo->length;
+ VTRACE("mFrameSize = %d", mFrameSize);
+
+ memcpy(&mEncParam, pFrameInfo->pavp, sizeof(pavp_info_t));
+ for (int32_t i = 0; i < pFrameInfo->num_nalus; i++) {
+ naluType = pFrameInfo->nalus[i].type & NALU_TYPE_MASK;
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &sliceIdx,
+ sizeof(int32_t));
+ sliceHeaderSize += 4;
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->data,
+ sizeof(uint8_t*));
+ sliceHeaderSize += sizeof(uint8_t*);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].offset,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].length,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ pFrameInfo->nalus[i].slice_header,
+ sizeof(slice_header_t));
+ sliceHeaderSize += sizeof(slice_header_t);
+ if (pFrameInfo->nalus[i].type & 0x60) {
+ memcpy(mSliceHeaderBuffer+sliceHeaderSize, pFrameInfo->dec_ref_pic_marking, sizeof(dec_ref_pic_marking_t));
+ } else {
+ memset(mSliceHeaderBuffer+sliceHeaderSize, 0, sizeof(dec_ref_pic_marking_t));
+ }
+ sliceHeaderSize += sizeof(dec_ref_pic_marking_t);
+ sliceIdx++;
+ } else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ pFrameInfo->nalus[i].data,
+ pFrameInfo->nalus[i].length);
+ sizeAccumulated += pFrameInfo->nalus[i].length;
+ } else {
+ WTRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+
+ vbp_data_h264 *data = NULL;
+
+ if (sizeAccumulated > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ mNaluHeaderBuffer,
+ sizeAccumulated,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ }
+
+ if (sliceHeaderSize > 0) {
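+        // 0xFFFFFFFF sentinel terminating the packed slice-header records
+        // (same convention as the baytrail variant).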
+ memset(mSliceHeaderBuffer + sliceHeaderSize, 0xFF, 4);
+ sliceHeaderSize += 4;
+ status = VideoDecoderBase::updateBuffer(
+ mSliceHeaderBuffer,
+ sliceHeaderSize,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::updateBuffer");
+ }
+
+ if (data == NULL) {
+ ETRACE("Invalid data returned by parser!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+ status = decodeFrame(buffer, data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+    // maximum 5 buffers to render a slice: picture parameter, IQMatrix, encryption parameter, slice parameter, slice data
+ VABufferID bufferIDs[5];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ VAEncryptionParameterBuffer encryptParam;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+
+ if (mIsEncryptData) {
+ memset(&encryptParam, 0, sizeof(VAEncryptionParameterBuffer));
+ encryptParam.pavpCounterMode = 4;
+ encryptParam.pavpEncryptionType = 2;
+ encryptParam.hostEncryptMode = 2;
+ encryptParam.pavpHasBeenEnabled = 1;
+ encryptParam.app_id = 0;
+ memcpy(encryptParam.pavpAesCounter, mEncParam.iv, 16);
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ (VABufferType)VAEncryptionParameterBufferType,
+ sizeof(VAEncryptionParameterBuffer),
+ 1,
+ &encryptParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateEncryptionParameterBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264Base),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+
+    vaStatus = vaGetConfigAttributes(mVADisplay, profile, VAEntrypointVLD, &attrib[1], 1);
+
+ if (attrib[1].value & VA_DEC_SLICE_MODE_BASE)
+ {
+ ITRACE("AVC short format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+ } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+ ITRACE("AVC long format ssed");
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ } else {
+ ETRACE("Unsupported Decode Slice Mode!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..2214075
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+#include "secvideoparser.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+
+private:
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ pavp_info_t mEncParam;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mSliceHeaderBuffer;
+ uint32_t mIsEncryptData;
+ uint32_t mFrameSize;
+};
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/cherrytrail/secvideoparser.h b/videodecoder/securevideo/cherrytrail/secvideoparser.h
new file mode 100644
index 0000000..f27580a
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/secvideoparser.h
@@ -0,0 +1,150 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef SEC_VIDEO_PARSER_H_
+#define SEC_VIDEO_PARSER_H_
+
+#include <stdint.h>
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+#define MAX_OP 16
+
+enum dec_ref_pic_marking_flags {
+ IDR_PIC_FLAG = 0,
+ NO_OUTPUT_OF_PRIOR_PICS_FLAG,
+ LONG_TERM_REFERENCE_FLAG,
+ ADAPTIVE_REF_PIC_MARKING_MODE_FLAG
+};
+
+typedef struct _dec_ref_pic_marking_t {
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t idr_pic_flag:1;
+ uint8_t no_output_of_prior_pics_flag:1;
+ uint8_t long_term_reference_flag:1;
+ uint8_t adaptive_ref_pic_marking_mode_flag:1;
+ };
+ };
+ struct {
+ uint8_t memory_management_control_operation;
+ union {
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ } op1;
+ struct {
+ uint8_t long_term_pic_num;
+ } op2;
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ uint8_t long_term_frame_idx;
+ } op3;
+ struct {
+ uint8_t max_long_term_frame_idx_plus1;
+ } op4;
+ struct {
+ uint8_t long_term_frame_idx;
+ } op6;
+ };
+ } op[MAX_OP];
+} dec_ref_pic_marking_t;
+
+enum slice_header_flags {
+ FIELD_PIC_FLAG = 0,
+ BOTTOM_FIELD_FLAG
+};
+
+typedef struct _slice_header_t {
+ uint8_t nal_unit_type;
+ uint8_t pps_id;
+    uint8_t padding; // TODO: padding needed because the flags field in the secfw implementation is a big-endian uint16_t
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t field_pic_flag:1;
+ uint8_t bottom_field_flag:1;
+ };
+ };
+ uint32_t first_mb_in_slice;
+ uint32_t frame_num;
+ uint16_t idr_pic_id;
+ uint16_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt[2];
+ int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+typedef struct {
+ uint8_t type;
+ uint32_t offset;
+ uint8_t* data;
+ uint32_t length;
+ slice_header_t* slice_header;
+} nalu_info_t;
+
+typedef struct {
+ uint32_t iv[4];
+ uint32_t mode;
+ uint32_t app_id;
+} pavp_info_t;
+
+#define MAX_NUM_NALUS 20
+
+typedef struct {
+ uint8_t* data;
+ uint32_t length;
+ pavp_info_t* pavp;
+ dec_ref_pic_marking_t* dec_ref_pic_marking;
+ uint32_t num_nalus;
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+int parser_init(void);
+int parse_frame(uint8_t* frame, uint32_t frame_size, uint8_t* nalu_data, uint32_t* nalu_data_size);
+
+// DEBUG PRINTING
+void print_slice_header(slice_header_t* slice_header);
+void print_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void print_data_bytes(uint8_t* data, uint32_t count);
+void print_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+// BYTESWAPPING
+uint16_t byteswap_16(uint16_t word);
+uint32_t byteswap_32(uint32_t dword);
+void byteswap_slice_header(slice_header_t* slice_header);
+void byteswap_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void byteswap_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+#endif /* SEC_VIDEO_PARSER_H_ */
diff --git a/videodecoder/securevideo/cherrytrail/va_private.h b/videodecoder/securevideo/cherrytrail/va_private.h
new file mode 100644
index 0000000..e53e31d
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/va_private.h
@@ -0,0 +1,63 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef __VA_PRIVATE_H__
+#define __VA_PRIVATE_H__
+#include <va/va.h>
+#define ENABLE_PAVP_LINUX 1
+// Misc parameter for encoder
+#define VAEncMiscParameterTypePrivate -2
+// encryption parameters for PAVP
+#define VAEncryptionParameterBufferType -3
+
+typedef struct _VAEncMiscParameterPrivate
+{
+ unsigned int target_usage; // Valid values 1-7 for AVC & MPEG2.
+ unsigned int reserved[7]; // Reserved for future use.
+} VAEncMiscParameterPrivate;
+
+/* VAEncryptionParameterBuffer */
+typedef struct _VAEncryptionParameterBuffer
+{
+ //Not used currently
+ unsigned int encryptionSupport;
+ //Not used currently
+ unsigned int hostEncryptMode;
+ // For IV, Counter input
+ unsigned int pavpAesCounter[2][4];
+ // not used currently
+ unsigned int pavpIndex;
+    // PAVP mode: CTR, CBC, DEDE, etc.
+ unsigned int pavpCounterMode;
+ unsigned int pavpEncryptionType;
+ // not used currently
+ unsigned int pavpInputSize[2];
+ // not used currently
+ unsigned int pavpBufferSize[2];
+ // not used currently
+ VABufferID pvap_buf;
+ // set to TRUE if protected media
+ unsigned int pavpHasBeenEnabled;
+ // not used currently
+ unsigned int IntermmediatedBufReq;
+ // not used currently
+ unsigned int uiCounterIncrement;
+    // AppId: PAVP session index from the application
+ unsigned int app_id;
+
+} VAEncryptionParameterBuffer;
+
+#endif
diff --git a/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..d9da2ac
--- /dev/null
+++ b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,507 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+
+#define STARTCODE_00 0x00
+#define STARTCODE_01 0x01
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+
+
+// masks for little-endian loads, selecting the second and fourth bytes in the byte stream
+#define STARTCODE_MASK0 0xFF000000 //0x00FF0000
+#define STARTCODE_MASK1 0x0000FF00 //0x000000FF
+
+
+typedef enum {
+ NAL_UNIT_TYPE_unspecified0 = 0,
+ NAL_UNIT_TYPE_SLICE,
+ NAL_UNIT_TYPE_DPA,
+ NAL_UNIT_TYPE_DPB,
+ NAL_UNIT_TYPE_DPC,
+ NAL_UNIT_TYPE_IDR,
+ NAL_UNIT_TYPE_SEI,
+ NAL_UNIT_TYPE_SPS,
+ NAL_UNIT_TYPE_PPS,
+ NAL_UNIT_TYPE_Acc_unit_delimiter,
+ NAL_UNIT_TYPE_EOSeq,
+ NAL_UNIT_TYPE_EOstream,
+ NAL_UNIT_TYPE_filler_data,
+ NAL_UNIT_TYPE_SPS_extension,
+ NAL_UNIT_TYPE_Reserved14,
+ NAL_UNIT_TYPE_Reserved15,
+ NAL_UNIT_TYPE_Reserved16,
+ NAL_UNIT_TYPE_Reserved17,
+ NAL_UNIT_TYPE_Reserved18,
+ NAL_UNIT_TYPE_ACP,
+ NAL_UNIT_TYPE_Reserved20,
+ NAL_UNIT_TYPE_Reserved21,
+ NAL_UNIT_TYPE_Reserved22,
+ NAL_UNIT_TYPE_Reserved23,
+ NAL_UNIT_TYPE_unspecified24,
+} NAL_UNIT_TYPE;
+
+#ifndef min
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mInputBuffer(NULL) {
+
+ memset(&mMetadata, 0, sizeof(NaluMetadata));
+ memset(&mByteStream, 0, sizeof(NaluByteStream));
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
+ mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mMetadata.naluInfo == NULL ||
+ mByteStream.byteStream == NULL ||
+ mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory.");
+ // TODO: release all allocated memory
+ return DECODE_MEMORY_FAIL;
+ }
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mMetadata.naluInfo) {
+ delete [] mMetadata.naluInfo;
+ mMetadata.naluInfo = NULL;
+ }
+
+ if (mByteStream.byteStream) {
+ delete [] mByteStream.byteStream;
+ mByteStream.byteStream = NULL;
+ }
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sizeLeft = 0;
+ uint8_t *pByteStream = NULL;
+ NaluInfo *pNaluInfo = mMetadata.naluInfo;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ pByteStream = buffer->data;
+ sizeLeft = buffer->size;
+ mInputBuffer = NULL;
+ } else {
+ status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream);
+ CHECK_STATUS("parseAnnexBStream");
+ pByteStream = mByteStream.byteStream;
+ sizeLeft = mByteStream.streamPos;
+ mInputBuffer = buffer->data;
+ }
+ if (sizeLeft < 4) {
+ ETRACE("Not enough data to read number of NALU.");
+ return DECODE_INVALID_DATA;
+ }
+
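+    // Secure-path metadata layout (see copyNaluHeader/parseAnnexBStream below):
+    // [naluCount:4] then, per NALU, [naluOffset:4][naluLen:4][naluHeaderLen:4]
+    // followed by naluHeaderLen bytes of header data.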
+ // read number of NALU
+ memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+ sizeLeft -= 4;
+
+ if (mMetadata.naluNumber == 0) {
+ WTRACE("Number of NALU is ZERO!");
+ return DECODE_SUCCESS;
+ }
+
+ for (int32_t i = 0; i < mMetadata.naluNumber; i++) {
+ if (sizeLeft < 12) {
+ ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
+ return DECODE_INVALID_DATA;
+ }
+ sizeLeft -= 12;
+ // read NALU offset
+ memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU size
+ memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU header length
+ memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ if (sizeLeft < pNaluInfo->naluHeaderLen) {
+ ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
+ return DECODE_INVALID_DATA;
+ }
+
+ sizeLeft -= pNaluInfo->naluHeaderLen;
+
+ if (pNaluInfo->naluHeaderLen) {
+ // copy start code prefix to buffer
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+
+ // copy NALU header
+ memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
+ pByteStream += pNaluInfo->naluHeaderLen;
+
+ sizeAccumulated += pNaluInfo->naluHeaderLen;
+ } else {
+ WTRACE("header len is zero for NALU %d", i);
+ }
+
+ // for next NALU
+ pNaluInfo++;
+ }
+
+ buffer->data = mNaluHeaderBuffer;
+ buffer->size = sizeAccumulated;
+
+ return VideoDecoderAVC::decode(buffer);
+}
+
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Check there is no reference frame loss before decoding a frame
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ //We have to provide a hacked DPB rather than complete DPB for libva as workaround
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ // find which naluinfo is correlated to current slice
+ int naluIndex = 0;
+ uint32_t accumulatedHeaderLen = 0;
+ uint32_t headerLen = 0;
+ for (; naluIndex < mMetadata.naluNumber; naluIndex++) {
+ headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
+ if (headerLen == 0) {
+ WTRACE("lenght of current NAL unit is 0.");
+ continue;
+ }
+ accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
+ if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
+ break;
+ }
+ accumulatedHeaderLen += headerLen;
+ }
+
+ if (sliceData->slice_offset != accumulatedHeaderLen) {
+ WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
+ }
+
+ sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
+ sliceData->slice_size = sliceParam->slice_data_size;
+
+ // no need to update:
+ // sliceParam->slice_data_offset - 0 always
+ // sliceParam->slice_data_bit_offset - relative to sliceData->slice_offset
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+    // sliceData->slice_offset minus accumulatedHeaderLen is the absolute offset to the start code of the current NAL unit;
+    // naluOffset points to the first byte of the NAL unit
+ uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
+ if (mInputBuffer != NULL) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ mInputBuffer + sliceOffset,
+ &bufferIDs[bufferIDCount]);
+ } else {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAProtectedSliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ (uint8_t*)sliceOffset, // IMR offset
+ &bufferIDs[bufferIDCount]);
+ }
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+
+// Parse byte string pattern "0x000001" (3 bytes) in the current buffer.
+// Returns offset of position following the pattern in the buffer if pattern is found or -1 if not found.
+int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
+ uint8_t *ptr;
+ uint32_t left = 0, data = 0, phase = 0;
+ uint8_t mask1 = 0, mask2 = 0;
+
+    /* Meaning of phase:
+        0: initial status, no "0x000001" bytes found so far;
+        1: one "0x00" byte is found;
+        2: two or more consecutive "0x00" bytes are found;
+        3: the "0x000001" pattern is found;
+        4: there is at least one more byte after "0x000001";
+    */
+
+ left = length;
+ ptr = (uint8_t *) (stream + offset);
+ phase = 0;
+
+ // parse until there is more data and start code not found
+ while ((left > 0) && (phase < 3)) {
+        // Check if the address is 32-bit aligned and phase == 0; if that's the case we can check 4 bytes at a time instead of one.
+ if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
+ while (left > 3) {
+ data = *((uint32_t *)ptr);
+ mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
+ mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
+                // If the second and fourth bytes are not zeros then we cannot have a start code here,
+ // as we need two consecutive zero bytes for a start code pattern.
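+                // (little-endian load: MASK0 tests ptr[3], MASK1 tests ptr[1];
+                // any pair of consecutive zero bytes beginning in this word
+                // would include one of them)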
+ if (mask1 && mask2) {
+ // skip 4 bytes and start over
+ ptr += 4;
+ left -=4;
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
+ if (left > 0) {
+ if (*ptr == STARTCODE_00) {
+ phase++;
+ if (phase > 2) {
+ // more than 2 consecutive '0x00' bytes is found
+ phase = 2;
+ }
+ } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
+ // start code is found
+ phase = 3;
+ } else {
+ // reset lookup
+ phase = 0;
+ }
+ ptr++;
+ left--;
+ }
+ }
+
+ if ((left > 0) && (phase == 3)) {
+ phase = 4;
+ // return offset of position following the pattern in the buffer which matches "0x000001" byte string
+ return (int32_t)(ptr - stream);
+ }
+ return -1;
+}
+
+
+Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) {
+ uint8_t naluType;
+ int32_t naluHeaderLen;
+
+ naluType = *(uint8_t *)(stream + naluStream->naluOffset);
+ naluType &= NALU_TYPE_MASK;
+ // first update nalu header length based on nalu type
+ if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
+ // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
+ naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
+ } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
+ //sps, pps, sei, etc, return the entire NAL unit in clear
+ naluHeaderLen = naluStream->naluLen;
+ } else {
+ return DECODE_FRAME_DROPPED;
+ }
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ if (naluHeaderLen) {
+ memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
+ naluStream->streamPos += naluHeaderLen;
+ }
+ return DECODE_SUCCESS;
+}
+
+
+// parse a start-code prefixed stream, also known as an Annex B byte stream, commonly used in AVI, ES, and MPEG-2 TS containers
+Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
+ int32_t naluOffset, offset, left;
+ NaluInfo *info;
+ uint32_t ret = DECODE_SUCCESS;
+
+ naluOffset = 0;
+ offset = 0;
+ left = length;
+
+ // leave 4 bytes to copy nalu count
+ naluStream->streamPos = 4;
+ naluStream->naluCount = 0;
+ memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
+
+ for (; ;) {
+ naluOffset = findNalUnitOffset(stream, offset, left);
+ if (naluOffset == -1) {
+ break;
+ }
+
+ if (naluStream->naluCount == 0) {
+ naluStream->naluOffset = naluOffset;
+ } else {
+ naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN;
+ ret = copyNaluHeader(stream, naluStream);
+ if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
+ LOGW("copyNaluHeader returned %d", ret);
+ return ret;
+ }
+ // starting position for next NALU
+ naluStream->naluOffset = naluOffset;
+ }
+
+ if (ret == DECODE_SUCCESS) {
+ naluStream->naluCount++;
+ }
+
+ // update next lookup position and length
+ offset = naluOffset + 1; // skip one byte of NAL unit type
+ left = length - offset;
+ }
+
+ if (naluStream->naluCount > 0) {
+ naluStream->naluLen = length - naluStream->naluOffset;
+ memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
+ // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
+ copyNaluHeader(stream, naluStream);
+ return DECODE_SUCCESS;
+ }
+
+ LOGW("number of valid NALU is 0!");
+ return DECODE_SUCCESS;
+}
+
diff --git a/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..ee16073
--- /dev/null
+++ b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,75 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+private:
+ enum {
+ MAX_SLICE_HEADER_SIZE = 30,
+ MAX_NALU_HEADER_BUFFER = 8192,
+ MAX_NALU_NUMBER = 400, // > 4096/12
+ };
+
+ // Information of Network Abstraction Layer Unit
+ struct NaluInfo {
+ int32_t naluOffset; // offset of NAL unit in the firewalled buffer
+ int32_t naluLen; // length of NAL unit
+ int32_t naluHeaderLen; // length of NAL unit header
+ };
+
+ struct NaluMetadata {
+ NaluInfo *naluInfo;
+ int32_t naluNumber; // number of NAL units
+ };
+
+ struct NaluByteStream {
+ int32_t naluOffset;
+ int32_t naluLen;
+ int32_t streamPos;
+        uint8_t *byteStream; // 4 bytes of naluCount, then per NALU: 4 bytes of naluOffset, 4 bytes of naluLen, 4 bytes of naluHeaderLen, followed by naluHeaderLen bytes of header data
+ int32_t naluCount;
+ };
+
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+ int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
+ Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
+ Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
+
+private:
+ NaluMetadata mMetadata;
+ NaluByteStream mByteStream;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mInputBuffer;
+};
+
+
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
new file mode 100755
index 0000000..649402d
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,858 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <va/va.h>
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderTrace.h"
+#include "vbp_loader.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoFrameInfo.h"
+
+#define MAX_SLICEHEADER_BUFFER_SIZE 4096
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+    : VideoDecoderAVC(mimeType) {
+ mFrameSize = 0;
+ mFrameData = NULL;
+ mIsEncryptData = 0;
+ mClearData = NULL;
+ mCachedHeader = NULL;
+ setParserType(VBP_H264SECURE);
+ mFrameIdx = 0;
+ mModularMode = 0;
+ mSliceNum = 0;
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::start");
+
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mClearData = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mClearData == NULL) {
+ ETRACE("Failed to allocate memory for mClearData");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mCachedHeader= new uint8_t [MAX_SLICEHEADER_BUFFER_SIZE];
+ if (mCachedHeader == NULL) {
+ ETRACE("Failed to allocate memory for mCachedHeader");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VTRACE("VideoDecoderAVCSecure::stop");
+ VideoDecoderAVC::stop();
+
+ if (mClearData) {
+ delete [] mClearData;
+ mClearData = NULL;
+ }
+
+ if (mCachedHeader) {
+ delete [] mCachedHeader;
+ mCachedHeader = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ VTRACE("processModularInputBuffer +++");
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+
+ int32_t nalu_num = 0;
+ uint8_t nalu_type = 0;
+ int32_t nalu_offset = 0;
+ uint32_t nalu_size = 0;
+ uint8_t naluType = 0;
+ uint8_t *nalu_data = NULL;
+ uint32_t sliceidx = 0;
+
+ frame_info_t *pFrameInfo = NULL;
+ mSliceNum = 0;
+ memset(&mSliceInfo, 0, sizeof(mSliceInfo));
+ mIsEncryptData = 0;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ pFrameInfo = (frame_info_t *) buffer->data;
+ if (pFrameInfo == NULL) {
+ ETRACE("Invalid parameter: pFrameInfo is NULL!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mFrameData = pFrameInfo->data;
+ mFrameSize = pFrameInfo->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+
+ nalu_num = pFrameInfo->num_nalus;
+ VTRACE("nalu_num = %d", nalu_num);
+
+ if (nalu_num <= 0 || nalu_num >= MAX_NUM_NALUS) {
+ ETRACE("Invalid parameter: nalu_num = %d", nalu_num);
+ return DECODE_MEMORY_FAIL;
+ }
+
+ for (int32_t i = 0; i < nalu_num; i++) {
+
+ nalu_size = pFrameInfo->nalus[i].length;
+ nalu_type = pFrameInfo->nalus[i].type;
+ nalu_offset = pFrameInfo->nalus[i].offset;
+ nalu_data = pFrameInfo->nalus[i].data;
+ naluType = nalu_type & NALU_TYPE_MASK;
+
+ VTRACE("nalu_type = 0x%x, nalu_size = %d, nalu_offset = 0x%x", nalu_type, nalu_size, nalu_offset);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+
+ mIsEncryptData = 1;
+ VTRACE("slice idx = %d", sliceidx);
+ mSliceInfo[sliceidx].sliceHeaderByte = nalu_type;
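+            // The math below keeps the slice data 16-byte aligned: round the
+            // NALU offset down to a 16-byte boundary, keep the residual byte
+            // offset within that block, and round the padded size up to the
+            // next 16-byte multiple (sliceLength stays the unpadded size).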
+ mSliceInfo[sliceidx].sliceStartOffset = (nalu_offset >> 4) << 4;
+ mSliceInfo[sliceidx].sliceByteOffset = nalu_offset - mSliceInfo[sliceidx].sliceStartOffset;
+ mSliceInfo[sliceidx].sliceLength = mSliceInfo[sliceidx].sliceByteOffset + nalu_size;
+ mSliceInfo[sliceidx].sliceSize = (mSliceInfo[sliceidx].sliceByteOffset + nalu_size + 0xF) & ~0xF;
+ VTRACE("sliceHeaderByte = 0x%x", mSliceInfo[sliceidx].sliceHeaderByte);
+ VTRACE("sliceStartOffset = %d", mSliceInfo[sliceidx].sliceStartOffset);
+ VTRACE("sliceByteOffset = %d", mSliceInfo[sliceidx].sliceByteOffset);
+ VTRACE("sliceSize = %d", mSliceInfo[sliceidx].sliceSize);
+ VTRACE("sliceLength = %d", mSliceInfo[sliceidx].sliceLength);
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = mSliceInfo[sliceidx].sliceSize > 64 ? 64 : mSliceInfo[sliceidx].sliceSize ;
+ testdata = (uint8_t *)(mFrameData);
+ for (int i = 0; i < testsize; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ sliceidx++;
+
+ } else if (naluType == h264_NAL_UNIT_TYPE_SPS || naluType == h264_NAL_UNIT_TYPE_PPS) {
+ if (nalu_data == NULL) {
+ ETRACE("Invalid parameter: nalu_data = NULL for naluType 0x%x", naluType);
+ return DECODE_MEMORY_FAIL;
+ }
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ITRACE("Nalu type = 0x%x is skipped", naluType);
+ continue;
+ }
+ }
+ clear_data = mClearData;
+ mSliceNum = sliceidx;
+
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+ uint8_t naluType = 0;
+
+ int32_t num_nalus;
+ int32_t nalu_offset;
+ int32_t offset;
+ uint8_t *data_src;
+ uint8_t *nalu_data;
+ uint32_t nalu_size;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+
+ mFrameData = buffer->data;
+ mFrameSize = buffer->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+ num_nalus = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t));
+ VTRACE("num_nalus = %d", num_nalus);
+ offset = 4;
+ for (int32_t i = 0; i < num_nalus; i++) {
+ VTRACE("%d nalu, offset = %d", i, offset);
+ data_src = buffer->data + buffer->size + sizeof(uint32_t) + offset;
+ nalu_size = *(uint32_t *)(data_src + 2 * sizeof(uint32_t));
+ nalu_size = (nalu_size + 0x03) & (~0x03);
+
+            nalu_data = data_src + 3 * sizeof(uint32_t);
+            naluType = nalu_data[0] & NALU_TYPE_MASK;
+            offset += nalu_size + 3 * sizeof(uint32_t);
+ VTRACE("naluType = 0x%x", naluType);
+ VTRACE("nalu_size = %d, nalu_data = %p", nalu_size, nalu_data);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ ETRACE("Slice NALU received!");
+ return DECODE_INVALID_DATA;
+ }
+
+ else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mClearData + clear_data_size,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ clear_data_size += STARTCODE_PREFIX_LEN;
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ETRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+ clear_data = mClearData;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::decode");
+ Decode_Status status;
+ vbp_data_h264 *data = NULL;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+    testsize = buffer->size > 16 ? 16 : buffer->size;
+    testdata = (uint8_t *)(buffer->data);
+    for (uint32_t i = 0; i < testsize; i++) {
+        VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+    }
+#endif
+ if (buffer->flag & IS_SUBSAMPLE_ENCRYPTION) {
+ mModularMode = 1;
+ }
+
+ if (mModularMode) {
+ status = processModularInputBuffer(buffer,&data);
+ CHECK_STATUS("processModularInputBuffer");
+ }
+ else {
+ status = processClassicInputBuffer(buffer,&data);
+ CHECK_STATUS("processClassicInputBuffer");
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+
+ status = decodeFrame(buffer, data);
+
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::decodeFrame");
+ Decode_Status status;
+ VTRACE("data->has_sps = %d, data->has_pps = %d", data->has_sps, data->has_pps);
+
+#if 0
+    // Don't remove the following code; it can be enabled for debugging the DPB.
+ for (unsigned int i = 0; i < data->num_pictures; i++) {
+ VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+ VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d, reference = %d",
+ i,
+ buffer->timeStamp/1E6,
+ pic.TopFieldOrderCnt,
+ pic.BottomFieldOrderCnt,
+ pic.flags,
+ (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+ }
+#endif
+
+ if (data->new_sps || data->new_pps) {
+ status = handleNewSequence(data);
+ CHECK_STATUS("handleNewSequence");
+ }
+
+ if (mModularMode && (!mIsEncryptData)) {
+ if (data->pic_data[0].num_slices == 0) {
+ ITRACE("No slice available for decoding.");
+ status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+ mSizeChanged = false;
+ return status;
+ }
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+
+ // start decoding a new frame
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+    if (mModularMode) {
+        status = parseModularSliceHeader(buffer, data);
+    } else {
+        status = parseClassicSliceHeader(buffer, data);
+    }
+
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ return status;
+ }
+
+ status = beginDecodingFrame(data);
+ CHECK_STATUS("beginDecodingFrame");
+
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ if (isNewFrame(data, lastPTS == mCurrentPTS) == 0) {
+ ETRACE("Can't handle interlaced frames yet");
+ return DECODE_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::beginDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::beginDecodingFrame");
+ Decode_Status status;
+ VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+ if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ mAcquiredBuffer->referenceFrame = true;
+ } else {
+ mAcquiredBuffer->referenceFrame = false;
+ }
+
+ if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ mSizeChanged = false;
+ }
+
+ status = continueDecodingFrame(data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::continueDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::continueDecodingFrame");
+ Decode_Status status;
+ vbp_picture_data_h264 *picData = data->pic_data;
+
+ if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+ VTRACE("data->num_pictures = %d", data->num_pictures);
+ for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+ if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ if (picIndex > 0 &&
+ (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+ ETRACE("Packed frame is not supported yet!");
+ return DECODE_FAIL;
+ }
+ VTRACE("picData->num_slices = %d", picData->num_slices);
+ for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+ status = decodeSlice(data, picIndex, sliceIndex);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ // remove current frame from DPB as it can't be decoded.
+ removeReferenceFromDPB(picData->pic_parms);
+ return status;
+ }
+ }
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+ VABufferID mSlicebufferID;
+
+ if (mFrameSize <= 0) {
+ return DECODE_SUCCESS;
+ }
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ void *sliceheaderbuf;
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ mFrameData,
+ &mSlicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mFrameSize;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+
+#if 0
+
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+    status = updateSliceParameter(data, sliceheaderbuf);
+    CHECK_STATUS("updateSliceParameter");
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+ VABufferID mSlicebufferID;
+ int32_t sliceIdx;
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+    if (mFrameSize <= 0 || mSliceNum <= 0) {
+ return DECODE_SUCCESS;
+ }
+ void *sliceheaderbuf;
+ memset(mCachedHeader, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+ int32_t offset = 0;
+ int32_t size = 0;
+
+ for (sliceIdx = 0; sliceIdx < mSliceNum; sliceIdx++) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mSliceInfo[sliceIdx].sliceSize, //size
+ 1, //num_elements
+ mFrameData + mSliceInfo[sliceIdx].sliceStartOffset,
+ &mSlicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mSliceInfo[sliceIdx].sliceLength;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+ data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte;
+ data->pic_parse_buffer->slice_offset = mSliceInfo[sliceIdx].sliceByteOffset;
+
+#if 0
+ VTRACE("data->pic_parse_buffer->slice_offset = 0x%x", data->pic_parse_buffer->slice_offset);
+ VTRACE("pic_parse_buffer->nalu_header.value = %x", data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte);
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
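+        // The cached span is computed from the parsed header group itself:
+        // the 32-bit word at byte offset 4 is taken as the payload size, and
+        // 4 bytes are added to cover that word (layout assumed from this code).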
+ size = *(uint32 *)((uint8 *)sliceheaderbuf + 4) + 4;
+ VTRACE("slice header size = 0x%x, offset = 0x%x", size, offset);
+ if (offset + size <= MAX_SLICEHEADER_BUFFER_SIZE - 4) {
+ memcpy(mCachedHeader+offset, sliceheaderbuf, size);
+ offset += size;
+ } else {
+ WTRACE("Cached slice header is not big enough!");
+ }
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+ }
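+    // Terminate the accumulated header group with a 4-byte 0xFF marker; the
+    // updateBuffer/VBP parser is assumed to treat this as an end sentinel.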
+ memset(mCachedHeader + offset, 0xFF, 4);
+    status = updateSliceParameter(data, mCachedHeader);
+    CHECK_STATUS("updateSliceParameter");
+ return DECODE_SUCCESS;
+}
+
+
+Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
+ VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
+ Decode_Status status;
+ status = VideoDecoderBase::updateBuffer(
+ (uint8_t *)sliceheaderbuf,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ (void**)&data);
+ CHECK_STATUS("updateBuffer");
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 3 buffers to render a slice: picture parameter, IQMatrix, slice parameter
+ VABufferID bufferIDs[3];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ uint32_t slice_data_size = 0;
+ uint8_t* slice_data_addr = NULL;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ }
+ VTRACE("Current frameidx = %d", mFrameIdx++);
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+        // We have to provide a hacked DPB rather than the complete DPB to libva as a workaround.
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ if (mModularMode) {
+ if (mIsEncryptData) {
+ sliceParam->slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_addr = mFrameData + mSliceInfo[sliceIndex].sliceStartOffset;
+ } else {
+ slice_data_size = sliceData->slice_size;
+ slice_data_addr = sliceData->buffer_addr + sliceData->slice_offset;
+ }
+ } else {
+ sliceParam->slice_data_size = mFrameSize;
+ slice_data_size = mFrameSize;
+ slice_data_addr = mFrameData;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ VABufferID slicebufferID;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ slice_data_size, //size
+ 1, //num_elements
+ slice_data_addr,
+ &slicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &slicebufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ if (mModularMode) {
+ attrib[1].value = VA_DEC_SLICE_MODE_SUBSAMPLE;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
new file mode 100755
index 0000000..d4a9f15
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
@@ -0,0 +1,69 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H
+#define VIDEO_DECODER_AVC_SECURE_H
+
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderDefs.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+protected:
+ virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
+ Decode_Status parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ Decode_Status parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+
+ Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ Decode_Status processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ Decode_Status processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ int32_t mIsEncryptData;
+ int32_t mFrameSize;
+ uint8_t* mFrameData;
+ uint8_t* mClearData;
+ uint8_t* mCachedHeader;
+ int32_t mFrameIdx;
+ int32_t mModularMode;
+
+ enum {
+ MAX_SLICE_HEADER_NUM = 256,
+ };
+ int32_t mSliceNum;
+ // Information of Slices in the Modular DRM Mode
+ struct SliceInfo {
+ uint8_t sliceHeaderByte; // first byte of the slice header
+ uint32_t sliceStartOffset; // offset of Slice unit in the firewalled buffer
+ uint32_t sliceByteOffset; // extra offset from the blockAligned slice offset
+ uint32_t sliceSize; // block aligned length of slice unit
+ uint32_t sliceLength; // actual size of the slice
+ };
+
+ SliceInfo mSliceInfo[MAX_SLICE_HEADER_NUM];
+};
+
+#endif
diff --git a/videodecoder/securevideo/merrifield/VideoFrameInfo.h b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
new file mode 100755
index 0000000..485b0da
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_FRAME_INFO_H_
+#define VIDEO_FRAME_INFO_H_
+
+#define MAX_NUM_NALUS 16
+
+typedef struct {
+ uint8_t type; // nalu type + nal_ref_idc
+    uint32_t offset; // offset of this NALU relative to the encrypted data pointer
+ uint8_t* data; // if the nalu is encrypted, this field is useless; if current NALU is SPS/PPS, data is the pointer to clear SPS/PPS data
+ uint32_t length; // nalu length
+} nalu_info_t;
+
+typedef struct {
+ uint8_t* data; // pointer to the encrypted data
+ uint32_t size; // encrypted data size
+ uint32_t num_nalus; // number of NALU
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+#endif
diff --git a/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..38039e2
--- /dev/null
+++ b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,510 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+
+#define STARTCODE_00 0x00
+#define STARTCODE_01 0x01
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+
+
+// Masks for little endian, to mask the second and fourth bytes in the byte stream.
+#define STARTCODE_MASK0 0xFF000000 //0x00FF0000
+#define STARTCODE_MASK1 0x0000FF00 //0x000000FF
+
+
+typedef enum {
+ NAL_UNIT_TYPE_unspecified0 = 0,
+ NAL_UNIT_TYPE_SLICE,
+ NAL_UNIT_TYPE_DPA,
+ NAL_UNIT_TYPE_DPB,
+ NAL_UNIT_TYPE_DPC,
+ NAL_UNIT_TYPE_IDR,
+ NAL_UNIT_TYPE_SEI,
+ NAL_UNIT_TYPE_SPS,
+ NAL_UNIT_TYPE_PPS,
+ NAL_UNIT_TYPE_Acc_unit_delimiter,
+ NAL_UNIT_TYPE_EOSeq,
+ NAL_UNIT_TYPE_EOstream,
+ NAL_UNIT_TYPE_filler_data,
+ NAL_UNIT_TYPE_SPS_extension,
+ NAL_UNIT_TYPE_Reserved14,
+ NAL_UNIT_TYPE_Reserved15,
+ NAL_UNIT_TYPE_Reserved16,
+ NAL_UNIT_TYPE_Reserved17,
+ NAL_UNIT_TYPE_Reserved18,
+ NAL_UNIT_TYPE_ACP,
+ NAL_UNIT_TYPE_Reserved20,
+ NAL_UNIT_TYPE_Reserved21,
+ NAL_UNIT_TYPE_Reserved22,
+ NAL_UNIT_TYPE_Reserved23,
+ NAL_UNIT_TYPE_unspecified24,
+} NAL_UNIT_TYPE;
+
+#ifndef min
+#define min(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mInputBuffer(NULL) {
+
+ memset(&mMetadata, 0, sizeof(NaluMetadata));
+ memset(&mByteStream, 0, sizeof(NaluByteStream));
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
+ mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mMetadata.naluInfo == NULL ||
+ mByteStream.byteStream == NULL ||
+ mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory.");
+ // TODO: release all allocated memory
+ return DECODE_MEMORY_FAIL;
+ }
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mMetadata.naluInfo) {
+ delete [] mMetadata.naluInfo;
+ mMetadata.naluInfo = NULL;
+ }
+
+ if (mByteStream.byteStream) {
+ delete [] mByteStream.byteStream;
+ mByteStream.byteStream = NULL;
+ }
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sizeLeft = 0;
+ uint8_t *pByteStream = NULL;
+ NaluInfo *pNaluInfo = mMetadata.naluInfo;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ // NALU headers are appended to encrypted video bitstream
+ // |...encrypted video bitstream (16 bytes aligned)...| 4 bytes of header size |...NALU headers..|
+ pByteStream = buffer->data + buffer->size + 4;
+ sizeLeft = *(int32_t *)(buffer->data + buffer->size);
+ VTRACE("%s sizeLeft: %d buffer->size: %#x", __func__, sizeLeft, buffer->size);
+ mInputBuffer = buffer->data;
+ } else {
+ status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream);
+ CHECK_STATUS("parseAnnexBStream");
+ pByteStream = mByteStream.byteStream;
+ sizeLeft = mByteStream.streamPos;
+ mInputBuffer = buffer->data;
+ }
+ if (sizeLeft < 4) {
+ ETRACE("Not enough data to read number of NALU.");
+ return DECODE_INVALID_DATA;
+ }
+
+ // read number of NALU
+ memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+ sizeLeft -= 4;
+
+    if (mMetadata.naluNumber == 0) {
+        WTRACE("Number of NALU is ZERO!");
+        return DECODE_SUCCESS;
+    }
+
+    if (mMetadata.naluNumber > MAX_NALU_NUMBER) {
+        // Guard the fixed-size naluInfo array allocated in start().
+        ETRACE("Number of NALU (%d) exceeds MAX_NALU_NUMBER.", mMetadata.naluNumber);
+        return DECODE_INVALID_DATA;
+    }
+
+ for (int32_t i = 0; i < mMetadata.naluNumber; i++) {
+ if (sizeLeft < 12) {
+ ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
+ return DECODE_INVALID_DATA;
+ }
+ sizeLeft -= 12;
+ // read NALU offset
+ memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU size
+ memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU header length
+ memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+
+ if (sizeLeft < pNaluInfo->naluHeaderLen) {
+ ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
+ return DECODE_INVALID_DATA;
+ }
+
+ sizeLeft -= pNaluInfo->naluHeaderLen;
+
+ if (pNaluInfo->naluHeaderLen) {
+ // copy start code prefix to buffer
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+
+ // copy NALU header
+ memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
+ pByteStream += pNaluInfo->naluHeaderLen;
+
+ sizeAccumulated += pNaluInfo->naluHeaderLen;
+ } else {
+ WTRACE("header len is zero for NALU %d", i);
+ }
+
+ // for next NALU
+ pNaluInfo++;
+ }
+
+ buffer->data = mNaluHeaderBuffer;
+ buffer->size = sizeAccumulated;
+
+ return VideoDecoderAVC::decode(buffer);
+}
+
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+            mAcquiredBuffer->pictureOrder = picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Check there is no reference frame loss before decoding a frame
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+        // We have to provide a hacked DPB rather than the complete DPB to libva as a workaround.
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ // find which naluinfo is correlated to current slice
+ int naluIndex = 0;
+ uint32_t accumulatedHeaderLen = 0;
+ uint32_t headerLen = 0;
+ for (; naluIndex < mMetadata.naluNumber; naluIndex++) {
+ headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
+ if (headerLen == 0) {
+ WTRACE("lenght of current NAL unit is 0.");
+ continue;
+ }
+ accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
+ if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
+ break;
+ }
+ accumulatedHeaderLen += headerLen;
+ }
+
+ if (sliceData->slice_offset != accumulatedHeaderLen) {
+ WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
+ }
+
+ sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
+ uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
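+    // The protected slice data is addressed in 16-byte blocks (see the IMR
+    // offset passed to vaCreateBuffer below): carry the misalignment into
+    // slice_data_offset and round the slice size up to a 16-byte multiple.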
+ uint32_t slice_offset_shift = sliceOffset % 16;
+ sliceParam->slice_data_offset += slice_offset_shift;
+ sliceData->slice_size = (sliceParam->slice_data_size + slice_offset_shift + 0xF) & ~0xF;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
+ // offset points to first byte of NAL unit
+
+ if (mInputBuffer != NULL) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //Slice size
+ 1, // num_elements
+ mInputBuffer + sliceOffset - slice_offset_shift,
+ &bufferIDs[bufferIDCount]);
+ } else {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAProtectedSliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ (uint8_t*)sliceOffset, // IMR offset
+ &bufferIDs[bufferIDCount]);
+ }
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+
+// Parse byte string pattern "0x000001" (3 bytes) in the current buffer.
+// Returns offset of position following the pattern in the buffer if pattern is found or -1 if not found.
+int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
+ uint8_t *ptr;
+ uint32_t left = 0, data = 0, phase = 0;
+ uint8_t mask1 = 0, mask2 = 0;
+
+    /* Meaning of phase:
+        0: initial state, no "0x000001" pattern found so far;
+        1: one "0x00" byte is found;
+        2: two or more consecutive "0x00" bytes are found;
+        3: the "0x000001" pattern is found;
+        4: at least one more byte follows the "0x000001" pattern;
+    */
+
+ left = length;
+ ptr = (uint8_t *) (stream + offset);
+ phase = 0;
+
+ // parse until there is more data and start code not found
+ while ((left > 0) && (phase < 3)) {
+        // Check if the address is 32-bit aligned and phase == 0; if that's the case we can check 4 bytes at a time instead of one.
+ if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
+ while (left > 3) {
+ data = *((uint32_t *)ptr);
+ mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
+ mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
+                // If the second and fourth bytes are not zeros then we cannot have a start code here,
+                // as we need two consecutive zero bytes for a start code pattern.
+ if (mask1 && mask2) {
+ // skip 4 bytes and start over
+ ptr += 4;
+ left -=4;
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
+ if (left > 0) {
+ if (*ptr == STARTCODE_00) {
+ phase++;
+ if (phase > 2) {
+                    // more than 2 consecutive '0x00' bytes are found
+ phase = 2;
+ }
+ } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
+ // start code is found
+ phase = 3;
+ } else {
+ // reset lookup
+ phase = 0;
+ }
+ ptr++;
+ left--;
+ }
+ }
+
+ if ((left > 0) && (phase == 3)) {
+ phase = 4;
+        // return the offset of the position following the "0x000001" pattern in the buffer
+ return (int32_t)(ptr - stream);
+ }
+ return -1;
+}
+
+
+Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) {
+ uint8_t naluType;
+ int32_t naluHeaderLen;
+
+ naluType = *(uint8_t *)(stream + naluStream->naluOffset);
+ naluType &= NALU_TYPE_MASK;
+ // first update nalu header length based on nalu type
+ if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
+ // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
+ naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
+ } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
+ //sps, pps, sei, etc, return the entire NAL unit in clear
+ naluHeaderLen = naluStream->naluLen;
+ } else {
+ return DECODE_FRAME_DROPPED;
+ }
+
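+    // Serialize one record into the byte stream: naluOffset, naluLen, and
+    // naluHeaderLen as 32-bit words, then naluHeaderLen bytes of header data
+    // (see the NaluByteStream layout comment in the header).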
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ if (naluHeaderLen) {
+ memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
+ naluStream->streamPos += naluHeaderLen;
+ }
+ return DECODE_SUCCESS;
+}
+
+
+// Parse a start-code prefixed stream, also known as an Annex B byte stream, commonly used in AVI, ES, and MPEG-2 TS containers.
+Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
+ int32_t naluOffset, offset, left;
+ NaluInfo *info;
+    Decode_Status ret = DECODE_SUCCESS;
+
+ naluOffset = 0;
+ offset = 0;
+ left = length;
+
+ // leave 4 bytes to copy nalu count
+ naluStream->streamPos = 4;
+ naluStream->naluCount = 0;
+ memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
+
+ for (; ;) {
+ naluOffset = findNalUnitOffset(stream, offset, left);
+ if (naluOffset == -1) {
+ break;
+ }
+
+ if (naluStream->naluCount == 0) {
+ naluStream->naluOffset = naluOffset;
+ } else {
+ naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN;
+ ret = copyNaluHeader(stream, naluStream);
+ if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
+ LOGW("copyNaluHeader returned %d", ret);
+ return ret;
+ }
+ // starting position for next NALU
+ naluStream->naluOffset = naluOffset;
+ }
+
+ if (ret == DECODE_SUCCESS) {
+ naluStream->naluCount++;
+ }
+
+ // update next lookup position and length
+ offset = naluOffset + 1; // skip one byte of NAL unit type
+ left = length - offset;
+ }
+
+ if (naluStream->naluCount > 0) {
+ naluStream->naluLen = length - naluStream->naluOffset;
+ memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
+ // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
+ copyNaluHeader(stream, naluStream);
+ return DECODE_SUCCESS;
+ }
+
+ LOGW("number of valid NALU is 0!");
+ return DECODE_SUCCESS;
+}
+
diff --git a/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..ee16073
--- /dev/null
+++ b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h
@@ -0,0 +1,75 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+private:
+ enum {
+ MAX_SLICE_HEADER_SIZE = 30,
+ MAX_NALU_HEADER_BUFFER = 8192,
+ MAX_NALU_NUMBER = 400, // > 4096/12
+ };
+
+ // Information of Network Abstraction Layer Unit
+ struct NaluInfo {
+ int32_t naluOffset; // offset of NAL unit in the firewalled buffer
+ int32_t naluLen; // length of NAL unit
+ int32_t naluHeaderLen; // length of NAL unit header
+ };
+
+ struct NaluMetadata {
+ NaluInfo *naluInfo;
+ int32_t naluNumber; // number of NAL units
+ };
+
+ struct NaluByteStream {
+ int32_t naluOffset;
+ int32_t naluLen;
+ int32_t streamPos;
+        uint8_t *byteStream; // 4 bytes of naluCount, 4 bytes of naluOffset, 4 bytes of naluLen, 4 bytes of naluHeaderLen, followed by naluHeaderData
+ int32_t naluCount;
+ };
+
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+ int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
+ Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
+ Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
+
+private:
+ NaluMetadata mMetadata;
+ NaluByteStream mByteStream;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mInputBuffer;
+};
+
+
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..2867ad9
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,861 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <va/va.h>
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderTrace.h"
+#include "vbp_loader.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoFrameInfo.h"
+
+#define MAX_SLICEHEADER_BUFFER_SIZE 4096
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+/* H264 NAL unit types */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType){
+ mFrameSize = 0;
+ mFrameData = NULL;
+ mIsEncryptData = 0;
+ mClearData = NULL;
+ mCachedHeader = NULL;
+ setParserType(VBP_H264SECURE);
+ mFrameIdx = 0;
+ mModularMode = 0;
+ mSliceNum = 0;
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::start");
+
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mClearData = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mClearData == NULL) {
+ ETRACE("Failed to allocate memory for mClearData");
+ return DECODE_MEMORY_FAIL;
+ }
+
+    mCachedHeader = new uint8_t [MAX_SLICEHEADER_BUFFER_SIZE];
+ if (mCachedHeader == NULL) {
+ ETRACE("Failed to allocate memory for mCachedHeader");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VTRACE("VideoDecoderAVCSecure::stop");
+ VideoDecoderAVC::stop();
+
+ if (mClearData) {
+ delete [] mClearData;
+ mClearData = NULL;
+ }
+
+ if (mCachedHeader) {
+ delete [] mCachedHeader;
+ mCachedHeader = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ VTRACE("processModularInputBuffer +++");
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+
+ int32_t nalu_num = 0;
+ uint8_t nalu_type = 0;
+ int32_t nalu_offset = 0;
+ uint32_t nalu_size = 0;
+ uint8_t naluType = 0;
+ uint8_t *nalu_data = NULL;
+ uint32_t sliceidx = 0;
+
+ frame_info_t *pFrameInfo = NULL;
+ mSliceNum = 0;
+ memset(&mSliceInfo, 0, sizeof(mSliceInfo));
+ mIsEncryptData = 0;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ pFrameInfo = (frame_info_t *) buffer->data;
+ if (pFrameInfo == NULL) {
+ ETRACE("Invalid parameter: pFrameInfo is NULL!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mFrameData = pFrameInfo->data;
+ mFrameSize = pFrameInfo->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+
+ nalu_num = pFrameInfo->num_nalus;
+ VTRACE("nalu_num = %d", nalu_num);
+
+ if (nalu_num <= 0 || nalu_num >= MAX_NUM_NALUS) {
+ ETRACE("Invalid parameter: nalu_num = %d", nalu_num);
+ return DECODE_MEMORY_FAIL;
+ }
+
+ for (int32_t i = 0; i < nalu_num; i++) {
+
+ nalu_size = pFrameInfo->nalus[i].length;
+ nalu_type = pFrameInfo->nalus[i].type;
+ nalu_offset = pFrameInfo->nalus[i].offset;
+ nalu_data = pFrameInfo->nalus[i].data;
+ naluType = nalu_type & NALU_TYPE_MASK;
+
+ VTRACE("nalu_type = 0x%x, nalu_size = %d, nalu_offset = 0x%x", nalu_type, nalu_size, nalu_offset);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+
+ mIsEncryptData = 1;
+ VTRACE("slice idx = %d", sliceidx);
+ mSliceInfo[sliceidx].sliceHeaderByte = nalu_type;
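+            // The math below keeps the slice data 16-byte aligned: round the
+            // NALU offset down to a 16-byte boundary, keep the residual byte
+            // offset within that block, and round the padded size up to the
+            // next 16-byte multiple (sliceLength stays the unpadded size).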
+ mSliceInfo[sliceidx].sliceStartOffset = (nalu_offset >> 4) << 4;
+ mSliceInfo[sliceidx].sliceByteOffset = nalu_offset - mSliceInfo[sliceidx].sliceStartOffset;
+ mSliceInfo[sliceidx].sliceLength = mSliceInfo[sliceidx].sliceByteOffset + nalu_size;
+ mSliceInfo[sliceidx].sliceSize = (mSliceInfo[sliceidx].sliceByteOffset + nalu_size + 0xF) & ~0xF;
+ VTRACE("sliceHeaderByte = 0x%x", mSliceInfo[sliceidx].sliceHeaderByte);
+ VTRACE("sliceStartOffset = %d", mSliceInfo[sliceidx].sliceStartOffset);
+ VTRACE("sliceByteOffset = %d", mSliceInfo[sliceidx].sliceByteOffset);
+ VTRACE("sliceSize = %d", mSliceInfo[sliceidx].sliceSize);
+ VTRACE("sliceLength = %d", mSliceInfo[sliceidx].sliceLength);
+
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = mSliceInfo[sliceidx].sliceSize > 64 ? 64 : mSliceInfo[sliceidx].sliceSize ;
+ testdata = (uint8_t *)(mFrameData);
+ for (int i = 0; i < testsize; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ sliceidx++;
+
+ } else if (naluType == h264_NAL_UNIT_TYPE_SPS || naluType == h264_NAL_UNIT_TYPE_PPS) {
+ if (nalu_data == NULL) {
+ ETRACE("Invalid parameter: nalu_data = NULL for naluType 0x%x", naluType);
+ return DECODE_MEMORY_FAIL;
+ }
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ITRACE("Nalu type = 0x%x is skipped", naluType);
+ continue;
+ }
+ }
+ clear_data = mClearData;
+ mSliceNum = sliceidx;
+
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+ uint8_t naluType = 0;
+
+ int32_t num_nalus;
+ int32_t nalu_offset;
+ int32_t offset;
+ uint8_t *data_src;
+ uint8_t *nalu_data;
+ uint32_t nalu_size;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+
+ mFrameData = buffer->data;
+ mFrameSize = buffer->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+ num_nalus = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t));
+ VTRACE("num_nalus = %d", num_nalus);
+ offset = 4;
+ for (int32_t i = 0; i < num_nalus; i++) {
+ VTRACE("%d nalu, offset = %d", i, offset);
+ data_src = buffer->data + buffer->size + sizeof(uint32_t) + offset;
+ nalu_size = *(uint32_t *)(data_src + 2 * sizeof(uint32_t));
+ nalu_size = (nalu_size + 0x03) & (~0x03);
+
+            nalu_data = data_src + 3 * sizeof(uint32_t);
+            naluType = nalu_data[0] & NALU_TYPE_MASK;
+            offset += nalu_size + 3 * sizeof(uint32_t);
+ VTRACE("naluType = 0x%x", naluType);
+ VTRACE("nalu_size = %d, nalu_data = %p", nalu_size, nalu_data);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ ETRACE("Slice NALU received!");
+ return DECODE_INVALID_DATA;
+ }
+
+ else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mClearData + clear_data_size,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ clear_data_size += STARTCODE_PREFIX_LEN;
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ETRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+ clear_data = mClearData;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::decode");
+ Decode_Status status;
+ vbp_data_h264 *data = NULL;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+    testsize = buffer->size > 16 ? 16 : buffer->size;
+    testdata = (uint8_t *)(buffer->data);
+    for (uint32_t i = 0; i < testsize; i++) {
+        VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+    }
+#endif
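+    // IS_SUBSAMPLE_ENCRYPTION selects the modular DRM path; once it has been
+    // seen, the decoder stays in modular mode for the rest of the session.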
+ if (buffer->flag & IS_SUBSAMPLE_ENCRYPTION) {
+ mModularMode = 1;
+ }
+
+    if (mModularMode) {
+        status = processModularInputBuffer(buffer, &data);
+        CHECK_STATUS("processModularInputBuffer");
+    } else {
+        status = processClassicInputBuffer(buffer, &data);
+        CHECK_STATUS("processClassicInputBuffer");
+    }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+            WTRACE("Can't start VA because SPS or PPS is not yet available.");
+ return DECODE_SUCCESS;
+ }
+ }
+
+ status = decodeFrame(buffer, data);
+
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::decodeFrame");
+ Decode_Status status;
+ VTRACE("data->has_sps = %d, data->has_pps = %d", data->has_sps, data->has_pps);
+
+#if 0
+    // Don't remove the following code; it can be enabled for debugging the DPB.
+ for (unsigned int i = 0; i < data->num_pictures; i++) {
+ VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+ VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d, reference = %d",
+ i,
+ buffer->timeStamp/1E6,
+ pic.TopFieldOrderCnt,
+ pic.BottomFieldOrderCnt,
+ pic.flags,
+ (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+ }
+#endif
+
+ if (data->new_sps || data->new_pps) {
+ status = handleNewSequence(data);
+ CHECK_STATUS("handleNewSequence");
+ }
+
+ if (mModularMode && (!mIsEncryptData)) {
+ if (data->pic_data[0].num_slices == 0) {
+ ITRACE("No slice available for decoding.");
+ status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+ mSizeChanged = false;
+ return status;
+ }
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+
+ // start decoding a new frame
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+    if (mModularMode) {
+        status = parseModularSliceHeader(data);
+    } else {
+        status = parseClassicSliceHeader(data);
+    }
+
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ return status;
+ }
+
+ status = beginDecodingFrame(data);
+ CHECK_STATUS("beginDecodingFrame");
+
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ if (isNewFrame(data, lastPTS == mCurrentPTS) == 0) {
+ ETRACE("Can't handle interlaced frames yet");
+ return DECODE_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::beginDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::beginDecodingFrame");
+ Decode_Status status;
+ VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+ if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ mAcquiredBuffer->referenceFrame = true;
+ } else {
+ mAcquiredBuffer->referenceFrame = false;
+ }
+
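+    // A top-field flag means field-coded content: mark both fields so the
+    // surface is treated as interlaced; otherwise it is a progressive frame.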
+ if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ mSizeChanged = false;
+ }
+
+ status = continueDecodingFrame(data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::continueDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::continueDecodingFrame");
+ Decode_Status status;
+ vbp_picture_data_h264 *picData = data->pic_data;
+
+ if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+        ETRACE("mAcquiredBuffer is NULL or its surface is invalid. Implementation bug.");
+ return DECODE_FAIL;
+ }
+ VTRACE("data->num_pictures = %d", data->num_pictures);
+ for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+ if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ if (picIndex > 0 &&
+ (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+ ETRACE("Packed frame is not supported yet!");
+ return DECODE_FAIL;
+ }
+ VTRACE("picData->num_slices = %d", picData->num_slices);
+ for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+ status = decodeSlice(data, picIndex, sliceIndex);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ // remove current frame from DPB as it can't be decoded.
+ removeReferenceFromDPB(picData->pic_parms);
+ return status;
+ }
+ }
+ }
+ mDecodingFrame = true;
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::parseClassicSliceHeader(vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+    VABufferID slicebufferID;
+
+ if (mFrameSize <= 0) {
+ return DECODE_SUCCESS;
+ }
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
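+    // Slice-header parsing handshake: create an output buffer for the parsed
+    // headers, submit the firewalled frame data together with a
+    // VAParsePictureParameterBuffer via vaRenderPicture, then map the output
+    // buffer to read the slice headers the driver extracted.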
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ void *sliceheaderbuf;
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ mFrameData,
+        &slicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+    data->pic_parse_buffer->frame_buf_id = slicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mFrameSize;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+
+#if 0
+
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+    status = updateSliceParameter(data, sliceheaderbuf);
+    CHECK_STATUS("updateSliceParameter");
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::parseModularSliceHeader(vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+    VABufferID slicebufferID;
+ int32_t sliceIdx;
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+    if (mFrameSize <= 0 || mSliceNum <= 0) {
+ return DECODE_SUCCESS;
+ }
+ void *sliceheaderbuf;
+ memset(mCachedHeader, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+ int32_t offset = 0;
+ int32_t size = 0;
+
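+    // Parse each slice header separately; the driver writes each parsed header
+    // into sliceheaderbuf and the results are concatenated into mCachedHeader,
+    // which is terminated with 0xFF after the loop.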
+ for (sliceIdx = 0; sliceIdx < mSliceNum; sliceIdx++) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mSliceInfo[sliceIdx].sliceSize, //size
+ 1, //num_elements
+ mFrameData + mSliceInfo[sliceIdx].sliceStartOffset,
+            &slicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+        data->pic_parse_buffer->frame_buf_id = slicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mSliceInfo[sliceIdx].sliceLength;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+ data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte;
+ data->pic_parse_buffer->slice_offset = mSliceInfo[sliceIdx].sliceByteOffset;
+
+#if 0
+ VTRACE("data->pic_parse_buffer->slice_offset = 0x%x", data->pic_parse_buffer->slice_offset);
+        VTRACE("pic_parse_buffer->nalu_header.value = %x", data->pic_parse_buffer->nalu_header.value);
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
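+        // The parsed output appears to carry its payload length at byte
+        // offset 4; add 4 so the length field itself is included in the copy.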
+        size = *(uint32_t *)((uint8_t *)sliceheaderbuf + 4) + 4;
+ VTRACE("slice header size = 0x%x, offset = 0x%x", size, offset);
+ if (offset + size <= MAX_SLICEHEADER_BUFFER_SIZE - 4) {
+ memcpy(mCachedHeader+offset, sliceheaderbuf, size);
+ offset += size;
+ } else {
+ WTRACE("Cached slice header is not big enough!");
+ }
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+ }
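+    // 0xFF appears to act as an end marker for the concatenated slice headers.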
+ memset(mCachedHeader + offset, 0xFF, 4);
+    status = updateSliceParameter(data, mCachedHeader);
+    CHECK_STATUS("updateSliceParameter");
+ return DECODE_SUCCESS;
+}
+
+
+Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
+ VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
+ Decode_Status status;
+ status = VideoDecoderBase::updateBuffer(
+ (uint8_t *)sliceheaderbuf,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ (void**)&data);
+ CHECK_STATUS("updateBuffer");
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 3 buffers to render a slice: picture parameter, IQMatrix, slice parameter
+ VABufferID bufferIDs[3];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ uint32_t slice_data_size = 0;
+ uint8_t* slice_data_addr = NULL;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ }
+        VTRACE("Current frameidx = %d", mFrameIdx);
+        mFrameIdx++;
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+        // As a workaround, provide libva with a pruned DPB rather than the complete one.
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
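+    // Select the slice payload: in modular mode, encrypted slices live in the
+    // firewalled frame buffer at the recorded offsets while clear slices come
+    // from the parser; classic mode submits the whole frame as one slice.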
+ if (mModularMode) {
+ if (mIsEncryptData) {
+ sliceParam->slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_addr = mFrameData + mSliceInfo[sliceIndex].sliceStartOffset;
+ } else {
+ slice_data_size = sliceData->slice_size;
+ slice_data_addr = sliceData->buffer_addr + sliceData->slice_offset;
+ }
+ } else {
+ sliceParam->slice_data_size = mFrameSize;
+ slice_data_size = mFrameSize;
+ slice_data_addr = mFrameData;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ VABufferID slicebufferID;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ slice_data_size, //size
+ 1, //num_elements
+ slice_data_addr,
+ &slicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &slicebufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
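+    // Modular DRM needs subsample slice mode so the driver can parse slice
+    // headers out of the firewalled buffer.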
+ if (mModularMode) {
+ attrib[1].value = VA_DEC_SLICE_MODE_SUBSAMPLE;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..f66d7b8
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h
@@ -0,0 +1,69 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H
+#define VIDEO_DECODER_AVC_SECURE_H
+
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderDefs.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+    // data in the decode buffer may be clear or encrypted (classic or modular DRM).
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+protected:
+ virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
+ Decode_Status parseClassicSliceHeader(vbp_data_h264 *data);
+ Decode_Status parseModularSliceHeader(vbp_data_h264 *data);
+
+ Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ Decode_Status processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ Decode_Status processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ int32_t mIsEncryptData;
+ int32_t mFrameSize;
+ uint8_t* mFrameData;
+ uint8_t* mClearData;
+ uint8_t* mCachedHeader;
+ int32_t mFrameIdx;
+ int32_t mModularMode;
+
+ enum {
+ MAX_SLICE_HEADER_NUM = 256,
+ };
+ int32_t mSliceNum;
+    // Information about the slices in modular DRM mode
+    struct SliceInfo {
+        uint8_t sliceHeaderByte;    // first byte of the slice header
+        uint32_t sliceStartOffset;  // offset of the slice unit in the firewalled buffer
+        uint32_t sliceByteOffset;   // extra offset from the block-aligned slice offset
+        uint32_t sliceSize;         // block-aligned length of the slice unit
+        uint32_t sliceLength;       // actual size of the slice
+ };
+
+ SliceInfo mSliceInfo[MAX_SLICE_HEADER_NUM];
+};
+
+#endif
diff --git a/videodecoder/securevideo/moorefield/VideoFrameInfo.h b/videodecoder/securevideo/moorefield/VideoFrameInfo.h
new file mode 100755
index 0000000..485b0da
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoFrameInfo.h
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_FRAME_INFO_H_
+#define VIDEO_FRAME_INFO_H_
+
+#define MAX_NUM_NALUS 16
+
+typedef struct {
+ uint8_t type; // nalu type + nal_ref_idc
+    uint32_t offset; // byte offset of this NALU within the encrypted data
+ uint8_t* data; // if the nalu is encrypted, this field is useless; if current NALU is SPS/PPS, data is the pointer to clear SPS/PPS data
+ uint32_t length; // nalu length
+} nalu_info_t;
+
+typedef struct {
+ uint8_t* data; // pointer to the encrypted data
+ uint32_t size; // encrypted data size
+    uint32_t num_nalus; // number of NALUs
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+#endif
diff --git a/videodecoder/use_util_sse4.h b/videodecoder/use_util_sse4.h
new file mode 100644
index 0000000..454099d
--- /dev/null
+++ b/videodecoder/use_util_sse4.h
@@ -0,0 +1,93 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef USE_UTIL_SSE4_H_
+#define USE_UTIL_SSE4_H_
+
+#include <string.h>     // memcpy fallback
+#include <emmintrin.h>
+#include <x86intrin.h>  // _mm_stream_load_si128 (SSE4.1)
+
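+// Copy from (typically write-combining) video memory using SSE4.1 streaming
+// loads (MOVNTDQA), which are much faster than ordinary loads on WC memory.
+// Falls back to plain memcpy when either pointer is not 16-byte aligned.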
+inline void stream_memcpy(void* dst_buff, const void* src_buff, size_t size)
+{
+ bool isAligned = (((size_t)(src_buff) | (size_t)(dst_buff)) & 0xF) == 0;
+ if (!isAligned) {
+ memcpy(dst_buff, src_buff, size);
+ return;
+ }
+
+ static const size_t regs_count = 8;
+
+ __m128i xmm_data0, xmm_data1, xmm_data2, xmm_data3;
+ __m128i xmm_data4, xmm_data5, xmm_data6, xmm_data7;
+
+ size_t remain_data = size & (regs_count * sizeof(xmm_data0) - 1);
+ size_t end_position = 0;
+
+ __m128i* pWb_buff = (__m128i*)dst_buff;
+ __m128i* pWb_buff_end = pWb_buff + ((size - remain_data) >> 4);
+ __m128i* pWc_buff = (__m128i*)src_buff;
+
+    /* fence so that prior writes to the WC source are visible to the streaming loads */
+ _mm_mfence();
+
+ while (pWb_buff < pWb_buff_end)
+ {
+ xmm_data0 = _mm_stream_load_si128(pWc_buff);
+ xmm_data1 = _mm_stream_load_si128(pWc_buff + 1);
+ xmm_data2 = _mm_stream_load_si128(pWc_buff + 2);
+ xmm_data3 = _mm_stream_load_si128(pWc_buff + 3);
+ xmm_data4 = _mm_stream_load_si128(pWc_buff + 4);
+ xmm_data5 = _mm_stream_load_si128(pWc_buff + 5);
+ xmm_data6 = _mm_stream_load_si128(pWc_buff + 6);
+ xmm_data7 = _mm_stream_load_si128(pWc_buff + 7);
+
+ pWc_buff += regs_count;
+ _mm_store_si128(pWb_buff, xmm_data0);
+ _mm_store_si128(pWb_buff + 1, xmm_data1);
+ _mm_store_si128(pWb_buff + 2, xmm_data2);
+ _mm_store_si128(pWb_buff + 3, xmm_data3);
+ _mm_store_si128(pWb_buff + 4, xmm_data4);
+ _mm_store_si128(pWb_buff + 5, xmm_data5);
+ _mm_store_si128(pWb_buff + 6, xmm_data6);
+ _mm_store_si128(pWb_buff + 7, xmm_data7);
+
+ pWb_buff += regs_count;
+ }
+
+    /* copy the remainder in 16-byte steps */
+ if (remain_data >= 16)
+ {
+ size = remain_data;
+ remain_data = size & 15;
+ end_position = size >> 4;
+ for (size_t i = 0; i < end_position; ++i)
+ {
+ pWb_buff[i] = _mm_stream_load_si128(pWc_buff + i);
+ }
+ }
+
+    /* copy any remaining tail bytes (fewer than 16) */
+ if (remain_data)
+ {
+ __m128i temp_data = _mm_stream_load_si128(pWc_buff + end_position);
+
+ char* psrc_buf = (char*)(&temp_data);
+ char* pdst_buf = (char*)(pWb_buff + end_position);
+
+ for (size_t i = 0; i < remain_data; ++i)
+ {
+ pdst_buf[i] = psrc_buf[i];
+ }
+ }
+
+}
+
+#endif