aboutsummaryrefslogtreecommitdiff
path: root/videodecoder/securevideo
diff options
context:
space:
mode:
authorGuilhem IMBERTON <guilhem.imberton@intel.com>2014-08-06 20:47:04 +0200
committerPatrick Tjin <pattjin@google.com>2014-08-07 14:31:21 -0700
commit82b428e49a70ddc051a36d2b3a25d90db79770dc (patch)
tree3c7387e0ff0d1a4dfebec762a9b0a80f09724ef1 /videodecoder/securevideo
parent4d358311bdb7a2e02671ecf499effeb0262e1fc3 (diff)
downloadlibmix-82b428e49a70ddc051a36d2b3a25d90db79770dc.tar.gz
Initial libmix commit
Change-Id: I7a0b9afdc83a3274189cef0788c7296a871a3d98 Signed-off-by: Guilhem IMBERTON <guilhem.imberton@intel.com>
Diffstat (limited to 'videodecoder/securevideo')
-rw-r--r--videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp367
-rw-r--r--videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h44
-rw-r--r--videodecoder/securevideo/baytrail/secvideoparser.h150
-rw-r--r--videodecoder/securevideo/baytrail/va_private.h64
-rw-r--r--videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp351
-rw-r--r--videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h44
-rw-r--r--videodecoder/securevideo/cherrytrail/secvideoparser.h150
-rw-r--r--videodecoder/securevideo/cherrytrail/va_private.h63
-rw-r--r--videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp507
-rw-r--r--videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h75
-rwxr-xr-xvideodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp858
-rwxr-xr-xvideodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h69
-rwxr-xr-xvideodecoder/securevideo/merrifield/VideoFrameInfo.h36
-rw-r--r--videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp510
-rw-r--r--videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h75
-rw-r--r--videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp861
-rw-r--r--videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h69
-rwxr-xr-xvideodecoder/securevideo/moorefield/VideoFrameInfo.h36
18 files changed, 4329 insertions, 0 deletions
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..52a5285
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,367 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "va_private.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mSliceHeaderBuffer(NULL) {
+ setParserType(VBP_H264SECURE);
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mNaluHeaderBuffer");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mSliceHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mSliceHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mSliceHeaderBuffer");
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+
+ if (mSliceHeaderBuffer) {
+ delete [] mSliceHeaderBuffer;
+ mSliceHeaderBuffer = NULL;
+ }
+
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sliceHeaderSize = 0;
+ int32_t sizeLeft = 0;
+ int32_t sliceIdx = 0;
+ uint8_t naluType;
+ frame_info_t* pFrameInfo;
+
+ mFrameSize = 0;
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ return VideoDecoderAVC::decode(buffer);
+ }
+
+ if (buffer->size != sizeof(frame_info_t)) {
+ ETRACE("Not enough data to read frame_info_t!");
+ return DECODE_INVALID_DATA;
+ }
+ pFrameInfo = (frame_info_t*) buffer->data;
+
+ mFrameSize = pFrameInfo->length;
+ VTRACE("mFrameSize = %d", mFrameSize);
+
+ memcpy(&mEncParam, pFrameInfo->pavp, sizeof(pavp_info_t));
+ for (int32_t i = 0; i < pFrameInfo->num_nalus; i++) {
+ naluType = pFrameInfo->nalus[i].type & NALU_TYPE_MASK;
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &sliceIdx,
+ sizeof(int32_t));
+ sliceHeaderSize += 4;
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->data,
+ sizeof(uint8_t*));
+ sliceHeaderSize += sizeof(uint8_t*);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].offset,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].length,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ pFrameInfo->nalus[i].slice_header,
+ sizeof(slice_header_t));
+ sliceHeaderSize += sizeof(slice_header_t);
+ if (pFrameInfo->nalus[i].type & 0x60) {
+ memcpy(mSliceHeaderBuffer+sliceHeaderSize, pFrameInfo->dec_ref_pic_marking, sizeof(dec_ref_pic_marking_t));
+ } else {
+ memset(mSliceHeaderBuffer+sliceHeaderSize, 0, sizeof(dec_ref_pic_marking_t));
+ }
+ sliceHeaderSize += sizeof(dec_ref_pic_marking_t);
+ sliceIdx++;
+ } else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ pFrameInfo->nalus[i].data,
+ pFrameInfo->nalus[i].length);
+ sizeAccumulated += pFrameInfo->nalus[i].length;
+ } else {
+ WTRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+
+ vbp_data_h264 *data = NULL;
+ int new_sequence_to_handle = 0;
+
+ if (sizeAccumulated > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ mNaluHeaderBuffer,
+ sizeAccumulated,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+
+ // [FIX DRC zoom issue] if one buffer contains more than one nalu
+ // for example SPS+PPS+IDR, new_sps/new_pps flags set in parseBuffer
+ // will be flushed in the following updateBuffer.
+ // So that handleNewSequence will not be handled in decodeFrame()
+ if (data->new_sps || data->new_pps) {
+ new_sequence_to_handle = 1;
+ }
+ }
+
+ if (sliceHeaderSize > 0) {
+ memset(mSliceHeaderBuffer + sliceHeaderSize, 0xFF, 4);
+ sliceHeaderSize += 4;
+ status = VideoDecoderBase::updateBuffer(
+ mSliceHeaderBuffer,
+ sliceHeaderSize,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::updateBuffer");
+
+ // in case the flags were flushed but indeed new sequence needed to be handled.
+ if ((1 == new_sequence_to_handle) &&
+ ((data->new_sps == 0) || (data->new_pps == 0))) {
+ data->new_sps = 1;
+ data->new_pps = 1;
+ }
+ }
+
+ if (data == NULL) {
+ ETRACE("Invalid data returned by parser!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+ status = decodeFrame(buffer, data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+    // maximum 5 buffers to render a slice: picture parameter, IQMatrix, encryption parameter, slice data, slice parameter
+ VABufferID bufferIDs[5];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ VAEncryptionParameterBuffer encryptParam;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+
+ if (mIsEncryptData) {
+ memset(&encryptParam, 0, sizeof(VAEncryptionParameterBuffer));
+ encryptParam.pavpCounterMode = 4;
+ encryptParam.pavpEncryptionType = 2;
+ encryptParam.hostEncryptMode = 2;
+ encryptParam.pavpHasBeenEnabled = 1;
+ encryptParam.app_id = 0;
+ memcpy(encryptParam.pavpAesCounter, mEncParam.iv, 16);
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ (VABufferType)VAEncryptionParameterBufferType,
+ sizeof(VAEncryptionParameterBuffer),
+ 1,
+ &encryptParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateEncryptionParameterBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264Base),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+
+ vaStatus = vaGetConfigAttributes(mVADisplay,profile,VAEntrypointVLD, &attrib[1], 1);
+
+ if (attrib[1].value & VA_DEC_SLICE_MODE_BASE)
+ {
+ ITRACE("AVC short format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+ } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+        ITRACE("AVC long format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ } else {
+ ETRACE("Unsupported Decode Slice Mode!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..2214075
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+#include "secvideoparser.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+
+private:
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ pavp_info_t mEncParam;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mSliceHeaderBuffer;
+ uint32_t mIsEncryptData;
+ uint32_t mFrameSize;
+};
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/baytrail/secvideoparser.h b/videodecoder/securevideo/baytrail/secvideoparser.h
new file mode 100644
index 0000000..f27580a
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/secvideoparser.h
@@ -0,0 +1,150 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef SEC_VIDEO_PARSER_H_
+#define SEC_VIDEO_PARSER_H_
+
+#include <stdint.h>
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+#define MAX_OP 16
+
+enum dec_ref_pic_marking_flags {
+ IDR_PIC_FLAG = 0,
+ NO_OUTPUT_OF_PRIOR_PICS_FLAG,
+ LONG_TERM_REFERENCE_FLAG,
+ ADAPTIVE_REF_PIC_MARKING_MODE_FLAG
+};
+
+typedef struct _dec_ref_pic_marking_t {
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t idr_pic_flag:1;
+ uint8_t no_output_of_prior_pics_flag:1;
+ uint8_t long_term_reference_flag:1;
+ uint8_t adaptive_ref_pic_marking_mode_flag:1;
+ };
+ };
+ struct {
+ uint8_t memory_management_control_operation;
+ union {
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ } op1;
+ struct {
+ uint8_t long_term_pic_num;
+ } op2;
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ uint8_t long_term_frame_idx;
+ } op3;
+ struct {
+ uint8_t max_long_term_frame_idx_plus1;
+ } op4;
+ struct {
+ uint8_t long_term_frame_idx;
+ } op6;
+ };
+ } op[MAX_OP];
+} dec_ref_pic_marking_t;
+
+enum slice_header_flags {
+ FIELD_PIC_FLAG = 0,
+ BOTTOM_FIELD_FLAG
+};
+
+typedef struct _slice_header_t {
+ uint8_t nal_unit_type;
+ uint8_t pps_id;
+ uint8_t padding; // TODO: padding needed because flags in secfw impl. is a big-endian uint16_t
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t field_pic_flag:1;
+ uint8_t bottom_field_flag:1;
+ };
+ };
+ uint32_t first_mb_in_slice;
+ uint32_t frame_num;
+ uint16_t idr_pic_id;
+ uint16_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt[2];
+ int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+typedef struct {
+ uint8_t type;
+ uint32_t offset;
+ uint8_t* data;
+ uint32_t length;
+ slice_header_t* slice_header;
+} nalu_info_t;
+
+typedef struct {
+ uint32_t iv[4];
+ uint32_t mode;
+ uint32_t app_id;
+} pavp_info_t;
+
+#define MAX_NUM_NALUS 20
+
+typedef struct {
+ uint8_t* data;
+ uint32_t length;
+ pavp_info_t* pavp;
+ dec_ref_pic_marking_t* dec_ref_pic_marking;
+ uint32_t num_nalus;
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+int parser_init(void);
+int parse_frame(uint8_t* frame, uint32_t frame_size, uint8_t* nalu_data, uint32_t* nalu_data_size);
+
+// DEBUG PRINTING
+void print_slice_header(slice_header_t* slice_header);
+void print_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void print_data_bytes(uint8_t* data, uint32_t count);
+void print_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+// BYTESWAPPING
+uint16_t byteswap_16(uint16_t word);
+uint32_t byteswap_32(uint32_t dword);
+void byteswap_slice_header(slice_header_t* slice_header);
+void byteswap_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void byteswap_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+#endif /* SEC_VIDEO_PARSER_H_ */
diff --git a/videodecoder/securevideo/baytrail/va_private.h b/videodecoder/securevideo/baytrail/va_private.h
new file mode 100644
index 0000000..34a4e1b
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/va_private.h
@@ -0,0 +1,64 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+#ifndef __VA_PRIVATE_H__
+#define __VA_PRIVATE_H__
+#include <va/va.h>
+#define ENABLE_PAVP_LINUX 1
+// Misc parameter for encoder
+#define VAEncMiscParameterTypePrivate -2
+// encryption parameters for PAVP
+#define VAEncryptionParameterBufferType -3
+
+typedef struct _VAEncMiscParameterPrivate
+{
+ unsigned int target_usage; // Valid values 1-7 for AVC & MPEG2.
+ unsigned int reserved[7]; // Reserved for future use.
+} VAEncMiscParameterPrivate;
+
+/*VAEncryptionParameterBuffer*/
+typedef struct _VAEncryptionParameterBuffer
+{
+ //Not used currently
+ unsigned int encryptionSupport;
+ //Not used currently
+ unsigned int hostEncryptMode;
+ // For IV, Counter input
+ unsigned int pavpAesCounter[2][4];
+ // not used currently
+ unsigned int pavpIndex;
+ // PAVP mode, CTR, CBC, DEDE etc
+ unsigned int pavpCounterMode;
+ unsigned int pavpEncryptionType;
+ // not used currently
+ unsigned int pavpInputSize[2];
+ // not used currently
+ unsigned int pavpBufferSize[2];
+ // not used currently
+ VABufferID pvap_buf;
+ // set to TRUE if protected media
+ unsigned int pavpHasBeenEnabled;
+ // not used currently
+ unsigned int IntermmediatedBufReq;
+ // not used currently
+ unsigned int uiCounterIncrement;
+    // AppId: PAVP session Index from application
+ unsigned int app_id;
+
+} VAEncryptionParameterBuffer;
+
+#endif
diff --git a/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..18c87b9
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,351 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "va_private.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mSliceHeaderBuffer(NULL) {
+ setParserType(VBP_H264SECURE);
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mNaluHeaderBuffer");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mSliceHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mSliceHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory for mSliceHeaderBuffer");
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+
+ if (mSliceHeaderBuffer) {
+ delete [] mSliceHeaderBuffer;
+ mSliceHeaderBuffer = NULL;
+ }
+
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sliceHeaderSize = 0;
+ int32_t sizeLeft = 0;
+ int32_t sliceIdx = 0;
+ uint8_t naluType;
+ frame_info_t* pFrameInfo;
+
+ mFrameSize = 0;
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ return VideoDecoderAVC::decode(buffer);
+ }
+
+ if (buffer->size != sizeof(frame_info_t)) {
+ ETRACE("Not enough data to read frame_info_t!");
+ return DECODE_INVALID_DATA;
+ }
+ pFrameInfo = (frame_info_t*) buffer->data;
+
+ mFrameSize = pFrameInfo->length;
+ VTRACE("mFrameSize = %d", mFrameSize);
+
+ memcpy(&mEncParam, pFrameInfo->pavp, sizeof(pavp_info_t));
+ for (int32_t i = 0; i < pFrameInfo->num_nalus; i++) {
+ naluType = pFrameInfo->nalus[i].type & NALU_TYPE_MASK;
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &sliceIdx,
+ sizeof(int32_t));
+ sliceHeaderSize += 4;
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->data,
+ sizeof(uint8_t*));
+ sliceHeaderSize += sizeof(uint8_t*);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].offset,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ &pFrameInfo->nalus[i].length,
+ sizeof(uint32_t));
+ sliceHeaderSize += sizeof(uint32_t);
+
+ memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+ pFrameInfo->nalus[i].slice_header,
+ sizeof(slice_header_t));
+ sliceHeaderSize += sizeof(slice_header_t);
+ if (pFrameInfo->nalus[i].type & 0x60) {
+ memcpy(mSliceHeaderBuffer+sliceHeaderSize, pFrameInfo->dec_ref_pic_marking, sizeof(dec_ref_pic_marking_t));
+ } else {
+ memset(mSliceHeaderBuffer+sliceHeaderSize, 0, sizeof(dec_ref_pic_marking_t));
+ }
+ sliceHeaderSize += sizeof(dec_ref_pic_marking_t);
+ sliceIdx++;
+ } else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ pFrameInfo->nalus[i].data,
+ pFrameInfo->nalus[i].length);
+ sizeAccumulated += pFrameInfo->nalus[i].length;
+ } else {
+ WTRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+
+ vbp_data_h264 *data = NULL;
+
+ if (sizeAccumulated > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ mNaluHeaderBuffer,
+ sizeAccumulated,
+ false,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ }
+
+ if (sliceHeaderSize > 0) {
+ memset(mSliceHeaderBuffer + sliceHeaderSize, 0xFF, 4);
+ sliceHeaderSize += 4;
+ status = VideoDecoderBase::updateBuffer(
+ mSliceHeaderBuffer,
+ sliceHeaderSize,
+ (void**)&data);
+ CHECK_STATUS("VideoDecoderBase::updateBuffer");
+ }
+
+ if (data == NULL) {
+ ETRACE("Invalid data returned by parser!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+ status = decodeFrame(buffer, data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+    // maximum 5 buffers to render a slice: picture parameter, IQMatrix, encryption parameter, slice data, slice parameter
+ VABufferID bufferIDs[5];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ VAEncryptionParameterBuffer encryptParam;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+
+ if (mIsEncryptData) {
+ memset(&encryptParam, 0, sizeof(VAEncryptionParameterBuffer));
+ encryptParam.pavpCounterMode = 4;
+ encryptParam.pavpEncryptionType = 2;
+ encryptParam.hostEncryptMode = 2;
+ encryptParam.pavpHasBeenEnabled = 1;
+ encryptParam.app_id = 0;
+ memcpy(encryptParam.pavpAesCounter, mEncParam.iv, 16);
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ (VABufferType)VAEncryptionParameterBufferType,
+ sizeof(VAEncryptionParameterBuffer),
+ 1,
+ &encryptParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateEncryptionParameterBuffer");
+ bufferIDCount++;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ sliceData->buffer_addr + sliceData->slice_offset,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264Base),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+
+ vaStatus = vaGetConfigAttributes(mVADisplay,profile,VAEntrypointVLD, &attrib[1], 1);
+
+ if (attrib[1].value & VA_DEC_SLICE_MODE_BASE)
+ {
+ ITRACE("AVC short format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+ } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+        ITRACE("AVC long format used");
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ } else {
+ ETRACE("Unsupported Decode Slice Mode!");
+ return DECODE_FAIL;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..2214075
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+#include "secvideoparser.h"
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+protected:
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+
+private:
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ pavp_info_t mEncParam;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mSliceHeaderBuffer;
+ uint32_t mIsEncryptData;
+ uint32_t mFrameSize;
+};
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/cherrytrail/secvideoparser.h b/videodecoder/securevideo/cherrytrail/secvideoparser.h
new file mode 100644
index 0000000..f27580a
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/secvideoparser.h
@@ -0,0 +1,150 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef SEC_VIDEO_PARSER_H_
+#define SEC_VIDEO_PARSER_H_
+
+#include <stdint.h>
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+#define MAX_OP 16
+
+enum dec_ref_pic_marking_flags {
+ IDR_PIC_FLAG = 0,
+ NO_OUTPUT_OF_PRIOR_PICS_FLAG,
+ LONG_TERM_REFERENCE_FLAG,
+ ADAPTIVE_REF_PIC_MARKING_MODE_FLAG
+};
+
+typedef struct _dec_ref_pic_marking_t {
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t idr_pic_flag:1;
+ uint8_t no_output_of_prior_pics_flag:1;
+ uint8_t long_term_reference_flag:1;
+ uint8_t adaptive_ref_pic_marking_mode_flag:1;
+ };
+ };
+ struct {
+ uint8_t memory_management_control_operation;
+ union {
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ } op1;
+ struct {
+ uint8_t long_term_pic_num;
+ } op2;
+ struct {
+ uint8_t difference_of_pic_nums_minus1;
+ uint8_t long_term_frame_idx;
+ } op3;
+ struct {
+ uint8_t max_long_term_frame_idx_plus1;
+ } op4;
+ struct {
+ uint8_t long_term_frame_idx;
+ } op6;
+ };
+ } op[MAX_OP];
+} dec_ref_pic_marking_t;
+
+enum slice_header_flags {
+ FIELD_PIC_FLAG = 0,
+ BOTTOM_FIELD_FLAG
+};
+
+typedef struct _slice_header_t {
+ uint8_t nal_unit_type;
+ uint8_t pps_id;
+ uint8_t padding; // TODO: padding needed because flags in secfw impl. is a big-endian uint16_t
+ union {
+ uint8_t flags;
+ struct {
+ uint8_t field_pic_flag:1;
+ uint8_t bottom_field_flag:1;
+ };
+ };
+ uint32_t first_mb_in_slice;
+ uint32_t frame_num;
+ uint16_t idr_pic_id;
+ uint16_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt[2];
+ int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+typedef struct {
+ uint8_t type;
+ uint32_t offset;
+ uint8_t* data;
+ uint32_t length;
+ slice_header_t* slice_header;
+} nalu_info_t;
+
+typedef struct {
+ uint32_t iv[4];
+ uint32_t mode;
+ uint32_t app_id;
+} pavp_info_t;
+
+#define MAX_NUM_NALUS 20
+
+typedef struct {
+ uint8_t* data;
+ uint32_t length;
+ pavp_info_t* pavp;
+ dec_ref_pic_marking_t* dec_ref_pic_marking;
+ uint32_t num_nalus;
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+int parser_init(void);
+int parse_frame(uint8_t* frame, uint32_t frame_size, uint8_t* nalu_data, uint32_t* nalu_data_size);
+
+// DEBUG PRINTING
+void print_slice_header(slice_header_t* slice_header);
+void print_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void print_data_bytes(uint8_t* data, uint32_t count);
+void print_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+// BYTESWAPPING
+uint16_t byteswap_16(uint16_t word);
+uint32_t byteswap_32(uint32_t dword);
+void byteswap_slice_header(slice_header_t* slice_header);
+void byteswap_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void byteswap_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+#endif /* SEC_VIDEO_PARSER_H_ */
diff --git a/videodecoder/securevideo/cherrytrail/va_private.h b/videodecoder/securevideo/cherrytrail/va_private.h
new file mode 100644
index 0000000..e53e31d
--- /dev/null
+++ b/videodecoder/securevideo/cherrytrail/va_private.h
@@ -0,0 +1,63 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef __VA_PRIVATE_H__
+#define __VA_PRIVATE_H__
+#include <va/va.h>
+#define ENABLE_PAVP_LINUX 1
+// Misc parameter for encoder
+#define VAEncMiscParameterTypePrivate -2
+// encryption parameters for PAVP
+#define VAEncryptionParameterBufferType -3
+
+typedef struct _VAEncMiscParameterPrivate
+{
+ unsigned int target_usage; // Valid values 1-7 for AVC & MPEG2.
+ unsigned int reserved[7]; // Reserved for future use.
+} VAEncMiscParameterPrivate;
+
+/*VAEncryptionParameterBuffer*/
+typedef struct _VAEncryptionParameterBuffer
+{
+ //Not used currently
+ unsigned int encryptionSupport;
+ //Not used currently
+ unsigned int hostEncryptMode;
+ // For IV, Counter input
+ unsigned int pavpAesCounter[2][4];
+ // not used currently
+ unsigned int pavpIndex;
+ // PAVP mode, CTR, CBC, DEDE etc
+ unsigned int pavpCounterMode;
+ unsigned int pavpEncryptionType;
+ // not used currently
+ unsigned int pavpInputSize[2];
+ // not used currently
+ unsigned int pavpBufferSize[2];
+ // not used currently
+ VABufferID pvap_buf;
+ // set to TRUE if protected media
+ unsigned int pavpHasBeenEnabled;
+ // not used currently
+ unsigned int IntermmediatedBufReq;
+ // not used currently
+ unsigned int uiCounterIncrement;
+    // AppId: PAVP session index from application
+ unsigned int app_id;
+
+} VAEncryptionParameterBuffer;
+
+#endif
diff --git a/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..d9da2ac
--- /dev/null
+++ b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,507 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+
+#define STARTCODE_00 0x00
+#define STARTCODE_01 0x01
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+
+
+// mask for little endian, to mask the second and fourth bytes in the byte stream
+#define STARTCODE_MASK0 0xFF000000 //0x00FF0000
+#define STARTCODE_MASK1 0x0000FF00 //0x000000FF
+
+
+typedef enum {
+ NAL_UNIT_TYPE_unspecified0 = 0,
+ NAL_UNIT_TYPE_SLICE,
+ NAL_UNIT_TYPE_DPA,
+ NAL_UNIT_TYPE_DPB,
+ NAL_UNIT_TYPE_DPC,
+ NAL_UNIT_TYPE_IDR,
+ NAL_UNIT_TYPE_SEI,
+ NAL_UNIT_TYPE_SPS,
+ NAL_UNIT_TYPE_PPS,
+ NAL_UNIT_TYPE_Acc_unit_delimiter,
+ NAL_UNIT_TYPE_EOSeq,
+ NAL_UNIT_TYPE_EOstream,
+ NAL_UNIT_TYPE_filler_data,
+ NAL_UNIT_TYPE_SPS_extension,
+ NAL_UNIT_TYPE_Reserved14,
+ NAL_UNIT_TYPE_Reserved15,
+ NAL_UNIT_TYPE_Reserved16,
+ NAL_UNIT_TYPE_Reserved17,
+ NAL_UNIT_TYPE_Reserved18,
+ NAL_UNIT_TYPE_ACP,
+ NAL_UNIT_TYPE_Reserved20,
+ NAL_UNIT_TYPE_Reserved21,
+ NAL_UNIT_TYPE_Reserved22,
+ NAL_UNIT_TYPE_Reserved23,
+ NAL_UNIT_TYPE_unspecified24,
+} NAL_UNIT_TYPE;
+
+#ifndef min
+#define min(X, Y) ((X) <(Y) ? (X) : (Y))
+#endif
+
+
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType),
+ mNaluHeaderBuffer(NULL),
+ mInputBuffer(NULL) {
+
+ memset(&mMetadata, 0, sizeof(NaluMetadata));
+ memset(&mByteStream, 0, sizeof(NaluByteStream));
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
+ mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mMetadata.naluInfo == NULL ||
+ mByteStream.byteStream == NULL ||
+ mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory.");
+ // TODO: release all allocated memory
+ return DECODE_MEMORY_FAIL;
+ }
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) {
+ VideoDecoderAVC::stop();
+
+ if (mMetadata.naluInfo) {
+ delete [] mMetadata.naluInfo;
+ mMetadata.naluInfo = NULL;
+ }
+
+ if (mByteStream.byteStream) {
+ delete [] mByteStream.byteStream;
+ mByteStream.byteStream = NULL;
+ }
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ Decode_Status status;
+ int32_t sizeAccumulated = 0;
+ int32_t sizeLeft = 0;
+ uint8_t *pByteStream = NULL;
+ NaluInfo *pNaluInfo = mMetadata.naluInfo;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ pByteStream = buffer->data;
+ sizeLeft = buffer->size;
+ mInputBuffer = NULL;
+ } else {
+ status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream);
+ CHECK_STATUS("parseAnnexBStream");
+ pByteStream = mByteStream.byteStream;
+ sizeLeft = mByteStream.streamPos;
+ mInputBuffer = buffer->data;
+ }
+ if (sizeLeft < 4) {
+ ETRACE("Not enough data to read number of NALU.");
+ return DECODE_INVALID_DATA;
+ }
+
+ // read number of NALU
+ memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+ sizeLeft -= 4;
+
+ if (mMetadata.naluNumber == 0) {
+ WTRACE("Number of NALU is ZERO!");
+ return DECODE_SUCCESS;
+ }
+
+ for (int32_t i = 0; i < mMetadata.naluNumber; i++) {
+ if (sizeLeft < 12) {
+ ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
+ return DECODE_INVALID_DATA;
+ }
+ sizeLeft -= 12;
+ // read NALU offset
+ memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU size
+ memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU header length
+ memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ if (sizeLeft < pNaluInfo->naluHeaderLen) {
+ ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
+ return DECODE_INVALID_DATA;
+ }
+
+ sizeLeft -= pNaluInfo->naluHeaderLen;
+
+ if (pNaluInfo->naluHeaderLen) {
+ // copy start code prefix to buffer
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+
+ // copy NALU header
+ memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
+ pByteStream += pNaluInfo->naluHeaderLen;
+
+ sizeAccumulated += pNaluInfo->naluHeaderLen;
+ } else {
+ WTRACE("header len is zero for NALU %d", i);
+ }
+
+ // for next NALU
+ pNaluInfo++;
+ }
+
+ buffer->data = mNaluHeaderBuffer;
+ buffer->size = sizeAccumulated;
+
+ return VideoDecoderAVC::decode(buffer);
+}
+
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Check there is no reference frame loss before decoding a frame
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ //We have to provide a hacked DPB rather than complete DPB for libva as workaround
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ // find which naluinfo is correlated to current slice
+ int naluIndex = 0;
+ uint32_t accumulatedHeaderLen = 0;
+ uint32_t headerLen = 0;
+ for (; naluIndex < mMetadata.naluNumber; naluIndex++) {
+ headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
+ if (headerLen == 0) {
+ WTRACE("lenght of current NAL unit is 0.");
+ continue;
+ }
+ accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
+ if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
+ break;
+ }
+ accumulatedHeaderLen += headerLen;
+ }
+
+ if (sliceData->slice_offset != accumulatedHeaderLen) {
+ WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
+ }
+
+ sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
+ sliceData->slice_size = sliceParam->slice_data_size;
+
+ // no need to update:
+ // sliceParam->slice_data_offset - 0 always
+ // sliceParam->slice_data_bit_offset - relative to sliceData->slice_offset
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
+ // offset points to first byte of NAL unit
+ uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
+ if (mInputBuffer != NULL) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ mInputBuffer + sliceOffset,
+ &bufferIDs[bufferIDCount]);
+ } else {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAProtectedSliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ (uint8_t*)sliceOffset, // IMR offset
+ &bufferIDs[bufferIDCount]);
+ }
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+
+// Parse byte string pattern "0x000001" (3 bytes) in the current buffer.
+// Returns offset of position following the pattern in the buffer if pattern is found or -1 if not found.
+int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
+ uint8_t *ptr;
+ uint32_t left = 0, data = 0, phase = 0;
+ uint8_t mask1 = 0, mask2 = 0;
+
+ /* Meaning of phase:
+ 0: initial status, "0x000001" bytes are not found so far;
+ 1: one "0x00" byte is found;
+ 2: two or more consecutive "0x00" bytes" are found;
+        3: "0x000001" pattern is found;
+ 4: if there is one more byte after "0x000001";
+ */
+
+ left = length;
+ ptr = (uint8_t *) (stream + offset);
+ phase = 0;
+
+ // parse until there is more data and start code not found
+ while ((left > 0) && (phase < 3)) {
+ // Check if the address is 32-bit aligned & phase=0, if thats the case we can check 4 bytes instead of one byte at a time.
+ if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
+ while (left > 3) {
+ data = *((uint32_t *)ptr);
+ mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
+ mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
+ // If second byte and fourth byte are not zero's then we cannot have a start code here,
+ // as we need two consecutive zero bytes for a start code pattern.
+ if (mask1 && mask2) {
+ // skip 4 bytes and start over
+ ptr += 4;
+ left -=4;
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
+ if (left > 0) {
+ if (*ptr == STARTCODE_00) {
+ phase++;
+ if (phase > 2) {
+ // more than 2 consecutive '0x00' bytes is found
+ phase = 2;
+ }
+ } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
+ // start code is found
+ phase = 3;
+ } else {
+ // reset lookup
+ phase = 0;
+ }
+ ptr++;
+ left--;
+ }
+ }
+
+ if ((left > 0) && (phase == 3)) {
+ phase = 4;
+ // return offset of position following the pattern in the buffer which matches "0x000001" byte string
+ return (int32_t)(ptr - stream);
+ }
+ return -1;
+}
+
+
+Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) {
+ uint8_t naluType;
+ int32_t naluHeaderLen;
+
+ naluType = *(uint8_t *)(stream + naluStream->naluOffset);
+ naluType &= NALU_TYPE_MASK;
+ // first update nalu header length based on nalu type
+ if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
+ // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
+ naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
+ } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
+ //sps, pps, sei, etc, return the entire NAL unit in clear
+ naluHeaderLen = naluStream->naluLen;
+ } else {
+ return DECODE_FRAME_DROPPED;
+ }
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ if (naluHeaderLen) {
+ memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
+ naluStream->streamPos += naluHeaderLen;
+ }
+ return DECODE_SUCCESS;
+}
+
+
+// parse start-code prefixed stream, also known as Annex B byte stream, commonly used in AVI, ES, MPEG2 TS container
+Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
+ int32_t naluOffset, offset, left;
+ NaluInfo *info;
+ uint32_t ret = DECODE_SUCCESS;
+
+ naluOffset = 0;
+ offset = 0;
+ left = length;
+
+ // leave 4 bytes to copy nalu count
+ naluStream->streamPos = 4;
+ naluStream->naluCount = 0;
+ memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
+
+ for (; ;) {
+ naluOffset = findNalUnitOffset(stream, offset, left);
+ if (naluOffset == -1) {
+ break;
+ }
+
+ if (naluStream->naluCount == 0) {
+ naluStream->naluOffset = naluOffset;
+ } else {
+ naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN;
+ ret = copyNaluHeader(stream, naluStream);
+ if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
+ LOGW("copyNaluHeader returned %d", ret);
+ return ret;
+ }
+ // starting position for next NALU
+ naluStream->naluOffset = naluOffset;
+ }
+
+ if (ret == DECODE_SUCCESS) {
+ naluStream->naluCount++;
+ }
+
+ // update next lookup position and length
+ offset = naluOffset + 1; // skip one byte of NAL unit type
+ left = length - offset;
+ }
+
+ if (naluStream->naluCount > 0) {
+ naluStream->naluLen = length - naluStream->naluOffset;
+ memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
+ // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
+ copyNaluHeader(stream, naluStream);
+ return DECODE_SUCCESS;
+ }
+
+ LOGW("number of valid NALU is 0!");
+ return DECODE_SUCCESS;
+}
+
diff --git a/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..ee16073
--- /dev/null
+++ b/videodecoder/securevideo/clovertrail/VideoDecoderAVCSecure.h
@@ -0,0 +1,75 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+private:
+ enum {
+ MAX_SLICE_HEADER_SIZE = 30,
+ MAX_NALU_HEADER_BUFFER = 8192,
+ MAX_NALU_NUMBER = 400, // > 4096/12
+ };
+
+ // Information of Network Abstraction Layer Unit
+ struct NaluInfo {
+ int32_t naluOffset; // offset of NAL unit in the firewalled buffer
+ int32_t naluLen; // length of NAL unit
+ int32_t naluHeaderLen; // length of NAL unit header
+ };
+
+ struct NaluMetadata {
+ NaluInfo *naluInfo;
+ int32_t naluNumber; // number of NAL units
+ };
+
+ struct NaluByteStream {
+ int32_t naluOffset;
+ int32_t naluLen;
+ int32_t streamPos;
+        uint8_t *byteStream;    // 4 bytes of naluCount, 4 bytes of naluOffset, 4 bytes of naluLen, 4 bytes of naluHeaderLen, followed by naluHeaderData
+ int32_t naluCount;
+ };
+
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+ int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
+ Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
+ Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
+
+private:
+ NaluMetadata mMetadata;
+ NaluByteStream mByteStream;
+ uint8_t *mNaluHeaderBuffer;
+ uint8_t *mInputBuffer;
+};
+
+
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
new file mode 100755
index 0000000..649402d
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,858 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <va/va.h>
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderTrace.h"
+#include "vbp_loader.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoFrameInfo.h"
+
+#define MAX_SLICEHEADER_BUFFER_SIZE 4096
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+/* H.264 NAL unit type codes (ITU-T H.264 Table 7-1); values 1..5 are
+ * coded slice types, 7/8 are SPS/PPS. */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE,
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR,
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+// Constructor: zero all per-frame state and select the secure H.264 parser.
+// Heap buffers (mClearData, mCachedHeader) are allocated later in start().
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType){
+ mFrameSize = 0;
+ mFrameData = NULL;
+ mIsEncryptData = 0;
+ mClearData = NULL;
+ mCachedHeader = NULL;
+ setParserType(VBP_H264SECURE);
+ mFrameIdx = 0;
+ mModularMode = 0;
+ mSliceNum = 0;
+}
+
+// Starts the decoder: delegates to the base class, then allocates the two
+// scratch buffers used per frame (clear SPS/PPS data and cached slice
+// headers). Returns DECODE_MEMORY_FAIL if either allocation fails.
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::start");
+
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mClearData = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mClearData == NULL) {
+ ETRACE("Failed to allocate memory for mClearData");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mCachedHeader= new uint8_t [MAX_SLICEHEADER_BUFFER_SIZE];
+ if (mCachedHeader == NULL) {
+ ETRACE("Failed to allocate memory for mCachedHeader");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+// Stops the decoder and releases the scratch buffers allocated in start().
+// Safe to call multiple times: pointers are nulled after deletion.
+void VideoDecoderAVCSecure::stop(void) {
+ VTRACE("VideoDecoderAVCSecure::stop");
+ VideoDecoderAVC::stop();
+
+ if (mClearData) {
+ delete [] mClearData;
+ mClearData = NULL;
+ }
+
+ if (mCachedHeader) {
+ delete [] mCachedHeader;
+ mCachedHeader = NULL;
+ }
+}
+// Modular-DRM input path: the input buffer carries a frame_info_t describing
+// the encrypted payload plus a NALU table. Slice NALUs (types 1..5) are
+// recorded in mSliceInfo with 16-byte-aligned offsets/sizes for the firewalled
+// buffer; clear SPS/PPS NALUs are copied into mClearData and fed to the SW
+// parser. Clear (non-secure) buffers are parsed directly.
+// On success *data holds the parser output; returns DECODE_MEMORY_FAIL on
+// malformed metadata.
+Decode_Status VideoDecoderAVCSecure::processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ VTRACE("processModularInputBuffer +++");
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+
+ int32_t nalu_num = 0;
+ uint8_t nalu_type = 0;
+ int32_t nalu_offset = 0;
+ uint32_t nalu_size = 0;
+ uint8_t naluType = 0;
+ uint8_t *nalu_data = NULL;
+ uint32_t sliceidx = 0;
+
+ frame_info_t *pFrameInfo = NULL;
+ mSliceNum = 0;
+ memset(&mSliceInfo, 0, sizeof(mSliceInfo));
+ mIsEncryptData = 0;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ pFrameInfo = (frame_info_t *) buffer->data;
+ if (pFrameInfo == NULL) {
+ ETRACE("Invalid parameter: pFrameInfo is NULL!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mFrameData = pFrameInfo->data;
+ mFrameSize = pFrameInfo->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+
+ nalu_num = pFrameInfo->num_nalus;
+ VTRACE("nalu_num = %d", nalu_num);
+
+ // NOTE(review): '>= MAX_NUM_NALUS' also rejects a table with exactly
+ // MAX_NUM_NALUS entries, which the nalus[] array could hold -- confirm
+ // whether that is intentional.
+ if (nalu_num <= 0 || nalu_num >= MAX_NUM_NALUS) {
+ ETRACE("Invalid parameter: nalu_num = %d", nalu_num);
+ return DECODE_MEMORY_FAIL;
+ }
+
+ for (int32_t i = 0; i < nalu_num; i++) {
+
+ nalu_size = pFrameInfo->nalus[i].length;
+ nalu_type = pFrameInfo->nalus[i].type;
+ nalu_offset = pFrameInfo->nalus[i].offset;
+ nalu_data = pFrameInfo->nalus[i].data;
+ naluType = nalu_type & NALU_TYPE_MASK;
+
+ VTRACE("nalu_type = 0x%x, nalu_size = %d, nalu_offset = 0x%x", nalu_type, nalu_size, nalu_offset);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+
+ // Slice NALU: stays encrypted in the firewalled buffer; record a
+ // 16-byte-aligned window (start offset rounded down, size rounded
+ // up) plus the byte offset of the actual slice inside that window.
+ mIsEncryptData = 1;
+ VTRACE("slice idx = %d", sliceidx);
+ mSliceInfo[sliceidx].sliceHeaderByte = nalu_type;
+ mSliceInfo[sliceidx].sliceStartOffset = (nalu_offset >> 4) << 4;
+ mSliceInfo[sliceidx].sliceByteOffset = nalu_offset - mSliceInfo[sliceidx].sliceStartOffset;
+ mSliceInfo[sliceidx].sliceLength = mSliceInfo[sliceidx].sliceByteOffset + nalu_size;
+ mSliceInfo[sliceidx].sliceSize = (mSliceInfo[sliceidx].sliceByteOffset + nalu_size + 0xF) & ~0xF;
+ VTRACE("sliceHeaderByte = 0x%x", mSliceInfo[sliceidx].sliceHeaderByte);
+ VTRACE("sliceStartOffset = %d", mSliceInfo[sliceidx].sliceStartOffset);
+ VTRACE("sliceByteOffset = %d", mSliceInfo[sliceidx].sliceByteOffset);
+ VTRACE("sliceSize = %d", mSliceInfo[sliceidx].sliceSize);
+ VTRACE("sliceLength = %d", mSliceInfo[sliceidx].sliceLength);
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = mSliceInfo[sliceidx].sliceSize > 64 ? 64 : mSliceInfo[sliceidx].sliceSize ;
+ testdata = (uint8_t *)(mFrameData);
+ for (int i = 0; i < testsize; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ sliceidx++;
+
+ } else if (naluType == h264_NAL_UNIT_TYPE_SPS || naluType == h264_NAL_UNIT_TYPE_PPS) {
+ if (nalu_data == NULL) {
+ ETRACE("Invalid parameter: nalu_data = NULL for naluType 0x%x", naluType);
+ return DECODE_MEMORY_FAIL;
+ }
+ // NOTE(review): clear_data_size + nalu_size is not checked against
+ // MAX_NALU_HEADER_BUFFER before this memcpy -- oversized metadata
+ // could overflow mClearData.
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ITRACE("Nalu type = 0x%x is skipped", naluType);
+ continue;
+ }
+ }
+ clear_data = mClearData;
+ mSliceNum = sliceidx;
+
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ // Parse whatever clear bytes we have; with no clear data, just query the
+ // parser's current state so *data is still populated.
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+// Classic (non-modular) secure input path: NALU metadata is appended AFTER
+// the encrypted payload, at buffer->data + buffer->size. Each table entry is
+// three uint32 fields followed by the NALU bytes (size rounded up to 4).
+// Clear SEI/SPS/PPS NALUs are re-prefixed with a start code and collected in
+// mClearData for the SW parser; slice NALUs must not appear here (the HW
+// consumes the whole encrypted frame). Clear buffers are parsed directly.
+Decode_Status VideoDecoderAVCSecure::processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+ uint8_t naluType = 0;
+
+ int32_t num_nalus;
+ int32_t nalu_offset;
+ int32_t offset;
+ uint8_t *data_src;
+ uint8_t *nalu_data;
+ uint32_t nalu_size;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+
+ mFrameData = buffer->data;
+ mFrameSize = buffer->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+ // NOTE(review): num_nalus is trusted as-is and never bounds-checked;
+ // a corrupt count walks past the metadata region.
+ num_nalus = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t));
+ VTRACE("num_nalus = %d", num_nalus);
+ offset = 4;
+ for (int32_t i = 0; i < num_nalus; i++) {
+ VTRACE("%d nalu, offset = %d", i, offset);
+ data_src = buffer->data + buffer->size + sizeof(uint32_t) + offset;
+ nalu_size = *(uint32_t *)(data_src + 2 * sizeof(uint32_t));
+ nalu_size = (nalu_size + 0x03) & (~0x03); // round up to 4-byte multiple
+
+ nalu_data = data_src + 3 *sizeof(uint32_t);
+ naluType = nalu_data[0] & NALU_TYPE_MASK;
+ offset += nalu_size + 3 *sizeof(uint32_t);
+ VTRACE("naluType = 0x%x", naluType);
+ VTRACE("nalu_size = %d, nalu_data = %p", nalu_size, nalu_data);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ ETRACE("Slice NALU received!");
+ return DECODE_INVALID_DATA;
+ }
+
+ else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ // NOTE(review): no check of clear_data_size against
+ // MAX_NALU_HEADER_BUFFER before these memcpys -- possible
+ // overflow of mClearData with hostile metadata.
+ memcpy(mClearData + clear_data_size,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ clear_data_size += STARTCODE_PREFIX_LEN;
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ETRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+ clear_data = mClearData;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ // Parse available clear bytes, or query the parser state when there are
+ // none, so *data is populated either way.
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data);
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+// Entry point for one input buffer: routes it through the modular or classic
+// pre-processing path, lazily starts VA once SPS and PPS are both known, then
+// decodes the frame.
+// Note: mModularMode latches on the first IS_SUBSAMPLE_ENCRYPTION buffer and
+// is never cleared, so all subsequent buffers use the modular path.
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+ VTRACE("VideoDecoderAVCSecure::decode");
+ Decode_Status status;
+ vbp_data_h264 *data = NULL;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+#if 0
+ // Dead debug code; note the loop bound is a hard-coded 16, not testsize.
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = buffer->size > 16 ? 16:buffer->size ;
+ testdata = (uint8_t *)(buffer->data);
+ for (int i = 0; i < 16; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ if (buffer->flag & IS_SUBSAMPLE_ENCRYPTION) {
+ mModularMode = 1;
+ }
+
+ if (mModularMode) {
+ status = processModularInputBuffer(buffer,&data);
+ CHECK_STATUS("processModularInputBuffer");
+ }
+ else {
+ status = processClassicInputBuffer(buffer,&data);
+ CHECK_STATUS("processClassicInputBuffer");
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+
+ status = decodeFrame(buffer, data);
+
+ return status;
+}
+
+// Decodes one frame: handles a new SPS/PPS sequence, acquires an output
+// surface, parses slice headers through the HW parser, submits slice data,
+// and finalizes the previous frame. Interlaced (field) content is rejected.
+Decode_Status VideoDecoderAVCSecure::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::decodeFrame");
+ Decode_Status status;
+ VTRACE("data->has_sps = %d, data->has_pps = %d", data->has_sps, data->has_pps);
+
+#if 0
+ // Don't remove the following codes, it can be enabled for debugging DPB.
+ for (unsigned int i = 0; i < data->num_pictures; i++) {
+ VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+ VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d, reference = %d",
+ i,
+ buffer->timeStamp/1E6,
+ pic.TopFieldOrderCnt,
+ pic.BottomFieldOrderCnt,
+ pic.flags,
+ (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+ }
+#endif
+
+ if (data->new_sps || data->new_pps) {
+ status = handleNewSequence(data);
+ CHECK_STATUS("handleNewSequence");
+ }
+
+ // Modular-mode clear buffer with no slices: nothing to decode; report a
+ // pending format change if any, otherwise success.
+ if (mModularMode && (!mIsEncryptData)) {
+ if (data->pic_data[0].num_slices == 0) {
+ ITRACE("No slice available for decoding.");
+ status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+ mSizeChanged = false;
+ return status;
+ }
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+
+ // start decoding a new frame
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+ // NOTE(review): the return values of parseModularSliceHeader()/
+ // parseClassicSliceHeader() are discarded, so the 'status' tested below is
+ // still the acquireSurfaceBuffer() result -- a slice-header parse failure
+ // is never detected here. Likely should be 'status = parse...'.
+ if (mModularMode) {
+ parseModularSliceHeader(buffer,data);
+ }
+ else {
+ parseClassicSliceHeader(buffer,data);
+ }
+
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ return status;
+ }
+
+ status = beginDecodingFrame(data);
+ CHECK_STATUS("beginDecodingFrame");
+
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ if (isNewFrame(data, lastPTS == mCurrentPTS) == 0) {
+ ETRACE("Can't handle interlaced frames yet");
+ return DECODE_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
+// Initializes the acquired surface buffer for the current picture (reference
+// flag, scan format, timestamp, POC, resolution-change flag) and then decodes
+// its slices via continueDecodingFrame().
+Decode_Status VideoDecoderAVCSecure::beginDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::beginDecodingFrame");
+ Decode_Status status;
+ VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+ // A picture marked short- or long-term reference must be kept in the DPB.
+ if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ mAcquiredBuffer->referenceFrame = true;
+ } else {
+ mAcquiredBuffer->referenceFrame = false;
+ }
+
+ if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+ }
+
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+ mSizeChanged = false;
+ }
+
+ status = continueDecodingFrame(data);
+ return status;
+}
+
+// Iterates every picture/slice in the parser output and decodes each slice.
+// On a slice failure the frame is aborted and its reference entry removed
+// from the DPB so later frames do not predict from it.
+Decode_Status VideoDecoderAVCSecure::continueDecodingFrame(vbp_data_h264 *data) {
+ VTRACE("VideoDecoderAVCSecure::continueDecodingFrame");
+ Decode_Status status;
+ vbp_picture_data_h264 *picData = data->pic_data;
+
+ if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+ VTRACE("data->num_pictures = %d", data->num_pictures);
+ for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+ if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ // A second picture that is not a field pair means two packed frames,
+ // which this decoder does not support.
+ if (picIndex > 0 &&
+ (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+ ETRACE("Packed frame is not supported yet!");
+ return DECODE_FAIL;
+ }
+ VTRACE("picData->num_slices = %d", picData->num_slices);
+ for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+ status = decodeSlice(data, picIndex, sliceIndex);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ // remove current frame from DPB as it can't be decoded.
+ removeReferenceFromDPB(picData->pic_parms);
+ return status;
+ }
+ }
+ }
+ return DECODE_SUCCESS;
+}
+
+// Classic-mode slice-header parsing: feeds the whole encrypted frame to the
+// HW parser in one VAParsePictureParameterBuffer render, then reads back the
+// decoded slice headers and merges them into the parser data.
+// The VA buffers created here are not explicitly destroyed -- presumably the
+// driver reclaims them after vaRenderPicture/vaEndPicture; confirm.
+Decode_Status VideoDecoderAVCSecure::parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+ VABufferID mSlicebufferID; // NOTE(review): local despite the m-prefix
+
+ if (mFrameSize <= 0) {
+ return DECODE_SUCCESS;
+ }
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // Output buffer the HW parser fills with decoded slice headers.
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ void *sliceheaderbuf;
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+
+ // Slice data buffer referencing the (firewalled) frame payload.
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mFrameSize, //size
+ 1, //num_elements
+ mFrameData,
+ &mSlicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mFrameSize;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+
+#if 0
+
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ // Rendering this buffer triggers the HW slice-header parse.
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ // Merge the HW-parsed slice headers back into the parser's data.
+ status = updateSliceParameter(data,sliceheaderbuf);
+ CHECK_STATUS("processSliceHeader");
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ return DECODE_SUCCESS;
+}
+
+// Modular-mode slice-header parsing: runs the HW parser once per slice in
+// mSliceInfo, concatenating each parsed header into mCachedHeader, then
+// terminates the list with 0xFF and merges it into the parser data.
+// NOTE(review): the mFrameSize/mSliceNum early-out happens AFTER
+// vaBeginPicture, leaving a begun picture that is never ended -- confirm the
+// driver tolerates this.
+Decode_Status VideoDecoderAVCSecure::parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+ Decode_Status status;
+ VAStatus vaStatus;
+
+ VABufferID sliceheaderbufferID;
+ VABufferID pictureparameterparsingbufferID;
+ VABufferID mSlicebufferID; // NOTE(review): local despite the m-prefix
+ int32_t sliceIdx;
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ if (mFrameSize <= 0 || mSliceNum <=0) {
+ return DECODE_SUCCESS;
+ }
+ void *sliceheaderbuf;
+ memset(mCachedHeader, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+ int32_t offset = 0; // write position in mCachedHeader
+ int32_t size = 0; // size of one parsed slice header
+
+ for (sliceIdx = 0; sliceIdx < mSliceNum; sliceIdx++) {
+ // Output buffer the HW parser fills with this slice's header.
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParseSliceHeaderGroupBufferType,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ 1,
+ NULL,
+ &sliceheaderbufferID);
+ CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+
+ // Slice data buffer: the 16-byte-aligned window of this slice inside
+ // the firewalled frame buffer (see processModularInputBuffer).
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ mSliceInfo[sliceIdx].sliceSize, //size
+ 1, //num_elements
+ mFrameData + mSliceInfo[sliceIdx].sliceStartOffset,
+ &mSlicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+ data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+ data->pic_parse_buffer->frame_size = mSliceInfo[sliceIdx].sliceLength;
+ data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+ data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte;
+ data->pic_parse_buffer->slice_offset = mSliceInfo[sliceIdx].sliceByteOffset;
+
+#if 0
+ VTRACE("data->pic_parse_buffer->slice_offset = 0x%x", data->pic_parse_buffer->slice_offset);
+ VTRACE("pic_parse_buffer->nalu_header.value = %x", data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte);
+ VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+ VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+ VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+ VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+ VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+ VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+ VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+ VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+ VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+ VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+ VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+ VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+ VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+ VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+ VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+ VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAParsePictureParameterBufferType,
+ sizeof(VAParsePictureParameterBuffer),
+ 1,
+ data->pic_parse_buffer,
+ &pictureparameterparsingbufferID);
+ CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+ // Rendering this buffer triggers the HW parse of one slice header.
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &pictureparameterparsingbufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ vaStatus = vaMapBuffer(
+ mVADisplay,
+ sliceheaderbufferID,
+ &sliceheaderbuf);
+ CHECK_VA_STATUS("vaMapBuffer");
+
+ // Parsed-header length is stored at byte offset 4; +4 covers the prefix.
+ size = *(uint32 *)((uint8 *)sliceheaderbuf + 4) + 4;
+ VTRACE("slice header size = 0x%x, offset = 0x%x", size, offset);
+ if (offset + size <= MAX_SLICEHEADER_BUFFER_SIZE - 4) {
+ memcpy(mCachedHeader+offset, sliceheaderbuf, size);
+ offset += size;
+ } else {
+ WTRACE("Cached slice header is not big enough!");
+ }
+ vaStatus = vaUnmapBuffer(
+ mVADisplay,
+ sliceheaderbufferID);
+ CHECK_VA_STATUS("vaUnmapBuffer");
+ }
+ // 0xFF terminator marks the end of the concatenated header list.
+ memset(mCachedHeader + offset, 0xFF, 4);
+ status = updateSliceParameter(data,mCachedHeader);
+ CHECK_STATUS("processSliceHeader");
+ return DECODE_SUCCESS;
+}
+
+
+// Feeds the HW-parsed slice-header bytes back into the SW parser so that
+// 'data' (slice parameters, DPB info) reflects the real slice headers.
+Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
+ VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
+ Decode_Status status;
+ status = VideoDecoderBase::updateBuffer(
+ (uint8_t *)sliceheaderbuf,
+ MAX_SLICEHEADER_BUFFER_SIZE,
+ (void**)&data);
+ CHECK_STATUS("updateBuffer");
+ return DECODE_SUCCESS;
+}
+
+// Submits one slice to the hardware: on the first slice of a frame also sends
+// picture parameters and the IQ matrix, then renders the slice parameter and
+// slice data buffers. Slice data comes from the firewalled buffer when the
+// content is encrypted, otherwise from the parser's clear-data buffer.
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 3 buffers to render a slice: picture parameter, IQMatrix, slice parameter
+ VABufferID bufferIDs[3];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+ uint32_t slice_data_size = 0;
+ uint8_t* slice_data_addr = NULL;
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ }
+ VTRACE("Current frameidx = %d", mFrameIdx++);
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ //We have to provide a hacked DPB rather than complete DPB for libva as workaround
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ // Select the slice data source: encrypted slices come straight from the
+ // firewalled frame buffer (aligned window recorded in mSliceInfo); clear
+ // slices come from the parser's buffer; classic mode sends the whole frame.
+ if (mModularMode) {
+ if (mIsEncryptData) {
+ sliceParam->slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+ slice_data_addr = mFrameData + mSliceInfo[sliceIndex].sliceStartOffset;
+ } else {
+ slice_data_size = sliceData->slice_size;
+ slice_data_addr = sliceData->buffer_addr + sliceData->slice_offset;
+ }
+ } else {
+ sliceParam->slice_data_size = mFrameSize;
+ slice_data_size = mFrameSize;
+ slice_data_addr = mFrameData;
+ }
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ VABufferID slicebufferID;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ slice_data_size, //size
+ 1, //num_elements
+ slice_data_addr,
+ &slicebufferID);
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ &slicebufferID,
+ 1);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+
+}
+
+// Creates the VA config for this decoder. In modular (sub-sample encryption)
+// mode the slice-mode attribute is switched to VA_DEC_SLICE_MODE_SUBSAMPLE so
+// the driver parses slice headers from protected memory.
+// @param profile VA profile chosen by the base class.
+// @param config  [out] receives the created VAConfigID.
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+ VAProfile profile, VAConfigID *config)
+{
+ VAStatus vaStatus;
+ VAConfigAttrib attrib[2];
+
+ if (config == NULL) {
+ ETRACE("Invalid parameter!");
+ return DECODE_FAIL;
+ }
+
+ attrib[0].type = VAConfigAttribRTFormat;
+ attrib[0].value = VA_RT_FORMAT_YUV420;
+ attrib[1].type = VAConfigAttribDecSliceMode;
+ attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+ if (mModularMode) {
+ attrib[1].value = VA_DEC_SLICE_MODE_SUBSAMPLE;
+ }
+
+ vaStatus = vaCreateConfig(
+ mVADisplay,
+ profile,
+ VAEntrypointVLD,
+ &attrib[0],
+ 2,
+ config);
+ CHECK_VA_STATUS("vaCreateConfig");
+
+ return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
new file mode 100755
index 0000000..d4a9f15
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
@@ -0,0 +1,69 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H
+#define VIDEO_DECODER_AVC_SECURE_H
+
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderDefs.h"
+
+// Secure H.264 decoder for Merrifield: supports the classic secure path
+// (whole encrypted frame + appended NALU metadata) and the modular DRM path
+// (frame_info_t with per-NALU sub-sample encryption, HW slice-header parse).
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+protected:
+ virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+ virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+ Decode_Status parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+ Decode_Status parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+
+ Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+ Decode_Status processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ Decode_Status processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+ int32_t mIsEncryptData; // nonzero when the current frame is protected
+ int32_t mFrameSize; // size of the (possibly firewalled) frame payload
+ uint8_t* mFrameData; // pointer to the frame payload
+ uint8_t* mClearData; // clear SPS/PPS/SEI bytes for the SW parser
+ uint8_t* mCachedHeader; // concatenated HW-parsed slice headers
+ int32_t mFrameIdx; // debug frame counter
+ int32_t mModularMode; // latched once a sub-sample-encrypted buffer is seen
+
+ enum {
+ MAX_SLICE_HEADER_NUM = 256,
+ };
+ int32_t mSliceNum; // number of valid entries in mSliceInfo
+ // Information of Slices in the Modular DRM Mode
+ struct SliceInfo {
+ uint8_t sliceHeaderByte; // first byte of the slice header
+ uint32_t sliceStartOffset; // offset of Slice unit in the firewalled buffer
+ uint32_t sliceByteOffset; // extra offset from the blockAligned slice offset
+ uint32_t sliceSize; // block aligned length of slice unit
+ uint32_t sliceLength; // actual size of the slice
+ };
+
+ SliceInfo mSliceInfo[MAX_SLICE_HEADER_NUM];
+};
+
+#endif
diff --git a/videodecoder/securevideo/merrifield/VideoFrameInfo.h b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
new file mode 100755
index 0000000..485b0da
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_FRAME_INFO_H_
+#define VIDEO_FRAME_INFO_H_
+
+#define MAX_NUM_NALUS 16
+
+// Describes one NAL unit inside a secure frame buffer.
+typedef struct {
+ uint8_t type; // nalu type + nal_ref_idc
+ uint32_t offset; // offset to the pointer of the encrypted data
+ uint8_t* data; // if the nalu is encrypted, this field is useless; if current NALU is SPS/PPS, data is the pointer to clear SPS/PPS data
+ uint32_t length; // nalu length
+} nalu_info_t;
+
+// Frame descriptor passed in VideoDecodeBuffer::data for modular DRM input.
+typedef struct {
+ uint8_t* data; // pointer to the encrypted data
+ uint32_t size; // encrypted data size
+ uint32_t num_nalus; // number of NALU
+ nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+#endif
diff --git a/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..38039e2
--- /dev/null
+++ b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,510 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "VideoDecoderAVCSecure.h"
+#include "VideoDecoderTrace.h"
+#include <string.h>
+
+
+#define STARTCODE_00 0x00
+#define STARTCODE_01 0x01
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+
+
+// masks for little endian, to mask the second and fourth bytes in the byte stream
+#define STARTCODE_MASK0 0xFF000000 //0x00FF0000
+#define STARTCODE_MASK1 0x0000FF00 //0x000000FF
+
+
+typedef enum { // H.264 nal_unit_type codes; values follow H.264 spec Table 7-1 (0..24)
+ NAL_UNIT_TYPE_unspecified0 = 0,
+ NAL_UNIT_TYPE_SLICE, // 1: coded slice of a non-IDR picture
+ NAL_UNIT_TYPE_DPA,
+ NAL_UNIT_TYPE_DPB,
+ NAL_UNIT_TYPE_DPC,
+ NAL_UNIT_TYPE_IDR, // 5: coded slice of an IDR picture
+ NAL_UNIT_TYPE_SEI,
+ NAL_UNIT_TYPE_SPS,
+ NAL_UNIT_TYPE_PPS, // 6..8: clear metadata NALUs passed through whole
+ NAL_UNIT_TYPE_Acc_unit_delimiter,
+ NAL_UNIT_TYPE_EOSeq,
+ NAL_UNIT_TYPE_EOstream,
+ NAL_UNIT_TYPE_filler_data,
+ NAL_UNIT_TYPE_SPS_extension,
+ NAL_UNIT_TYPE_Reserved14,
+ NAL_UNIT_TYPE_Reserved15,
+ NAL_UNIT_TYPE_Reserved16,
+ NAL_UNIT_TYPE_Reserved17,
+ NAL_UNIT_TYPE_Reserved18,
+ NAL_UNIT_TYPE_ACP, // 19: auxiliary coded picture slice
+ NAL_UNIT_TYPE_Reserved20,
+ NAL_UNIT_TYPE_Reserved21,
+ NAL_UNIT_TYPE_Reserved22,
+ NAL_UNIT_TYPE_Reserved23,
+ NAL_UNIT_TYPE_unspecified24,
+} NAL_UNIT_TYPE;
+
+#ifndef min
+#define min(X, Y) ((X) <(Y) ? (X) : (Y))
+#endif
+
+
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType), // zero all NALU bookkeeping; buffers are allocated in start()
+ mNaluHeaderBuffer(NULL),
+ mInputBuffer(NULL) {
+
+ memset(&mMetadata, 0, sizeof(NaluMetadata));
+ memset(&mByteStream, 0, sizeof(NaluByteStream));
+}
+
+VideoDecoderAVCSecure::~VideoDecoderAVCSecure() { // buffers are freed in stop(), not here
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) { // base start plus allocation of NALU metadata and header scratch buffers
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
+ mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+
+ if (mMetadata.naluInfo == NULL ||
+ mByteStream.byteStream == NULL ||
+ mNaluHeaderBuffer == NULL) {
+ ETRACE("Failed to allocate memory.");
+ // TODO: release all allocated memory -- NOTE(review): on partial failure the successful allocations above leak until stop()
+ return DECODE_MEMORY_FAIL;
+ }
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) { // release the buffers allocated in start(), then stop the base decoder
+ VideoDecoderAVC::stop();
+
+ if (mMetadata.naluInfo) {
+ delete [] mMetadata.naluInfo;
+ mMetadata.naluInfo = NULL;
+ }
+
+ if (mByteStream.byteStream) {
+ delete [] mByteStream.byteStream;
+ mByteStream.byteStream = NULL;
+ }
+
+ if (mNaluHeaderBuffer) {
+ delete [] mNaluHeaderBuffer;
+ mNaluHeaderBuffer = NULL;
+ }
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) { // extract NALU metadata + clear headers, then hand a headers-only buffer to the base decoder
+ Decode_Status status;
+ int32_t sizeAccumulated = 0; // bytes written into mNaluHeaderBuffer so far
+ int32_t sizeLeft = 0; // unread bytes remaining in the NALU-header stream
+ uint8_t *pByteStream = NULL;
+ NaluInfo *pNaluInfo = mMetadata.naluInfo;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ // NALU headers are appended to encrypted video bitstream
+ // |...encrypted video bitstream (16 bytes aligned)...| 4 bytes of header size |...NALU headers..|
+ pByteStream = buffer->data + buffer->size + 4;
+ sizeLeft = *(int32_t *)(buffer->data + buffer->size);
+ VTRACE("%s sizeLeft: %d buffer->size: %#x", __func__, sizeLeft, buffer->size);
+ mInputBuffer = buffer->data;
+ } else {
+ status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream); // clear stream: build the same metadata layout ourselves
+ CHECK_STATUS("parseAnnexBStream");
+ pByteStream = mByteStream.byteStream;
+ sizeLeft = mByteStream.streamPos;
+ mInputBuffer = buffer->data;
+ }
+ if (sizeLeft < 4) {
+ ETRACE("Not enough data to read number of NALU.");
+ return DECODE_INVALID_DATA;
+ }
+
+ // read number of NALU
+ memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+ sizeLeft -= 4;
+
+ if (mMetadata.naluNumber == 0) {
+ WTRACE("Number of NALU is ZERO!");
+ return DECODE_SUCCESS;
+ }
+
+ for (int32_t i = 0; i < mMetadata.naluNumber; i++) { // NOTE(review): naluNumber is not clamped to MAX_NALU_NUMBER; pNaluInfo can overrun on hostile input
+ if (sizeLeft < 12) {
+ ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
+ return DECODE_INVALID_DATA;
+ }
+ sizeLeft -= 12;
+ // read NALU offset
+ memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU size
+ memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+ // read NALU header length
+ memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
+ pByteStream += 4;
+
+
+ if (sizeLeft < pNaluInfo->naluHeaderLen) {
+ ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
+ return DECODE_INVALID_DATA;
+ }
+
+ sizeLeft -= pNaluInfo->naluHeaderLen;
+
+ if (pNaluInfo->naluHeaderLen) {
+ // copy start code prefix to buffer -- NOTE(review): sizeAccumulated is never checked against MAX_NALU_HEADER_BUFFER
+ memcpy(mNaluHeaderBuffer + sizeAccumulated,
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ sizeAccumulated += STARTCODE_PREFIX_LEN;
+
+ // copy NALU header
+ memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
+ pByteStream += pNaluInfo->naluHeaderLen;
+
+ sizeAccumulated += pNaluInfo->naluHeaderLen;
+ } else {
+ WTRACE("header len is zero for NALU %d", i);
+ }
+
+ // for next NALU
+ pNaluInfo++;
+ }
+
+ buffer->data = mNaluHeaderBuffer; // the base decoder parses only the start-code-prefixed headers; encrypted payload stays in mInputBuffer
+ buffer->size = sizeAccumulated;
+
+ return VideoDecoderAVC::decode(buffer);
+}
+
+
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) { // submit one slice (params + data) to libva; maps the parsed header back to its encrypted NALU
+
+ Decode_Status status;
+ VAStatus vaStatus;
+ uint32_t bufferIDCount = 0;
+ // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
+ VABufferID bufferIDs[4];
+
+ vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+ vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+ VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+ VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+
+ if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+ // either condition indicates start of a new frame
+ if (sliceParam->first_mb_in_slice != 0) {
+ WTRACE("The first slice is lost.");
+ // TODO: handle the first slice lost
+ }
+ if (mDecodingFrame) {
+ // interlace content, complete decoding the first field
+ vaStatus = vaEndPicture(mVADisplay, mVAContext);
+ CHECK_VA_STATUS("vaEndPicture");
+
+ // for interlace content, top field may be valid only after the second field is parsed
+ mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
+ }
+
+ // Check there is no reference frame loss before decoding a frame
+
+ // Update the reference frames and surface IDs for DPB and current frame
+ status = updateDPB(picParam);
+ CHECK_STATUS("updateDPB");
+
+ //We have to provide a hacked DPB rather than complete DPB for libva as workaround
+ status = updateReferenceFrames(picData);
+ CHECK_STATUS("updateReferenceFrames");
+
+ vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+ CHECK_VA_STATUS("vaBeginPicture");
+
+ // start decoding a frame
+ mDecodingFrame = true;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAPictureParameterBufferType,
+ sizeof(VAPictureParameterBufferH264),
+ 1,
+ picParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAIQMatrixBufferType,
+ sizeof(VAIQMatrixBufferH264),
+ 1,
+ data->IQ_matrix_buf,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+ bufferIDCount++;
+ }
+
+ status = setReference(sliceParam);
+ CHECK_STATUS("setReference");
+
+ // find which naluinfo is correlated to current slice by walking accumulated clear-header offsets
+ int naluIndex = 0;
+ uint32_t accumulatedHeaderLen = 0;
+ uint32_t headerLen = 0;
+ for (; naluIndex < mMetadata.naluNumber; naluIndex++) {
+ headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
+ if (headerLen == 0) {
+ WTRACE("lenght of current NAL unit is 0.");
+ continue;
+ }
+ accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
+ if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
+ break;
+ }
+ accumulatedHeaderLen += headerLen;
+ }
+
+ if (sliceData->slice_offset != accumulatedHeaderLen) {
+ WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
+ }
+
+ sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen; // NOTE(review): if the loop above ran to completion, naluIndex == naluNumber reads one past the parsed entries
+ uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
+ uint32_t slice_offset_shift = sliceOffset % 16; // distance from the 16-byte-aligned start the hardware requires
+ sliceParam->slice_data_offset += slice_offset_shift;
+ sliceData->slice_size = (sliceParam->slice_data_size + slice_offset_shift + 0xF) & ~0xF; // round total up to a 16-byte multiple
+
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceParameterBufferType,
+ sizeof(VASliceParameterBufferH264),
+ 1,
+ sliceParam,
+ &bufferIDs[bufferIDCount]);
+ CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+ bufferIDCount++;
+
+ // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
+ // offset points to first byte of NAL unit
+
+ if (mInputBuffer != NULL) {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VASliceDataBufferType,
+ sliceData->slice_size, //Slice size
+ 1, // num_elements
+ mInputBuffer + sliceOffset - slice_offset_shift, // aligned start of the encrypted slice
+ &bufferIDs[bufferIDCount]);
+ } else {
+ vaStatus = vaCreateBuffer(
+ mVADisplay,
+ mVAContext,
+ VAProtectedSliceDataBufferType,
+ sliceData->slice_size, //size
+ 1, //num_elements
+ (uint8_t*)sliceOffset, // IMR offset passed in place of a pointer, not dereferenced by the driver
+ &bufferIDs[bufferIDCount]);
+ }
+ CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+ bufferIDCount++;
+
+ vaStatus = vaRenderPicture(
+ mVADisplay,
+ mVAContext,
+ bufferIDs,
+ bufferIDCount);
+ CHECK_VA_STATUS("vaRenderPicture");
+
+ return DECODE_SUCCESS;
+}
+
+
+// Parse byte string pattern "0x000001" (3 bytes) in the current buffer.
+// Returns offset of position following the pattern in the buffer if pattern is found or -1 if not found.
+int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
+ uint8_t *ptr;
+ uint32_t left = 0, data = 0, phase = 0;
+ uint8_t mask1 = 0, mask2 = 0;
+
+ /* Meaning of phase:
+ 0: initial status, "0x000001" bytes are not found so far;
+ 1: one "0x00" byte is found;
+ 2: two or more consecutive "0x00" bytes" are found;
+ 3: "0x000001" pattern is found ;
+ 4: if there is one more byte after "0x000001";
+ */
+
+ left = length;
+ ptr = (uint8_t *) (stream + offset);
+ phase = 0;
+
+ // parse until there is more data and start code not found
+ while ((left > 0) && (phase < 3)) {
+ // Check if the address is 32-bit aligned & phase=0, if thats the case we can check 4 bytes instead of one byte at a time.
+ // NOTE(review): casting the pointer to uint32_t truncates on 64-bit builds; assumes a 32-bit target -- verify before reuse.
+ if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
+ while (left > 3) {
+ data = *((uint32_t *)ptr);
+ mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
+ mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
+ // If second byte and fourth byte are not zero's then we cannot have a start code here,
+ // as we need two consecutive zero bytes for a start code pattern.
+ if (mask1 && mask2) {
+ // skip 4 bytes and start over
+ ptr += 4;
+ left -=4;
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
+ if (left > 0) {
+ if (*ptr == STARTCODE_00) {
+ phase++;
+ if (phase > 2) {
+ // more than 2 consecutive '0x00' bytes is found
+ phase = 2;
+ }
+ } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
+ // start code is found
+ phase = 3;
+ } else {
+ // reset lookup
+ phase = 0;
+ }
+ ptr++;
+ left--;
+ }
+ }
+
+ if ((left > 0) && (phase == 3)) {
+ phase = 4;
+ // return offset of position following the pattern in the buffer which matches "0x000001" byte string
+ return (int32_t)(ptr - stream);
+ }
+ return -1;
+}
+
+
<![CDATA[
+Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) { // append {offset, len, headerLen, header bytes} for the current NALU to the metadata stream
+ uint8_t naluType;
+ int32_t naluHeaderLen;
+
+ naluType = *(uint8_t *)(stream + naluStream->naluOffset);
+ naluType &= NALU_TYPE_MASK;
+ // first update nalu header length based on nalu type
+ if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
+ // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
+ naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
+ } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
+ //sps, pps, sei, etc, return the entire NAL unit in clear
+ naluHeaderLen = naluStream->naluLen;
+ } else {
+ return DECODE_FRAME_DROPPED; // other NALU types (AUD, filler, ...) are not recorded
+ }
+
+ // NOTE(review): streamPos is never checked against MAX_NALU_HEADER_BUFFER before these writes
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
+ naluStream->streamPos += 4;
+
+ if (naluHeaderLen) {
+ memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
+ naluStream->streamPos += naluHeaderLen;
+ }
+ return DECODE_SUCCESS;
+}
]]>
+
+
+// parse start-code prefixed stream, also known as Annex B byte stream, commonly used in AVI, ES, MPEG2 TS container
+Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
+ int32_t naluOffset, offset, left;
+ NaluInfo *info;
+ uint32_t ret = DECODE_SUCCESS;
+
+ naluOffset = 0;
+ offset = 0;
+ left = length;
+
+ // leave 4 bytes to copy nalu count
+ naluStream->streamPos = 4;
+ naluStream->naluCount = 0;
+ memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
+
+ for (; ;) {
+ naluOffset = findNalUnitOffset(stream, offset, left); // offset just past the next 0x000001
+ if (naluOffset == -1) {
+ break;
+ }
+
+ if (naluStream->naluCount == 0) {
+ naluStream->naluOffset = naluOffset; // first NALU: remember its start, emit it on the next hit
+ } else {
+ naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN; // length up to the next start code
+ ret = copyNaluHeader(stream, naluStream);
+ if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
+ LOGW("copyNaluHeader returned %d", ret);
+ return ret;
+ }
+ // starting position for next NALU
+ naluStream->naluOffset = naluOffset;
+ }
+
+ if (ret == DECODE_SUCCESS) {
+ naluStream->naluCount++; // dropped (unrecorded) NALU types are not counted
+ }
+
+ // update next lookup position and length
+ offset = naluOffset + 1; // skip one byte of NAL unit type
+ left = length - offset;
+ }
+
+ if (naluStream->naluCount > 0) {
+ naluStream->naluLen = length - naluStream->naluOffset; // last NALU runs to the end of the buffer
+ memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
+ // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
+ copyNaluHeader(stream, naluStream);
+ return DECODE_SUCCESS;
+ }
+
+ LOGW("number of valid NALU is 0!");
+ return DECODE_SUCCESS;
+}
+
diff --git a/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..ee16073
--- /dev/null
+++ b/videodecoder/securevideo/merrplus/VideoDecoderAVCSecure.h
@@ -0,0 +1,75 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H_
+#define VIDEO_DECODER_AVC_SECURE_H_
+
+#include "VideoDecoderAVC.h"
+
+
+class VideoDecoderAVCSecure : public VideoDecoderAVC { // secure AVC decoder variant: parses clear NALU headers, submits encrypted payload by offset
+public:
+ VideoDecoderAVCSecure(const char *mimeType);
+ virtual ~VideoDecoderAVCSecure();
+
+ virtual Decode_Status start(VideoConfigBuffer *buffer);
+ virtual void stop(void);
+
+ // data in the decoded buffer is all encrypted.
+ virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+
+private:
+ enum {
+ MAX_SLICE_HEADER_SIZE = 30, // clear bytes retained per coded-slice NALU
+ MAX_NALU_HEADER_BUFFER = 8192,
+ MAX_NALU_NUMBER = 400, // > 4096/12
+ };
+
+ // Information of Network Abstraction Layer Unit
+ struct NaluInfo {
+ int32_t naluOffset; // offset of NAL unit in the firewalled buffer
+ int32_t naluLen; // length of NAL unit
+ int32_t naluHeaderLen; // length of NAL unit header
+ };
+
+ struct NaluMetadata {
+ NaluInfo *naluInfo;
+ int32_t naluNumber; // number of NAL units
+ };
+
+ struct NaluByteStream {
+ int32_t naluOffset;
+ int32_t naluLen;
+ int32_t streamPos;
+ uint8_t *byteStream; // layout: 4 bytes of naluCount, then per NALU: 4 bytes naluOffset, 4 bytes naluLen, 4 bytes naluHeaderLen, followed by naluHeaderData
+ int32_t naluCount;
+ };
+
+ virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+ int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
+ Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
+ Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
+
+private:
+ NaluMetadata mMetadata; // per-frame NALU table parsed in decode()
+ NaluByteStream mByteStream; // scratch metadata stream for clear (non-secure) input
+ uint8_t *mNaluHeaderBuffer; // start-code-prefixed clear headers handed to the base decoder
+ uint8_t *mInputBuffer; // original input buffer holding the (possibly encrypted) payload
+};
+
+
+
+#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp
new file mode 100644
index 0000000..2867ad9
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.cpp
@@ -0,0 +1,861 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <va/va.h>
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderTrace.h"
+#include "vbp_loader.h"
+#include "VideoDecoderAVCSecure.h"
+#include "VideoFrameInfo.h"
+
+#define MAX_SLICEHEADER_BUFFER_SIZE 4096
+#define STARTCODE_PREFIX_LEN 3
+#define NALU_TYPE_MASK 0x1F
+#define MAX_NALU_HEADER_BUFFER 8192
+static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
+
+/* H264 NAL unit type values (nal_unit_type, H.264 spec Table 7-1) */
+typedef enum _h264_nal_unit_type
+{
+ h264_NAL_UNIT_TYPE_unspecified = 0,
+ h264_NAL_UNIT_TYPE_SLICE, // 1: coded slice of a non-IDR picture
+ h264_NAL_UNIT_TYPE_DPA,
+ h264_NAL_UNIT_TYPE_DPB,
+ h264_NAL_UNIT_TYPE_DPC,
+ h264_NAL_UNIT_TYPE_IDR, // 5: coded slice of an IDR picture
+ h264_NAL_UNIT_TYPE_SEI,
+ h264_NAL_UNIT_TYPE_SPS,
+ h264_NAL_UNIT_TYPE_PPS,
+ h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+ h264_NAL_UNIT_TYPE_EOSeq,
+ h264_NAL_UNIT_TYPE_EOstream,
+ h264_NAL_UNIT_TYPE_filler_data,
+ h264_NAL_UNIT_TYPE_SPS_extension,
+ h264_NAL_UNIT_TYPE_ACP = 19,
+ h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
+ : VideoDecoderAVC(mimeType){ // reset per-frame state and select the secure H.264 parser
+ mFrameSize = 0;
+ mFrameData = NULL;
+ mIsEncryptData = 0;
+ mClearData = NULL;
+ mCachedHeader = NULL;
+ setParserType(VBP_H264SECURE);
+ mFrameIdx = 0;
+ mModularMode = 0; // switched on by the first IS_SUBSAMPLE_ENCRYPTION buffer
+ mSliceNum = 0;
+}
+
+Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) { // base start plus allocation of clear-data and cached-header scratch buffers
+ VTRACE("VideoDecoderAVCSecure::start");
+
+ Decode_Status status = VideoDecoderAVC::start(buffer);
+ if (status != DECODE_SUCCESS) {
+ return status;
+ }
+
+ mClearData = new uint8_t [MAX_NALU_HEADER_BUFFER];
+ if (mClearData == NULL) {
+ ETRACE("Failed to allocate memory for mClearData");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mCachedHeader= new uint8_t [MAX_SLICEHEADER_BUFFER_SIZE];
+ if (mCachedHeader == NULL) {
+ ETRACE("Failed to allocate memory for mCachedHeader");
+ // NOTE(review): mClearData is not freed on this path; it leaks until stop()
+ return DECODE_MEMORY_FAIL;
+ }
+
+ return status;
+}
+
+void VideoDecoderAVCSecure::stop(void) { // stop the base decoder, then release the scratch buffers allocated in start()
+ VTRACE("VideoDecoderAVCSecure::stop");
+ VideoDecoderAVC::stop();
+
+ if (mClearData) {
+ delete [] mClearData;
+ mClearData = NULL;
+ }
+
+ if (mCachedHeader) {
+ delete [] mCachedHeader;
+ mCachedHeader = NULL;
+ }
+}
+Decode_Status VideoDecoderAVCSecure::processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data) // modular (subsample-encryption) path: split frame_info_t into slice table + clear SPS/PPS, then parse
+{
+ VTRACE("processModularInputBuffer +++");
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+
+ int32_t nalu_num = 0;
+ uint8_t nalu_type = 0;
+ int32_t nalu_offset = 0;
+ uint32_t nalu_size = 0;
+ uint8_t naluType = 0;
+ uint8_t *nalu_data = NULL;
+ uint32_t sliceidx = 0;
+
+ frame_info_t *pFrameInfo = NULL;
+ mSliceNum = 0;
+ memset(&mSliceInfo, 0, sizeof(mSliceInfo));
+ mIsEncryptData = 0;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ pFrameInfo = (frame_info_t *) buffer->data; // caller passes a frame_info_t descriptor, not raw bitstream
+ if (pFrameInfo == NULL) {
+ ETRACE("Invalid parameter: pFrameInfo is NULL!");
+ return DECODE_MEMORY_FAIL;
+ }
+
+ mFrameData = pFrameInfo->data;
+ mFrameSize = pFrameInfo->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+
+ nalu_num = pFrameInfo->num_nalus;
+ VTRACE("nalu_num = %d", nalu_num);
+
+ if (nalu_num <= 0 || nalu_num >= MAX_NUM_NALUS) { // NOTE(review): '>=' also rejects exactly MAX_NUM_NALUS entries, which the array can hold
+ ETRACE("Invalid parameter: nalu_num = %d", nalu_num);
+ return DECODE_MEMORY_FAIL;
+ }
+
+ for (int32_t i = 0; i < nalu_num; i++) {
+
+ nalu_size = pFrameInfo->nalus[i].length;
+ nalu_type = pFrameInfo->nalus[i].type;
+ nalu_offset = pFrameInfo->nalus[i].offset;
+ nalu_data = pFrameInfo->nalus[i].data;
+ naluType = nalu_type & NALU_TYPE_MASK;
+
+ VTRACE("nalu_type = 0x%x, nalu_size = %d, nalu_offset = 0x%x", nalu_type, nalu_size, nalu_offset);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+
+ mIsEncryptData = 1;
+ VTRACE("slice idx = %d", sliceidx);
+ mSliceInfo[sliceidx].sliceHeaderByte = nalu_type; // NOTE(review): sliceidx is not checked against the mSliceInfo capacity (MAX_SLICE_HEADER_NUM) -- confirm bound
+ mSliceInfo[sliceidx].sliceStartOffset = (nalu_offset >> 4) << 4; // round down to 16-byte alignment
+ mSliceInfo[sliceidx].sliceByteOffset = nalu_offset - mSliceInfo[sliceidx].sliceStartOffset;
+ mSliceInfo[sliceidx].sliceLength = mSliceInfo[sliceidx].sliceByteOffset + nalu_size;
+ mSliceInfo[sliceidx].sliceSize = (mSliceInfo[sliceidx].sliceByteOffset + nalu_size + 0xF) & ~0xF; // round up to a 16-byte multiple
+ VTRACE("sliceHeaderByte = 0x%x", mSliceInfo[sliceidx].sliceHeaderByte);
+ VTRACE("sliceStartOffset = %d", mSliceInfo[sliceidx].sliceStartOffset);
+ VTRACE("sliceByteOffset = %d", mSliceInfo[sliceidx].sliceByteOffset);
+ VTRACE("sliceSize = %d", mSliceInfo[sliceidx].sliceSize);
+ VTRACE("sliceLength = %d", mSliceInfo[sliceidx].sliceLength);
+
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = mSliceInfo[sliceidx].sliceSize > 64 ? 64 : mSliceInfo[sliceidx].sliceSize ;
+ testdata = (uint8_t *)(mFrameData);
+ for (int i = 0; i < testsize; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ sliceidx++;
+
+ } else if (naluType == h264_NAL_UNIT_TYPE_SPS || naluType == h264_NAL_UNIT_TYPE_PPS) {
+ if (nalu_data == NULL) {
+ ETRACE("Invalid parameter: nalu_data = NULL for naluType 0x%x", naluType);
+ return DECODE_MEMORY_FAIL;
+ }
+ memcpy(mClearData + clear_data_size, // NOTE(review): clear_data_size is not bounded against MAX_NALU_HEADER_BUFFER
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ITRACE("Nalu type = 0x%x is skipped", naluType);
+ continue;
+ }
+ }
+ clear_data = mClearData;
+ mSliceNum = sliceidx;
+
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data); // no clear NALUs this frame: reuse previously parsed state
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data) // classic path: NALU table is appended after the encrypted payload; only SEI..PPS may arrive in clear
+{
+ Decode_Status status;
+ int32_t clear_data_size = 0;
+ uint8_t *clear_data = NULL;
+ uint8_t naluType = 0;
+
+ int32_t num_nalus;
+ int32_t nalu_offset;
+ int32_t offset;
+ uint8_t *data_src;
+ uint8_t *nalu_data;
+ uint32_t nalu_size;
+
+ if (buffer->flag & IS_SECURE_DATA) {
+ VTRACE("Decoding protected video ...");
+ mIsEncryptData = 1;
+
+ mFrameData = buffer->data;
+ mFrameSize = buffer->size;
+ VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+ num_nalus = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t)); // NALU count sits one word past the payload
+ VTRACE("num_nalus = %d", num_nalus);
+ offset = 4;
+ for (int32_t i = 0; i < num_nalus; i++) {
+ VTRACE("%d nalu, offset = %d", i, offset);
+ data_src = buffer->data + buffer->size + sizeof(uint32_t) + offset;
+ nalu_size = *(uint32_t *)(data_src + 2 * sizeof(uint32_t));
+ nalu_size = (nalu_size + 0x03) & (~0x03); // round size up to a 4-byte multiple
+
+ nalu_data = data_src + 3 *sizeof(uint32_t); // record layout: 3 uint32 fields, then the NALU bytes
+ naluType = nalu_data[0] & NALU_TYPE_MASK;
+ offset += nalu_size + 3 *sizeof(uint32_t);
+ VTRACE("naluType = 0x%x", naluType);
+ VTRACE("nalu_size = %d, nalu_data = %p", nalu_size, nalu_data);
+
+ if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+ ETRACE("Slice NALU received!"); // slices must stay encrypted on the classic path
+ return DECODE_INVALID_DATA;
+ }
+
+ else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+ memcpy(mClearData + clear_data_size, // NOTE(review): clear_data_size is not bounded against MAX_NALU_HEADER_BUFFER
+ startcodePrefix,
+ STARTCODE_PREFIX_LEN);
+ clear_data_size += STARTCODE_PREFIX_LEN;
+ memcpy(mClearData + clear_data_size,
+ nalu_data,
+ nalu_size);
+ clear_data_size += nalu_size;
+ } else {
+ ETRACE("Failure: DECODE_FRAME_DROPPED");
+ return DECODE_FRAME_DROPPED;
+ }
+ }
+ clear_data = mClearData;
+ } else {
+ VTRACE("Decoding clear video ...");
+ mIsEncryptData = 0;
+ mFrameSize = buffer->size;
+ mFrameData = buffer->data;
+ clear_data = buffer->data;
+ clear_data_size = buffer->size;
+ }
+
+ if (clear_data_size > 0) {
+ status = VideoDecoderBase::parseBuffer(
+ clear_data,
+ clear_data_size,
+ false,
+ (void**)data);
+ CHECK_STATUS("VideoDecoderBase::parseBuffer");
+ } else {
+ status = VideoDecoderBase::queryBuffer((void**)data); // nothing clear this frame: reuse previously parsed state
+ CHECK_STATUS("VideoDecoderBase::queryBuffer");
+ }
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) { // top-level entry: dispatch to modular/classic preprocessing, lazily start VA, decode the frame
+ VTRACE("VideoDecoderAVCSecure::decode");
+ Decode_Status status;
+ vbp_data_h264 *data = NULL;
+ if (buffer == NULL) {
+ return DECODE_INVALID_DATA;
+ }
+
+#if 0
+ uint32_t testsize;
+ uint8_t *testdata;
+ testsize = buffer->size > 16 ? 16:buffer->size ;
+ testdata = (uint8_t *)(buffer->data);
+ for (int i = 0; i < 16; i++) {
+ VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+ }
+#endif
+ if (buffer->flag & IS_SUBSAMPLE_ENCRYPTION) {
+ mModularMode = 1; // sticky: once modular mode is seen, all later buffers use the modular path
+ }
+
+ if (mModularMode) {
+ status = processModularInputBuffer(buffer,&data);
+ CHECK_STATUS("processModularInputBuffer");
+ }
+ else {
+ status = processClassicInputBuffer(buffer,&data);
+ CHECK_STATUS("processClassicInputBuffer");
+ }
+
+ if (!mVAStarted) {
+ if (data->has_sps && data->has_pps) {
+ status = startVA(data);
+ CHECK_STATUS("startVA");
+ } else {
+ WTRACE("Can't start VA as either SPS or PPS is still not available.");
+ return DECODE_SUCCESS;
+ }
+ }
+
+ status = decodeFrame(buffer, data);
+
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) { // decode one parsed frame: handle SPS/PPS changes, acquire a surface, parse slice headers, submit
+ VTRACE("VideoDecoderAVCSecure::decodeFrame");
+ Decode_Status status;
+ VTRACE("data->has_sps = %d, data->has_pps = %d", data->has_sps, data->has_pps);
+
+#if 0
+ // Don't remove the following codes, it can be enabled for debugging DPB.
+ for (unsigned int i = 0; i < data->num_pictures; i++) {
+ VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+ VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d, reference = %d",
+ i,
+ buffer->timeStamp/1E6,
+ pic.TopFieldOrderCnt,
+ pic.BottomFieldOrderCnt,
+ pic.flags,
+ (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+ }
+#endif
+
+ if (data->new_sps || data->new_pps) {
+ status = handleNewSequence(data);
+ CHECK_STATUS("handleNewSequence");
+ }
+
+ if (mModularMode && (!mIsEncryptData)) {
+ if (data->pic_data[0].num_slices == 0) {
+ ITRACE("No slice available for decoding.");
+ status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+ mSizeChanged = false;
+ return status;
+ }
+ }
+
+ uint64_t lastPTS = mCurrentPTS;
+ mCurrentPTS = buffer->timeStamp;
+
+ // start decoding a new frame
+ status = acquireSurfaceBuffer();
+ CHECK_STATUS("acquireSurfaceBuffer");
+
+ // BUGFIX: the parse*SliceHeader return values were previously discarded, so the
+ // status check below always saw the stale DECODE_SUCCESS from acquireSurfaceBuffer()
+ // and parse failures were silently ignored. Capture them so failures abort the frame.
+ if (mModularMode) {
+ status = parseModularSliceHeader(data);
+ }
+ else {
+ status = parseClassicSliceHeader(data);
+ }
+
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true); // drop the partially decoded frame
+ return status;
+ }
+
+ status = beginDecodingFrame(data);
+ CHECK_STATUS("beginDecodingFrame");
+
+ // finish decoding the last frame
+ status = endDecodingFrame(false);
+ CHECK_STATUS("endDecodingFrame");
+
+ if (isNewFrame(data, lastPTS == mCurrentPTS) == 0) {
+ ETRACE("Can't handle interlaced frames yet");
+ return DECODE_FAIL;
+ }
+
+ return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::beginDecodingFrame(vbp_data_h264 *data) { // stamp the acquired buffer (reference flag, scan format, PTS, POC) and continue decoding
+ VTRACE("VideoDecoderAVCSecure::beginDecodingFrame");
+ Decode_Status status;
+ VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+ if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+ (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+ mAcquiredBuffer->referenceFrame = true;
+ } else {
+ mAcquiredBuffer->referenceFrame = false;
+ }
+
+ if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD; // interlaced
+ } else {
+ mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE; // progressive
+ }
+
+ mAcquiredBuffer->renderBuffer.flag = 0;
+ mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+ mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+ if (mSizeChanged) {
+ mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE; // tell the renderer once, then clear the latch
+ mSizeChanged = false;
+ }
+
+ status = continueDecodingFrame(data);
+ return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::continueDecodingFrame(vbp_data_h264 *data) { // submit every slice of every picture in the parsed data to decodeSlice()
+ VTRACE("VideoDecoderAVCSecure::continueDecodingFrame");
+ Decode_Status status;
+ vbp_picture_data_h264 *picData = data->pic_data;
+
+ if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+ ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+ return DECODE_FAIL;
+ }
+ VTRACE("data->num_pictures = %d", data->num_pictures);
+ for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+ if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+ return DECODE_PARSER_FAIL;
+ }
+
+ if (picIndex > 0 &&
+ (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+ ETRACE("Packed frame is not supported yet!"); // a second picture must be the other field, not a packed frame
+ return DECODE_FAIL;
+ }
+ VTRACE("picData->num_slices = %d", picData->num_slices);
+ for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+ status = decodeSlice(data, picIndex, sliceIndex);
+ if (status != DECODE_SUCCESS) {
+ endDecodingFrame(true);
+ // remove current frame from DPB as it can't be decoded.
+ removeReferenceFromDPB(picData->pic_parms);
+ return status;
+ }
+ }
+ }
+ mDecodingFrame = true;
+
+ return DECODE_SUCCESS;
+}
+
+// Classic (whole-frame) DRM path: submit the entire firewalled frame to the
+// driver's slice-header parsing stage and refresh the slice parameters in
+// 'data' from the parsed output.
+// NOTE(review): the VA buffers created here (slice header group, slice data,
+// parse-parameter) are never vaDestroyBuffer()'d in this function —
+// presumably they are released with the VA context; TODO confirm against the
+// driver's buffer-lifetime rules.
+Decode_Status VideoDecoderAVCSecure::parseClassicSliceHeader(vbp_data_h264 *data) {
+    Decode_Status status;
+    VAStatus vaStatus;
+
+    VABufferID sliceheaderbufferID;
+    VABufferID pictureparameterparsingbufferID;
+    VABufferID mSlicebufferID;
+
+    // Nothing to parse when no frame data is present.
+    if (mFrameSize <= 0) {
+        return DECODE_SUCCESS;
+    }
+    vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+    CHECK_VA_STATUS("vaBeginPicture");
+
+    // Output buffer the driver fills with the parsed slice headers.
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VAParseSliceHeaderGroupBufferType,
+        MAX_SLICEHEADER_BUFFER_SIZE,
+        1,
+        NULL,
+        &sliceheaderbufferID);
+    CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+    void *sliceheaderbuf;
+    vaStatus = vaMapBuffer(
+        mVADisplay,
+        sliceheaderbufferID,
+        &sliceheaderbuf);
+    CHECK_VA_STATUS("vaMapBuffer");
+
+    // Zero the output buffer before the driver writes into it.
+    memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+    vaStatus = vaUnmapBuffer(
+        mVADisplay,
+        sliceheaderbufferID);
+    CHECK_VA_STATUS("vaUnmapBuffer");
+
+
+    // Input buffer: the whole (firewalled) frame in one piece.
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VASliceDataBufferType,
+        mFrameSize, //size
+        1, //num_elements
+        mFrameData,
+        &mSlicebufferID);
+    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+    // Wire the input/output buffer IDs into the parse-parameter structure.
+    data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+    data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+    data->pic_parse_buffer->frame_size = mFrameSize;
+    data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+
+#if 0
+
+    VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+    VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+    VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+    VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+    VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+    VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+    VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+    VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+
+    VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+    VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+    VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+    VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+    VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+    VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+    VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+    VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VAParsePictureParameterBufferType,
+        sizeof(VAParsePictureParameterBuffer),
+        1,
+        data->pic_parse_buffer,
+        &pictureparameterparsingbufferID);
+    CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+    // Rendering the parse-parameter buffer triggers the header parsing.
+    vaStatus = vaRenderPicture(
+        mVADisplay,
+        mVAContext,
+        &pictureparameterparsingbufferID,
+        1);
+    CHECK_VA_STATUS("vaRenderPicture");
+
+    // Map the parsed headers and fold them back into the slice parameters.
+    vaStatus = vaMapBuffer(
+        mVADisplay,
+        sliceheaderbufferID,
+        &sliceheaderbuf);
+    CHECK_VA_STATUS("vaMapBuffer");
+
+    status = updateSliceParameter(data,sliceheaderbuf);
+    CHECK_STATUS("processSliceHeader");
+
+    vaStatus = vaUnmapBuffer(
+        mVADisplay,
+        sliceheaderbufferID);
+    CHECK_VA_STATUS("vaUnmapBuffer");
+
+    return DECODE_SUCCESS;
+}
+
+// Modular (per-slice) DRM path: run the driver's slice-header parser once
+// per slice and accumulate the parsed headers into mCachedHeader, which is
+// then fed back through updateSliceParameter().
+// NOTE(review): the VA buffers created inside the loop are never
+// vaDestroyBuffer()'d here — presumably released with the VA context; TODO
+// confirm, since they are recreated for every slice of every frame.
+Decode_Status VideoDecoderAVCSecure::parseModularSliceHeader(vbp_data_h264 *data) {
+    Decode_Status status;
+    VAStatus vaStatus;
+
+    VABufferID sliceheaderbufferID;
+    VABufferID pictureparameterparsingbufferID;
+    VABufferID mSlicebufferID;
+    int32_t sliceIdx;
+
+    vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+    CHECK_VA_STATUS("vaBeginPicture");
+
+    // NOTE(review): unlike parseClassicSliceHeader(), this early return comes
+    // AFTER vaBeginPicture(), leaving the picture open with no matching
+    // vaEndPicture() on this path — verify whether this ordering is intended.
+    if (mFrameSize <= 0 || mSliceNum <=0) {
+        return DECODE_SUCCESS;
+    }
+    void *sliceheaderbuf;
+    memset(mCachedHeader, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+    int32_t offset = 0;
+    int32_t size = 0;
+
+    // Parse each slice's header individually and append the result to the
+    // cached header buffer.
+    for (sliceIdx = 0; sliceIdx < mSliceNum; sliceIdx++) {
+        // Output buffer the driver fills with this slice's parsed header.
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAParseSliceHeaderGroupBufferType,
+            MAX_SLICEHEADER_BUFFER_SIZE,
+            1,
+            NULL,
+            &sliceheaderbufferID);
+        CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+        vaStatus = vaMapBuffer(
+            mVADisplay,
+            sliceheaderbufferID,
+            &sliceheaderbuf);
+        CHECK_VA_STATUS("vaMapBuffer");
+
+        // Zero the output buffer before the driver writes into it.
+        memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+        vaStatus = vaUnmapBuffer(
+            mVADisplay,
+            sliceheaderbufferID);
+        CHECK_VA_STATUS("vaUnmapBuffer");
+
+        // Input buffer: only this slice's region of the firewalled frame.
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VASliceDataBufferType,
+            mSliceInfo[sliceIdx].sliceSize, //size
+            1, //num_elements
+            mFrameData + mSliceInfo[sliceIdx].sliceStartOffset,
+            &mSlicebufferID);
+        CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+        // Wire this slice's buffers and offsets into the parse parameters.
+        data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+        data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+        data->pic_parse_buffer->frame_size = mSliceInfo[sliceIdx].sliceLength;
+        data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+        data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte;
+        data->pic_parse_buffer->slice_offset = mSliceInfo[sliceIdx].sliceByteOffset;
+
+#if 0
+        VTRACE("data->pic_parse_buffer->slice_offset = 0x%x", data->pic_parse_buffer->slice_offset);
+        VTRACE("pic_parse_buffer->nalu_header.value = %x", data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte);
+        VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+        VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+        VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+        VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+        VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+        VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+        VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+        VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+        VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+        VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+        VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+        VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+        VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+        VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+        VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+        VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAParsePictureParameterBufferType,
+            sizeof(VAParsePictureParameterBuffer),
+            1,
+            data->pic_parse_buffer,
+            &pictureparameterparsingbufferID);
+        CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+        // Rendering the parse-parameter buffer triggers the header parsing.
+        vaStatus = vaRenderPicture(
+            mVADisplay,
+            mVAContext,
+            &pictureparameterparsingbufferID,
+            1);
+        CHECK_VA_STATUS("vaRenderPicture");
+
+        vaStatus = vaMapBuffer(
+            mVADisplay,
+            sliceheaderbufferID,
+            &sliceheaderbuf);
+        CHECK_VA_STATUS("vaMapBuffer");
+
+        // Parsed header length appears to be stored at byte offset 4; the
+        // extra +4 includes that length field itself — assumption based on
+        // the buffer layout, TODO confirm against the driver interface.
+        size = *(uint32 *)((uint8 *)sliceheaderbuf + 4) + 4;
+        VTRACE("slice header size = 0x%x, offset = 0x%x", size, offset);
+        // Keep 4 bytes free at the end of the cache for the terminator below.
+        if (offset + size <= MAX_SLICEHEADER_BUFFER_SIZE - 4) {
+            memcpy(mCachedHeader+offset, sliceheaderbuf, size);
+            offset += size;
+        } else {
+            WTRACE("Cached slice header is not big enough!");
+        }
+        vaStatus = vaUnmapBuffer(
+            mVADisplay,
+            sliceheaderbufferID);
+        CHECK_VA_STATUS("vaUnmapBuffer");
+    }
+    // 0xFF terminator marks the end of the accumulated headers.
+    memset(mCachedHeader + offset, 0xFF, 4);
+    status = updateSliceParameter(data,mCachedHeader);
+    CHECK_STATUS("processSliceHeader");
+    return DECODE_SUCCESS;
+}
+
+
+// Feed the hardware-parsed slice header blob back into the parser via
+// VideoDecoderBase::updateBuffer() so the slice parameters in 'data' are
+// refreshed from the parsed headers.
+Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
+    VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
+    Decode_Status status;
+    status = VideoDecoderBase::updateBuffer(
+        (uint8_t *)sliceheaderbuf,
+        MAX_SLICEHEADER_BUFFER_SIZE,
+        (void**)&data);
+    CHECK_STATUS("updateBuffer");
+    return DECODE_SUCCESS;
+}
+
+// Submit one slice of one picture to the driver: on a new frame also submits
+// the picture parameters and IQ matrix, then the slice parameters and the
+// slice data. The slice data source depends on the DRM mode (modular
+// per-slice regions vs. the whole classic frame).
+Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
+    Decode_Status status;
+    VAStatus vaStatus;
+    uint32_t bufferIDCount = 0;
+    // maximum 3 buffers to render a slice: picture parameter, IQMatrix, slice parameter
+    VABufferID bufferIDs[3];
+
+    vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
+    vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
+    VAPictureParameterBufferH264 *picParam = picData->pic_parms;
+    VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+    uint32_t slice_data_size = 0;
+    uint8_t* slice_data_addr = NULL;
+
+    if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
+        // either condition indicates start of a new frame
+        if (sliceParam->first_mb_in_slice != 0) {
+            WTRACE("The first slice is lost.");
+        }
+        VTRACE("Current frameidx = %d", mFrameIdx++);
+        // Update the reference frames and surface IDs for DPB and current frame
+        status = updateDPB(picParam);
+        CHECK_STATUS("updateDPB");
+
+        //We have to provide a hacked DPB rather than complete DPB for libva as workaround
+        status = updateReferenceFrames(picData);
+        CHECK_STATUS("updateReferenceFrames");
+
+        mDecodingFrame = true;
+
+        // Picture-level buffers are only submitted once per frame.
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAPictureParameterBufferType,
+            sizeof(VAPictureParameterBufferH264),
+            1,
+            picParam,
+            &bufferIDs[bufferIDCount]);
+        CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
+        bufferIDCount++;
+
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAIQMatrixBufferType,
+            sizeof(VAIQMatrixBufferH264),
+            1,
+            data->IQ_matrix_buf,
+            &bufferIDs[bufferIDCount]);
+        CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
+        bufferIDCount++;
+    }
+
+    status = setReference(sliceParam);
+    CHECK_STATUS("setReference");
+
+    // Select the slice data source: in modular mode encrypted slices come
+    // from the firewalled frame buffer at per-slice offsets, clear slices
+    // come from the parser's buffer; in classic mode the whole frame is
+    // submitted as one slice data region.
+    if (mModularMode) {
+        if (mIsEncryptData) {
+            sliceParam->slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+            slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+            slice_data_addr = mFrameData + mSliceInfo[sliceIndex].sliceStartOffset;
+        } else {
+            slice_data_size = sliceData->slice_size;
+            slice_data_addr = sliceData->buffer_addr + sliceData->slice_offset;
+        }
+    } else {
+        sliceParam->slice_data_size = mFrameSize;
+        slice_data_size = mFrameSize;
+        slice_data_addr = mFrameData;
+    }
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VASliceParameterBufferType,
+        sizeof(VASliceParameterBufferH264),
+        1,
+        sliceParam,
+        &bufferIDs[bufferIDCount]);
+    CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
+    bufferIDCount++;
+
+    // Submit the accumulated parameter buffers first...
+    vaStatus = vaRenderPicture(
+        mVADisplay,
+        mVAContext,
+        bufferIDs,
+        bufferIDCount);
+    CHECK_VA_STATUS("vaRenderPicture");
+
+    VABufferID slicebufferID;
+
+    // ...then the slice data itself in a separate render call.
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VASliceDataBufferType,
+        slice_data_size, //size
+        1, //num_elements
+        slice_data_addr,
+        &slicebufferID);
+    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+    vaStatus = vaRenderPicture(
+        mVADisplay,
+        mVAContext,
+        &slicebufferID,
+        1);
+    CHECK_VA_STATUS("vaRenderPicture");
+
+    return DECODE_SUCCESS;
+
+}
+
+// Build the VA decode configuration for this decoder. In modular DRM mode
+// the driver must run in sub-sample slice mode so it can parse slice headers
+// from the firewalled buffer; otherwise normal slice mode is used.
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+    VAProfile profile, VAConfigID *config)
+{
+    VAStatus vaStatus;
+    VAConfigAttrib attrib[2];
+
+    if (config == NULL) {
+        ETRACE("Invalid parameter!");
+        return DECODE_FAIL;
+    }
+
+    attrib[0].type = VAConfigAttribRTFormat;
+    attrib[0].value = VA_RT_FORMAT_YUV420;
+    attrib[1].type = VAConfigAttribDecSliceMode;
+    attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+    // Modular DRM requires the sub-sample slice mode.
+    if (mModularMode) {
+        attrib[1].value = VA_DEC_SLICE_MODE_SUBSAMPLE;
+    }
+
+    vaStatus = vaCreateConfig(
+        mVADisplay,
+        profile,
+        VAEntrypointVLD,
+        &attrib[0],
+        2,
+        config);
+    CHECK_VA_STATUS("vaCreateConfig");
+
+    return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h
new file mode 100644
index 0000000..f66d7b8
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoDecoderAVCSecure.h
@@ -0,0 +1,69 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_DECODER_AVC_SECURE_H
+#define VIDEO_DECODER_AVC_SECURE_H
+
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
+#include "VideoDecoderDefs.h"
+
+// Secure (protected-content) AVC decoder. Extends VideoDecoderAVC with
+// support for firewalled/encrypted bitstream buffers, using the driver's
+// slice-header parsing entry points instead of reading the stream directly.
+// Supports two DRM schemes: "classic" (whole-frame) and "modular"
+// (per-slice) — see the parse*SliceHeader methods.
+class VideoDecoderAVCSecure : public VideoDecoderAVC {
+public:
+    VideoDecoderAVCSecure(const char *mimeType);
+    virtual Decode_Status start(VideoConfigBuffer *buffer);
+    virtual void stop(void);
+
+    // data in the decoded buffer is all encrypted.
+    virtual Decode_Status decode(VideoDecodeBuffer *buffer);
+protected:
+    virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+    // Slice-header parsing for the classic (whole-frame) DRM scheme.
+    Decode_Status parseClassicSliceHeader(vbp_data_h264 *data);
+    // Slice-header parsing for the modular (per-slice) DRM scheme.
+    Decode_Status parseModularSliceHeader(vbp_data_h264 *data);
+
+    Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
+    virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
+private:
+    Decode_Status processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+    Decode_Status processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+    int32_t mIsEncryptData;   // non-zero when the current input data is encrypted
+    int32_t mFrameSize;       // size in bytes of the current frame's data
+    uint8_t* mFrameData;      // current frame data (firewalled buffer when encrypted)
+    uint8_t* mClearData;      // presumably holds clear (unencrypted) input — confirm with the .cpp
+    uint8_t* mCachedHeader;   // accumulated parsed slice headers (modular mode)
+    int32_t mFrameIdx;        // running frame counter (used for tracing)
+    int32_t mModularMode;     // non-zero when the modular DRM scheme is active
+
+    enum {
+        MAX_SLICE_HEADER_NUM  = 256,
+    };
+    int32_t mSliceNum;        // number of valid entries in mSliceInfo
+    // Information of Slices in the Modular DRM Mode
+    struct SliceInfo {
+        uint8_t sliceHeaderByte; //  first byte of the slice header
+        uint32_t sliceStartOffset; // offset of Slice unit in the firewalled buffer
+        uint32_t sliceByteOffset; // extra offset from the blockAligned slice offset
+        uint32_t sliceSize; // block aligned length of slice unit
+        uint32_t sliceLength; // actual size of the slice
+    };
+
+    SliceInfo mSliceInfo[MAX_SLICE_HEADER_NUM];
+};
+
+#endif
diff --git a/videodecoder/securevideo/moorefield/VideoFrameInfo.h b/videodecoder/securevideo/moorefield/VideoFrameInfo.h
new file mode 100755
index 0000000..485b0da
--- /dev/null
+++ b/videodecoder/securevideo/moorefield/VideoFrameInfo.h
@@ -0,0 +1,36 @@
+/*
+* Copyright (c) 2009-2011 Intel Corporation. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#ifndef VIDEO_FRAME_INFO_H_
+#define VIDEO_FRAME_INFO_H_
+
+// Maximum number of NAL units describable for a single frame.
+#define MAX_NUM_NALUS 16
+
+// Description of one NAL unit within a secure frame buffer.
+typedef struct {
+    uint8_t type; // nalu type + nal_ref_idc
+    uint32_t offset; // offset to the pointer of the encrypted data
+    uint8_t* data; // if the nalu is encrypted, this field is useless; if current NALU is SPS/PPS, data is the pointer to clear SPS/PPS data
+    uint32_t length; // nalu length
+} nalu_info_t;
+
+// Per-frame description of the encrypted buffer and its NAL units.
+typedef struct {
+    uint8_t* data; // pointer to the encrypted data
+    uint32_t size; // encrypted data size
+    uint32_t num_nalus; // number of NALU
+    nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+#endif