summary refs log tree commit diff
path: root/devices
diff options
context:
space:
mode:
author	Emilian Peev <epeev@google.com>	2019-09-18 12:41:54 -0700
committer	Emilian Peev <epeev@google.com>	2019-10-23 16:52:39 -0700
commit	bc9e116f923a99ea23704b44395a768a7f941203 (patch)
tree	ec0c0c55131de8d7e019b14d10deda603b420ae3 /devices
parent	5ad162cda7bc6ac5636c114b84956980c277c17e (diff)
download	camera-bc9e116f923a99ea23704b44395a768a7f941203.tar.gz
EmulatedCamera: Add support for semi-planar YUV processing
The camera output and input YUV420_888 buffers can support both planar
and semi-planar layouts. Unfortunately 'libyuv' only supports the planar
variant natively. Handle the semi-planar case by splitting and merging
the U/V plane accordingly.

Bug: 141251857
Bug: 131342297
Test: Manual using Emulator build and Camera2, TestingCamera,
TestingCamera2 and ZSLDemo
Change-Id: I6c46d01d5d7659008c377c31f3a436f9dac4ecb2
Diffstat (limited to 'devices')
-rw-r--r--	devices/EmulatedCamera/hwl/EmulatedRequestProcessor.cpp	20
-rw-r--r--	devices/EmulatedCamera/hwl/EmulatedSensor.cpp	77
2 files changed, 79 insertions(+), 18 deletions(-)
diff --git a/devices/EmulatedCamera/hwl/EmulatedRequestProcessor.cpp b/devices/EmulatedCamera/hwl/EmulatedRequestProcessor.cpp
index fd693c4..f8d6b87 100644
--- a/devices/EmulatedCamera/hwl/EmulatedRequestProcessor.cpp
+++ b/devices/EmulatedCamera/hwl/EmulatedRequestProcessor.cpp
@@ -20,6 +20,7 @@
#include "EmulatedRequestProcessor.h"
#include <HandleImporter.h>
+#include <hardware/gralloc.h>
#include <log/log.h>
#include <sync/sync.h>
#include <utils/Timers.h>
@@ -198,10 +199,12 @@ status_t EmulatedRequestProcessor::LockSensorBuffer(
auto width = static_cast<int32_t>(stream.width);
auto height = static_cast<int32_t>(stream.height);
+ auto usage = stream.is_input
+ ? GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN
+ : stream.producer_usage;
if (stream.override_format == HAL_PIXEL_FORMAT_YCBCR_420_888) {
IMapper::Rect map_rect = {0, 0, width, height};
- auto yuv_layout =
- importer.lockYCbCr(buffer, stream.producer_usage, map_rect);
+ auto yuv_layout = importer.lockYCbCr(buffer, usage, map_rect);
if ((yuv_layout.y != nullptr) && (yuv_layout.cb != nullptr) &&
(yuv_layout.cr != nullptr)) {
sensor_buffer->plane.img_y_crcb.img_y =
@@ -213,6 +216,15 @@ status_t EmulatedRequestProcessor::LockSensorBuffer(
sensor_buffer->plane.img_y_crcb.y_stride = yuv_layout.yStride;
sensor_buffer->plane.img_y_crcb.cbcr_stride = yuv_layout.cStride;
sensor_buffer->plane.img_y_crcb.cbcr_step = yuv_layout.chromaStep;
+ if ((yuv_layout.chromaStep == 2) &&
+ std::abs(sensor_buffer->plane.img_y_crcb.img_cb -
+ sensor_buffer->plane.img_y_crcb.img_cr) != 1) {
+ ALOGE("%s: Unsupported YUV layout, chroma step: %u U/V plane delta: %u",
+ __FUNCTION__, yuv_layout.chromaStep,
+ std::abs(sensor_buffer->plane.img_y_crcb.img_cb -
+ sensor_buffer->plane.img_y_crcb.img_cr));
+ return BAD_VALUE;
+ }
} else {
ALOGE("%s: Failed to lock output buffer!", __FUNCTION__);
return BAD_VALUE;
@@ -221,8 +233,8 @@ status_t EmulatedRequestProcessor::LockSensorBuffer(
uint32_t buffer_size = 0, stride = 0;
auto ret = GetBufferSizeAndStride(stream, &buffer_size, &stride);
if (ret == OK) {
- sensor_buffer->plane.img.img = static_cast<uint8_t*>(
- importer.lock(buffer, stream.producer_usage, buffer_size));
+ sensor_buffer->plane.img.img =
+ static_cast<uint8_t*>(importer.lock(buffer, usage, buffer_size));
if (sensor_buffer->plane.img.img != nullptr) {
sensor_buffer->plane.img.stride = stride;
sensor_buffer->plane.img.buffer_size = buffer_size;
diff --git a/devices/EmulatedCamera/hwl/EmulatedSensor.cpp b/devices/EmulatedCamera/hwl/EmulatedSensor.cpp
index 317114e..079bf43 100644
--- a/devices/EmulatedCamera/hwl/EmulatedSensor.cpp
+++ b/devices/EmulatedCamera/hwl/EmulatedSensor.cpp
@@ -917,12 +917,33 @@ status_t EmulatedSensor::ProcessYUV420(const YUV420Frame& input,
bool reprocess_request) {
ATRACE_CALL();
size_t input_width, input_height;
- YCbCrPlanes yuv_planes;
- std::vector<uint8_t> temp_yuv;
+ YCbCrPlanes input_planes, output_planes;
+ std::vector<uint8_t> temp_yuv, temp_output_uv, temp_input_uv;
if (reprocess_request) {
input_width = input.width;
input_height = input.height;
- yuv_planes = input.planes;
+ input_planes = input.planes;
+
+ // libyuv only supports planar YUV420 during scaling.
+ // Split the input U/V plane in separate planes if needed.
+ if (input_planes.cbcr_step == 2) {
+ temp_input_uv.resize(input_width * input_height / 2);
+ auto temp_uv_buffer = temp_input_uv.data();
+ input_planes.img_cb = temp_uv_buffer;
+ input_planes.img_cr = temp_uv_buffer + (input_width * input_height) / 4;
+ input_planes.cbcr_stride = input_width / 2;
+ if (input.planes.img_cb < input.planes.img_cr) {
+ libyuv::SplitUVPlane(input.planes.img_cb, input.planes.cbcr_stride,
+ input_planes.img_cb, input_planes.cbcr_stride,
+ input_planes.img_cr, input_planes.cbcr_stride,
+ input_width / 2, input_height / 2);
+ } else {
+ libyuv::SplitUVPlane(input.planes.img_cr, input.planes.cbcr_stride,
+ input_planes.img_cr, input_planes.cbcr_stride,
+ input_planes.img_cb, input_planes.cbcr_stride,
+ input_width / 2, input_height / 2);
+ }
+ }
} else {
// Generate the smallest possible frame with the expected AR and
// then scale using libyuv.
@@ -931,25 +952,53 @@ status_t EmulatedSensor::ProcessYUV420(const YUV420Frame& input,
input_height = EmulatedScene::kSceneHeight;
temp_yuv.reserve((input_width * input_height * 3) / 2);
auto temp_yuv_buffer = temp_yuv.data();
- yuv_planes = {
+ input_planes = {
.img_y = temp_yuv_buffer,
.img_cb = temp_yuv_buffer + input_width * input_height,
.img_cr = temp_yuv_buffer + (input_width * input_height * 5) / 4,
.y_stride = static_cast<uint32_t>(input_width),
.cbcr_stride = static_cast<uint32_t>(input_width) / 2,
.cbcr_step = 1};
- CaptureYUV420(yuv_planes, input_width, input_height, gain);
- }
-
- auto ret = I420Scale(yuv_planes.img_y, yuv_planes.y_stride, yuv_planes.img_cb,
- yuv_planes.cbcr_stride, yuv_planes.img_cr,
- yuv_planes.cbcr_stride, input_width, input_height,
- output.planes.img_y, output.planes.y_stride,
- output.planes.img_cb, output.planes.cbcr_stride,
- output.planes.img_cr, output.planes.cbcr_stride,
- output.width, output.height, libyuv::kFilterNone);
+ CaptureYUV420(input_planes, input_width, input_height, gain);
+ }
+
+ output_planes = output.planes;
+ // libyuv only supports planar YUV420 during scaling.
+ // Treat the output UV space as planar first and then
+ // interleave in the second step.
+ if (output_planes.cbcr_step == 2) {
+ temp_output_uv.resize(output.width * output.height / 2);
+ auto temp_uv_buffer = temp_output_uv.data();
+ output_planes.img_cb = temp_uv_buffer;
+ output_planes.img_cr = temp_uv_buffer + output.width * output.height / 4;
+ output_planes.cbcr_stride = output.width / 2;
+ }
+
+ auto ret = I420Scale(
+ input_planes.img_y, input_planes.y_stride, input_planes.img_cb,
+ input_planes.cbcr_stride, input_planes.img_cr, input_planes.cbcr_stride,
+ input_width, input_height, output_planes.img_y, output_planes.y_stride,
+ output_planes.img_cb, output_planes.cbcr_stride, output_planes.img_cr,
+ output_planes.cbcr_stride, output.width, output.height,
+ libyuv::kFilterNone);
if (ret != 0) {
ALOGE("%s: Failed during YUV scaling: %d", __FUNCTION__, ret);
+ return ret;
+ }
+
+ // Merge U/V Planes for the interleaved case
+ if (output_planes.cbcr_step == 2) {
+ if (output.planes.img_cb < output.planes.img_cr) {
+ libyuv::MergeUVPlane(output_planes.img_cb, output_planes.cbcr_stride,
+ output_planes.img_cr, output_planes.cbcr_stride,
+ output.planes.img_cb, output.planes.cbcr_stride,
+ output.width / 2, output.height / 2);
+ } else {
+ libyuv::MergeUVPlane(output_planes.img_cr, output_planes.cbcr_stride,
+ output_planes.img_cb, output_planes.cbcr_stride,
+ output.planes.img_cr, output.planes.cbcr_stride,
+ output.width / 2, output.height / 2);
+ }
}
return ret;