Add support for multi-plane YUV HardwareBuffers
This adds a new lock method, AHardwareBuffer_lockPlanes, that locks every
plane of a hardware buffer in a single call. A new AHardwareBuffer_Planes
struct holds the resulting per-plane data pointers, pixel strides and row
strides. Documentation and tests are now in place.
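
A sketch of the intended call pattern (hypothetical caller code; error
handling elided, and the buffer is assumed to already exist):

  AHardwareBuffer_Planes planes;
  int err = AHardwareBuffer_lockPlanes(
          buffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN,
          -1 /* no fence */, nullptr /* lock the whole buffer */, &planes);
  if (err == 0) {
      // For YUV formats planeCount is 3: planes[0] holds Y, planes[1]
      // Cb and planes[2] Cr, each with its own data pointer,
      // pixelStride and rowStride.
      AHardwareBuffer_unlock(buffer, nullptr);
  }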
Change-Id: Id4933bbdd8d47d5209a9a08e27391908df26dae1
One-Pager: https://docs.google.com/document/d/1jPlDlaAOzg6QfrX77fZ6OYwR6xeqj_xQlW81mQGigHA/edit?usp=sharing
Bug: 117617861
Test: Manual - run CTS tests
diff --git a/libs/nativewindow/AHardwareBuffer.cpp b/libs/nativewindow/AHardwareBuffer.cpp
index 6ea1270..ff64f61 100644
--- a/libs/nativewindow/AHardwareBuffer.cpp
+++ b/libs/nativewindow/AHardwareBuffer.cpp
@@ -122,6 +122,54 @@
return gbuffer->lockAsync(usage, usage, bounds, outVirtualAddress, fence);
}
+int AHardwareBuffer_lockPlanes(AHardwareBuffer* buffer, uint64_t usage,
+ int32_t fence, const ARect* rect, AHardwareBuffer_Planes* outPlanes) {
+ if (!buffer || !outPlanes) return BAD_VALUE;
+
+ if (usage & ~(AHARDWAREBUFFER_USAGE_CPU_READ_MASK |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_MASK)) {
+ ALOGE("Invalid usage flags passed to AHardwareBuffer_lock; only "
+ " AHARDWAREBUFFER_USAGE_CPU_* flags are allowed");
+ return BAD_VALUE;
+ }
+
+ usage = AHardwareBuffer_convertToGrallocUsageBits(usage);
+ GraphicBuffer* gBuffer = AHardwareBuffer_to_GraphicBuffer(buffer);
+ Rect bounds;
+ if (!rect) {
+ bounds.set(Rect(gBuffer->getWidth(), gBuffer->getHeight()));
+ } else {
+ bounds.set(Rect(rect->left, rect->top, rect->right, rect->bottom));
+ }
+ int format = AHardwareBuffer_convertFromPixelFormat(uint32_t(gBuffer->getPixelFormat()));
+ memset(outPlanes->planes, 0, sizeof(outPlanes->planes));
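+ // YUV formats are locked through the gralloc YCbCr path, which
+ // reports a separate data pointer and stride for each plane.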
+ if (AHardwareBuffer_formatIsYuv(format)) {
+ android_ycbcr yuvData;
+ int result = gBuffer->lockAsyncYCbCr(usage, bounds, &yuvData, fence);
+ if (result == 0) {
+ outPlanes->planeCount = 3;
+ outPlanes->planes[0].data = yuvData.y;
+ outPlanes->planes[0].pixelStride = 1;
+ outPlanes->planes[0].rowStride = yuvData.ystride;
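+ // chroma_step is 1 when Cb and Cr live in separate planes and 2
+ // when they are interleaved (semi-planar layouts).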
+ outPlanes->planes[1].data = yuvData.cb;
+ outPlanes->planes[1].pixelStride = yuvData.chroma_step;
+ outPlanes->planes[1].rowStride = yuvData.cstride;
+ outPlanes->planes[2].data = yuvData.cr;
+ outPlanes->planes[2].pixelStride = yuvData.chroma_step;
+ outPlanes->planes[2].rowStride = yuvData.cstride;
+ } else {
+ outPlanes->planeCount = 0;
+ }
+ return result;
+ } else {
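+ // Non-YUV formats expose a single plane. getStride() is in
+ // pixels, so the row stride is stride * bytes-per-pixel.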
+ const uint32_t pixelStride = AHardwareBuffer_bytesPerPixel(format);
+ outPlanes->planeCount = 1;
+ outPlanes->planes[0].pixelStride = pixelStride;
+ outPlanes->planes[0].rowStride = gBuffer->getStride() * pixelStride;
+ return gBuffer->lockAsync(usage, usage, bounds, &outPlanes->planes[0].data, fence);
+ }
+}
+
int AHardwareBuffer_unlock(AHardwareBuffer* buffer, int32_t* fence) {
if (!buffer) return BAD_VALUE;
@@ -366,6 +414,19 @@
ALOGE_IF(log, "AHARDWAREBUFFER_FORMAT_BLOB cannot be encoded as video");
return false;
}
+ } else if (AHardwareBuffer_formatIsYuv(desc->format)) {
+ if (desc->layers != 1) {
+ ALOGE_IF(log, "Layers must be 1 for YUV formats.");
+ return false;
+ }
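+ // Mip-mapped and cube-map sampling are not defined for YUV data,
+ // so reject those GPU usage bits up front.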
+ const uint64_t yuvInvalidGpuMask =
+ AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
+ AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP;
+ if (desc->usage & yuvInvalidGpuMask) {
+ ALOGE_IF(log, "Invalid usage flags specified for YUV format; "
+ "mip-mapping and cube-mapping are not allowed.");
+ return false;
+ }
} else {
if (desc->usage & AHARDWAREBUFFER_USAGE_SENSOR_DIRECT_DATA) {
ALOGE_IF(log, "AHARDWAREBUFFER_USAGE_SENSOR_DIRECT_DATA requires AHARDWAREBUFFER_FORMAT_BLOB");
@@ -465,6 +526,7 @@
case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
case AHARDWAREBUFFER_FORMAT_S8_UINT:
+ case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
// VNDK formats only -- unfortunately we can't differentiate from where we're called
case AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM:
case AHARDWAREBUFFER_FORMAT_YV12:
@@ -475,7 +537,6 @@
case AHARDWAREBUFFER_FORMAT_RAW12:
case AHARDWAREBUFFER_FORMAT_RAW_OPAQUE:
case AHARDWAREBUFFER_FORMAT_IMPLEMENTATION_DEFINED:
- case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
case AHARDWAREBUFFER_FORMAT_YCbCr_422_SP:
case AHARDWAREBUFFER_FORMAT_YCrCb_420_SP:
case AHARDWAREBUFFER_FORMAT_YCbCr_422_I:
@@ -486,6 +547,40 @@
}
}
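+// True for the YUV formats recognized by AHardwareBuffer_lockPlanes;
+// their contents cannot be described by a single data pointer.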
+bool AHardwareBuffer_formatIsYuv(uint32_t format) {
+ switch (format) {
+ case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+ case AHARDWAREBUFFER_FORMAT_YV12:
+ case AHARDWAREBUFFER_FORMAT_Y8:
+ case AHARDWAREBUFFER_FORMAT_Y16:
+ case AHARDWAREBUFFER_FORMAT_YCbCr_422_SP:
+ case AHARDWAREBUFFER_FORMAT_YCrCb_420_SP:
+ case AHARDWAREBUFFER_FORMAT_YCbCr_422_I:
+ return true;
+ default:
+ return false;
+ }
+}
+
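+// Bytes per pixel for single-plane formats. Returns 0 for formats
+// that have no fixed per-pixel size (YUV, RAW, BLOB and the like).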
+uint32_t AHardwareBuffer_bytesPerPixel(uint32_t format) {
+ switch (format) {
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ case AHARDWAREBUFFER_FORMAT_D16_UNORM:
+ return 2;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM:
+ return 3;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
+ return 4;
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
uint32_t AHardwareBuffer_convertFromPixelFormat(uint32_t hal_format) {
return hal_format;
}