Implement HW Bitmap for Skia pipeline
Implement HW Bitmap for the Skia pipeline. Use the new Skia
SkImage::MakeFromAHardwareBuffer API, which makes it possible to
record a HW Bitmap into a picture. Move the logic that uploads an
SkBitmap into a GraphicBuffer into pipeline-specific classes.
Test: All CTS and other tests pass for the HWUI pipeline. For the
Skia pipeline, graphics CTS tests pass, 2 UIRendering CTS tests that
exercise HW bitmaps with color spaces fail, the bitmapShaderEglImage
macrobench fails (to be fixed by a CL in Skia), HWUI unit tests pass,
and no EGL leaks were found.
Change-Id: Id5926d7cccd81af8b55400f44fb524a427543d05
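Note: the Skia pipeline side of this change lives in separate files not
shown below. A minimal sketch of the expected wrapping step, assuming the
GraphicBuffer has already been converted to an AHardwareBuffer and that
SkImage::MakeFromAHardwareBuffer takes an alpha type and color space
(illustrative only, not code from this patch):

    #include <SkImage.h>
    #include <android/hardware_buffer.h>

    // Wrap an already-uploaded hardware buffer as an SkImage so the Skia
    // pipeline can record it into an SkPicture without copying pixels.
    sk_sp<SkImage> wrapHardwareBuffer(AHardwareBuffer* hwBuffer,
            const SkImageInfo& info) {
        return SkImage::MakeFromAHardwareBuffer(hwBuffer, info.alphaType(),
                info.refColorSpace());
    }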
diff --git a/libs/hwui/renderthread/OpenGLPipeline.cpp b/libs/hwui/renderthread/OpenGLPipeline.cpp
index e1ae585..6b12012 100644
--- a/libs/hwui/renderthread/OpenGLPipeline.cpp
+++ b/libs/hwui/renderthread/OpenGLPipeline.cpp
@@ -267,6 +267,171 @@
thread.renderState().invokeFunctor(functor, mode, nullptr);
}
+#define FENCE_TIMEOUT 2000000000
+
+class AutoEglFence {
+public:
+ AutoEglFence(EGLDisplay display)
+ : mDisplay(display) {
+ fence = eglCreateSyncKHR(mDisplay, EGL_SYNC_FENCE_KHR, NULL);
+ }
+
+ ~AutoEglFence() {
+ if (fence != EGL_NO_SYNC_KHR) {
+ eglDestroySyncKHR(mDisplay, fence);
+ }
+ }
+
+ EGLSyncKHR fence = EGL_NO_SYNC_KHR;
+private:
+ EGLDisplay mDisplay = EGL_NO_DISPLAY;
+};
+
+class AutoEglImage {
+public:
+ AutoEglImage(EGLDisplay display, EGLClientBuffer clientBuffer)
+ : mDisplay(display) {
+ EGLint imageAttrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
+ image = eglCreateImageKHR(display, EGL_NO_CONTEXT,
+ EGL_NATIVE_BUFFER_ANDROID, clientBuffer, imageAttrs);
+ }
+
+ ~AutoEglImage() {
+ if (image != EGL_NO_IMAGE_KHR) {
+ eglDestroyImageKHR(mDisplay, image);
+ }
+ }
+
+ EGLImageKHR image = EGL_NO_IMAGE_KHR;
+private:
+ EGLDisplay mDisplay = EGL_NO_DISPLAY;
+};
+
+class AutoGlTexture {
+public:
+ AutoGlTexture(uirenderer::Caches& caches)
+ : mCaches(caches) {
+ glGenTextures(1, &mTexture);
+ caches.textureState().bindTexture(mTexture);
+ }
+
+ ~AutoGlTexture() {
+ mCaches.textureState().deleteTexture(mTexture);
+ }
+
+private:
+ uirenderer::Caches& mCaches;
+ GLuint mTexture = 0;
+};
+
+static bool uploadBitmapToGraphicBuffer(uirenderer::Caches& caches, SkBitmap& bitmap,
+ GraphicBuffer& buffer, GLint format, GLint type) {
+ EGLDisplay display = eglGetCurrentDisplay();
+ LOG_ALWAYS_FATAL_IF(display == EGL_NO_DISPLAY,
+ "Failed to get EGL_DEFAULT_DISPLAY! err=%s",
+ uirenderer::renderthread::EglManager::eglErrorString());
+ // We use an EGLImage to access the content of the GraphicBuffer
+ // The EGL image is later bound to a 2D texture
+ EGLClientBuffer clientBuffer = (EGLClientBuffer) buffer.getNativeBuffer();
+ AutoEglImage autoImage(display, clientBuffer);
+ if (autoImage.image == EGL_NO_IMAGE_KHR) {
+ ALOGW("Could not create EGL image, err =%s",
+ uirenderer::renderthread::EglManager::eglErrorString());
+ return false;
+ }
+ AutoGlTexture glTexture(caches);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, autoImage.image);
+
+ GL_CHECKPOINT(MODERATE);
+
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bitmap.width(), bitmap.height(),
+ format, type, bitmap.getPixels());
+
+ GL_CHECKPOINT(MODERATE);
+
+ // The fence is used to wait for the texture upload to finish
+ // properly. We cannot rely on glFlush() and glFinish() as
+ // some drivers completely ignore these API calls
+ AutoEglFence autoFence(display);
+ if (autoFence.fence == EGL_NO_SYNC_KHR) {
+ LOG_ALWAYS_FATAL("Could not create sync fence %#x", eglGetError());
+ return false;
+ }
+ // The flag EGL_SYNC_FLUSH_COMMANDS_BIT_KHR will trigger a
+ // pipeline flush (similar to what a glFlush() would do.)
+ EGLint waitStatus = eglClientWaitSyncKHR(display, autoFence.fence,
+ EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, FENCE_TIMEOUT);
+ if (waitStatus != EGL_CONDITION_SATISFIED_KHR) {
+ LOG_ALWAYS_FATAL("Failed to wait for the fence %#x", eglGetError());
+ return false;
+ }
+ return true;
+}
+
+// TODO: handle SRGB sanely
+static PixelFormat internalFormatToPixelFormat(GLint internalFormat) {
+ switch (internalFormat) {
+ case GL_LUMINANCE:
+ return PIXEL_FORMAT_RGBA_8888;
+ case GL_SRGB8_ALPHA8:
+ return PIXEL_FORMAT_RGBA_8888;
+ case GL_RGBA:
+ return PIXEL_FORMAT_RGBA_8888;
+ case GL_RGB:
+ return PIXEL_FORMAT_RGB_565;
+ case GL_RGBA16F:
+ return PIXEL_FORMAT_RGBA_FP16;
+ default:
+ LOG_ALWAYS_FATAL("Unsupported GL internal format: %d", internalFormat);
+ return PIXEL_FORMAT_UNKNOWN;
+ }
+}
+
+sk_sp<Bitmap> OpenGLPipeline::allocateHardwareBitmap(RenderThread& renderThread,
+ SkBitmap& skBitmap) {
+ renderThread.eglManager().initialize();
+ uirenderer::Caches& caches = uirenderer::Caches::getInstance();
+
+ const SkImageInfo& info = skBitmap.info();
+ if (info.colorType() == kUnknown_SkColorType || info.colorType() == kAlpha_8_SkColorType) {
+ ALOGW("unable to create hardware bitmap of colorType: %d", info.colorType());
+ return nullptr;
+ }
+
+ bool needSRGB = uirenderer::transferFunctionCloseToSRGB(skBitmap.info().colorSpace());
+ bool hasLinearBlending = caches.extensions().hasLinearBlending();
+ GLint format, type, internalFormat;
+ uirenderer::Texture::colorTypeToGlFormatAndType(caches, skBitmap.colorType(),
+ needSRGB && hasLinearBlending, &internalFormat, &format, &type);
+
+ PixelFormat pixelFormat = internalFormatToPixelFormat(internalFormat);
+ sp<GraphicBuffer> buffer = new GraphicBuffer(info.width(), info.height(), pixelFormat,
+ GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_SW_WRITE_NEVER |
+ GraphicBuffer::USAGE_SW_READ_NEVER,
+ std::string("Bitmap::allocateHardwareBitmap pid [") + std::to_string(getpid()) + "]");
+
+ status_t error = buffer->initCheck();
+ if (error < 0) {
+ ALOGW("GraphicBuffer allocation failed in allocateHardwareBitmap()");
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ if (CC_UNLIKELY(uirenderer::Texture::hasUnsupportedColorType(skBitmap.info(),
+ hasLinearBlending))) {
+ sk_sp<SkColorSpace> sRGB = SkColorSpace::MakeSRGB();
+ bitmap = uirenderer::Texture::uploadToN32(skBitmap, hasLinearBlending, std::move(sRGB));
+ } else {
+ bitmap = skBitmap;
+ }
+
+ if (!uploadBitmapToGraphicBuffer(caches, bitmap, *buffer, format, type)) {
+ return nullptr;
+ }
+ return sk_sp<Bitmap>(new Bitmap(buffer.get(), bitmap.info()));
+}
+
} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */
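Usage note for OpenGLPipeline::allocateHardwareBitmap above: a minimal
sketch of a call site, assuming the caller already runs on the render
thread; the surrounding setup (renderThread, srcBitmap) and the fallback
handling are illustrative, not part of this patch:

    // Allocate a GraphicBuffer-backed Bitmap via the OpenGL pipeline and
    // keep the heap-backed bitmap if the upload fails (e.g. unsupported
    // color type or GraphicBuffer allocation failure).
    sk_sp<Bitmap> hwBitmap =
            OpenGLPipeline::allocateHardwareBitmap(renderThread, srcBitmap);
    if (!hwBitmap) {
        ALOGW("hardware bitmap allocation failed, keeping heap-backed bitmap");
    }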
diff --git a/libs/hwui/renderthread/OpenGLPipeline.h b/libs/hwui/renderthread/OpenGLPipeline.h
index 6df8be4..c434f2e 100644
--- a/libs/hwui/renderthread/OpenGLPipeline.h
+++ b/libs/hwui/renderthread/OpenGLPipeline.h
@@ -61,6 +61,8 @@
static void destroyLayer(RenderNode* node);
static void prepareToDraw(const RenderThread& thread, Bitmap* bitmap);
static void invokeFunctor(const RenderThread& thread, Functor* functor);
+ static sk_sp<Bitmap> allocateHardwareBitmap(RenderThread& thread,
+ SkBitmap& skBitmap);
private:
EglManager& mEglManager;
diff --git a/libs/hwui/renderthread/RenderProxy.cpp b/libs/hwui/renderthread/RenderProxy.cpp
index eed5238..d5875d8 100644
--- a/libs/hwui/renderthread/RenderProxy.cpp
+++ b/libs/hwui/renderthread/RenderProxy.cpp
@@ -664,7 +664,7 @@
}
CREATE_BRIDGE2(allocateHardwareBitmap, RenderThread* thread, SkBitmap* bitmap) {
- sk_sp<Bitmap> hardwareBitmap = Bitmap::allocateHardwareBitmap(*args->thread, *args->bitmap);
+ sk_sp<Bitmap> hardwareBitmap = args->thread->allocateHardwareBitmap(*args->bitmap);
return hardwareBitmap.release();
}
diff --git a/libs/hwui/renderthread/RenderThread.cpp b/libs/hwui/renderthread/RenderThread.cpp
index 1450ec9..0554583 100644
--- a/libs/hwui/renderthread/RenderThread.cpp
+++ b/libs/hwui/renderthread/RenderThread.cpp
@@ -16,8 +16,12 @@
#include "RenderThread.h"
-#include "../renderstate/RenderState.h"
-#include "../pipeline/skia/SkiaOpenGLReadback.h"
+#include "hwui/Bitmap.h"
+#include "renderstate/RenderState.h"
+#include "renderthread/OpenGLPipeline.h"
+#include "pipeline/skia/SkiaOpenGLReadback.h"
+#include "pipeline/skia/SkiaOpenGLPipeline.h"
+#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "CanvasContext.h"
#include "EglManager.h"
#include "OpenGLReadback.h"
@@ -433,6 +437,22 @@
return next;
}
+sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
+ auto renderType = Properties::getRenderPipelineType();
+ switch (renderType) {
+ case RenderPipelineType::OpenGL:
+ return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
+ case RenderPipelineType::SkiaGL:
+ return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
+ case RenderPipelineType::SkiaVulkan:
+ return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
+ default:
+ LOG_ALWAYS_FATAL("render pipeline type %d not supported", (int32_t) renderType);
+ break;
+ }
+ return nullptr;
+}
+
} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */
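Note on the dispatch in RenderThread::allocateHardwareBitmap above: it
assumes matching static entry points on the Skia pipelines, declared in
other files of this change. A minimal sketch of the assumed declarations,
with signatures inferred from the call sites (actual headers may differ):

    // pipeline/skia/SkiaOpenGLPipeline.h and SkiaVulkanPipeline.h are
    // assumed to expose an entry point mirroring OpenGLPipeline's:
    static sk_sp<Bitmap> allocateHardwareBitmap(renderthread::RenderThread& thread,
            SkBitmap& skBitmap);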
diff --git a/libs/hwui/renderthread/RenderThread.h b/libs/hwui/renderthread/RenderThread.h
index 9bc5985..4b5601c 100644
--- a/libs/hwui/renderthread/RenderThread.h
+++ b/libs/hwui/renderthread/RenderThread.h
@@ -24,6 +24,7 @@
#include <GrContext.h>
#include <cutils/compiler.h>
+#include <SkBitmap.h>
#include <ui/DisplayInfo.h>
#include <utils/Looper.h>
#include <utils/Thread.h>
@@ -33,6 +34,7 @@
namespace android {
+class Bitmap;
class DisplayEventReceiver;
namespace uirenderer {
@@ -104,6 +106,8 @@
VulkanManager& vulkanManager() { return *mVkManager; }
+ sk_sp<Bitmap> allocateHardwareBitmap(SkBitmap& skBitmap);
+
protected:
virtual bool threadLoop() override;