hwc: Add support for secure RGB layer
1. Detect and count the number of secure RGB layers present.
2. Mark secure RGB layers for MDP comp in the cache-based strategy.
3. If both full MDP comp and partial MDP comp fail, mark secure RGB
layers for MDP composition.
4. In partial MDP comp, reject load-based composition if a secure
layer is present, to avoid GPU composition.
5. Disable the idle fallback feature if a secure RGB layer is present,
to avoid GPU composition.
Change-Id: Idb4c4f09b081ec20240a2602aa3f20332fa49ad6
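The sketch below is illustrative only: the types and limits are
simplified stand-ins, not the real hwcomposer code (which operates on
hwc_context_t and hwc_display_contents_1_t, as in the diff that
follows). It shows the strategy fallback chain this change sets up:
full MDP composition first, then the new MDP-only-layers pass covering
video plus secure RGB, then video-only, with GPU composition as the
last resort.

    #include <cstdio>

    // Simplified stand-ins for the per-display frame statistics; the
    // real fields live in hwc_context_t::listStats (see hwc_utils.h).
    struct FrameStats {
        int  appLayers;
        int  secureRGBCount;  // new counter introduced by this change
        bool secureUI;
        bool idleFallback;
    };

    // Stand-in for tryFullFrame(): idle fallback now also bails out
    // when secure RGB layers are present, mirroring the secureUI
    // check, so the frame is never pushed to GPU composition.
    bool tryFullFrame(const FrameStats& s) {
        if (s.idleFallback && !s.secureUI && s.secureRGBCount == 0)
            return false;         // idle: let GPU compose to save power
        return s.appLayers <= 4;  // stand-in pipe-availability check
    }

    // Stand-in for the new tryMDPOnlyLayers(): succeeds whenever the
    // video and secure RGB layers can all be placed on MDP pipes.
    bool tryMDPOnlyLayers(const FrameStats& s) {
        return s.secureRGBCount > 0;
    }

    bool tryVideoOnly(const FrameStats&) { return false; }  // elided

    int main() {
        FrameStats s{6, 1, false, true};  // 6 layers, 1 secure RGB
        bool mdp = tryFullFrame(s) || tryMDPOnlyLayers(s) ||
                   tryVideoOnly(s);
        std::printf("composed via %s\n", mdp ? "MDP" : "GPU");
        return 0;
    }

Here full-frame comp fails (too many layers) while idle fallback is
suppressed by the secure RGB count, so the new pass keeps the secure
layer on MDP instead of falling back to the GPU.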
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 6084d8d..3fb18c9 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -677,7 +677,9 @@
const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;
- if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
+ // No idle fallback if secure display or secure RGB layers are present
+ if(sIdleFallBack && (!ctx->listStats[mDpy].secureUI &&
+ !ctx->listStats[mDpy].secureRGBCount)) {
ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
return false;
}
@@ -1047,6 +1049,8 @@
}
updateYUV(ctx, list, false /*secure only*/);
+ /* mark secure RGB layers for MDP comp */
+ updateSecureRGB(ctx, list);
bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
if(!ret) {
ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
@@ -1237,6 +1241,64 @@
return true;
}
+/* if tryFullFrame fails, try to push all video and secure RGB layers to MDP */
+bool MDPComp::tryMDPOnlyLayers(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ const bool secureOnly = true;
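+ // Two attempts: first try all video and secure RGB layers on MDP;
+ // if that fails, retry with only the secure layers.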
+ return mdpOnlyLayersComp(ctx, list, not secureOnly) or
+ mdpOnlyLayersComp(ctx, list, secureOnly);
+
+}
+
+bool MDPComp::mdpOnlyLayersComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, bool secureOnly) {
+
+ if(sSimulationFlags & MDPCOMP_AVOID_MDP_ONLY_LAYERS)
+ return false;
+
+ /* Bail out if we are processing only secure layers
+ * and we don't have any */
+ if(!isSecurePresent(ctx, mDpy) && secureOnly){
+ reset(ctx);
+ return false;
+ }
+
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ mCurrentFrame.reset(numAppLayers);
+ mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
+
+ updateYUV(ctx, list, secureOnly);
+ /* mark secure RGB layers for MDP comp */
+ updateSecureRGB(ctx, list);
+
+ if(mCurrentFrame.mdpCount == 0) {
+ reset(ctx);
+ return false;
+ }
+
+ /* find the maximum batch of layers to be marked for framebuffer */
+ bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
+ if(!ret) {
+ ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
+ reset(ctx);
+ return false;
+ }
+
+ if(sEnableYUVsplit){
+ adjustForSourceSplit(ctx, list);
+ }
+
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
+ return false;
+ }
+
+ ALOGD_IF(sSimulationFlags,"%s: MDP_ONLY_LAYERS_COMP SUCCEEDED",
+ __FUNCTION__);
+ return true;
+}
+
/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
if(isSkipLayer(layer)) {
@@ -1270,6 +1332,27 @@
return true;
}
+/* Checks for conditions where Secure RGB layers cannot be bypassed */
+bool MDPComp::isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
+ if(isSkipLayer(layer)) {
+ ALOGD_IF(isDebug(), "%s: Secure RGB layer marked SKIP dpy %d",
+ __FUNCTION__, mDpy);
+ return false;
+ }
+
+ if(isSecuring(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
+ return false;
+ }
+
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Unsupported secure RGB layer",
+ __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
/* starts at fromIndex and checks each layer to find
* whether it overlaps with any updating layer above it in z-order
* till the end of the batch. Returns true if it finds any intersection */
@@ -1487,6 +1570,32 @@
mCurrentFrame.fbCount);
}
+void MDPComp::updateSecureRGB(hwc_context_t* ctx,
+ hwc_display_contents_1_t* list) {
+ int nSecureRGBCount = ctx->listStats[mDpy].secureRGBCount;
+ for(int index = 0; index < nSecureRGBCount; index++) {
+ int nSecureRGBIndex = ctx->listStats[mDpy].secureRGBIndices[index];
+ hwc_layer_1_t* layer = &list->hwLayers[nSecureRGBIndex];
+
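+ // Move each MDP-doable secure RGB layer out of the FB (GPU) batch;
+ // push layers that cannot be handled this frame back to the FB.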
+ if(!isSecureRGBDoable(ctx, layer)) {
+ if(!mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
+ mCurrentFrame.isFBComposed[nSecureRGBIndex] = true;
+ mCurrentFrame.fbCount++;
+ }
+ } else {
+ if(mCurrentFrame.isFBComposed[nSecureRGBIndex]) {
+ mCurrentFrame.isFBComposed[nSecureRGBIndex] = false;
+ mCurrentFrame.fbCount--;
+ }
+ }
+ }
+
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
+ mCurrentFrame.fbCount - mCurrentFrame.dropCount;
+ ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
+ mCurrentFrame.fbCount);
+}
+
hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
hwc_display_contents_1_t* list){
hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};
@@ -1708,7 +1817,10 @@
if(isFrameDoable(ctx)) {
generateROI(ctx, list);
- mModeOn = tryFullFrame(ctx, list) || tryVideoOnly(ctx, list);
+ // if tryFullFrame fails, try to push all video and secure RGB layers
+ // to MDP for composition.
+ mModeOn = tryFullFrame(ctx, list) || tryMDPOnlyLayers(ctx, list) ||
+ tryVideoOnly(ctx, list);
if(mModeOn) {
setMDPCompLayerFlags(ctx, list);
} else {
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index 5214ac6..8c833c2 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -74,6 +74,7 @@
MDPCOMP_AVOID_CACHE_MDP = 0x002,
MDPCOMP_AVOID_LOAD_MDP = 0x004,
MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
+ MDPCOMP_AVOID_MDP_ONLY_LAYERS = 0x010,
};
/* mdp pipe data */
@@ -190,8 +191,14 @@
bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
bool secureOnly);
+ /* checks for conditions where only secure RGB and video can be bypassed */
+ bool tryMDPOnlyLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool mdpOnlyLayersComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+ bool secureOnly);
/* checks for conditions where YUV layers cannot be bypassed */
bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
+ /* checks for conditions where Secure RGB layers cannot be bypassed */
+ bool isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
/* checks if MDP/MDSS can process current list w.r.to HW limitations
* All peculiar HW limitations should go here */
bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
@@ -217,6 +224,9 @@
/* updates cache map with YUV info */
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
bool secureOnly);
+ /* updates cache map with secure RGB info */
+ void updateSecureRGB(hwc_context_t* ctx,
+ hwc_display_contents_1_t* list);
/* Validates if the GPU/MDP layer split chosen by a strategy is supported
* by MDP.
* Sets up MDP comp data structures to reflect conversion from layers to
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 944f152..f6e5dd0 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -841,6 +841,7 @@
ctx->listStats[dpy].yuv4k2kCount = 0;
ctx->dpyAttr[dpy].mActionSafePresent = isActionSafePresent(ctx, dpy);
ctx->listStats[dpy].renderBufIndexforABC = -1;
+ ctx->listStats[dpy].secureRGBCount = 0;
resetROI(ctx, dpy);
@@ -868,6 +869,12 @@
if (isSecureBuffer(hnd)) {
ctx->listStats[dpy].isSecurePresent = true;
+ if(not isYuvBuffer(hnd)) {
+ // cache secure RGB layer indices, as is done for YUV layers
+ int& secureRGBCount = ctx->listStats[dpy].secureRGBCount;
+ ctx->listStats[dpy].secureRGBIndices[secureRGBCount] = (int)i;
+ secureRGBCount++;
+ }
}
if (isSkipLayer(&list->hwLayers[i])) {
@@ -1477,22 +1484,20 @@
ovutils::OV_MDP_BLEND_FG_PREMULT);
}
- if(isYuvBuffer(hnd)) {
- if(isSecureBuffer(hnd)) {
- ovutils::setMdpFlags(mdpFlags,
- ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
- }
- if(metadata && (metadata->operation & PP_PARAM_INTERLACED) &&
- metadata->interlaced) {
- ovutils::setMdpFlags(mdpFlags,
- ovutils::OV_MDP_DEINTERLACE);
- }
+ if(metadata && (metadata->operation & PP_PARAM_INTERLACED) &&
+ metadata->interlaced) {
+ ovutils::setMdpFlags(mdpFlags,
+ ovutils::OV_MDP_DEINTERLACE);
+ }
+
+ // Mark MDP flags with SECURE_OVERLAY_SESSION for driver
+ if(isSecureBuffer(hnd)) {
+ ovutils::setMdpFlags(mdpFlags,
+ ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
}
if(isSecureDisplayBuffer(hnd)) {
- // Secure display needs both SECURE_OVERLAY and SECURE_DISPLAY_OV
- ovutils::setMdpFlags(mdpFlags,
- ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
+ // Mark MDP flags with SECURE_DISPLAY_OVERLAY_SESSION for driver
ovutils::setMdpFlags(mdpFlags,
ovutils::OV_MDP_SECURE_DISPLAY_OVERLAY_SESSION);
}
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 3648ba7..88e8ffd 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -133,6 +133,9 @@
hwc_rect_t rRoi; //right ROI. Unused in single DSI panels.
//App Buffer Composition index
int renderBufIndexforABC;
+ // Secure RGB specific
+ int secureRGBCount;
+ int secureRGBIndices[MAX_NUM_APP_LAYERS];
};
//PTOR Comp info
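
The bookkeeping in updateSecureRGB() maintains the invariant
mdpCount = layerCount - fbCount - dropCount. The following is a
minimal, self-contained sketch of that marking logic; Frame below is a
simplified stand-in for the MDPComp frame-info structure, not the real
type.

    #include <cassert>
    #include <vector>

    // Simplified stand-in for the MDPComp frame bookkeeping.
    struct Frame {
        int layerCount;
        int dropCount;
        int fbCount;
        int mdpCount;
        std::vector<bool> isFBComposed;
    };

    // Mirrors the marking loop in updateSecureRGB(): a doable secure
    // RGB layer moves from FB (GPU) to MDP; a non-doable one (skip
    // layer, securing in progress, unsupported format) moves back.
    void markLayer(Frame& f, int idx, bool doable) {
        if (!doable && !f.isFBComposed[idx]) {
            f.isFBComposed[idx] = true;
            f.fbCount++;
        } else if (doable && f.isFBComposed[idx]) {
            f.isFBComposed[idx] = false;
            f.fbCount--;
        }
        f.mdpCount = f.layerCount - f.fbCount - f.dropCount;
    }

    int main() {
        Frame f{4, 0, 4, 0, {true, true, true, true}};
        markLayer(f, 2, true);   // secure RGB layer 2 is MDP-doable
        assert(f.fbCount == 3 && f.mdpCount == 1);
        markLayer(f, 2, false);  // e.g. MDP securing still active
        assert(f.fbCount == 4 && f.mdpCount == 0);
        return 0;
    }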