hwc: Add load based partial mdp comp
Add support for load based partial MDP composition.
This is used on geometry changes, where a redraw is
unavoidable. We select the contiguous batch of layers
with the minimum pixel count for FB composition; the
rest go to MDP.
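For example, if sMaxPipesPerMixer is 4 and a frame has
8 app layers, the FB target needs one pipe, so the batch
size is 8 - (4 - 1) = 5: the 5-layer window with the
fewest source pixels is composed on the FB, and the other
3 layers each get an MDP pipe.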
Conflicts:
libhwcomposer/hwc_utils.cpp
libhwcomposer/hwc_utils.h
Change-Id: Ifc5eeb4785c75c37de97a2bb89ca81409d324691
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index ff33e94..e65d5e7 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -576,14 +576,26 @@
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
- int numAppLayers = ctx->listStats[mDpy].numAppLayers;
-
if(!sEnableMixedMode) {
//Mixed mode is disabled. No need to even try caching.
return false;
}
- //Setup mCurrentFrame
+ bool ret = false;
+ if(isLoadBasedCompDoable(ctx, list)) {
+ ret = loadBasedComp(ctx, list);
+ }
+
+ if(!ret) {
+ ret = cacheBasedComp(ctx, list);
+ }
+
+ return ret;
+}
+
+bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
mCurrentFrame.reset(numAppLayers);
updateLayerCache(ctx, list);
@@ -633,6 +645,77 @@
return true;
}
+bool MDPComp::loadBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ mCurrentFrame.reset(numAppLayers);
+
+ //TODO BatchSize could be optimized further based on available pipes, split
+ //displays etc.
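+ //The FB target consumes one pipe, so at most (sMaxPipesPerMixer - 1)
+ //app layers can get MDP pipes; the rest must form the FB batch.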
+ const int batchSize = numAppLayers - (sMaxPipesPerMixer - 1);
+ if(batchSize <= 0) {
+ ALOGD_IF(isDebug(), "%s: Not attempting", __FUNCTION__);
+ return false;
+ }
+
+ int minBatchStart = -1;
+ size_t minBatchPixelCount = SIZE_MAX;
+
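+ //Slide a window of batchSize layers across the list and pick the
+ //window with the smallest combined source pixel count for FB comp.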
+ for(int i = 0; i <= numAppLayers - batchSize; i++) {
+ uint32_t batchPixelCount = 0;
+ for(int j = i; j < i + batchSize; j++) {
+ hwc_layer_1_t* layer = &list->hwLayers[j];
+ hwc_rect_t crop = layer->sourceCrop;
+ batchPixelCount += (crop.right - crop.left) *
+ (crop.bottom - crop.top);
+ }
+
+ if(batchPixelCount < minBatchPixelCount) {
+ minBatchPixelCount = batchPixelCount;
+ minBatchStart = i;
+ }
+ }
+
+ if(minBatchStart < 0) {
+ ALOGD_IF(isDebug(), "%s: No batch found batchSize %d numAppLayers %d",
+ __FUNCTION__, batchSize, numAppLayers);
+ return false;
+ }
+
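+ //Mark layers outside the chosen batch for MDP; bail out if any of
+ //them cannot be composed by MDP.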
+ for(int i = 0; i < numAppLayers; i++) {
+ if(i < minBatchStart || i >= minBatchStart + batchSize) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: MDP unsupported layer found at %d",
+ __FUNCTION__, i);
+ return false;
+ }
+ mCurrentFrame.isFBComposed[i] = false;
+ }
+ }
+
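+ //The batch is contiguous in z-order, so the FB target is placed at
+ //the start of the batch.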
+ mCurrentFrame.fbZ = minBatchStart;
+ mCurrentFrame.fbCount = batchSize;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - batchSize;
+
+ if(!arePipesAvailable(ctx, list)) {
+ return false;
+ }
+
+ ALOGD_IF(isDebug(), "%s: fbZ %d batchSize %d",
+ __FUNCTION__, mCurrentFrame.fbZ, batchSize);
+ return true;
+}
+
+bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
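+ //Only for the primary display, only when no secure layer is present,
+ //and only on frames that carry a geometry change.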
+ if(mDpy or isSecurePresent(ctx, mDpy) or
+ not (list->flags & HWC_GEOMETRY_CHANGED)) {
+ return false;
+ }
+ return true;
+}
+
bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
hwc_display_contents_1_t* list, bool secureOnly) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index e1839cd..3882bee 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -144,6 +144,13 @@
bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* check if we can use layer cache to do at least partial MDP comp */
bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Partial MDP comp that uses layer caching, with power savings as the
+ primary goal */
+ bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Partial MDP comp that minimizes the pixel load on the FB, with
+ performance as the primary goal */
+ bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Checks if it's worth doing load based partial comp */
+ bool isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
/* checks for conditions where only video can be bypassed */
bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list,
bool secureOnly);
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 7e0ecd8..f5dc70c 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -704,6 +704,7 @@
ctx->listStats[dpy].skipCount = 0;
ctx->listStats[dpy].needsAlphaScale = false;
ctx->listStats[dpy].preMultipliedAlpha = false;
+ ctx->listStats[dpy].isSecurePresent = false;
ctx->listStats[dpy].yuvCount = 0;
char property[PROPERTY_VALUE_MAX];
ctx->listStats[dpy].extOnlyLayerIndex = -1;
@@ -733,6 +734,10 @@
//reset yuv indices
ctx->listStats[dpy].yuvIndices[i] = -1;
+ if (isSecureBuffer(hnd)) {
+ ctx->listStats[dpy].isSecurePresent = true;
+ }
+
if (isSkipLayer(&list->hwLayers[i])) {
ctx->listStats[dpy].skipCount++;
}
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 3f1aace..2510e4f 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -103,6 +103,7 @@
bool isDisplayAnimating;
ovutils::Dim roi;
bool secureUI; // Secure display layer
+ bool isSecurePresent;
};
struct LayerProp {
@@ -421,6 +422,10 @@
return (layer->transform & HWC_TRANSFORM_ROT_90);
}
+inline bool isSecurePresent(hwc_context_t *ctx, int dpy) {
+ return ctx->listStats[dpy].isSecurePresent;
+}
+
};
#endif //HWC_UTILS_H