Merge "libhwcomposer: MDP partial frame update"
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index e970d4c..ec9abfb 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -494,7 +494,8 @@
}
}
- if(!Overlay::displayCommit(ctx->dpyAttr[dpy].fd)) {
+ if(!Overlay::displayCommit(ctx->dpyAttr[dpy].fd,
+ ctx->listStats[dpy].roi)) {
ALOGE("%s: display commit failed for dpy %d!", __FUNCTION__, dpy);
ret = -1;
}
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 788a890..4bbad8b 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -41,6 +41,7 @@
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
+bool MDPComp::sEnablePartialFrameUpdate = false;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
float MDPComp::sMaxBw = 2.3f;
uint32_t MDPComp::sCompBytesClaimed = 0;
@@ -75,9 +76,10 @@
dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
index,
(mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
- mCurrentFrame.layerToMDP[index],
+ mCurrentFrame.layerToMDP[index],
(mCurrentFrame.isFBComposed[index] ?
- (mCurrentFrame.needsRedraw ? "GLES" : "CACHE") : "MDP"),
+ (mCurrentFrame.drop[index] ? "DROP" :
+ (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
(mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
dumpsys_log(buf,"\n");
@@ -106,12 +108,19 @@
sEnableMixedMode = false;
}
- sDebugLogs = false;
if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
if(atoi(property) != 0)
sDebugLogs = true;
}
+ if(property_get("persist.hwc.partialupdate.enable", property, NULL) > 0) {
+ if((atoi(property) != 0) && ctx->mMDP.panel == MIPI_CMD_PANEL &&
+ qdutils::MDPVersion::getInstance().is8x74v2())
+ sEnablePartialFrameUpdate = true;
+ }
+ ALOGE_IF(isDebug(), "%s: Partial Update applicable?: %d",__FUNCTION__,
+ sEnablePartialFrameUpdate);
+
sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
int val = atoi(property);
@@ -182,8 +191,11 @@
layer->compositionType = HWC_OVERLAY;
layer->hints |= HWC_HINT_CLEAR_FB;
} else {
- if(!mCurrentFrame.needsRedraw)
+ /* Drop the layer when it's already present in the FB OR when it lies
+ * outside the frame's ROI */
+ if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
layer->compositionType = HWC_OVERLAY;
+ }
}
}
}
@@ -376,6 +388,91 @@
return ret;
}
+bool MDPComp::validateAndApplyROI(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, hwc_rect_t roi) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
+ if(!isValidRect(roi))
+ return false;
+
+ for(int i = 0; i < numAppLayers; i++){
+ const hwc_layer_1_t* layer = &list->hwLayers[i];
+
+ hwc_rect_t dstRect = layer->displayFrame;
+ hwc_rect_t srcRect = layer->sourceCrop;
+ int transform = layer->transform;
+ trimLayer(ctx, mDpy, transform, srcRect, dstRect);
+
+ hwc_rect_t res = getIntersection(roi, dstRect);
+
+ int res_w = res.right - res.left;
+ int res_h = res.bottom - res.top;
+ int dst_w = dstRect.right - dstRect.left;
+ int dst_h = dstRect.bottom - dstRect.top;
+
+ if(!isValidRect(res)) {
+ mCurrentFrame.drop[i] = true;
+ mCurrentFrame.dropCount++;
+ } else {
+ /* Reset the frame ROI when a layer that needs scaling would
+ * also be cropped by the ROI */
+ if((res_w != dst_w || res_h != dst_h) &&
+ needsScaling(ctx, layer, mDpy)) {
+ ALOGD_IF(isDebug(), "%s: Resetting ROI due to scaling",
+ __FUNCTION__);
+ memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
+ mCurrentFrame.dropCount = 0;
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void MDPComp::generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
+ if(!sEnablePartialFrameUpdate) {
+ return;
+ }
+
+ if(mDpy || isDisplaySplit(ctx, mDpy)){
+ ALOGE_IF(isDebug(), "%s: ROI not supported for "
+ "(1) external/virtual displays (2) dual-DSI displays",
+ __FUNCTION__);
+ return;
+ }
+
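+ /* A geometry change keeps the default full-frame ROI that
+ * setListStats() programmed into listStats[dpy].roi */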
+ if(list->flags & HWC_GEOMETRY_CHANGED)
+ return;
+
+ struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
+ for(int index = 0; index < numAppLayers; index++ ) {
+ if ((mCachedFrame.hnd[index] != list->hwLayers[index].handle) ||
+ isYuvBuffer((private_handle_t *)list->hwLayers[index].handle)) {
+ hwc_rect_t dstRect = list->hwLayers[index].displayFrame;
+ hwc_rect_t srcRect = list->hwLayers[index].sourceCrop;
+ int transform = list->hwLayers[index].transform;
+
+ /* Intersect against display boundaries */
+ trimLayer(ctx, mDpy, transform, srcRect, dstRect);
+ roi = getUnion(roi, dstRect);
+ }
+ }
+
+ if(!validateAndApplyROI(ctx, list, roi)){
+ roi = (struct hwc_rect) {0, 0,
+ (int)ctx->dpyAttr[mDpy].xres, (int)ctx->dpyAttr[mDpy].yres};
+ }
+
+ ctx->listStats[mDpy].roi.x = roi.left;
+ ctx->listStats[mDpy].roi.y = roi.top;
+ ctx->listStats[mDpy].roi.w = roi.right - roi.left;
+ ctx->listStats[mDpy].roi.h = roi.bottom - roi.top;
+
+ ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
+ roi.left, roi.top, roi.right, roi.bottom);
+}
+
/* Checks for conditions where all the layers marked for MDP comp cannot be
* bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
@@ -452,15 +549,14 @@
return false;
}
}
-
- //Setup mCurrentFrame
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
mCurrentFrame.fbCount = 0;
mCurrentFrame.fbZ = -1;
- memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed));
+ memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
+ sizeof(mCurrentFrame.isFBComposed));
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
+ mCurrentFrame.dropCount;
- int mdpCount = mCurrentFrame.mdpCount;
- if(mdpCount > sMaxPipesPerMixer) {
+ if(mCurrentFrame.mdpCount > sMaxPipesPerMixer) {
ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
return false;
}
@@ -540,10 +636,11 @@
bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
hwc_display_contents_1_t* list, bool secureOnly) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
mCurrentFrame.reset(numAppLayers);
updateYUV(ctx, list, secureOnly);
int mdpCount = mCurrentFrame.mdpCount;
- int fbNeeded = int(mCurrentFrame.fbCount != 0);
+ int fbNeeded = (mCurrentFrame.fbCount != 0);
if(!isYuvPresent(ctx, mDpy)) {
return false;
@@ -611,6 +708,7 @@
* But cached ones can be marked for MDP*/
int maxBatchStart = -1;
+ int maxBatchEnd = -1;
int maxBatchCount = 0;
/* All or Nothing is cached. No batching needed */
@@ -623,39 +721,46 @@
return true;
}
- /* Search for max number of contiguous (cached) layers */
+ /* Search for the longest run of contiguous cached layers; dropped
+ * layers stay in the run but do not count toward the batch size */
int i = 0;
while (i < mCurrentFrame.layerCount) {
int count = 0;
+ int start = i;
while(i < mCurrentFrame.layerCount && mCurrentFrame.isFBComposed[i]) {
- count++; i++;
+ if(!mCurrentFrame.drop[i])
+ count++;
+ i++;
}
if(count > maxBatchCount) {
maxBatchCount = count;
- maxBatchStart = i - count;
+ maxBatchStart = start;
+ maxBatchEnd = i - 1;
mCurrentFrame.fbZ = maxBatchStart;
}
if(i < mCurrentFrame.layerCount) i++;
}
- /* reset rest of the layers for MDP comp */
+ mCurrentFrame.fbCount = maxBatchCount;
+
+ /* reset the rest of the layers lying inside the ROI for MDP comp */
for(int i = 0; i < mCurrentFrame.layerCount; i++) {
hwc_layer_1_t* layer = &list->hwLayers[i];
- if(i != maxBatchStart) {
- //If an unsupported layer is being attempted to be pulled out we
- //should fail
- if(not isSupportedForMDPComp(ctx, layer)) {
- return false;
+ if((i < maxBatchStart || i > maxBatchEnd) &&
+ mCurrentFrame.isFBComposed[i]){
+ if(!mCurrentFrame.drop[i]){
+ //If an unsupported layer is being attempted to
+ //be pulled out we should fail
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ return false;
+ }
+ mCurrentFrame.isFBComposed[i] = false;
}
- mCurrentFrame.isFBComposed[i] = false;
- } else {
- i += maxBatchCount;
}
}
- mCurrentFrame.fbCount = maxBatchCount;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
+ mCurrentFrame.fbCount - mCurrentFrame.dropCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
@@ -671,7 +776,8 @@
for(int i = 0; i < numAppLayers; i++) {
hwc_layer_1_t* layer = &list->hwLayers[i];
if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
- fbCount++;
+ if(!mCurrentFrame.drop[i])
+ fbCount++;
mCurrentFrame.isFBComposed[i] = true;
} else {
mCurrentFrame.isFBComposed[i] = false;
@@ -680,10 +786,12 @@
}
mCurrentFrame.fbCount = fbCount;
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
+ - mCurrentFrame.dropCount;
- ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d",__FUNCTION__,
- mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
+ ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
+ ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
+ mCurrentFrame.dropCount);
}
void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
@@ -719,8 +827,8 @@
}
mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
- ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
+ mCurrentFrame.fbCount - mCurrentFrame.dropCount;
+ ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
}
@@ -745,7 +853,7 @@
layer %d",__FUNCTION__, index);
return false;
}
- } else if(fbBatch == false) {
+ } else if(fbBatch == false && !mCurrentFrame.drop[index]) {
mdpNextZOrder++;
fbBatch = true;
}
@@ -833,6 +941,8 @@
//reset old data
mCurrentFrame.reset(numLayers);
+ memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
+ mCurrentFrame.dropCount = 0;
//if the number of app layers exceeds MAX_NUM_APP_LAYERS, fall back to GPU
//and do not cache the information for the next draw cycle
@@ -853,6 +963,8 @@
goto exit;
}
+ generateROI(ctx, list);
+
//Check whether layers marked for MDP Composition is actually doable.
if(isFullFrameDoable(ctx, list)) {
mCurrentFrame.map();
@@ -922,6 +1034,7 @@
//UpdateLayerFlags
setMDPCompLayerFlags(ctx, list);
+ mCachedFrame.cacheAll(list);
mCachedFrame.updateCounts(mCurrentFrame);
// unlock it before calling dump function to avoid deadlock
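
For reference, a minimal self-contained sketch of the rect math that
generateROI() and validateAndApplyROI() rely on. The Rect type and the
helpers below are stand-ins assumed to behave like hwc_rect_t with
getUnion()/getIntersection()/isValidRect(); they are illustrative, not
the actual qdutils implementations.

#include <algorithm>
#include <cstdio>

struct Rect { int left, top, right, bottom; };

// A rect is valid (non-empty) when it has positive width and height.
static bool isValid(const Rect& r) {
    return (r.right - r.left) > 0 && (r.bottom - r.top) > 0;
}

// Union: the smallest rect covering both; an empty rect is the identity,
// which lets the ROI start out as {0,0,0,0} and grow per dirty layer.
static Rect getUnion(const Rect& a, const Rect& b) {
    if(!isValid(a)) return b;
    if(!isValid(b)) return a;
    return { std::min(a.left, b.left), std::min(a.top, b.top),
             std::max(a.right, b.right), std::max(a.bottom, b.bottom) };
}

// Intersection: the overlap; comes out invalid when the rects are disjoint.
static Rect getIntersection(const Rect& a, const Rect& b) {
    return { std::max(a.left, b.left), std::max(a.top, b.top),
             std::min(a.right, b.right), std::min(a.bottom, b.bottom) };
}

int main() {
    // One updating layer, e.g. a status-bar sized region on a 1080p panel.
    Rect roi = getUnion(Rect{0, 0, 0, 0}, Rect{0, 0, 1080, 192});

    // A cached layer below the ROI: the intersection is empty, so the
    // layer would be marked in drop[] and skipped during composition.
    Rect cached = {0, 500, 1080, 700};
    Rect res = getIntersection(roi, cached);
    std::printf("ROI [%d %d %d %d], drop cached layer? %s\n",
                roi.left, roi.top, roi.right, roi.bottom,
                isValid(res) ? "no" : "yes");
    return 0;
}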
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index 7aa75a2..d872cdf 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -90,6 +90,10 @@
/* layer composing on FB? */
int fbCount;
bool isFBComposed[MAX_NUM_APP_LAYERS];
+ /* layers lying outside the ROI that will
+ * be dropped from composition */
+ int dropCount;
+ bool drop[MAX_NUM_APP_LAYERS];
bool needsRedraw;
int fbZ;
@@ -150,6 +154,10 @@
hwc_display_contents_1_t* list);
/* checks if the required bandwidth exceeds a certain max */
bool bandwidthCheck(hwc_context_t *ctx, const uint32_t& size);
+ /* generates ROI based on the modified area of the frame */
+ void generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list);
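+ /* validates the generated ROI and marks layers lying outside it for drop */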
+ bool validateAndApplyROI(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+ hwc_rect_t roi);
/* Is debug enabled */
static bool isDebug() { return sDebugLogs ? true : false; };
@@ -172,6 +180,8 @@
int mDpy;
static bool sEnabled;
static bool sEnableMixedMode;
+ /* Enables partial frame update */
+ static bool sEnablePartialFrameUpdate;
static bool sDebugLogs;
static bool sIdleFallBack;
static int sMaxPipesPerMixer;
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index aa66ff7..aa18abf 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -708,6 +708,8 @@
char property[PROPERTY_VALUE_MAX];
ctx->listStats[dpy].extOnlyLayerIndex = -1;
ctx->listStats[dpy].isDisplayAnimating = false;
+ ctx->listStats[dpy].roi = ovutils::Dim(0, 0,
+ (int)ctx->dpyAttr[dpy].xres, (int)ctx->dpyAttr[dpy].yres);
optimizeLayerRects(ctx, list, dpy);
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index b5d76e9..8822af0 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -101,11 +101,12 @@
// Notifies hwcomposer about the start and end of animation
// This will be set to true during animation, otherwise false.
bool isDisplayAnimating;
+ ovutils::Dim roi;
};
struct LayerProp {
uint32_t mFlags; //qcom specific layer flags
- LayerProp():mFlags(0) {};
+ LayerProp():mFlags(0){};
};
struct VsyncState {
diff --git a/liboverlay/overlay.cpp b/liboverlay/overlay.cpp
index afe62e2..b095e9e 100644
--- a/liboverlay/overlay.cpp
+++ b/liboverlay/overlay.cpp
@@ -321,13 +321,23 @@
}
bool Overlay::displayCommit(const int& fd) {
+ utils::Dim roi;
+ return displayCommit(fd, roi);
+}
+
+bool Overlay::displayCommit(const int& fd, const utils::Dim& roi) {
//Commit
struct mdp_display_commit info;
memset(&info, 0, sizeof(struct mdp_display_commit));
info.flags = MDP_DISPLAY_COMMIT_OVERLAY;
+ info.roi.x = roi.x;
+ info.roi.y = roi.y;
+ info.roi.w = roi.w;
+ info.roi.h = roi.h;
+
if(!mdp_wrapper::displayCommit(fd, info)) {
- ALOGE("%s: commit failed", __func__);
- return false;
+ ALOGE("%s: commit failed", __func__);
+ return false;
}
return true;
}
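
The forwarding overload keeps legacy call sites working. Below is a
standalone sketch of that pattern; Dim here is a stand-in for utils::Dim
and commit() for displayCommit(), so names and values are illustrative
only, not the real definitions.

#include <cstdio>

// Stand-in for utils::Dim / the mdp_display_commit roi fields.
struct Dim { int x = 0, y = 0, w = 0, h = 0; };

// Mirrors the pattern above: the no-ROI path commits a default (zero)
// Dim, leaving info.roi all-zero -- the full-frame case -- while callers
// with a computed ROI pass it through.
static void commit(const Dim& roi = Dim()) {
    std::printf("commit roi: x=%d y=%d w=%d h=%d\n", roi.x, roi.y, roi.w, roi.h);
}

int main() {
    commit();                      // legacy path: zero ROI (full frame)
    commit(Dim{0, 0, 1080, 192});  // partial update path
    return 0;
}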
diff --git a/liboverlay/overlay.h b/liboverlay/overlay.h
index 9d7f5c8..c16f6e6 100644
--- a/liboverlay/overlay.h
+++ b/liboverlay/overlay.h
@@ -109,6 +109,7 @@
static int getDMAMode();
/* Returns the framebuffer node backing up the display */
static int getFbForDpy(const int& dpy);
+ static bool displayCommit(const int& fd, const utils::Dim& roi);
static bool displayCommit(const int& fd);
private:
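
To exercise the feature on a supported target (8x74v2 with a MIPI
command-mode panel), flip the property gate added above; it is read
during HWC initialization, so restart the framework to pick it up:

adb shell setprop persist.hwc.partialupdate.enable 1
adb shell stop && adb shell start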