Merge "hwcomposer: use DMA overlay pipe to do frame update on 8x10"
diff --git a/common.mk b/common.mk
index ded2e75..b48a38c 100644
--- a/common.mk
+++ b/common.mk
@@ -27,7 +27,7 @@
common_flags += -D__ARM_HAVE_NEON
endif
-ifeq ($(call is-board-platform-in-list, msm8974 msm8226), true)
+ifeq ($(call is-board-platform-in-list, msm8974 msm8226 msm8610), true)
common_flags += -DVENUS_COLOR_FORMAT
common_flags += -DMDSS_TARGET
endif
diff --git a/libcopybit/copybit_c2d.cpp b/libcopybit/copybit_c2d.cpp
index b804c21..6973704 100644
--- a/libcopybit/copybit_c2d.cpp
+++ b/libcopybit/copybit_c2d.cpp
@@ -105,7 +105,7 @@
// The following defines can be changed as required i.e. as we encounter
// complex use cases.
-#define MAX_RGB_SURFACES 12 // Max. RGB layers currently supported per draw
+#define MAX_RGB_SURFACES 32 // Max. RGB layers currently supported per draw
#define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw
#define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw
// +1 for the destination surface. We cannot have multiple destination surfaces.
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index 9bf0c45..f7bb192 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -221,6 +221,7 @@
ctx->mOverlay->configDone();
ctx->mRotMgr->configDone();
+ MDPComp::resetIdleFallBack();
return ret;
}
diff --git a/libhwcomposer/hwc_copybit.cpp b/libhwcomposer/hwc_copybit.cpp
index 3f40753..f1bf756 100644
--- a/libhwcomposer/hwc_copybit.cpp
+++ b/libhwcomposer/hwc_copybit.cpp
@@ -158,6 +158,11 @@
return false;
}
+ if (ctx->listStats[dpy].numAppLayers > MAX_NUM_LAYERS) {
+ // Exceeds the maximum number of layers supported by HWC.
+ return false;
+ }
+
bool useCopybitForYUV = canUseCopybitForYUV(ctx);
bool useCopybitForRGB = canUseCopybitForRGB(ctx, list, dpy);
LayerProp *layerProp = ctx->layerProp[dpy];
diff --git a/libhwcomposer/hwc_fbupdate.cpp b/libhwcomposer/hwc_fbupdate.cpp
index 9f91a99..7b34df9 100644
--- a/libhwcomposer/hwc_fbupdate.cpp
+++ b/libhwcomposer/hwc_fbupdate.cpp
@@ -73,6 +73,8 @@
//Request an RGB pipe
ovutils::eDest dest = ov.nextPipe(ovutils::OV_MDP_PIPE_ANY, mDpy);
if(dest == ovutils::OV_INVALID) { //None available
+ ALOGE("%s: No pipes available to configure fb for dpy %d",
+ __FUNCTION__, mDpy);
return false;
}
@@ -171,11 +173,15 @@
//Request left RGB pipe
ovutils::eDest destL = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy);
if(destL == ovutils::OV_INVALID) { //None available
+ ALOGE("%s: No pipes available to configure fb for dpy %d's left"
+ " mixer", __FUNCTION__, mDpy);
return false;
}
//Request right RGB pipe
ovutils::eDest destR = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy);
if(destR == ovutils::OV_INVALID) { //None available
+ ALOGE("%s: No pipes available to configure fb for dpy %d's right"
+ " mixer", __FUNCTION__, mDpy);
return false;
}
@@ -225,7 +231,8 @@
const int halfWidth = (displayFrame.right - displayFrame.left) / 2;
const int height = displayFrame.bottom - displayFrame.top;
- ovutils::Dim dposL(MAX_DISPLAY_DIM - halfWidth,
+ const int halfDpy = ctx->dpyAttr[mDpy].xres / 2;
+ ovutils::Dim dposL(halfDpy - halfWidth,
displayFrame.top,
halfWidth,
height);
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 5ebfade..2547da5 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -16,6 +16,7 @@
* limitations under the License.
*/
+#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
@@ -159,9 +160,6 @@
layer->compositionType = HWC_OVERLAY;
}
}
- mCachedFrame.mdpCount = mCurrentFrame.mdpCount;
- mCachedFrame.cacheCount = mCurrentFrame.fbCount;
- mCachedFrame.layerCount = ctx->listStats[mDpy].numAppLayers;
}
/*
@@ -204,13 +202,11 @@
return true;
}
MDPComp::FrameInfo::FrameInfo() {
- layerCount = 0;
- reset();
+ reset(0);
}
-void MDPComp::FrameInfo::reset() {
-
- for(int i = 0 ; i < MAX_PIPES_PER_MIXER && layerCount; i++ ) {
+void MDPComp::FrameInfo::reset(const int& numLayers) {
+ for(int i = 0 ; i < MAX_PIPES_PER_MIXER && numLayers; i++ ) {
if(mdpToLayer[i].pipeInfo) {
delete mdpToLayer[i].pipeInfo;
mdpToLayer[i].pipeInfo = NULL;
@@ -221,28 +217,54 @@
memset(&mdpToLayer, 0, sizeof(mdpToLayer));
memset(&layerToMDP, -1, sizeof(layerToMDP));
- memset(&isFBComposed, 0, sizeof(isFBComposed));
+ memset(&isFBComposed, 1, sizeof(isFBComposed));
- layerCount = 0;
+ layerCount = numLayers;
+ fbCount = numLayers;
mdpCount = 0;
- fbCount = 0;
- needsRedraw = false;
+ needsRedraw = true;
fbZ = 0;
}
+void MDPComp::FrameInfo::map() {
+ // populate layer and MDP maps
+ int mdpIdx = 0;
+ for(int idx = 0; idx < layerCount; idx++) {
+ if(!isFBComposed[idx]) {
+ mdpToLayer[mdpIdx].listIndex = idx;
+ layerToMDP[idx] = mdpIdx++;
+ }
+ }
+}
+
MDPComp::LayerCache::LayerCache() {
reset();
}
void MDPComp::LayerCache::reset() {
- memset(&hnd, 0, sizeof(buffer_handle_t));
+ memset(&hnd, 0, sizeof(hnd));
mdpCount = 0;
cacheCount = 0;
layerCount = 0;
+ fbZ = -1;
}
-bool MDPComp::isWidthValid(hwc_context_t *ctx, hwc_layer_1_t *layer) {
+void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
+ const int numAppLayers = list->numHwLayers - 1;
+ for(int i = 0; i < numAppLayers; i++) {
+ hnd[i] = list->hwLayers[i].handle;
+ }
+}
+void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
+ mdpCount = curFrame.mdpCount;
+ cacheCount = curFrame.fbCount;
+ layerCount = curFrame.layerCount;
+ fbZ = curFrame.fbZ;
+}
+
+bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
+ const int dpy = HWC_DISPLAY_PRIMARY;
private_handle_t *hnd = (private_handle_t *)layer->handle;
if(!hnd) {
@@ -253,32 +275,49 @@
int hw_w = ctx->dpyAttr[mDpy].xres;
int hw_h = ctx->dpyAttr[mDpy].yres;
- hwc_rect_t sourceCrop = layer->sourceCrop;
- hwc_rect_t displayFrame = layer->displayFrame;
-
- hwc_rect_t crop = sourceCrop;
- int crop_w = crop.right - crop.left;
- int crop_h = crop.bottom - crop.top;
-
- hwc_rect_t dst = displayFrame;
- int dst_w = dst.right - dst.left;
- int dst_h = dst.bottom - dst.top;
+ hwc_rect_t crop = layer->sourceCrop;
+ hwc_rect_t dst = layer->displayFrame;
if(dst.left < 0 || dst.top < 0 || dst.right > hw_w || dst.bottom > hw_h) {
- hwc_rect_t scissor = {0, 0, hw_w, hw_h };
- qhwc::calculate_crop_rects(crop, dst, scissor, layer->transform);
- crop_w = crop.right - crop.left;
- crop_h = crop.bottom - crop.top;
+ hwc_rect_t scissor = {0, 0, hw_w, hw_h };
+ qhwc::calculate_crop_rects(crop, dst, scissor, layer->transform);
}
+ int crop_w = crop.right - crop.left;
+ int crop_h = crop.bottom - crop.top;
+ int dst_w = dst.right - dst.left;
+ int dst_h = dst.bottom - dst.top;
+ float w_dscale = ceilf((float)crop_w / (float)dst_w);
+ float h_dscale = ceilf((float)crop_h / (float)dst_h);
+
/* Workaround for MDP HW limitation in DSI command mode panels where
* FPS will not go beyond 30 if buffers on RGB pipes are of width or height
* less than 5 pixels
- * */
-
+ * There is also a HW limitation in MDP: the minimum block size is 2x2.
+ * Fall back to GPU if height is less than 2.
+ */
if((crop_w < 5)||(crop_h < 5))
return false;
+ const uint32_t downscale =
+ qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
+ if(ctx->mMDP.version >= qdutils::MDSS_V5) {
+ /* Workaround for downscales larger than 4x.
+ * Will be removed once decimator block is enabled for MDSS
+ */
+ if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
+ if(crop_w > MAX_DISPLAY_DIM || w_dscale > downscale ||
+ h_dscale > downscale)
+ return false;
+ } else {
+ if(w_dscale > 64 || h_dscale > 64)
+ return false;
+ }
+ } else { //A-family
+ if(w_dscale > downscale || h_dscale > downscale)
+ return false;
+ }
+
return true;
}
@@ -314,31 +353,25 @@
bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ bool ret = true;
if(!isEnabled()) {
ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
- return false;
- }
-
- if(ctx->mExtDispConfiguring) {
+ ret = false;
+ } else if(ctx->mExtDispConfiguring) {
ALOGD_IF( isDebug(),"%s: External Display connection is pending",
__FUNCTION__);
- return false;
- }
-
- if(ctx->listStats[mDpy].needsAlphaScale
- && ctx->mMDP.version < qdutils::MDSS_V5) {
- ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__);
- return false;
- }
-
- if(ctx->isPaddingRound) {
+ ret = false;
+ } else if(ctx->isPaddingRound) {
ctx->isPaddingRound = false;
ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__);
- return false;
+ ret = false;
+ } else if(sIdleFallBack) {
+ ALOGD_IF(isDebug(), "%s: idle fallback",__FUNCTION__);
+ ret = false;
}
- return true;
+ return ret;
}
/* Checks for conditions where all the layers marked for MDP comp cannot be
@@ -346,9 +379,7 @@
bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
hwc_display_contents_1_t* list){
- int numAppLayers = ctx->listStats[mDpy].numAppLayers;
- int mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount;
- int fbNeeded = int(mCurrentFrame.fbCount != 0);
+ const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
if(mDpy > HWC_DISPLAY_PRIMARY){
ALOGD_IF(isDebug(), "%s: Cannot support External display(s)",
@@ -356,25 +387,16 @@
return false;
}
- if(mdpCount > (sMaxPipesPerMixer - fbNeeded)) {
- ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
- return false;
- }
-
- if(pipesNeeded(ctx, list) > getAvailablePipes(ctx)) {
- ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes",__FUNCTION__);
- return false;
- }
-
if(isSkipPresent(ctx, mDpy)) {
- ALOGD_IF(isDebug(), "%s: Skip layers present",__FUNCTION__);
+ ALOGD_IF(isDebug(),"%s: SKIP present: %d",
+ __FUNCTION__,
+ isSkipPresent(ctx, mDpy));
return false;
}
- //FB composition on idle timeout
- if(sIdleFallBack) {
- sIdleFallBack = false;
- ALOGD_IF(isDebug(), "%s: idle fallback",__FUNCTION__);
+ if(ctx->listStats[mDpy].needsAlphaScale
+ && ctx->mMDP.version < qdutils::MDSS_V5) {
+ ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__);
return false;
}
@@ -385,16 +407,108 @@
hwc_layer_1_t* layer = &list->hwLayers[i];
private_handle_t *hnd = (private_handle_t *)layer->handle;
- if(layer->transform & HWC_TRANSFORM_ROT_90 && !isYuvBuffer(hnd)) {
+ if((layer->transform & HWC_TRANSFORM_ROT_90) && !isYuvBuffer(hnd)) {
ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__);
return false;
}
- if(!isYuvBuffer(hnd) && !isWidthValid(ctx,layer)) {
- ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",__FUNCTION__);
+ if(!isValidDimension(ctx,layer)) {
+ ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
+ __FUNCTION__);
return false;
}
}
+
+ //If all above hard conditions are met we can do full or partial MDP comp.
+ bool ret = false;
+ if(fullMDPComp(ctx, list)) {
+ ret = true;
+ } else if (partialMDPComp(ctx, list)) {
+ ret = true;
+ }
+ return ret;
+}
+
+bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+ //Setup mCurrentFrame
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
+ mCurrentFrame.fbCount = 0;
+ mCurrentFrame.fbZ = -1;
+ memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed));
+
+ int mdpCount = mCurrentFrame.mdpCount;
+ if(mdpCount > sMaxPipesPerMixer) {
+ ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
+ return false;
+ }
+
+ int numPipesNeeded = pipesNeeded(ctx, list);
+ int availPipes = getAvailablePipes(ctx);
+
+ if(numPipesNeeded > availPipes) {
+ ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d",
+ __FUNCTION__, numPipesNeeded, availPipes);
+ return false;
+ }
+
+ return true;
+}
+
+bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
+{
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ //Setup mCurrentFrame
+ mCurrentFrame.reset(numAppLayers);
+ updateLayerCache(ctx, list);
+ updateYUV(ctx, list);
+ batchLayers(); //sets up fbZ also
+
+ int mdpCount = mCurrentFrame.mdpCount;
+ if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used
+ ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
+ return false;
+ }
+
+ int numPipesNeeded = pipesNeeded(ctx, list);
+ int availPipes = getAvailablePipes(ctx);
+
+ if(numPipesNeeded > availPipes) {
+ ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d",
+ __FUNCTION__, numPipesNeeded, availPipes);
+ return false;
+ }
+
+ return true;
+}
+
+bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list){
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ mCurrentFrame.reset(numAppLayers);
+ updateYUV(ctx, list);
+ int mdpCount = mCurrentFrame.mdpCount;
+ int fbNeeded = int(mCurrentFrame.fbCount != 0);
+
+ if(!isYuvPresent(ctx, mDpy)) {
+ return false;
+ }
+
+ if(!mdpCount)
+ return false;
+
+ if(mdpCount > (sMaxPipesPerMixer - fbNeeded)) {
+ ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
+ return false;
+ }
+
+ int numPipesNeeded = pipesNeeded(ctx, list);
+ int availPipes = getAvailablePipes(ctx);
+ if(numPipesNeeded > availPipes) {
+ ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d",
+ __FUNCTION__, numPipesNeeded, availPipes);
+ return false;
+ }
+
return true;
}
@@ -411,24 +525,12 @@
return false;
}
- if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
- const uint32_t downscale =
- qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
- hwc_rect_t crop = layer->sourceCrop;
- hwc_rect_t dst = layer->displayFrame;
- int cWidth = crop.right - crop.left;
- int cHeight = crop.bottom - crop.top;
- int dWidth = dst.right - dst.left;
- int dHeight = dst.bottom - dst.top;
-
- if(layer->transform & HAL_TRANSFORM_ROT_90) {
- swap(cWidth, cHeight);
- }
-
- if(cWidth > MAX_DISPLAY_DIM || (cWidth/dWidth) > downscale ||
- (cHeight/dHeight) > downscale)
- return false;
+ if(!isValidDimension(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
+ __FUNCTION__);
+ return false;
}
+
return true;
}
@@ -441,9 +543,14 @@
int maxBatchCount = 0;
/* All or Nothing is cached. No batching needed */
- if(!mCurrentFrame.fbCount ||
- (mCurrentFrame.fbCount == mCurrentFrame.layerCount))
+ if(!mCurrentFrame.fbCount) {
+ mCurrentFrame.fbZ = -1;
return;
+ }
+ if(!mCurrentFrame.mdpCount) {
+ mCurrentFrame.fbZ = 0;
+ return;
+ }
/* Search for max number of contiguous (cached) layers */
int i = 0;
@@ -455,6 +562,7 @@
if(count > maxBatchCount) {
maxBatchCount = count;
maxBatchStart = i - count;
+ mCurrentFrame.fbZ = maxBatchStart;
}
if(i < mCurrentFrame.layerCount) i++;
}
@@ -469,6 +577,8 @@
}
mCurrentFrame.fbCount = maxBatchCount;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
+ mCurrentFrame.fbCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
@@ -480,23 +590,19 @@
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
int numCacheableLayers = 0;
- if((list->flags & HWC_GEOMETRY_CHANGED) || (isSkipPresent(ctx, mDpy))) {
- ALOGD_IF(isDebug(),"%s: No Caching: \
- GEOMETRY change: %d SKIP present: %d", __FUNCTION__,
- (list->flags & HWC_GEOMETRY_CHANGED),isSkipPresent(ctx, mDpy));
- mCachedFrame.reset();
- return;
- }
-
for(int i = 0; i < numAppLayers; i++) {
if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
numCacheableLayers++;
mCurrentFrame.isFBComposed[i] = true;
} else {
+ mCurrentFrame.isFBComposed[i] = false;
mCachedFrame.hnd[i] = list->hwLayers[i].handle;
}
}
+
mCurrentFrame.fbCount = numCacheableLayers;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
+ mCurrentFrame.fbCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers);
}
@@ -517,14 +623,6 @@
return numAvailable;
}
-void MDPComp::resetFrameForFB(hwc_context_t* ctx,
- hwc_display_contents_1_t* list) {
- mCurrentFrame.fbCount = mCurrentFrame.layerCount;
- memset(&mCurrentFrame.isFBComposed, 1,
- sizeof(mCurrentFrame.isFBComposed));
- mCurrentFrame.needsRedraw = true;
-}
-
void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list) {
int nYuvCount = ctx->listStats[mDpy].yuvCount;
@@ -544,22 +642,23 @@
}
}
}
+
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
+ mCurrentFrame.fbCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
}
-int MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
- int fbZOrder = -1;
-
+bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
if(!allocLayerPipes(ctx, list)) {
ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
- goto fn_exit;
+ return false;
}
+ bool fbBatch = false;
for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
- index++) {
+ index++) {
if(!mCurrentFrame.isFBComposed[index]) {
-
int mdpIndex = mCurrentFrame.layerToMDP[index];
hwc_layer_1_t* layer = &list->hwLayers[index];
@@ -569,80 +668,105 @@
if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
layer %d",__FUNCTION__, index);
- goto fn_exit;
+ return false;
}
- } else if(fbZOrder < 0) {
- fbZOrder = mdpNextZOrder++;
- };
+ } else if(fbBatch == false) {
+ mdpNextZOrder++;
+ fbBatch = true;
+ }
}
- return fbZOrder;
+ return true;
+}
- fn_exit:
- //Complete fallback to FB
- resetFrameForFB(ctx, list);
- return 0;
+bool MDPComp::programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+ if(!allocLayerPipes(ctx, list)) {
+ ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
+ return false;
+ }
+ //If we are in this block, the frame has both YUV and RGB layers
+ int mdpIdx = 0;
+ for (int index = 0; index < mCurrentFrame.layerCount; index++) {
+ if(!mCurrentFrame.isFBComposed[index]) {
+ hwc_layer_1_t* layer = &list->hwLayers[index];
+ int mdpIndex = mCurrentFrame.layerToMDP[index];
+ MdpPipeInfo* cur_pipe =
+ mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
+ cur_pipe->zOrder = mdpIdx++;
+
+ if(configure(ctx, layer,
+ mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
+ ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
+ layer %d",__FUNCTION__, index);
+ return false;
+ }
+ }
+ }
+ return true;
}
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
//reset old data
- mCurrentFrame.reset();
+ const int numLayers = ctx->listStats[mDpy].numAppLayers;
+ mCurrentFrame.reset(numLayers);
+ //Hard conditions, if not met, cannot do MDP comp
if(!isFrameDoable(ctx)) {
ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
__FUNCTION__);
+ mCurrentFrame.reset(numLayers);
+ mCachedFrame.cacheAll(list);
+ mCachedFrame.updateCounts(mCurrentFrame);
return 0;
}
- mCurrentFrame.layerCount = ctx->listStats[mDpy].numAppLayers;
-
- //Iterate layer list for cached layers
- updateLayerCache(ctx, list);
-
- //Add YUV layers to cached list
- updateYUV(ctx, list);
-
- //Optimze for bypass
- batchLayers();
-
- //list is already parsed / batched for optimal mixed mode composition.
//Check whether layers marked for MDP Composition is actually doable.
- if(!isFullFrameDoable(ctx, list)){
+ if(isFullFrameDoable(ctx, list)){
+ mCurrentFrame.map();
+ //Acquire and Program MDP pipes
+ if(!programMDP(ctx, list)) {
+ mCurrentFrame.reset(numLayers);
+ mCachedFrame.cacheAll(list);
+ } else { //Success
+ //Any change in composition types needs an FB refresh
+ mCurrentFrame.needsRedraw = false;
+ if(mCurrentFrame.fbCount &&
+ ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
+ (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
+ (mCurrentFrame.fbZ != mCachedFrame.fbZ) ||
+ (!mCurrentFrame.mdpCount) ||
+ (list->flags & HWC_GEOMETRY_CHANGED) ||
+ isSkipPresent(ctx, mDpy) ||
+ (mDpy > HWC_DISPLAY_PRIMARY))) {
+ mCurrentFrame.needsRedraw = true;
+ }
+ }
+ } else if(isOnlyVideoDoable(ctx, list)) {
//All layers marked for MDP comp cannot be bypassed.
//Try to compose atleast YUV layers through MDP comp and let
//all the RGB layers compose in FB
- resetFrameForFB(ctx, list);
- updateYUV(ctx, list);
- }
+ //Destination over
+ mCurrentFrame.fbZ = -1;
+ if(mCurrentFrame.fbCount)
+ mCurrentFrame.fbZ = ctx->listStats[mDpy].yuvCount;
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
-
- if(mCurrentFrame.mdpCount) {
- // populate layer and MDP maps
- for(int idx = 0, mdpIdx = 0; idx < mCurrentFrame.layerCount; idx++) {
- if(!mCurrentFrame.isFBComposed[idx]) {
- mCurrentFrame.mdpToLayer[mdpIdx].listIndex = idx;
- mCurrentFrame.layerToMDP[idx] = mdpIdx++;
- }
+ mCurrentFrame.map();
+ if(!programYUV(ctx, list)) {
+ mCurrentFrame.reset(numLayers);
+ mCachedFrame.cacheAll(list);
}
- //Acquire and Program MDP pipes
- mCurrentFrame.fbZ = programMDP(ctx, list);
- }
-
- /* Any change in composition types needs an FB refresh*/
- if(mCurrentFrame.fbCount &&
- ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
- (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
- !mCurrentFrame.mdpCount)) {
- mCurrentFrame.needsRedraw = true;
+ } else {
+ mCurrentFrame.reset(numLayers);
+ mCachedFrame.cacheAll(list);
}
//UpdateLayerFlags
setMDPCompLayerFlags(ctx, list);
+ mCachedFrame.updateCounts(mCurrentFrame);
if(isDebug()) {
+ ALOGD("GEOMETRY change: %d", (list->flags & HWC_GEOMETRY_CHANGED));
android::String8 sDump("");
dump(sDump);
ALOGE("%s",sDump.string());
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index 7970cd3..a0255b7 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -51,6 +51,7 @@
static void timeout_handler(void *udata);
/* Initialize MDP comp*/
static bool init(hwc_context_t *ctx);
+ static void resetIdleFallBack() { sIdleFallBack = false; }
protected:
enum ePipeType {
@@ -93,7 +94,8 @@
/* c'tor */
FrameInfo();
/* clear old frame data */
- void reset();
+ void reset(const int& numLayers);
+ void map();
};
/* cached data */
@@ -101,12 +103,15 @@
int layerCount;
int mdpCount;
int cacheCount;
+ int fbZ;
buffer_handle_t hnd[MAX_NUM_LAYERS];
/* c'tor */
LayerCache();
/* clear caching info*/
void reset();
+ void cacheAll(hwc_display_contents_1_t* list);
+ void updateCounts(const FrameInfo&);
};
/* No of pipes needed for Framebuffer */
@@ -121,7 +126,6 @@
virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
PipeLayerPair& pipeLayerPair) = 0;
-
/* set/reset flags for MDPComp */
void setMDPCompLayerFlags(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
@@ -132,6 +136,12 @@
bool isFrameDoable(hwc_context_t *ctx);
/* checks for conditions where RGB layers cannot be bypassed */
bool isFullFrameDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* checks if full MDP comp can be done */
+ bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* check if we can use layer cache to do at least partial MDP comp */
+ bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* checks for conditions where only video can be bypassed */
+ bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* checks for conditions where YUV layers cannot be bypassed */
bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
@@ -141,20 +151,18 @@
static bool isDebug() { return sDebugLogs ? true : false; };
/* Is feature enabled */
static bool isEnabled() { return sEnabled; };
- /* checks for mdp comp width limitation */
- bool isWidthValid(hwc_context_t *ctx, hwc_layer_1_t *layer);
+ /* checks for mdp comp dimension limitation */
+ bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
/* tracks non updating layers*/
void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
- /* resets cache for complete fallback */
- void resetFrameForFB(hwc_context_t* ctx, hwc_display_contents_1_t* list);
/* optimize layers for mdp comp*/
void batchLayers();
/* gets available pipes for mdp comp */
int getAvailablePipes(hwc_context_t* ctx);
/* updates cache map with YUV info */
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list);
- int programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
-
+ bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list);
int mDpy;
static bool sEnabled;
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 8ea9767..ca6136a 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -631,6 +631,12 @@
if(transform & HWC_TRANSFORM_ROT_90) {
ovutils::setMdpFlags(mdpFlags,
ovutils::OV_MDP_SOURCE_ROTATED_90);
+ // enable bandwidth compression only if src width < MAX_DISPLAY_DIM (2048)
+ if(qdutils::MDPVersion::getInstance().supportsBWC() &&
+ hnd->width < qdutils::MAX_DISPLAY_DIM) {
+ ovutils::setMdpFlags(mdpFlags,
+ ovutils::OV_MDSS_MDP_BWC_EN);
+ }
}
}
@@ -844,7 +850,7 @@
//Not needed if the layer is confined to one half of the screen.
//If rotator has been used then it has also done the flips, so ignore them.
if((orient & OVERLAY_TRANSFORM_FLIP_V) && lDest != OV_INVALID
- && rDest != OV_INVALID && rot == NULL) {
+ && rDest != OV_INVALID && (*rot) == NULL) {
hwc_rect_t new_cropR;
new_cropR.left = tmp_cropL.left;
new_cropR.right = new_cropR.left + (tmp_cropR.right - tmp_cropR.left);
@@ -880,8 +886,8 @@
if(rDest != OV_INVALID) {
PipeArgs pargR(mdpFlagsR, whf, z, isFg,
static_cast<eRotFlags>(rotFlags));
- tmp_dstR.right = tmp_dstR.right - tmp_dstR.left;
- tmp_dstR.left = 0;
+ tmp_dstR.right = tmp_dstR.right - hw_w/2;
+ tmp_dstR.left = tmp_dstR.left - hw_w/2;
if(configMdp(ctx->mOverlay, pargR, orient,
tmp_cropR, tmp_dstR, metadata, rDest) < 0) {
ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
diff --git a/liboverlay/overlayMdssRot.cpp b/liboverlay/overlayMdssRot.cpp
index 1fabdca..6f2b564 100644
--- a/liboverlay/overlayMdssRot.cpp
+++ b/liboverlay/overlayMdssRot.cpp
@@ -20,6 +20,8 @@
#include "overlayUtils.h"
#include "overlayRotator.h"
+#define DEBUG_MDSS_ROT 0
+
#ifdef VENUS_COLOR_FORMAT
#include <media/msm_media_info.h>
#else
@@ -252,7 +254,11 @@
ovutils::Whf destWhf(mRotInfo.dst_rect.w, mRotInfo.dst_rect.h,
mRotInfo.src.format); //mdss src and dst formats are same.
- opBufSize = Rotator::calcOutputBufSize(destWhf);
+ if (mRotInfo.flags & ovutils::OV_MDSS_MDP_BWC_EN) {
+ opBufSize = calcCompressedBufSize();
+ } else {
+ opBufSize = Rotator::calcOutputBufSize(destWhf);
+ }
if (mRotInfo.flags & utils::OV_MDP_SECURE_OVERLAY_SESSION)
opBufSize = utils::align(opBufSize, SIZE_1M);
@@ -265,4 +271,23 @@
ovutils::getDump(buf, len, "MdssRotData", mRotData);
}
+// Calculate the compressed o/p buffer size for BWC
+uint32_t MdssRot::calcCompressedBufSize() {
+ uint32_t bufSize = 0;
+ int aWidth = ovutils::align(mRotInfo.src_rect.w, 64);
+ int aHeight = ovutils::align(mRotInfo.src_rect.h, 4);
+ int rau_cnt = aWidth/64;
+ int stride0 = (64 * 4 * rau_cnt) + rau_cnt/8;
+ int stride1 = (64 * 2 * rau_cnt) + rau_cnt/8;
+ int stride0_off = (aHeight/4);
+ int stride1_off = (aHeight/2);
+
+ //New o/p size for BWC
+ bufSize = (stride0 * stride0_off + stride1 * stride1_off) +
+ (rau_cnt * 2 * (stride0_off + stride1_off));
+ ALOGD_IF(DEBUG_MDSS_ROT, "%s: width = %d, height = %d raucount = %d"
+ "opBufSize = %d ", __FUNCTION__, aWidth, aHeight, rau_cnt, bufSize);
+ return bufSize;
+}
+
} // namespace overlay
diff --git a/liboverlay/overlayRotator.h b/liboverlay/overlayRotator.h
index c02dfba..120721c 100644
--- a/liboverlay/overlayRotator.h
+++ b/liboverlay/overlayRotator.h
@@ -198,6 +198,8 @@
/* Calculates the rotator's o/p buffer size post the transform calcs and
* knowing the o/p format depending on whether fastYuv is enabled or not */
uint32_t calcOutputBufSize();
+ // Calculate the compressed o/p buffer size for BWC
+ uint32_t calcCompressedBufSize();
/* MdssRot info structure */
mdp_overlay mRotInfo;
diff --git a/liboverlay/overlayUtils.h b/liboverlay/overlayUtils.h
index ceb238d..0893328 100644
--- a/liboverlay/overlayUtils.h
+++ b/liboverlay/overlayUtils.h
@@ -262,6 +262,7 @@
OV_MDP_FLIP_V = MDP_FLIP_UD,
OV_MDSS_MDP_RIGHT_MIXER = MDSS_MDP_RIGHT_MIXER,
OV_MDP_PP_EN = MDP_OVERLAY_PP_CFG_EN,
+ OV_MDSS_MDP_BWC_EN = MDP_BWC_EN,
};
enum eZorder {
diff --git a/libqdutils/mdp_version.cpp b/libqdutils/mdp_version.cpp
index bf65763..86e744d 100644
--- a/libqdutils/mdp_version.cpp
+++ b/libqdutils/mdp_version.cpp
@@ -122,5 +122,9 @@
return mMDPDownscale;
}
+bool MDPVersion::supportsBWC() {
+ // BWC - Bandwidth Compression
+ return (mFeatures & MDP_BWC_EN);
+}
}; //namespace qdutils
diff --git a/libqdutils/mdp_version.h b/libqdutils/mdp_version.h
index 8acbeab..c34c0a4 100644
--- a/libqdutils/mdp_version.h
+++ b/libqdutils/mdp_version.h
@@ -83,6 +83,7 @@
uint8_t getDMAPipes() { return mDMAPipes; }
bool supportsDecimation();
uint32_t getMaxMDPDownscale();
+ bool supportsBWC();
private:
int mMDPVersion;
char mPanelType;