libhwcomposer: Enhance mixed mode composition.

While marking layers for caching, ignore the z-order where
possible to widen the scope of mixed mode composition.
It is safe to ignore the z-order if a static layer has no
overlapping region with the updating layers that lie between
it and the contiguous batch of static layers.
- If a layer above the batch does not overlap any updating
  layer in between, push it into the batch.
- If that fails, but the layers below (the existing batch) do
  not overlap the updating layers above them, bring the batch
  to the top and adjust the FB z-order accordingly, as in the
  example below.
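
For illustration, consider a hypothetical stack (z-order bottom
to top): A(static), B(updating), C(static), D(static).
- If C does not overlap B, C (and likewise D) is simply pushed
  into the batch {A}; the FB stays below B at z-order 0.
- If C overlaps B but A does not, the batch {A, C} is brought
  to the top: it now composes above B and the FB z-order is
  bumped past B.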

Change-Id: I4882a750d0a9a2e11272fcbb146202160673d04f
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 919aa8e..ff33e94 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -600,7 +600,7 @@
     }
 
     updateYUV(ctx, list, false /*secure only*/);
-    bool ret = batchLayers(ctx, list); //sets up fbZ also
+    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
     if(!ret) {
         ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
         return false;
@@ -702,14 +702,132 @@
     return true;
 }
 
-bool MDPComp::batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
-    /* Idea is to keep as many contiguous non-updating(cached) layers in FB and
-     * send rest of them through MDP. NEVER mark an updating layer for caching.
-     * But cached ones can be marked for MDP*/
+/* Starts at fromIndex and checks each cached layer up to toIndex for
+ * overlap with any updating layer above it in z-order within the batch.
+ * Returns false if any intersection is found, true otherwise. */
+bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
+        int fromIndex, int toIndex) {
+    for(int i = fromIndex; i < toIndex; i++) {
+        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
+            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+/* Checks if the layer at targetLayerIndex intersects any of the
+ * updating layers between fromIndex and toIndex. Returns true if
+ * an intersection is found. */
+bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
+        int fromIndex, int toIndex, int targetLayerIndex) {
+    for(int i = fromIndex; i <= toIndex; i++) {
+        if(!mCurrentFrame.isFBComposed[i]) {
+            if(areLayersIntersecting(&list->hwLayers[i],
+                        &list->hwLayers[targetLayerIndex]))  {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+int MDPComp::getBatch(hwc_display_contents_1_t* list,
+        int& maxBatchStart, int& maxBatchEnd,
+        int& maxBatchCount) {
+    int i = 0;
+    int updatingLayersAbove = 0; //Updating layer count in middle of batch
+    int fbZOrder = -1;
+    while (i < mCurrentFrame.layerCount) {
+        int batchCount = 0;
+        int batchStart = i;
+        int batchEnd = i;
+        int fbZ = batchStart;
+        int firstZReverseIndex = -1;
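+        //fbZ is where the FB will sit if this batch wins; it climbs past
+        //updating layers whenever the batch is pushed to a higher z-order.
+        //firstZReverseIndex records the first layer that moved the batch up,
+        //so a failed scan can restart from that point.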
+        while(i < mCurrentFrame.layerCount) {
+            if(!mCurrentFrame.isFBComposed[i]) {
+                if(!batchCount) {
+                    i++;
+                    break;
+                }
+                updatingLayersAbove++;
+                i++;
+                continue;
+            } else {
+                if(mCurrentFrame.drop[i]) {
+                    i++;
+                    continue;
+                } else if(updatingLayersAbove <= 0) {
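+                    //No updating layers between the batch and this layer;
+                    //extend the batch in place without touching fbZ.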
+                    batchCount++;
+                    batchEnd = i;
+                    i++;
+                    continue;
+                } else { //Layer is FBComposed, not dropped & updatingLayersAbove > 0
+
+                    // We already have updating layers in between. If layer i
+                    // does not overlap any of the updating layers between
+                    // batch-start and i, we can add layer i to the batch.
+                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
+                        batchCount++;
+                        batchEnd = i;
+                        i++;
+                        continue;
+                    } else if(canPushBatchToTop(list, batchStart, i)) {
+                        //If none of the non-updating layers in this batch
+                        //overlaps the updating layers above it in z-order,
+                        //we can safely move the batch to a higher z-order.
+                        //Increment fbZ as the batch moves up.
+                        if(firstZReverseIndex < 0) {
+                            firstZReverseIndex = i;
+                        }
+                        batchCount++;
+                        batchEnd = i;
+                        fbZ += updatingLayersAbove;
+                        i++;
+                        updatingLayersAbove = 0;
+                        continue;
+                    } else {
+                        //Both checks failed; restart the scan from here.
+                        if(firstZReverseIndex >= 0) {
+                            i = firstZReverseIndex;
+                        }
+                        break;
+                    }
+                }
+            }
+        }
+        if(batchCount > maxBatchCount) {
+            maxBatchCount = batchCount;
+            maxBatchStart = batchStart;
+            maxBatchEnd = batchEnd;
+            fbZOrder = fbZ;
+        }
+    }
+    return fbZOrder;
+}
+
+bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
+        hwc_display_contents_1_t* list) {
+    /* The idea is to keep as many non-updating(cached) layers in FB and
+     * send the rest through MDP. This is done in 2 steps:
+     *   1. Find the maximum contiguous batch of non-updating layers.
+     *   2. See if this batch can be grown for caching by adding more
+     *      cached layers around it, provided they do not overlap any
+     *      of the updating layers in between.
+     * NEVER mark an updating layer for caching.
+     * But cached ones can be marked for MDP */
 
     int maxBatchStart = -1;
     int maxBatchEnd = -1;
     int maxBatchCount = 0;
+    int fbZ = -1;
 
     /* All or Nothing is cached. No batching needed */
     if(!mCurrentFrame.fbCount) {
@@ -721,33 +839,13 @@
         return true;
     }
 
-    /* Search for max number of contiguous (cached) layers excluding dropped
-     * layers */
-    int i = 0;
-    while (i < mCurrentFrame.layerCount) {
-        int count = 0;
-        int start = i;
-        while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) {
-            if(!mCurrentFrame.drop[i])
-                count++;
-            i++;
-        }
-        if(count > maxBatchCount) {
-            maxBatchCount = count;
-            maxBatchStart = start;
-            maxBatchEnd = i - 1;
-            mCurrentFrame.fbZ = maxBatchStart;
-        }
-        if(i < mCurrentFrame.layerCount) i++;
-    }
+    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);
 
-    mCurrentFrame.fbCount = maxBatchCount;
-
-    /* reset rest of the layers lying inside ROI for MDP comp  */
+    /* reset rest of the layers lying inside ROI for MDP comp */
     for(int i = 0; i < mCurrentFrame.layerCount; i++) {
         hwc_layer_1_t* layer = &list->hwLayers[i];
         if((i < maxBatchStart || i > maxBatchEnd) &&
-                                    mCurrentFrame.isFBComposed[i]){
+                mCurrentFrame.isFBComposed[i]){
             if(!mCurrentFrame.drop[i]){
                 //If an unsupported layer is being attempted to
                 //be pulled out we should fail
@@ -759,11 +857,14 @@
         }
     }
 
+    // update the frame data
+    mCurrentFrame.fbZ = fbZ;
+    mCurrentFrame.fbCount = maxBatchCount;
     mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
             mCurrentFrame.fbCount - mCurrentFrame.dropCount;
 
     ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
-             mCurrentFrame.fbCount);
+            mCurrentFrame.fbCount);
 
     return true;
 }
@@ -838,24 +939,24 @@
         return false;
     }
 
-    bool fbBatch = false;
     for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
             index++) {
         if(!mCurrentFrame.isFBComposed[index]) {
             int mdpIndex = mCurrentFrame.layerToMDP[index];
             hwc_layer_1_t* layer = &list->hwLayers[index];
 
+            //Leave fbZ for framebuffer. CACHE/GLES layers go here.
+            if(mdpNextZOrder == mCurrentFrame.fbZ) {
+                mdpNextZOrder++;
+            }
             MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
             cur_pipe->zOrder = mdpNextZOrder++;
 
             if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                          layer %d",__FUNCTION__, index);
                 return false;
             }
-        } else if(fbBatch == false && !mCurrentFrame.drop[index]) {
-                mdpNextZOrder++;
-                fbBatch = true;
         }
     }
 
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index d872cdf..e1839cd 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -168,8 +168,17 @@
     /* tracks non updating layers*/
     void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
     /* optimize layers for mdp comp*/
-    bool batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list);
-    /* updates cache map with YUV info */
+    bool markLayersForCaching(hwc_context_t* ctx,
+            hwc_display_contents_1_t* list);
+    int getBatch(hwc_display_contents_1_t* list,
+            int& maxBatchStart, int& maxBatchEnd,
+            int& maxBatchCount);
+    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
+            int fromIndex, int toIndex);
+    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
+            int fromIndex, int toIndex, int targetLayerIndex);
+
+    /* updates cache map with YUV info */
     void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
             bool secureOnly);
     bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 47afa85..225db5e 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -923,6 +923,14 @@
     crop_b -= crop_h * bottomCutRatio;
 }
 
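+/* Returns true if the displayFrame rects of the two layers intersect. */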
+bool areLayersIntersecting(const hwc_layer_1_t* layer1,
+        const hwc_layer_1_t* layer2) {
+    hwc_rect_t irect = getIntersection(layer1->displayFrame,
+            layer2->displayFrame);
+    return isValidRect(irect);
+}
+
 bool isValidRect(const hwc_rect& rect)
 {
    return ((rect.bottom > rect.top) && (rect.right > rect.left)) ;
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 3b5d3cb..4ed86dd 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -186,13 +186,14 @@
 void dumpsys_log(android::String8& buf, const char* fmt, ...);
 
 int getExtOrientation(hwc_context_t* ctx);
-
 bool isValidRect(const hwc_rect_t& rect);
 void deductRect(const hwc_layer_1_t* layer, hwc_rect_t& irect);
 hwc_rect_t getIntersection(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
 hwc_rect_t getUnion(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
 void optimizeLayerRects(hwc_context_t *ctx,
-                        const hwc_display_contents_1_t *list, const int& dpy);
+        const hwc_display_contents_1_t *list, const int& dpy);
+bool areLayersIntersecting(const hwc_layer_1_t* layer1,
+        const hwc_layer_1_t* layer2);
 
 /* Calculates the destination position based on the action safe rectangle */
 void getActionSafePosition(hwc_context_t *ctx, int dpy, hwc_rect_t& dst);