initial GL libraries for msm8960

Change-Id: I16451c70a079894ac326d3564d96f1fbafcd4f1b
Signed-off-by: Iliyan Malchev <malchev@google.com>
diff --git a/libgralloc/Android.mk b/libgralloc/Android.mk
new file mode 100644
index 0000000..5377d86
--- /dev/null
+++ b/libgralloc/Android.mk
@@ -0,0 +1,75 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use this flag until pmem/ashmem is implemented in the new gralloc
+LOCAL_PATH := $(call my-dir)
+
+# HAL module implementation, not prelinked and stored in
+# hw/<GRALLOC_HARDWARE_MODULE_ID>.<ro.board.platform>.so
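+# e.g. gralloc.msm8960.so when TARGET_BOARD_PLATFORM is msm8960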
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
+LOCAL_SHARED_LIBRARIES := liblog libcutils libGLESv1_CM libutils libmemalloc libQcomUI
+LOCAL_SHARED_LIBRARIES += libgenlock
+
+LOCAL_C_INCLUDES += hardware/qcom/display/libgenlock
+LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
+LOCAL_SRC_FILES :=  framebuffer.cpp \
+                    gpu.cpp         \
+                    gralloc.cpp     \
+                    mapper.cpp
+
+LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\" -DHOST -DDEBUG_CALC_FPS
+
+ifeq ($(call is-board-platform-in-list,msm7627_surf msm7627_6x),true)
+    LOCAL_CFLAGS += -DTARGET_MSM7x27
+endif
+
+ifeq ($(TARGET_HAVE_HDMI_OUT),true)
+    LOCAL_CFLAGS += -DHDMI_DUAL_DISPLAY
+    LOCAL_C_INCLUDES += hardware/qcom/display/liboverlay
+    LOCAL_SHARED_LIBRARIES += liboverlay
+endif
+
+ifeq ($(TARGET_USES_SF_BYPASS),true)
+    LOCAL_CFLAGS += -DSF_BYPASS
+endif
+
+ifeq ($(TARGET_GRALLOC_USES_ASHMEM),true)
+    LOCAL_CFLAGS += -DUSE_ASHMEM
+endif
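+# Illustrative BoardConfig.mk settings consumed by this makefile (example
+# values only, not part of this change):
+#   TARGET_HAVE_HDMI_OUT := true
+#   TARGET_USES_SF_BYPASS := true
+#   TARGET_GRALLOC_USES_ASHMEM := false
+#   TARGET_USES_ION := true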
+
+include $(BUILD_SHARED_LIBRARY)
+
+#MemAlloc Library
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
+LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
+LOCAL_SHARED_LIBRARIES := liblog libcutils libutils
+LOCAL_SRC_FILES :=  ionalloc.cpp \
+                    alloc_controller.cpp
+LOCAL_CFLAGS:= -DLOG_TAG=\"memalloc\"
+
+ifeq ($(TARGET_USES_ION),true)
+    LOCAL_CFLAGS += -DUSE_ION
+endif
+
+LOCAL_MODULE := libmemalloc
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
diff --git a/libgralloc/MODULE_LICENSE_APACHE2 b/libgralloc/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libgralloc/MODULE_LICENSE_APACHE2
diff --git a/libgralloc/NOTICE b/libgralloc/NOTICE
new file mode 100644
index 0000000..3237da6
--- /dev/null
+++ b/libgralloc/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2008-2009, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/libgralloc/alloc_controller.cpp b/libgralloc/alloc_controller.cpp
new file mode 100644
index 0000000..47cdc68
--- /dev/null
+++ b/libgralloc/alloc_controller.cpp
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cutils/log.h>
+#include <utils/RefBase.h>
+#include <fcntl.h>
+#include "gralloc_priv.h"
+#include "alloc_controller.h"
+#include "memalloc.h"
+#include "ionalloc.h"
+#include "ashmemalloc.h"
+#include "gr.h"
+
+using namespace gralloc;
+using android::sp;
+
+const int GRALLOC_HEAP_MASK = GRALLOC_USAGE_PRIVATE_ADSP_HEAP      |
+                              GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
+                              GRALLOC_USAGE_PRIVATE_SMI_HEAP       |
+                              GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP    |
+                              GRALLOC_USAGE_PRIVATE_IOMMU_HEAP     |
+                              GRALLOC_USAGE_PRIVATE_MM_HEAP        |
+                              GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP |
+                              GRALLOC_USAGE_PRIVATE_CAMERA_HEAP;
+
+
+//Common functions
+static bool canFallback(int compositionType, int usage, bool triedSystem)
+{
+    // Fallback to system heap when alloc fails unless
+    // 1. Composition type is MDP
+    // 2. Alloc from system heap was already tried
+    // 3. The heap type is requested explicitly
+    // 4. The heap type is protected
+    // 5. The buffer is meant for external display only
+
+    if(compositionType == MDP_COMPOSITION)
+        return false;
+    if(triedSystem)
+        return false;
+    if(usage & (GRALLOC_HEAP_MASK | GRALLOC_USAGE_PROTECTED))
+        return false;
+    if(usage & GRALLOC_USAGE_EXTERNAL_ONLY)
+        return false;
+    //Return true by default
+    return true;
+}
+
+static bool useUncached(int usage)
+{
+    // System heaps cannot be uncached
+    if(usage & (GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP |
+                GRALLOC_USAGE_PRIVATE_IOMMU_HEAP))
+        return false;
+    if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
+        return true;
+    return false;
+}
+
+sp<IAllocController> IAllocController::sController = NULL;
+sp<IAllocController> IAllocController::getInstance(bool useMasterHeap)
+{
+    if(sController == NULL) {
+#ifdef USE_ION
+        sController = new IonController();
+#else
+        if(useMasterHeap)
+            sController = new PmemAshmemController();
+        else
+            sController = new PmemKernelController();
+#endif
+    }
+    return sController;
+}
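+// Note: the lazy initialization above takes no lock; it assumes the first
+// call happens before concurrent callers exist (e.g. during gralloc module
+// setup).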
+
+
+//-------------- IonController-----------------------//
+IonController::IonController()
+{
+    mIonAlloc = new IonAlloc();
+}
+
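+// allocate: translates gralloc usage bits into an ION heap mask, attempts
+// the allocation, and retries on the system heap when canFallback() permits.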
+int IonController::allocate(alloc_data& data, int usage,
+        int compositionType)
+{
+    int ionFlags = 0;
+    int ret;
+    bool noncontig = false;
+
+    data.uncached = useUncached(usage);
+    if(usage & GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP)
+        ionFlags |= ION_HEAP(ION_SF_HEAP_ID);
+
+    if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
+        ionFlags |= ION_HEAP(ION_SYSTEM_HEAP_ID);
+        noncontig = true;
+    }
+
+    if(usage & GRALLOC_USAGE_PRIVATE_IOMMU_HEAP)
+        ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
+
+    if(usage & GRALLOC_USAGE_PRIVATE_MM_HEAP)
+        ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
+
+    if(usage & GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP)
+        ionFlags |= ION_HEAP(ION_CP_WB_HEAP_ID);
+
+    if(usage & GRALLOC_USAGE_PRIVATE_CAMERA_HEAP)
+        ionFlags |= ION_HEAP(ION_CAMERA_HEAP_ID);
+
+    if(usage & GRALLOC_USAGE_PROTECTED)
+        ionFlags |= ION_SECURE;
+
+    if(usage & GRALLOC_USAGE_PRIVATE_DO_NOT_MAP)
+        data.allocType  =  private_handle_t::PRIV_FLAGS_NOT_MAPPED;
+
+    // If no heap flags are set, default to the SF + IOMMU heaps so that
+    // composition bypass can work. We can fall back to the system heap
+    // if we run out.
+    if(!ionFlags)
+        ionFlags = ION_HEAP(ION_SF_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID);
+
+    data.flags = ionFlags;
+    ret = mIonAlloc->alloc_buffer(data);
+    // Fallback
+    if(ret < 0 && canFallback(compositionType,
+                              usage,
+                              (ionFlags & ION_HEAP(ION_SYSTEM_HEAP_ID))))
+    {
+        ALOGW("Falling back to system heap");
+        data.flags = ION_HEAP(ION_SYSTEM_HEAP_ID);
+        noncontig = true;
+        ret = mIonAlloc->alloc_buffer(data);
+    }
+
+    if(ret >= 0 ) {
+        data.allocType = private_handle_t::PRIV_FLAGS_USES_ION;
+        if(noncontig)
+            data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+        if(ionFlags & ION_SECURE)
+            data.allocType |= private_handle_t::PRIV_FLAGS_SECURE_BUFFER;
+    }
+
+    return ret;
+}
+
+sp<IMemAlloc> IonController::getAllocator(int flags)
+{
+    sp<IMemAlloc> memalloc;
+    if (flags & private_handle_t::PRIV_FLAGS_USES_ION) {
+        memalloc = mIonAlloc;
+    } else {
+        ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+    }
+
+    return memalloc;
+}
+
+#if 0
+//-------------- PmemKernelController-----------------------//
+
+PmemKernelController::PmemKernelController()
+{
+     mPmemAdspAlloc = new PmemKernelAlloc(DEVICE_PMEM_ADSP);
+     // XXX: Right now, there is no need to maintain an instance
+     // of the SMI allocator as we need it only in a few cases
+}
+
+PmemKernelController::~PmemKernelController()
+{
+}
+
+int PmemKernelController::allocate(alloc_data& data, int usage,
+        int compositionType)
+{
+    int ret = 0;
+    bool adspFallback = false;
+    if (!(usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP))
+        adspFallback = true;
+
+    // Try SMI first
+    if ((usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP) ||
+        (usage & GRALLOC_USAGE_EXTERNAL_DISP)    ||
+        (usage & GRALLOC_USAGE_PROTECTED))
+    {
+        int tempFd = open(DEVICE_PMEM_SMIPOOL, O_RDWR, 0);
+        if(tempFd > 0) {
+            close(tempFd);
+            sp<IMemAlloc> memalloc;
+            memalloc = new PmemKernelAlloc(DEVICE_PMEM_SMIPOOL);
+            ret = memalloc->alloc_buffer(data);
+            if(ret >= 0)
+                return ret;
+            else {
+                if(adspFallback)
+                    ALOGW("Allocation from SMI failed, trying ADSP");
+            }
+        }
+    }
+
+    if ((usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) || adspFallback) {
+        ret = mPmemAdspAlloc->alloc_buffer(data);
+    }
+    return ret;
+}
+
+sp<IMemAlloc> PmemKernelController::getAllocator(int flags)
+{
+    sp<IMemAlloc> memalloc;
+    if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
+        memalloc = mPmemAdspAlloc;
+    else {
+        ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+        memalloc = NULL;
+    }
+
+    return memalloc;
+}
+
+//-------------- PmemAshmemController-----------------------//
+
+PmemAshmemController::PmemAshmemController()
+{
+    mPmemUserspaceAlloc = new PmemUserspaceAlloc();
+    mAshmemAlloc = new AshmemAlloc();
+    mPmemKernelCtrl = new PmemKernelController();
+}
+
+PmemAshmemController::~PmemAshmemController()
+{
+}
+
+int PmemAshmemController::allocate(alloc_data& data, int usage,
+        int compositionType)
+{
+    int ret = 0;
+
+    // Make buffers cacheable by default
+    data.uncached = false;
+
+    // Override if we explicitly need uncached buffers
+    if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
+        data.uncached = true;
+
+    // If ADSP or SMI is requested use the kernel controller
+    if(usage & (GRALLOC_USAGE_PRIVATE_ADSP_HEAP|
+                GRALLOC_USAGE_PRIVATE_SMI_HEAP)) {
+        ret = mPmemKernelCtrl->allocate(data, usage, compositionType);
+        if(ret < 0)
+            ALOGE("%s: Failed to allocate ADSP/SMI memory", __func__);
+        else
+            data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
+        return ret;
+    }
+
+    if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
+        ret = mAshmemAlloc->alloc_buffer(data);
+        if(ret >= 0) {
+            data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+            data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+        }
+        return ret;
+    }
+
+    // If no memory-specific flags are set, default to the EBI heap so that
+    // bypass can work. We can fall back to the system heap if we run out.
+    ret = mPmemUserspaceAlloc->alloc_buffer(data);
+
+    // Fallback
+    if(ret >= 0 ) {
+        data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM;
+    } else if(ret < 0 && canFallback(compositionType, usage, false)) {
+        ALOGW("Falling back to ashmem");
+        ret = mAshmemAlloc->alloc_buffer(data);
+        if(ret >= 0) {
+            data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+            data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+        }
+    }
+
+    return ret;
+}
+
+sp<IMemAlloc> PmemAshmemController::getAllocator(int flags)
+{
+    sp<IMemAlloc> memalloc;
+    if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM)
+        memalloc = mPmemUserspaceAlloc;
+    else if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
+        memalloc = mPmemKernelCtrl->getAllocator(flags);
+    else if (flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM)
+        memalloc = mAshmemAlloc;
+    else {
+        ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+        memalloc = NULL;
+    }
+
+    return memalloc;
+}
+#endif
+
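+// Worked example (illustrative): a 1080x1920 RGBA_8888 request is aligned to
+// 1088x1920 (32-pixel alignment), giving 1088 * 1920 * 4 = 8355840 bytes.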
+size_t getBufferSizeAndDimensions(int width, int height, int format,
+                        int& alignedw, int &alignedh)
+{
+    size_t size;
+
+    alignedw = ALIGN(width, 32);
+    alignedh = ALIGN(height, 32);
+    switch (format) {
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+        case HAL_PIXEL_FORMAT_RGBX_8888:
+        case HAL_PIXEL_FORMAT_BGRA_8888:
+            size = alignedw * alignedh * 4;
+            break;
+        case HAL_PIXEL_FORMAT_RGB_888:
+            size = alignedw * alignedh * 3;
+            break;
+        case HAL_PIXEL_FORMAT_RGB_565:
+        case HAL_PIXEL_FORMAT_RGBA_5551:
+        case HAL_PIXEL_FORMAT_RGBA_4444:
+            size = alignedw * alignedh * 2;
+            break;
+
+            // adreno formats
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:  // NV21
+            size  = ALIGN(alignedw*alignedh, 4096);
+            size += ALIGN(2 * ALIGN(width/2, 32) * ALIGN(height/2, 32), 4096);
+            break;
+        case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:   // NV12
+            // The chroma plane is subsampled,
+            // but the pitch in bytes is unchanged
+            // The GPU needs 4K alignment, but the video decoder needs 8K
+            alignedw = ALIGN(width, 128);
+            size  = ALIGN( alignedw * alignedh, 8192);
+            size += ALIGN( alignedw * ALIGN(height/2, 32), 8192);
+            break;
+        case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
+        case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+        case HAL_PIXEL_FORMAT_YV12:
+            if ((format == HAL_PIXEL_FORMAT_YV12) && ((width&1) || (height&1))) {
+                ALOGE("w or h is odd for the YV12 format");
+                return -EINVAL;
+            }
+            alignedw = ALIGN(width, 16);
+            alignedh = height;
+            if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
+                // The encoder requires a 2K aligned chroma offset.
+                size = ALIGN(alignedw*alignedh, 2048) +
+                       (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
+            } else {
+                size = alignedw*alignedh +
+                    (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
+            }
+            size = ALIGN(size, 4096);
+            break;
+
+        default:
+            ALOGE("unrecognized pixel format: %d", format);
+            return -EINVAL;
+    }
+
+    return size;
+}
+
+// Allocate buffer from width, height and format into a
+// private_handle_t. It is the responsibility of the caller
+// to free the buffer using the free_buffer function
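+// Illustrative call sequence (parameter values are an example only):
+//     private_handle_t *hnd = NULL;
+//     if (alloc_buffer(&hnd, 640, 480, HAL_PIXEL_FORMAT_RGB_565,
+//                      GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) == 0) {
+//         // ... use hnd->base / hnd->fd ...
+//         free_buffer(hnd);
+//     }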
+int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage)
+{
+    alloc_data data;
+    int alignedw, alignedh;
+    android::sp<gralloc::IAllocController> sAlloc =
+        gralloc::IAllocController::getInstance(false);
+    data.base = 0;
+    data.fd = -1;
+    data.offset = 0;
+    data.size = getBufferSizeAndDimensions(w, h, format, alignedw, alignedh);
+    data.align = getpagesize();
+    data.uncached = useUncached(usage);
+    int allocFlags = usage;
+
+    int err = sAlloc->allocate(data, allocFlags, 0);
+    if (0 != err) {
+        ALOGE("%s: allocate failed", __FUNCTION__);
+        return -ENOMEM;
+    }
+
+    private_handle_t* hnd = new private_handle_t(data.fd, data.size,
+                            data.allocType, 0, format, alignedw, alignedh);
+    hnd->base = (int) data.base;
+    hnd->offset = data.offset;
+    hnd->gpuaddr = 0;
+    *pHnd = hnd;
+    return 0;
+}
+
+void free_buffer(private_handle_t *hnd)
+{
+    android::sp<gralloc::IAllocController> sAlloc =
+        gralloc::IAllocController::getInstance(false);
+    if (hnd && hnd->fd > 0) {
+        sp<IMemAlloc> memalloc = sAlloc->getAllocator(hnd->flags);
+        memalloc->free_buffer((void*)hnd->base, hnd->size, hnd->offset, hnd->fd);
+    }
+    if(hnd)
+        delete hnd;
+}
diff --git a/libgralloc/alloc_controller.h b/libgralloc/alloc_controller.h
new file mode 100644
index 0000000..6c907d1
--- /dev/null
+++ b/libgralloc/alloc_controller.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef GRALLOC_ALLOCCONTROLLER_H
+#define GRALLOC_ALLOCCONTROLLER_H
+
+#include <utils/RefBase.h>
+
+namespace gralloc {
+
+    struct alloc_data;
+    class IMemAlloc;
+    class IonAlloc;
+
+    class IAllocController : public android::RefBase {
+
+        public:
+            /* Allocate using a suitable method
+             * Returns the type of buffer allocated
+             */
+            virtual int allocate(alloc_data& data, int usage,
+                    int compositionType) = 0;
+
+            virtual android::sp<IMemAlloc> getAllocator(int flags) = 0;
+
+            virtual ~IAllocController() {};
+
+            static android::sp<IAllocController> getInstance(bool useMasterHeap);
+
+        private:
+            static android::sp<IAllocController> sController;
+
+    };
+
+    class IonController : public IAllocController {
+
+        public:
+            virtual int allocate(alloc_data& data, int usage,
+                    int compositionType);
+
+            virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+            IonController();
+
+        private:
+            android::sp<IonAlloc> mIonAlloc;
+
+    };
+
+    class PmemKernelController : public IAllocController {
+
+        public:
+            virtual int allocate(alloc_data& data, int usage,
+                    int compositionType);
+
+            virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+            PmemKernelController();
+
+            ~PmemKernelController();
+
+        private:
+            android::sp<IMemAlloc> mPmemAdspAlloc;
+
+    };
+
+    // Main pmem controller - this should only
+    // be used within gralloc
+    class PmemAshmemController : public IAllocController {
+
+        public:
+            virtual int allocate(alloc_data& data, int usage,
+                    int compositionType);
+
+            virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+            PmemAshmemController();
+
+            ~PmemAshmemController();
+
+        private:
+            android::sp<IMemAlloc> mPmemUserspaceAlloc;
+            android::sp<IMemAlloc> mAshmemAlloc;
+            android::sp<IAllocController> mPmemKernelCtrl;
+
+    };
+
+} //end namespace gralloc
+#endif // GRALLOC_ALLOCCONTROLLER_H
diff --git a/libgralloc/ashmemalloc.cpp b/libgralloc/ashmemalloc.cpp
new file mode 100644
index 0000000..8397e21
--- /dev/null
+++ b/libgralloc/ashmemalloc.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <cutils/log.h>
+#include <linux/ashmem.h>
+#include <cutils/ashmem.h>
+#include <errno.h>
+#include "ashmemalloc.h"
+
+using gralloc::AshmemAlloc;
+int AshmemAlloc::alloc_buffer(alloc_data& data)
+{
+    int err = 0;
+    int fd = -1;
+    void* base = 0;
+    int offset = 0;
+    char name[ASHMEM_NAME_LEN];
+    snprintf(name, ASHMEM_NAME_LEN, "gralloc-buffer-%p", data.pHandle);
+    int prot = PROT_READ | PROT_WRITE;
+    fd = ashmem_create_region(name, data.size);
+    if (fd < 0) {
+        ALOGE("couldn't create ashmem (%s)", strerror(errno));
+        err = -errno;
+    } else {
+        if (ashmem_set_prot_region(fd, prot) < 0) {
+            ALOGE("ashmem_set_prot_region(fd=%d, prot=%x) failed (%s)",
+                 fd, prot, strerror(errno));
+            close(fd);
+            err = -errno;
+        } else {
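+            // MAP_POPULATE pre-faults the pages and MAP_LOCKED pins them,
+            // so the buffer is fully backed before first use.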
+            base = mmap(0, data.size, prot, MAP_SHARED|MAP_POPULATE|MAP_LOCKED, fd, 0);
+            if (base == MAP_FAILED) {
+                ALOGE("alloc mmap(fd=%d, size=%d, prot=%x) failed (%s)",
+                     fd, data.size, prot, strerror(errno));
+                close(fd);
+                err = -errno;
+            } else {
+                memset((char*)base + offset, 0, data.size);
+            }
+        }
+    }
+    if(err == 0) {
+        data.fd = fd;
+        data.base = base;
+        data.offset = offset;
+        clean_buffer(base, data.size, offset, fd);
+        ALOGD("ashmem: Allocated buffer base:%p size:%d fd:%d",
+                                base, data.size, fd);
+
+    }
+    return err;
+}
+
+int AshmemAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+    ALOGD("ashmem: Freeing buffer base:%p size:%d fd:%d",
+                            base, size, fd);
+    int err = 0;
+
+    if(!base) {
+        ALOGE("Invalid free");
+        return -EINVAL;
+    }
+    err = unmap_buffer(base, size, offset);
+    close(fd);
+    return err;
+}
+
+int AshmemAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+    int err = 0;
+    void *base = 0;
+
+    base = mmap(0, size, PROT_READ| PROT_WRITE,
+            MAP_SHARED|MAP_POPULATE, fd, 0);
+    *pBase = base;
+    if(base == MAP_FAILED) {
+        ALOGE("ashmem: Failed to map memory in the client: %s",
+                                strerror(errno));
+        err = -errno;
+    } else {
+        ALOGD("ashmem: Mapped buffer base:%p size:%d fd:%d",
+                 base, size, fd);
+    }
+    return err;
+}
+
+int AshmemAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    ALOGD("ashmem: Unmapping buffer base: %p size: %d", base, size);
+    int err = munmap(base, size);
+    if(err) {
+        ALOGE("ashmem: Failed to unmap memory at %p: %s",
+                                base, strerror(errno));
+    }
+    return err;
+}
+
+int AshmemAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+    int err = 0;
+    if (ioctl(fd, ASHMEM_CACHE_FLUSH_RANGE, NULL)) {
+        ALOGE("ashmem: ASHMEM_CACHE_FLUSH_RANGE failed fd = %d", fd);
+    }
+
+    return err;
+}
+
diff --git a/libgralloc/ashmemalloc.h b/libgralloc/ashmemalloc.h
new file mode 100644
index 0000000..051dcd1
--- /dev/null
+++ b/libgralloc/ashmemalloc.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_ASHMEMALLOC_H
+#define GRALLOC_ASHMEMALLOC_H
+
+#include "memalloc.h"
+
+namespace gralloc {
+    class AshmemAlloc : public IMemAlloc  {
+
+        public:
+            virtual int alloc_buffer(alloc_data& data);
+
+            virtual int free_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            virtual int map_buffer(void **pBase, size_t size,
+                    int offset, int fd);
+
+            virtual int unmap_buffer(void *base, size_t size,
+                    int offset);
+
+            virtual int clean_buffer(void*base, size_t size,
+                    int offset, int fd);
+
+    };
+}
+#endif /* GRALLOC_ASHMEMALLOC_H */
diff --git a/libgralloc/framebuffer.cpp b/libgralloc/framebuffer.cpp
new file mode 100644
index 0000000..b6b4a8f
--- /dev/null
+++ b/libgralloc/framebuffer.cpp
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/mman.h>
+
+#include <dlfcn.h>
+
+#include <cutils/ashmem.h>
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <utils/Timers.h>
+
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+#include <cutils/atomic.h>
+
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+
+#include <GLES/gl.h>
+
+#include "gralloc_priv.h"
+#include "gr.h"
+#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
+#include <cutils/properties.h>
+#endif
+
+#include <qcom_ui.h>
+
+#define FB_DEBUG 0
+
+#if defined(HDMI_DUAL_DISPLAY)
+#define EVEN_OUT(x) if (x & 0x0001) {x--;}
+using overlay::Overlay;
+/** min of int a, b */
+static inline int min(int a, int b) {
+    return (a<b) ? a : b;
+}
+/** max of int a, b */
+static inline int max(int a, int b) {
+    return (a>b) ? a : b;
+}
+#endif
+
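+// Legend for the per-buffer state tags logged below (assumes the SUB/REF/AVL
+// enum from the private gralloc header is ordered SUB, REF, AVL):
+// 'S' = SUBmitted for post, 'R' = REFerenced by the display, 'A' = AVaiLable.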
+char framebufferStateName[] = {'S', 'R', 'A'};
+
+/*****************************************************************************/
+
+enum {
+    MDDI_PANEL = '1',
+    EBI2_PANEL = '2',
+    LCDC_PANEL = '3',
+    EXT_MDDI_PANEL = '4',
+    TV_PANEL = '5'
+};
+
+enum {
+    PAGE_FLIP = 0x00000001,
+    LOCKED = 0x00000002
+};
+
+struct fb_context_t {
+    framebuffer_device_t  device;
+};
+
+static int neworientation;
+
+/*****************************************************************************/
+
+static void
+msm_copy_buffer(buffer_handle_t handle, int fd,
+                int width, int height, int format,
+                int x, int y, int w, int h);
+
+static int fb_setSwapInterval(struct framebuffer_device_t* dev,
+            int interval)
+{
+    char pval[PROPERTY_VALUE_MAX];
+    property_get("debug.gr.swapinterval", pval, "-1");
+    int property_interval = atoi(pval);
+    if (property_interval >= 0)
+        interval = property_interval;
+
+    fb_context_t* ctx = (fb_context_t*)dev;
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    if (interval < dev->minSwapInterval || interval > dev->maxSwapInterval)
+        return -EINVAL;
+
+    m->swapInterval = interval;
+    return 0;
+}
+
+static int fb_setUpdateRect(struct framebuffer_device_t* dev,
+        int l, int t, int w, int h)
+{
+    if (((w|h) <= 0) || ((l|t)<0))
+        return -EINVAL;
+    fb_context_t* ctx = (fb_context_t*)dev;
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    m->info.reserved[0] = 0x54445055; // "UPDT";
+    m->info.reserved[1] = (uint16_t)l | ((uint32_t)t << 16);
+    m->info.reserved[2] = (uint16_t)(l+w) | ((uint32_t)(t+h) << 16);
+    return 0;
+}
+
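+// disp_loop: consumer thread that pops rendered buffers off m->disp, posts
+// them to the panel via FBIOPUT_VSCREENINFO, and then recycles the previously
+// displayed buffer back to the AVL state.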
+static void *disp_loop(void *ptr)
+{
+    struct qbuf_t nxtBuf;
+    static int cur_buf=-1;
+    private_module_t *m = reinterpret_cast<private_module_t*>(ptr);
+
+    while (1) {
+        pthread_mutex_lock(&(m->qlock));
+
+        // wait (sleep) while the display queue is empty; recheck on wakeup
+        // since pthread_cond_wait can return spuriously
+        while (m->disp.isEmpty()) {
+            pthread_cond_wait(&(m->qpost), &(m->qlock));
+        }
+
+        // dequeue next buff to display and lock it
+        nxtBuf = m->disp.getHeadValue();
+        m->disp.pop();
+        pthread_mutex_unlock(&(m->qlock));
+
+        // post buf out to display synchronously
+        private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>
+                                                (nxtBuf.buf);
+        const size_t offset = hnd->base - m->framebuffer->base;
+        m->info.activate = FB_ACTIVATE_VBL;
+        m->info.yoffset = offset / m->finfo.line_length;
+
+#if defined(HDMI_DUAL_DISPLAY)
+        pthread_mutex_lock(&m->overlayLock);
+        m->orientation = neworientation;
+        m->currentOffset = offset;
+        m->hdmiStateChanged = true;
+        pthread_cond_signal(&(m->overlayPost));
+        pthread_mutex_unlock(&m->overlayLock);
+#endif
+        if (ioctl(m->framebuffer->fd, FBIOPUT_VSCREENINFO, &m->info) == -1) {
+            ALOGE("ERROR FBIOPUT_VSCREENINFO failed; frame not displayed");
+        }
+
+        CALC_FPS();
+
+        if (cur_buf == -1) {
+            pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
+            m->avail[nxtBuf.idx].is_avail = true;
+            m->avail[nxtBuf.idx].state = REF;
+            pthread_cond_broadcast(&(m->avail[nxtBuf.idx].cond));
+            pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
+        } else {
+            pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
+            if (m->avail[nxtBuf.idx].state != SUB) {
+                ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", nxtBuf.idx,
+                    framebufferStateName[m->avail[nxtBuf.idx].state],
+                    framebufferStateName[SUB]);
+            }
+            m->avail[nxtBuf.idx].state = REF;
+            pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
+
+            pthread_mutex_lock(&(m->avail[cur_buf].lock));
+            m->avail[cur_buf].is_avail = true;
+            if (m->avail[cur_buf].state != REF) {
+                ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", cur_buf,
+                    framebufferStateName[m->avail[cur_buf].state],
+                    framebufferStateName[REF]);
+            }
+            m->avail[cur_buf].state = AVL;
+            pthread_cond_broadcast(&(m->avail[cur_buf].cond));
+            pthread_mutex_unlock(&(m->avail[cur_buf].lock));
+        }
+        cur_buf = nxtBuf.idx;
+    }
+    return NULL;
+}
+
+#if defined(HDMI_DUAL_DISPLAY)
+static int closeHDMIChannel(private_module_t* m)
+{
+    Overlay* pTemp = m->pobjOverlay;
+    if(pTemp != NULL)
+        pTemp->closeChannel();
+    return 0;
+}
+
+static void getSecondaryDisplayDestinationInfo(private_module_t* m, overlay_rect&
+                                rect, int& orientation)
+{
+    Overlay* pTemp = m->pobjOverlay;
+    int width = pTemp->getFBWidth();
+    int height = pTemp->getFBHeight();
+    int fbwidth = m->info.xres, fbheight = m->info.yres;
+    rect.x = 0; rect.y = 0;
+    rect.w = width; rect.h = height;
+    int rot = m->orientation;
+    switch(rot) {
+        // ROT_0
+        case 0:
+        // ROT_180
+        case HAL_TRANSFORM_ROT_180:
+            pTemp->getAspectRatioPosition(fbwidth, fbheight,
+                                                   &rect);
+            if(rot ==  HAL_TRANSFORM_ROT_180)
+                orientation = HAL_TRANSFORM_ROT_180;
+            else
+                orientation  = 0;
+            break;
+            // ROT_90
+        case HAL_TRANSFORM_ROT_90:
+            // ROT_270
+        case HAL_TRANSFORM_ROT_270:
+            //Calculate the aspect ratio for the UI in landscape mode.
+            //Width and height are swapped because of the rotation.
+            pTemp->getAspectRatioPosition(fbheight, fbwidth,
+                    &rect);
+
+            if(rot == HAL_TRANSFORM_ROT_90)
+                orientation = HAL_TRANSFORM_ROT_270;
+            else if(rot == HAL_TRANSFORM_ROT_270)
+                orientation = HAL_TRANSFORM_ROT_90;
+            break;
+    }
+    return;
+}
+
+static void *hdmi_ui_loop(void *ptr)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            ptr);
+    while (1) {
+        pthread_mutex_lock(&m->overlayLock);
+        while(!(m->hdmiStateChanged))
+            pthread_cond_wait(&(m->overlayPost), &(m->overlayLock));
+        m->hdmiStateChanged = false;
+        if (m->exitHDMIUILoop) {
+            pthread_mutex_unlock(&m->overlayLock);
+            return NULL;
+        }
+        bool waitForVsync = true;
+        int flags = WAIT_FOR_VSYNC;
+        if (m->pobjOverlay) {
+            Overlay* pTemp = m->pobjOverlay;
+            if (m->hdmiMirroringState == HDMI_NO_MIRRORING)
+                closeHDMIChannel(m);
+            else if(m->hdmiMirroringState == HDMI_UI_MIRRORING) {
+                if (!pTemp->isChannelUP()) {
+                   int alignedW = ALIGN(m->info.xres, 32);
+
+                   private_handle_t const* hnd =
+                      reinterpret_cast<private_handle_t const*>(m->framebuffer);
+                   overlay_buffer_info info;
+                   info.width = alignedW;
+                   info.height = hnd->height;
+                   info.format = hnd->format;
+                   info.size = hnd->size;
+
+                   if (m->trueMirrorSupport)
+                       flags &= ~WAIT_FOR_VSYNC;
+                   // start the overlay Channel for mirroring
+                   // m->enableHDMIOutput corresponds to the fbnum
+                   if (pTemp->startChannel(info, m->enableHDMIOutput,
+                                           false, true, 0, VG0_PIPE, flags)) {
+                        pTemp->setFd(m->framebuffer->fd);
+                        pTemp->setCrop(0, 0, m->info.xres, m->info.yres);
+                   } else
+                       closeHDMIChannel(m);
+                }
+
+                if (pTemp->isChannelUP()) {
+                    overlay_rect destRect;
+                    int rot = 0;
+                    int currOrientation = 0;
+                    getSecondaryDisplayDestinationInfo(m, destRect, rot);
+                    pTemp->getOrientation(currOrientation);
+                    if(rot != currOrientation) {
+                        pTemp->setTransform(rot);
+                    }
+                    EVEN_OUT(destRect.x);
+                    EVEN_OUT(destRect.y);
+                    EVEN_OUT(destRect.w);
+                    EVEN_OUT(destRect.h);
+                    int currentX = 0, currentY = 0;
+                    uint32_t currentW = 0, currentH = 0;
+                    if (pTemp->getPosition(currentX, currentY, currentW, currentH)) {
+                        if ((currentX != destRect.x) || (currentY != destRect.y) ||
+                                (currentW != destRect.w) || (currentH != destRect.h)) {
+                            pTemp->setPosition(destRect.x, destRect.y, destRect.w,
+                                                                    destRect.h);
+                        }
+                    }
+                    if (m->trueMirrorSupport) {
+                        // if video is started the UI channel should be NO_WAIT.
+                        flags = !m->videoOverlay ? WAIT_FOR_VSYNC : 0;
+                        pTemp->updateOverlayFlags(flags);
+                    }
+                    pTemp->queueBuffer(m->currentOffset);
+                }
+            }
+            else
+                closeHDMIChannel(m);
+        }
+        pthread_mutex_unlock(&m->overlayLock);
+    }
+    return NULL;
+}
+
+static int fb_videoOverlayStarted(struct framebuffer_device_t* dev, int started)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    Overlay* pTemp = m->pobjOverlay;
+    if(started != m->videoOverlay) {
+        m->videoOverlay = started;
+        if (!m->trueMirrorSupport) {
+            m->hdmiStateChanged = true;
+            if (started && pTemp) {
+                m->hdmiMirroringState = HDMI_NO_MIRRORING;
+                closeHDMIChannel(m);
+            } else if (m->enableHDMIOutput)
+                m->hdmiMirroringState = HDMI_UI_MIRRORING;
+            pthread_cond_signal(&(m->overlayPost));
+        }
+    }
+    pthread_mutex_unlock(&m->overlayLock);
+    return 0;
+}
+
+static int fb_enableHDMIOutput(struct framebuffer_device_t* dev, int externaltype)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    Overlay* pTemp = m->pobjOverlay;
+    //Check if true mirroring can be supported
+    m->trueMirrorSupport = FrameBufferInfo::getInstance()->canSupportTrueMirroring();
+    m->enableHDMIOutput = externaltype;
+    ALOGE("In fb_enableHDMIOutput: externaltype = %d", m->enableHDMIOutput);
+    if(externaltype) {
+        if (m->trueMirrorSupport) {
+            m->hdmiMirroringState = HDMI_UI_MIRRORING;
+        } else {
+            if(!m->videoOverlay)
+                m->hdmiMirroringState = HDMI_UI_MIRRORING;
+        }
+    } else if (!externaltype && pTemp) {
+        m->hdmiMirroringState = HDMI_NO_MIRRORING;
+        closeHDMIChannel(m);
+    }
+    m->hdmiStateChanged = true;
+    pthread_cond_signal(&(m->overlayPost));
+    pthread_mutex_unlock(&m->overlayLock);
+    return 0;
+}
+
+
+static int fb_setActionSafeWidthRatio(struct framebuffer_device_t* dev, float asWidthRatio)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    m->actionsafeWidthRatio = asWidthRatio;
+    pthread_mutex_unlock(&m->overlayLock);
+    return 0;
+}
+
+static int fb_setActionSafeHeightRatio(struct framebuffer_device_t* dev, float asHeightRatio)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+                    dev->common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    m->actionsafeHeightRatio = asHeightRatio;
+    pthread_mutex_unlock(&m->overlayLock);
+    return 0;
+}
+
+static int fb_orientationChanged(struct framebuffer_device_t* dev, int orientation)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    neworientation = orientation;
+    pthread_mutex_unlock(&m->overlayLock);
+    return 0;
+}
+#endif
+
+static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
+{
+    if (private_handle_t::validate(buffer) < 0)
+        return -EINVAL;
+
+    int nxtIdx, futureIdx = -1;
+    bool reuse;
+    struct qbuf_t qb;
+    fb_context_t* ctx = (fb_context_t*)dev;
+
+    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(buffer);
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+
+    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
+
+        reuse = false;
+        nxtIdx = (m->currentIdx + 1) % m->numBuffers;
+        futureIdx = (nxtIdx + 1) % m->numBuffers;
+
+        if (m->swapInterval == 0) {
+            // if SwapInterval = 0 and no buffers available then reuse
+            // current buf for next rendering so don't post new buffer
+            if (pthread_mutex_trylock(&(m->avail[nxtIdx].lock))) {
+                reuse = true;
+            } else {
+                if (! m->avail[nxtIdx].is_avail)
+                    reuse = true;
+                pthread_mutex_unlock(&(m->avail[nxtIdx].lock));
+            }
+        }
+
+        if(!reuse){
+            // unlock previous ("current") Buffer and lock the new buffer
+            m->base.lock(&m->base, buffer,
+                    private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
+                    0,0, m->info.xres, m->info.yres, NULL);
+
+            // post/queue the new buffer
+            pthread_mutex_lock(&(m->avail[nxtIdx].lock));
+            if (!m->avail[nxtIdx].is_avail) {
+                ALOGE_IF(m->swapInterval != 0,
+                         "Buffer %d not available as expected", nxtIdx);
+            }
+
+            m->avail[nxtIdx].is_avail = false;
+
+            if (m->avail[nxtIdx].state != AVL) {
+                ALOGD("[%d] state %c, expected %c", nxtIdx,
+                    framebufferStateName[m->avail[nxtIdx].state],
+                    framebufferStateName[AVL]);
+            }
+
+            m->avail[nxtIdx].state = SUB;
+            pthread_mutex_unlock(&(m->avail[nxtIdx].lock));
+
+            qb.idx = nxtIdx;
+            qb.buf = buffer;
+            pthread_mutex_lock(&(m->qlock));
+            m->disp.push(qb);
+            pthread_cond_signal(&(m->qpost));
+            pthread_mutex_unlock(&(m->qlock));
+
+            if (m->currentBuffer)
+                m->base.unlock(&m->base, m->currentBuffer);
+
+            m->currentBuffer = buffer;
+            m->currentIdx = nxtIdx;
+        } else {
+            if (m->currentBuffer)
+                m->base.unlock(&m->base, m->currentBuffer);
+            m->base.lock(&m->base, buffer,
+                         private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
+                         0,0, m->info.xres, m->info.yres, NULL);
+            m->currentBuffer = buffer;
+        }
+
+    } else {
+        void* fb_vaddr;
+        void* buffer_vaddr;
+        m->base.lock(&m->base, m->framebuffer,
+                GRALLOC_USAGE_SW_WRITE_RARELY,
+                0, 0, m->info.xres, m->info.yres,
+                &fb_vaddr);
+
+        m->base.lock(&m->base, buffer,
+                GRALLOC_USAGE_SW_READ_RARELY,
+                0, 0, m->info.xres, m->info.yres,
+                &buffer_vaddr);
+
+        // a plain memcpy would also work here:
+        //memcpy(fb_vaddr, buffer_vaddr, m->finfo.line_length * m->info.yres);
+
+        msm_copy_buffer(
+                m->framebuffer, m->framebuffer->fd,
+                m->info.xres, m->info.yres, m->fbFormat,
+                m->info.xoffset, m->info.yoffset,
+                m->info.width, m->info.height);
+
+        m->base.unlock(&m->base, buffer);
+        m->base.unlock(&m->base, m->framebuffer);
+    }
+
+    ALOGD_IF(FB_DEBUG, "Framebuffer state: [0] = %c [1] = %c [2] = %c",
+        framebufferStateName[m->avail[0].state],
+        framebufferStateName[m->avail[1].state],
+        framebufferStateName[m->avail[2].state]);
+    return 0;
+}
+
+static int fb_compositionComplete(struct framebuffer_device_t* dev)
+{
+    // TODO: Properly implement composition complete callback
+    glFinish();
+
+    return 0;
+}
+
+static int fb_lockBuffer(struct framebuffer_device_t* dev, int index)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            dev->common.module);
+
+    // Return immediately if the buffer is available
+    if ((m->avail[index].state == AVL) || (m->swapInterval == 0))
+        return 0;
+
+    pthread_mutex_lock(&(m->avail[index].lock));
+    while (m->avail[index].state != AVL) {
+        pthread_cond_wait(&(m->avail[index].cond),
+                         &(m->avail[index].lock));
+    }
+    pthread_mutex_unlock(&(m->avail[index].lock));
+
+    return 0;
+}
+
+/*****************************************************************************/
+
+int mapFrameBufferLocked(struct private_module_t* module)
+{
+    // already initialized...
+    if (module->framebuffer) {
+        return 0;
+    }
+    char const * const device_template[] = {
+            "/dev/graphics/fb%u",
+            "/dev/fb%u",
+            0 };
+
+    int fd = -1;
+    int i=0;
+    char name[64];
+    char property[PROPERTY_VALUE_MAX];
+
+    while ((fd==-1) && device_template[i]) {
+        snprintf(name, 64, device_template[i], 0);
+        fd = open(name, O_RDWR, 0);
+        i++;
+    }
+    if (fd < 0)
+        return -errno;
+
+    struct fb_fix_screeninfo finfo;
+    if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
+        return -errno;
+
+    struct fb_var_screeninfo info;
+    if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
+        return -errno;
+
+    info.reserved[0] = 0;
+    info.reserved[1] = 0;
+    info.reserved[2] = 0;
+    info.xoffset = 0;
+    info.yoffset = 0;
+    info.activate = FB_ACTIVATE_NOW;
+
+    /* Interpretation of offset for color fields: All offsets are from the right,
+    * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
+    * can use the offset as right argument to <<). A pixel afterwards is a bit
+    * stream and is written to video memory as that unmodified. This implies
+    * big-endian byte order if bits_per_pixel is greater than 8.
+    */
+
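+    /* Worked example of the layout requested below: with RGBA_8888
+     * (red.offset=24, green.offset=16, blue.offset=8, transp.offset=0) a
+     * 32-bit pixel is assembled as (r << 24) | (g << 16) | (b << 8) | a
+     * before being written to video memory as that bit stream. */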
+    if(info.bits_per_pixel == 32) {
+        /*
+         * Explicitly request RGBA_8888
+         */
+        info.bits_per_pixel = 32;
+        info.red.offset     = 24;
+        info.red.length     = 8;
+        info.green.offset   = 16;
+        info.green.length   = 8;
+        info.blue.offset    = 8;
+        info.blue.length    = 8;
+        info.transp.offset  = 0;
+        info.transp.length  = 8;
+
+        /* Note: the GL driver does not have a r=8 g=8 b=8 a=0 config, so if we do
+         * not use the MDP for composition (i.e. hw composition == 0), ask for
+         * RGBA instead of RGBX. */
+        if (property_get("debug.sf.hw", property, NULL) > 0 && atoi(property) == 0)
+            module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
+        else if(property_get("debug.composition.type", property, NULL) > 0 && (strncmp(property, "mdp", 3) == 0))
+            module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
+        else
+            module->fbFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+    } else {
+        /*
+         * Explicitly request 5/6/5
+         */
+        info.bits_per_pixel = 16;
+        info.red.offset     = 11;
+        info.red.length     = 5;
+        info.green.offset   = 5;
+        info.green.length   = 6;
+        info.blue.offset    = 0;
+        info.blue.length    = 5;
+        info.transp.offset  = 0;
+        info.transp.length  = 0;
+        module->fbFormat = HAL_PIXEL_FORMAT_RGB_565;
+    }
+
+    // Adreno needs 4 KB-aligned offsets; the maximum hole size is 4096-1 bytes
+    int size = roundUpToPageSize(info.yres * info.xres * (info.bits_per_pixel/8));
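+    // e.g. an 800x480 16bpp mode needs 768000 bytes, which rounds up to
+    // 770048 bytes (188 pages of 4096); the 2048-byte remainder is the "hole"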
+
+    /*
+     * Request NUM_BUFFERS screens (at least 2 for page flipping)
+     */
+    int numberOfBuffers = (int)(finfo.smem_len/size);
+    ALOGV("num supported framebuffers in kernel = %d", numberOfBuffers);
+
+    if (property_get("debug.gr.numframebuffers", property, NULL) > 0) {
+        int num = atoi(property);
+        if ((num >= NUM_FRAMEBUFFERS_MIN) && (num <= NUM_FRAMEBUFFERS_MAX)) {
+            numberOfBuffers = num;
+        }
+    }
+    if (numberOfBuffers > NUM_FRAMEBUFFERS_MAX)
+        numberOfBuffers = NUM_FRAMEBUFFERS_MAX;
+
+    ALOGV("We support %d buffers", numberOfBuffers);
+
+    // account for the hole introduced by 4 KB-aligning each buffer
+    uint32_t line_length = (info.xres * info.bits_per_pixel / 8);
+    info.yres_virtual = (size * numberOfBuffers) / line_length;
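+    // e.g. 800x480 @16bpp: line_length = 1600 bytes; with 2 buffers of
+    // 770048 bytes each, yres_virtual = 1540096 / 1600 = 962 lines
+    // (integer division)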
+
+    uint32_t flags = PAGE_FLIP;
+    if (ioctl(fd, FBIOPUT_VSCREENINFO, &info) == -1) {
+        info.yres_virtual = size / line_length;
+        flags &= ~PAGE_FLIP;
+        ALOGW("FBIOPUT_VSCREENINFO failed, page flipping not supported");
+    }
+
+    if (info.yres_virtual < ((size * 2) / line_length) ) {
+        // we need at least 2 for page-flipping
+        info.yres_virtual = size / line_length;
+        flags &= ~PAGE_FLIP;
+        ALOGW("page flipping not supported (yres_virtual=%d, requested=%d)",
+                info.yres_virtual, info.yres*2);
+    }
+
+    if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
+        return -errno;
+
+    if (int(info.width) <= 0 || int(info.height) <= 0) {
+        // the driver doesn't return that information
+        // default to 160 dpi
+        info.width  = ((info.xres * 25.4f)/160.0f + 0.5f);
+        info.height = ((info.yres * 25.4f)/160.0f + 0.5f);
+    }
+
+    float xdpi = (info.xres * 25.4f) / info.width;
+    float ydpi = (info.yres * 25.4f) / info.height;
+    //The reserved[4] field is used to store FPS by the driver.
+    float fps  = info.reserved[4];
+
+    ALOGI(   "using (fd=%d)\n"
+            "id           = %s\n"
+            "xres         = %d px\n"
+            "yres         = %d px\n"
+            "xres_virtual = %d px\n"
+            "yres_virtual = %d px\n"
+            "bpp          = %d\n"
+            "r            = %2u:%u\n"
+            "g            = %2u:%u\n"
+            "b            = %2u:%u\n",
+            fd,
+            finfo.id,
+            info.xres,
+            info.yres,
+            info.xres_virtual,
+            info.yres_virtual,
+            info.bits_per_pixel,
+            info.red.offset, info.red.length,
+            info.green.offset, info.green.length,
+            info.blue.offset, info.blue.length
+    );
+
+    ALOGI(   "width        = %d mm (%f dpi)\n"
+            "height       = %d mm (%f dpi)\n"
+            "refresh rate = %.2f Hz\n",
+            info.width,  xdpi,
+            info.height, ydpi,
+            fps
+    );
+
+
+    if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
+        return -errno;
+
+    if (finfo.smem_len <= 0)
+        return -EINVAL; // the ioctl succeeded, so errno carries nothing useful
+
+    module->flags = flags;
+    module->info = info;
+    module->finfo = finfo;
+    module->xdpi = xdpi;
+    module->ydpi = ydpi;
+    module->fps = fps;
+
+#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
+    char pval[PROPERTY_VALUE_MAX];
+    property_get("debug.gr.swapinterval", pval, "1");
+    module->swapInterval = atoi(pval);
+    if (module->swapInterval < private_module_t::PRIV_MIN_SWAP_INTERVAL ||
+        module->swapInterval > private_module_t::PRIV_MAX_SWAP_INTERVAL) {
+        module->swapInterval = 1;
+        ALOGW("Out of range (%d to %d) value for debug.gr.swapinterval, using 1",
+             private_module_t::PRIV_MIN_SWAP_INTERVAL,
+             private_module_t::PRIV_MAX_SWAP_INTERVAL);
+    }
+
+#else
+    /* when surfaceflinger supports swapInterval then can just do this */
+    module->swapInterval = 1;
+#endif
+
+    CALC_INIT();
+
+    module->currentIdx = -1;
+    pthread_cond_init(&(module->qpost), NULL);
+    pthread_mutex_init(&(module->qlock), NULL);
+    for (i = 0; i < NUM_FRAMEBUFFERS_MAX; i++) {
+        pthread_mutex_init(&(module->avail[i].lock), NULL);
+        pthread_cond_init(&(module->avail[i].cond), NULL);
+        module->avail[i].is_avail = true;
+        module->avail[i].state = AVL;
+    }
+
+    /* create display update thread */
+    pthread_t thread1;
+    int ret = pthread_create(&thread1, NULL, &disp_loop, (void *) module);
+    if (ret) {
+         // pthread_create returns an error number directly; it does not set errno
+         return -ret;
+    }
+
+    /*
+     * map the framebuffer
+     */
+
+    module->numBuffers = info.yres_virtual / info.yres;
+    module->bufferMask = 0;
+    // Adreno needs page-aligned offsets; align the framebuffer size to the page size
+    size_t fbSize = roundUpToPageSize(finfo.line_length * info.yres) * module->numBuffers;
+    module->framebuffer = new private_handle_t(fd, fbSize,
+                            private_handle_t::PRIV_FLAGS_USES_PMEM, BUFFER_TYPE_UI,
+                            module->fbFormat, info.xres, info.yres);
+    void* vaddr = mmap(0, fbSize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (vaddr == MAP_FAILED) {
+        ALOGE("Error mapping the framebuffer (%s)", strerror(errno));
+        return -errno;
+    }
+    module->framebuffer->base = intptr_t(vaddr);
+    memset(vaddr, 0, fbSize);
+
+#if defined(HDMI_DUAL_DISPLAY)
+    /* Overlay for HDMI*/
+    pthread_mutex_init(&(module->overlayLock), NULL);
+    pthread_cond_init(&(module->overlayPost), NULL);
+    module->pobjOverlay = new Overlay();
+    module->currentOffset = 0;
+    module->exitHDMIUILoop = false;
+    module->hdmiStateChanged = false;
+    pthread_t hdmiUIThread;
+    pthread_create(&hdmiUIThread, NULL, &hdmi_ui_loop, (void *) module);
+    module->hdmiMirroringState = HDMI_NO_MIRRORING;
+    module->trueMirrorSupport = false;
+#endif
+
+    return 0;
+}
+
+static int mapFrameBuffer(struct private_module_t* module)
+{
+    pthread_mutex_lock(&module->lock);
+    int err = mapFrameBufferLocked(module);
+    pthread_mutex_unlock(&module->lock);
+    return err;
+}
+
+/*****************************************************************************/
+
+static int fb_close(struct hw_device_t *dev)
+{
+    fb_context_t* ctx = (fb_context_t*)dev;
+#if defined(HDMI_DUAL_DISPLAY)
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+            ctx->device.common.module);
+    pthread_mutex_lock(&m->overlayLock);
+    m->exitHDMIUILoop = true;
+    pthread_cond_signal(&(m->overlayPost));
+    pthread_mutex_unlock(&m->overlayLock);
+#endif
+    if (ctx) {
+        free(ctx);
+    }
+    return 0;
+}
+
+int fb_device_open(hw_module_t const* module, const char* name,
+        hw_device_t** device)
+{
+    int status = -EINVAL;
+    if (!strcmp(name, GRALLOC_HARDWARE_FB0)) {
+        alloc_device_t* gralloc_device;
+        status = gralloc_open(module, &gralloc_device);
+        if (status < 0)
+            return status;
+
+        /* initialize our state here */
+        fb_context_t *dev = (fb_context_t*)malloc(sizeof(*dev));
+        if (!dev) {
+            gralloc_close(gralloc_device);
+            return -ENOMEM;
+        }
+        memset(dev, 0, sizeof(*dev));
+
+        /* initialize the procs */
+        dev->device.common.tag = HARDWARE_DEVICE_TAG;
+        dev->device.common.version = 0;
+        dev->device.common.module = const_cast<hw_module_t*>(module);
+        dev->device.common.close = fb_close;
+        dev->device.setSwapInterval = fb_setSwapInterval;
+        dev->device.post            = fb_post;
+        dev->device.setUpdateRect = 0;
+        dev->device.compositionComplete = fb_compositionComplete;
+        //dev->device.lockBuffer = fb_lockBuffer;
+#if defined(HDMI_DUAL_DISPLAY)
+        dev->device.orientationChanged = fb_orientationChanged;
+        dev->device.videoOverlayStarted = fb_videoOverlayStarted;
+        dev->device.enableHDMIOutput = fb_enableHDMIOutput;
+        dev->device.setActionSafeWidthRatio = fb_setActionSafeWidthRatio;
+        dev->device.setActionSafeHeightRatio = fb_setActionSafeHeightRatio;
+#endif
+
+        private_module_t* m = (private_module_t*)module;
+        status = mapFrameBuffer(m);
+        if (status >= 0) {
+            int stride = m->finfo.line_length / (m->info.bits_per_pixel >> 3);
+            const_cast<uint32_t&>(dev->device.flags) = 0;
+            const_cast<uint32_t&>(dev->device.width) = m->info.xres;
+            const_cast<uint32_t&>(dev->device.height) = m->info.yres;
+            const_cast<int&>(dev->device.stride) = stride;
+            const_cast<int&>(dev->device.format) = m->fbFormat;
+            const_cast<float&>(dev->device.xdpi) = m->xdpi;
+            const_cast<float&>(dev->device.ydpi) = m->ydpi;
+            const_cast<float&>(dev->device.fps) = m->fps;
+            const_cast<int&>(dev->device.minSwapInterval) = private_module_t::PRIV_MIN_SWAP_INTERVAL;
+            const_cast<int&>(dev->device.maxSwapInterval) = private_module_t::PRIV_MAX_SWAP_INTERVAL;
+            //const_cast<int&>(dev->device.numFramebuffers) = m->numBuffers;
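+            // magic words the driver places in fix_screeninfo.reserved to
+            // advertise update-on-demand support (0x5444/0x5055, ASCII "TD"/"PU")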
+            if (m->finfo.reserved[0] == 0x5444 &&
+                    m->finfo.reserved[1] == 0x5055) {
+                dev->device.setUpdateRect = fb_setUpdateRect;
+                ALOGD("UPDATE_ON_DEMAND supported");
+            }
+
+            *device = &dev->device.common;
+        } else {
+            free(dev); // don't leak the context when framebuffer setup fails
+        }
+
+        // Close the gralloc module
+        gralloc_close(gralloc_device);
+    }
+    return status;
+}
+
+/* Copy a pmem buffer to the framebuffer */
+
+static void
+msm_copy_buffer(buffer_handle_t handle, int fd,
+                int width, int height, int format,
+                int x, int y, int w, int h)
+{
+    struct {
+        unsigned int count;
+        mdp_blit_req req;
+    } blit;
+    private_handle_t *priv = (private_handle_t*) handle;
+
+    memset(&blit, 0, sizeof(blit));
+    blit.count = 1;
+
+    blit.req.flags = 0;
+    blit.req.alpha = 0xff;
+    blit.req.transp_mask = 0xffffffff;
+
+    blit.req.src.width = width;
+    blit.req.src.height = height;
+    blit.req.src.offset = 0;
+    blit.req.src.memory_id = priv->fd;
+
+    blit.req.dst.width = width;
+    blit.req.dst.height = height;
+    blit.req.dst.offset = 0;
+    blit.req.dst.memory_id = fd;
+    blit.req.dst.format = format;
+
+    blit.req.src_rect.x = blit.req.dst_rect.x = x;
+    blit.req.src_rect.y = blit.req.dst_rect.y = y;
+    blit.req.src_rect.w = blit.req.dst_rect.w = w;
+    blit.req.src_rect.h = blit.req.dst_rect.h = h;
+
+    if (ioctl(fd, MSMFB_BLIT, &blit))
+        ALOGE("MSMFB_BLIT failed = %d", -errno);
+}
diff --git a/libgralloc/gpu.cpp b/libgralloc/gpu.cpp
new file mode 100755
index 0000000..77ad174
--- /dev/null
+++ b/libgralloc/gpu.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <cutils/properties.h>
+#include <sys/mman.h>
+
+#include <genlock.h>
+
+#include "gr.h"
+#include "gpu.h"
+#include "memalloc.h"
+#include "alloc_controller.h"
+
+using namespace gralloc;
+using android::sp;
+
+gpu_context_t::gpu_context_t(const private_module_t* module,
+        sp<IAllocController> alloc_ctrl ) :
+    mAllocCtrl(alloc_ctrl)
+{
+    // Zero out the alloc_device_t
+    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
+
+    char property[PROPERTY_VALUE_MAX];
+    if (property_get("debug.sf.hw", property, NULL) > 0) {
+        if(atoi(property) == 0) {
+            //debug.sf.hw = 0
+            compositionType = CPU_COMPOSITION;
+        } else { //debug.sf.hw = 1
+            // Get the composition type; property_get returns the length of
+            // the value read, so test that rather than the array itself,
+            // which can never be NULL
+            if (property_get("debug.composition.type", property, NULL) <= 0) {
+                compositionType = GPU_COMPOSITION;
+            } else if ((strncmp(property, "mdp", 3)) == 0) {
+                compositionType = MDP_COMPOSITION;
+            } else if ((strncmp(property, "c2d", 3)) == 0) {
+                compositionType = C2D_COMPOSITION;
+            } else {
+                compositionType = GPU_COMPOSITION;
+            }
+        }
+    } else { //debug.sf.hw is not set. Use cpu composition
+        compositionType = CPU_COMPOSITION;
+    }
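+    // Net effect: debug.sf.hw unset or 0 selects CPU composition;
+    // debug.sf.hw=1 selects MDP/C2D when debug.composition.type says so,
+    // and GPU composition otherwise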
+
+    // Initialize the procs
+    common.tag     = HARDWARE_DEVICE_TAG;
+    common.version = 0;
+    common.module  = const_cast<hw_module_t*>(&module->base.common);
+    common.close   = gralloc_close;
+    alloc          = gralloc_alloc;
+#if 0
+    allocSize      = gralloc_alloc_size;
+#endif
+    free           = gralloc_free;
+
+}
+
+int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
+        buffer_handle_t* pHandle)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
+
+    // we don't support allocations with both the FB and PMEM_ADSP flags
+    if (usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) {
+        return -EINVAL;
+    }
+
+    if (m->framebuffer == NULL) {
+        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
+        return -EINVAL;
+    }
+
+    const uint32_t bufferMask = m->bufferMask;
+    const uint32_t numBuffers = m->numBuffers;
+    size_t bufferSize = m->finfo.line_length * m->info.yres;
+
+    //adreno needs FB size to be page aligned
+    bufferSize = roundUpToPageSize(bufferSize);
+
+    if (numBuffers == 1) {
+        // If we have only one buffer, we never use page-flipping. Instead,
+        // we return a regular buffer which will be memcpy'ed to the main
+        // screen when post is called.
+        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
+        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
+                                    m->fbFormat, m->info.xres, m->info.yres);
+    }
+
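+    // bufferMask tracks one bit per framebuffer slot; with numBuffers == 3
+    // the mask is exhausted at 0b111 == (1LU << 3) - 1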
+    if (bufferMask >= ((1LU<<numBuffers)-1)) {
+        // We ran out of buffers.
+        return -ENOMEM;
+    }
+
+    // create a "fake" handles for it
+    // Set the PMEM flag as well, since adreno
+    // treats the FB memory as pmem
+    intptr_t vaddr = intptr_t(m->framebuffer->base);
+    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
+                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
+                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
+                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
+                                                 m->info.yres);
+
+    // find a free slot
+    for (uint32_t i=0 ; i<numBuffers ; i++) {
+        if ((bufferMask & (1LU<<i)) == 0) {
+            m->bufferMask |= (1LU<<i);
+            break;
+        }
+        vaddr += bufferSize;
+    }
+
+    hnd->base = vaddr;
+    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
+    *pHandle = hnd;
+    return 0;
+}
+
+
+int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
+        buffer_handle_t* pHandle)
+{
+    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
+    pthread_mutex_lock(&m->lock);
+    int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
+    pthread_mutex_unlock(&m->lock);
+    return err;
+}
+
+int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
+                                        buffer_handle_t* pHandle, int bufferType,
+                                        int format, int width, int height)
+{
+    int err = 0;
+    int flags = 0;
+    size = roundUpToPageSize(size);
+    alloc_data data;
+    data.offset = 0;
+    data.fd = -1;
+    data.base = 0;
+    data.size = size;
+    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
+        data.align = 8192;
+    else
+        data.align = getpagesize();
+    data.pHandle = (unsigned int) pHandle;
+    err = mAllocCtrl->allocate(data, usage, compositionType);
+
+    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
+        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
+    }
+
+    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY) {
+        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
+        //The EXTERNAL_BLOCK flag is always an add-on
+        if (usage & GRALLOC_USAGE_EXTERNAL_BLOCK) {
+            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
+        }
+    }
+
+    if (err == 0) {
+        flags |= data.allocType;
+        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
+                bufferType, format, width, height);
+
+        hnd->offset = data.offset;
+        hnd->base = int(data.base) + data.offset;
+        *pHandle = hnd;
+    }
+
+    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
+    return err;
+}
+
+void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
+                                                    int *colorFormat,
+                                                    int *bufferType)
+{
+    *bufferType = BUFFER_TYPE_VIDEO;
+    *colorFormat = inputFormat;
+
+    if (inputFormat == HAL_PIXEL_FORMAT_YV12) {
+        *bufferType = BUFFER_TYPE_VIDEO;
+    } else if (inputFormat & S3D_FORMAT_MASK) {
+        // S3D format
+        *colorFormat = COLOR_FORMAT(inputFormat);
+    } else if (inputFormat & INTERLACE_MASK) {
+        // Interlaced
+        *colorFormat = inputFormat ^ HAL_PIXEL_FORMAT_INTERLACE;
+    } else if (inputFormat < 0x7) {
+        // RGB formats
+        *colorFormat = inputFormat;
+        *bufferType = BUFFER_TYPE_UI;
+    } else if ((inputFormat == HAL_PIXEL_FORMAT_R_8) ||
+               (inputFormat == HAL_PIXEL_FORMAT_RG_88)) {
+        *colorFormat = inputFormat;
+        *bufferType = BUFFER_TYPE_UI;
+    }
+}
+
+int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
+        buffer_handle_t* pHandle, int* pStride, size_t bufferSize) {
+    if (!pHandle || !pStride)
+        return -EINVAL;
+
+    size_t size;
+    int alignedw, alignedh;
+    int colorFormat, bufferType;
+    getGrallocInformationFromFormat(format, &colorFormat, &bufferType);
+    size = getBufferSizeAndDimensions(w, h, colorFormat, alignedw, alignedh);
+
+    if ((ssize_t)size <= 0)
+        return -EINVAL;
+    size = (bufferSize >= size)? bufferSize : size;
+
+    // All buffers marked as protected or for external
+    // display need to go to overlay
+    if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
+        (usage & GRALLOC_USAGE_PROTECTED)) {
+        bufferType = BUFFER_TYPE_VIDEO;
+    }
+    int err;
+    if (usage & GRALLOC_USAGE_HW_FB) {
+        err = gralloc_alloc_framebuffer(size, usage, pHandle);
+    } else {
+        err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
+                                   format, alignedw, alignedh);
+    }
+
+    if (err < 0) {
+        return err;
+    }
+
+    // Create a genlock lock for this buffer handle.
+    err = genlock_create_lock((native_handle_t*)(*pHandle));
+    if (err) {
+        ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
+        free_impl(reinterpret_cast<private_handle_t const*>(*pHandle)); // note: *pHandle, not pHandle
+        return err;
+    }
+    *pStride = alignedw;
+    return 0;
+}
+
+int gpu_context_t::free_impl(private_handle_t const* hnd) {
+    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
+    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
+        // free this buffer
+        const size_t bufferSize = m->finfo.line_length * m->info.yres;
+        int index = (hnd->base - m->framebuffer->base) / bufferSize;
+        m->bufferMask &= ~(1<<index);
+    } else {
+        sp<IMemAlloc> memalloc = mAllocCtrl->getAllocator(hnd->flags);
+        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
+                hnd->offset, hnd->fd);
+        if(err)
+            return err;
+        terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
+    }
+
+    // Release the genlock
+    int err = genlock_release_lock((native_handle_t*)hnd);
+    if (err) {
+        ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
+    }
+
+    delete hnd;
+    return 0;
+}
+
+int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
+        int usage, buffer_handle_t* pHandle, int* pStride)
+{
+    if (!dev) {
+        return -EINVAL;
+    }
+    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
+}
+int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h, int format,
+        int usage, buffer_handle_t* pHandle, int* pStride, int bufferSize)
+{
+    if (!dev) {
+        return -EINVAL;
+    }
+    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
+}
+
+
+int gpu_context_t::gralloc_free(alloc_device_t* dev,
+        buffer_handle_t handle)
+{
+    if (private_handle_t::validate(handle) < 0)
+        return -EINVAL;
+
+    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
+    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+    return gpu->free_impl(hnd);
+}
+
+/*****************************************************************************/
+
+int gpu_context_t::gralloc_close(struct hw_device_t *dev)
+{
+    gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
+    if (ctx) {
+        /* TODO: keep a list of all buffer_handle_t created, and free them
+         * all here.
+         */
+        delete ctx;
+    }
+    return 0;
+}
+
diff --git a/libgralloc/gpu.h b/libgralloc/gpu.h
new file mode 100644
index 0000000..301c411
--- /dev/null
+++ b/libgralloc/gpu.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRALLOC_GPU_H_
+#define GRALLOC_GPU_H_
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cutils/log.h>
+#include <cutils/ashmem.h>
+#include <utils/RefBase.h>
+
+#include "gralloc_priv.h"
+
+namespace gralloc {
+    class IAllocController;
+    class gpu_context_t : public alloc_device_t {
+        public:
+            gpu_context_t(const private_module_t* module,
+                          android::sp<IAllocController> alloc_ctrl);
+
+            int gralloc_alloc_framebuffer_locked(size_t size, int usage,
+                                                 buffer_handle_t* pHandle);
+
+            int gralloc_alloc_framebuffer(size_t size, int usage,
+                                          buffer_handle_t* pHandle);
+
+            int gralloc_alloc_buffer(size_t size, int usage,
+                                     buffer_handle_t* pHandle,
+                                     int bufferType, int format,
+                                     int width, int height);
+
+            int free_impl(private_handle_t const* hnd);
+
+            int alloc_impl(int w, int h, int format, int usage,
+                           buffer_handle_t* pHandle, int* pStride,
+                           size_t bufferSize = 0);
+
+            static int gralloc_alloc(alloc_device_t* dev, int w, int h,
+                                     int format, int usage,
+                                     buffer_handle_t* pHandle,
+                                     int* pStride);
+
+            static int gralloc_free(alloc_device_t* dev, buffer_handle_t handle);
+
+            static int gralloc_alloc_size(alloc_device_t* dev,
+                                          int w, int h, int format,
+                                          int usage, buffer_handle_t* pHandle,
+                                          int* pStride, int bufferSize);
+
+            static int gralloc_close(struct hw_device_t *dev);
+
+            int get_composition_type() const { return compositionType; }
+
+
+        private:
+            android::sp<IAllocController> mAllocCtrl;
+            int compositionType;
+            void getGrallocInformationFromFormat(int inputFormat,
+                                                 int *colorFormat,
+                                                 int *bufferType);
+    };
+}
+#endif  // GRALLOC_GPU_H_
diff --git a/libgralloc/gr.h b/libgralloc/gr.h
new file mode 100644
index 0000000..cc36d9a
--- /dev/null
+++ b/libgralloc/gr.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GR_H_
+#define GR_H_
+
+#include <stdint.h>
+#ifdef HAVE_ANDROID_OS      // just want PAGE_SIZE define
+# include <asm/page.h>
+#else
+# include <sys/user.h>
+#endif
+#include <limits.h>
+#include <sys/cdefs.h>
+#include <hardware/gralloc.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include <cutils/native_handle.h>
+
+/*****************************************************************************/
+
+struct private_module_t;
+struct private_handle_t;
+
+inline size_t roundUpToPageSize(size_t x) {
+    return (x + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
+}
+
+inline size_t ALIGN(size_t x, size_t align) {
+    return (x + align-1) & ~(align-1);
+}
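+
+// Example values, assuming 4 KB pages: roundUpToPageSize(5000) == 8192 and
+// ALIGN(5000, 4096) == 8192; note ALIGN expects a power-of-two alignment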
+
+#define FALSE 0
+#define TRUE  1
+
+int mapFrameBufferLocked(struct private_module_t* module);
+int terminateBuffer(gralloc_module_t const* module, private_handle_t* hnd);
+size_t getBufferSizeAndDimensions(int width, int height, int format,
+                                  int& alignedw, int& alignedh);
+
+int decideBufferHandlingMechanism(int format, const char *compositionUsed,
+                                  int hasBlitEngine, int *needConversion,
+                                  int *useBufferDirectly);
+
+// Allocate buffer from width, height, format into a private_handle_t
+// It is the responsibility of the caller to free the buffer
+int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage);
+void free_buffer(private_handle_t *hnd);
+
+/*****************************************************************************/
+
+class Locker {
+    pthread_mutex_t mutex;
+public:
+    class Autolock {
+        Locker& locker;
+    public:
+        inline Autolock(Locker& locker) : locker(locker) {  locker.lock(); }
+        inline ~Autolock() { locker.unlock(); }
+    };
+    inline Locker()        { pthread_mutex_init(&mutex, 0); }
+    inline ~Locker()       { pthread_mutex_destroy(&mutex); }
+    inline void lock()     { pthread_mutex_lock(&mutex); }
+    inline void unlock()   { pthread_mutex_unlock(&mutex); }
+};
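+
+// Typical usage: guard a scope so the mutex is released on every exit path:
+//     Locker mLock;
+//     { Locker::Autolock _l(mLock); /* critical section */ }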
+
+#endif /* GR_H_ */
diff --git a/libgralloc/gralloc.cpp b/libgralloc/gralloc.cpp
new file mode 100644
index 0000000..a98baf8
--- /dev/null
+++ b/libgralloc/gralloc.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008, The Android Open Source Project
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <cutils/properties.h>
+#include <utils/RefBase.h>
+
+#include <linux/android_pmem.h>
+
+#include "gr.h"
+#include "gpu.h"
+#include "memalloc.h"
+#include "alloc_controller.h"
+
+using namespace gralloc;
+using android::sp;
+
+int fb_device_open(const hw_module_t* module, const char* name,
+        hw_device_t** device);
+
+static int gralloc_device_open(const hw_module_t* module, const char* name,
+        hw_device_t** device);
+
+extern int gralloc_lock(gralloc_module_t const* module,
+        buffer_handle_t handle, int usage,
+        int l, int t, int w, int h,
+        void** vaddr);
+
+extern int gralloc_unlock(gralloc_module_t const* module,
+        buffer_handle_t handle);
+
+extern int gralloc_register_buffer(gralloc_module_t const* module,
+        buffer_handle_t handle);
+
+extern int gralloc_unregister_buffer(gralloc_module_t const* module,
+        buffer_handle_t handle);
+
+extern int gralloc_perform(struct gralloc_module_t const* module,
+        int operation, ... );
+
+// HAL module methods
+static struct hw_module_methods_t gralloc_module_methods = {
+    open: gralloc_device_open
+};
+
+// HAL module initialize
+struct private_module_t HAL_MODULE_INFO_SYM = {
+    base: {
+        common: {
+            tag: HARDWARE_MODULE_TAG,
+            version_major: 1,
+            version_minor: 0,
+            id: GRALLOC_HARDWARE_MODULE_ID,
+            name: "Graphics Memory Allocator Module",
+            author: "The Android Open Source Project",
+            methods: &gralloc_module_methods,
+            dso: 0,
+            reserved: {0},
+        },
+        registerBuffer: gralloc_register_buffer,
+        unregisterBuffer: gralloc_unregister_buffer,
+        lock: gralloc_lock,
+        unlock: gralloc_unlock,
+        perform: gralloc_perform,
+        reserved_proc: {0},
+    },
+    framebuffer: 0,
+    fbFormat: 0,
+    flags: 0,
+    numBuffers: 0,
+    bufferMask: 0,
+    lock: PTHREAD_MUTEX_INITIALIZER,
+    currentBuffer: 0,
+};
+
+// Open Gralloc device
+int gralloc_device_open(const hw_module_t* module, const char* name,
+        hw_device_t** device)
+{
+    int status = -EINVAL;
+    if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
+        const private_module_t* m = reinterpret_cast<const private_module_t*>(
+                module);
+        gpu_context_t *dev;
+        sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
+        dev = new gpu_context_t(m, alloc_ctrl);
+        *device = &dev->common;
+        status = 0;
+    } else {
+        status = fb_device_open(module, name, device);
+    }
+    return status;
+}
diff --git a/libgralloc/gralloc_priv.h b/libgralloc/gralloc_priv.h
new file mode 100644
index 0000000..0679621
--- /dev/null
+++ b/libgralloc/gralloc_priv.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRALLOC_PRIV_H_
+#define GRALLOC_PRIV_H_
+
+#include <stdint.h>
+#include <limits.h>
+#include <sys/cdefs.h>
+#include <hardware/gralloc.h>
+#include <pthread.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <cutils/native_handle.h>
+
+#include <linux/fb.h>
+
+#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
+#include "overlayLib.h"
+using namespace overlay;
+#endif
+
+#include <cutils/log.h>
+
+enum {
+    /* gralloc usage bits indicating the type
+     * of allocation that should be used */
+
+    /* ADSP heap is deprecated, use only if using pmem */
+    GRALLOC_USAGE_PRIVATE_ADSP_HEAP       =       GRALLOC_USAGE_PRIVATE_0,
+    /* SF heap is used for application buffers, is not secured */
+    GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP  =       GRALLOC_USAGE_PRIVATE_1,
+    /* SMI heap is deprecated, use only if using pmem */
+    GRALLOC_USAGE_PRIVATE_SMI_HEAP        =       GRALLOC_USAGE_PRIVATE_2,
+    /* SYSTEM heap comes from kernel vmalloc,
+     * can never be uncached, is not secured*/
+    GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP     =       GRALLOC_USAGE_PRIVATE_3,
+    /* IOMMU heap comes from manually allocated pages,
+     * can be cached/uncached, is not secured */
+    GRALLOC_USAGE_PRIVATE_IOMMU_HEAP      =       0x01000000,
+    /* MM heap is a carveout heap for video, can be secured*/
+    GRALLOC_USAGE_PRIVATE_MM_HEAP         =       0x02000000,
+    /* WRITEBACK heap is a carveout heap for writeback, can be secured*/
+    GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP  =       0x04000000,
+    /* CAMERA heap is a carveout heap for camera, is not secured*/
+    GRALLOC_USAGE_PRIVATE_CAMERA_HEAP     =       0x08000000,
+
+    /* Set this for allocating uncached memory (using O_DSYNC)
+     * cannot be used with noncontiguous heaps */
+    GRALLOC_USAGE_PRIVATE_UNCACHED        =       0x00100000,
+
+    /* This flag needs to be set when using a non-contiguous heap from ION.
+     * If not set, the system heap is assumed to be coming from ashmem
+     */
+    GRALLOC_USAGE_PRIVATE_ION             =       0x00200000,
+
+    /* This flag can be set to disable genlock synchronization
+     * for the gralloc buffer. If this flag is set the caller
+     * is required to perform explicit synchronization.
+     * WARNING - flag is outside the standard PRIVATE region
+     * and may need to be moved if the gralloc API changes
+     */
+    GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED  =       0x00400000,
+
+    /* Set this flag when you need to avoid mapping the memory in userspace */
+    GRALLOC_USAGE_PRIVATE_DO_NOT_MAP      =       0x00800000,
+
+    /* Buffer content should be displayed on an external display only */
+    GRALLOC_USAGE_EXTERNAL_ONLY           =       0x00010000,
+
+    /* Only this buffer content should be displayed on external, even if
+     * other EXTERNAL_ONLY buffers are available. Used during suspend.
+     */
+    GRALLOC_USAGE_EXTERNAL_BLOCK          =       0x00020000,
+};
+
+enum {
+    /* Gralloc perform enums
+    */
+    GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER = 0x80000001,
+};
+
+
+enum {
+    GPU_COMPOSITION,
+    C2D_COMPOSITION,
+    MDP_COMPOSITION,
+    CPU_COMPOSITION,
+};
+
+/* min/max number of buffers for page flipping */
+#define NUM_FRAMEBUFFERS_MIN 2
+#define NUM_FRAMEBUFFERS_MAX 3
+
+/* default number of buffers for page flipping */
+#define NUM_DEF_FRAME_BUFFERS 2
+#define NO_SURFACEFLINGER_SWAPINTERVAL
+#define INTERLACE_MASK 0x80
+#define S3D_FORMAT_MASK 0xFF000
+#define COLOR_FORMAT(x) (x & 0xFFF) // Max range for colorFormats is 0 - FFF
+#define DEVICE_PMEM "/dev/pmem"
+#define DEVICE_PMEM_ADSP "/dev/pmem_adsp"
+#define DEVICE_PMEM_SMIPOOL "/dev/pmem_smipool"
+/*****************************************************************************/
+#ifdef __cplusplus
+
+//XXX: Move framebuffer-specific classes and defines to a different header
+template <class T>
+struct Node
+{
+    T data;
+    Node<T> *next;
+};
+
+template <class T>
+class Queue
+{
+public:
+    Queue(): front(NULL), back(NULL), len(0) {dummy = new T;}
+    ~Queue()
+    {
+        clear();
+        delete dummy;
+    }
+    void push(const T& item)   //add an item to the back of the queue
+    {
+        if(len != 0) {         //if the queue is not empty
+            back->next = new Node<T>; //create a new node
+            back = back->next; //set the new node as the back node
+            back->data = item;
+            back->next = NULL;
+        } else {
+            back = new Node<T>;
+            back->data = item;
+            back->next = NULL;
+            front = back;
+        }
+        len++;
+    }
+    void pop()                 //remove the first item from the queue
+    {
+        if (isEmpty())
+            return;            //nothing to dequeue from an empty queue
+        Node<T> *tmp = front;
+        front = front->next;
+        delete tmp;
+        if(front == NULL)      //queue is now empty; update the back pointer
+            back = NULL;
+        len--;
+    }
+    T& getHeadValue() const    //return the value of the first item in the queue
+    {                          //without modification to the structure
+        if (isEmpty()) {
+            ALOGE("Error can't get head of empty queue");
+            return *dummy;
+        }
+        return front->data;
+    }
+
+    bool isEmpty() const       //returns true if no elements are in the queue
+    {
+        return (front == NULL);
+    }
+
+    size_t size() const        //returns the number of elements in the queue
+    {
+        return len;
+    }
+
+private:
+    Node<T> *front;
+    Node<T> *back;
+    size_t len;
+    void clear()
+    {
+        while (!isEmpty())
+            pop();
+    }
+    T *dummy;
+};
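+
+// Example usage, modeled on the fb_post display queue (disp_loop drains it):
+//     Queue<qbuf_t> disp;
+//     qbuf_t qb = { buffer, nxtIdx };
+//     disp.push(qb);                      // producer side
+//     qbuf_t next = disp.getHeadValue();  // consumer side
+//     disp.pop();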
+#endif
+
+enum {
+    /* OEM specific HAL formats */
+    HAL_PIXEL_FORMAT_NV12_ENCODEABLE  = 0x102,
+    HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED     = 0x108,
+    HAL_PIXEL_FORMAT_YCbCr_420_SP           = 0x109,
+    HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO    = 0x10A,
+    HAL_PIXEL_FORMAT_YCrCb_422_SP           = 0x10B,
+    HAL_PIXEL_FORMAT_R_8                    = 0x10D,
+    HAL_PIXEL_FORMAT_RG_88                  = 0x10E,
+    HAL_PIXEL_FORMAT_INTERLACE              = 0x180,
+};
+
+/* possible formats for 3D content*/
+enum {
+    HAL_NO_3D                         = 0x0000,
+    HAL_3D_IN_SIDE_BY_SIDE_L_R        = 0x10000,
+    HAL_3D_IN_TOP_BOTTOM              = 0x20000,
+    HAL_3D_IN_INTERLEAVE              = 0x40000,
+    HAL_3D_IN_SIDE_BY_SIDE_R_L        = 0x80000,
+    HAL_3D_OUT_SIDE_BY_SIDE           = 0x1000,
+    HAL_3D_OUT_TOP_BOTTOM             = 0x2000,
+    HAL_3D_OUT_INTERLEAVE             = 0x4000,
+    HAL_3D_OUT_MONOSCOPIC             = 0x8000
+};
+
+enum {
+    BUFFER_TYPE_UI = 0,
+    BUFFER_TYPE_VIDEO
+};
+
+#if defined(HDMI_DUAL_DISPLAY)
+enum hdmi_mirroring_state {
+    HDMI_NO_MIRRORING,
+    HDMI_UI_MIRRORING,
+    HDMI_ORIGINAL_RESOLUTION_MIRRORING
+};
+#endif
+/*****************************************************************************/
+
+struct private_module_t;
+struct private_handle_t;
+struct PmemAllocator;
+
+struct qbuf_t {
+    buffer_handle_t buf;
+    int  idx;
+};
+
+enum buf_state {
+    SUB,
+    REF,
+    AVL
+};
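+
+// Framebuffer slot lifecycle, as driven by fb_post and the disp_loop thread:
+// AVL = free for rendering, SUB = submitted/queued for display,
+// REF = currently held by the display before returning to AVL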
+
+struct avail_t {
+    pthread_mutex_t lock;
+    pthread_cond_t cond;
+#ifdef __cplusplus
+    bool is_avail;
+    buf_state state;
+#endif
+};
+
+struct private_module_t {
+    gralloc_module_t base;
+
+    struct private_handle_t* framebuffer;
+    uint32_t fbFormat;
+    uint32_t flags;
+    uint32_t numBuffers;
+    uint32_t bufferMask;
+    pthread_mutex_t lock;
+    buffer_handle_t currentBuffer;
+
+    struct fb_var_screeninfo info;
+    struct fb_fix_screeninfo finfo;
+    float xdpi;
+    float ydpi;
+    float fps;
+    int swapInterval;
+#ifdef __cplusplus
+    Queue<struct qbuf_t> disp; // non-empty when buffer is ready for display
+#endif
+    int currentIdx;
+    struct avail_t avail[NUM_FRAMEBUFFERS_MAX];
+    pthread_mutex_t qlock;
+    pthread_cond_t qpost;
+
+    enum {
+        // flag to indicate we'll post this buffer
+        PRIV_USAGE_LOCKED_FOR_POST = 0x80000000,
+        PRIV_MIN_SWAP_INTERVAL = 0,
+        PRIV_MAX_SWAP_INTERVAL = 1,
+    };
+#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
+    Overlay* pobjOverlay;
+    int orientation;
+    bool videoOverlay;
+    uint32_t currentOffset;
+    int enableHDMIOutput; // holds the type of external display
+    bool trueMirrorSupport;
+    bool exitHDMIUILoop;
+    float actionsafeWidthRatio;
+    float actionsafeHeightRatio;
+    bool hdmiStateChanged;
+    hdmi_mirroring_state hdmiMirroringState;
+    pthread_mutex_t overlayLock;
+    pthread_cond_t overlayPost;
+#endif
+};
+
+/*****************************************************************************/
+
+#ifdef __cplusplus
+struct private_handle_t : public native_handle {
+#else
+struct private_handle_t {
+    native_handle_t nativeHandle;
+#endif
+    enum {
+        PRIV_FLAGS_FRAMEBUFFER    = 0x00000001,
+        PRIV_FLAGS_USES_PMEM      = 0x00000002,
+        PRIV_FLAGS_USES_PMEM_ADSP = 0x00000004,
+        PRIV_FLAGS_USES_ION       = 0x00000008,
+        PRIV_FLAGS_USES_ASHMEM    = 0x00000010,
+        PRIV_FLAGS_NEEDS_FLUSH    = 0x00000020,
+        PRIV_FLAGS_DO_NOT_FLUSH   = 0x00000040,
+        PRIV_FLAGS_SW_LOCK        = 0x00000080,
+        PRIV_FLAGS_NONCONTIGUOUS_MEM = 0x00000100,
+        PRIV_FLAGS_HWC_LOCK       = 0x00000200, // Set by HWC when storing the handle
+        PRIV_FLAGS_SECURE_BUFFER  = 0x00000400,
+        PRIV_FLAGS_UNSYNCHRONIZED = 0x00000800, // For explicit synchronization
+        PRIV_FLAGS_NOT_MAPPED     = 0x00001000, // Not mapped in userspace
+        PRIV_FLAGS_EXTERNAL_ONLY  = 0x00002000, // Display on external only
+        PRIV_FLAGS_EXTERNAL_BLOCK = 0x00004000, // Display only this buffer on external
+    };
+
+    // file-descriptors
+    int     fd;
+    int     genlockHandle; // genlock handle to be dup'd by the binder
+    // ints
+    int     magic;
+    int     flags;
+    int     size;
+    int     offset;
+    int     bufferType;
+
+    // FIXME: the attributes below should be out-of-line
+    int     base;
+    int     gpuaddr; // GPU address mapped into the MMU; left 0 when using ashmem (unused there)
+    int     pid;
+    int     format;
+    int     width;
+    int     height;
+    int     genlockPrivFd; // local fd of the genlock device.
+
+#ifdef __cplusplus
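+    // binder marshals a native_handle as numFds file descriptors followed by
+    // numInts ints, so these counts must match the fields above: 2 fds
+    // (fd, genlockHandle) and 12 ints (magic through genlockPrivFd)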
+    static const int sNumInts = 12;
+    static const int sNumFds = 2;
+    static const int sMagic = 'gmsm';
+
+    private_handle_t(int fd, int size, int flags, int bufferType, int format, int width, int height) :
+        fd(fd), genlockHandle(-1), magic(sMagic), flags(flags), size(size), offset(0),
+        bufferType(bufferType), base(0), gpuaddr(0), pid(getpid()), format(format),
+        width(width), height(height), genlockPrivFd(-1)
+    {
+        version = sizeof(native_handle);
+        numInts = sNumInts;
+        numFds = sNumFds;
+    }
+    ~private_handle_t() {
+        magic = 0;
+    }
+
+    bool usesPhysicallyContiguousMemory() {
+        return (flags & PRIV_FLAGS_USES_PMEM) != 0;
+    }
+
+    static int validate(const native_handle* h) {
+        const private_handle_t* hnd = (const private_handle_t*)h;
+        if (!h || h->version != sizeof(native_handle) ||
+                h->numInts != sNumInts || h->numFds != sNumFds ||
+                hnd->magic != sMagic)
+        {
+            ALOGE("invalid gralloc handle (at %p)", h);
+            return -EINVAL;
+        }
+        return 0;
+    }
+
+    static private_handle_t* dynamicCast(const native_handle* in) {
+        if (validate(in) == 0) {
+            return (private_handle_t*) in;
+        }
+        return NULL;
+    }
+#endif
+};
+
+#endif /* GRALLOC_PRIV_H_ */
diff --git a/libgralloc/ion_msm.h b/libgralloc/ion_msm.h
new file mode 100644
index 0000000..ae49bce
--- /dev/null
+++ b/libgralloc/ion_msm.h
@@ -0,0 +1,836 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ * 				 carveout heap, allocations are physically
+ * 				 contiguous
+ * @ION_HEAP_TYPE_IOMMU: IOMMU memory
+ * @ION_HEAP_TYPE_CP:	 memory allocated from a prereserved
+ *				carveout heap, allocations are physically
+ *				contiguous. Used for content protection.
+ * @ION_HEAP_END:		helper for iterating over heaps
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_IOMMU,
+	ION_HEAP_TYPE_CP,
+	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+				 are at the end of this enum */
+	ION_NUM_HEAPS,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_CP_MASK		(1 << ION_HEAP_TYPE_CP)
+
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Ids are spaced on purpose to allow new ids to be inserted in between (for
+ * possible fallbacks)
+ */
+
+enum ion_heap_ids {
+	INVALID_HEAP_ID = -1,
+	ION_CP_MM_HEAP_ID = 8,
+	ION_CP_MFC_HEAP_ID = 12,
+	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SF_HEAP_ID = 24,
+	ION_IOMMU_HEAP_ID = 25,
+	ION_QSECOM_HEAP_ID = 27,
+	ION_AUDIO_HEAP_ID = 28,
+
+	ION_MM_FIRMWARE_HEAP_ID = 29,
+	ION_SYSTEM_HEAP_ID = 30,
+
+	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
+};
+
+enum ion_fixed_position {
+	NOT_FIXED,
+	FIXED_LOW,
+	FIXED_MIDDLE,
+	FIXED_HIGH,
+};
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ */
+#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
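+
+/* Example: ION_HEAP(ION_SF_HEAP_ID) == (1 << 24); OR such bits into the
+ * heap mask passed to ion_alloc() to restrict which heaps may satisfy it. */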
+
+#define ION_VMALLOC_HEAP_NAME	"vmalloc"
+#define ION_AUDIO_HEAP_NAME	"audio"
+#define ION_SF_HEAP_NAME	"sf"
+#define ION_MM_HEAP_NAME	"mm"
+#define ION_CAMERA_HEAP_NAME	"camera_preview"
+#define ION_IOMMU_HEAP_NAME	"iommu"
+#define ION_MFC_HEAP_NAME	"mfc"
+#define ION_WB_HEAP_NAME	"wb"
+#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
+#define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_FMEM_HEAP_NAME	"fmem"
+
+#define CACHED          1
+#define UNCACHED        0
+
+#define ION_CACHE_SHIFT 0
+
+#define ION_SET_CACHE(__cache)  ((__cache) << ION_CACHE_SHIFT)
+
+#define ION_IS_CACHED(__flags)	((__flags) & (1 << ION_CACHE_SHIFT))
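+
+/* Example: flags |= ION_SET_CACHE(CACHED) requests a cached mapping, and
+ * ION_IS_CACHED(flags) then evaluates to nonzero. */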
+
+/*
+ * This flag allows clients when mapping into the IOMMU to specify to
+ * defer un-mapping from the IOMMU until the buffer memory is freed.
+ */
+#define ION_IOMMU_UNMAP_DELAYED 1
+
+#ifdef __KERNEL__
+#include <linux/err.h>
+#include <mach/ion.h>
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being many kernel interfaces
+   do not accept phys_addr_t's that would have to be converted first. */
+#define ion_phys_addr_t unsigned long
+#define ion_virt_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:	type of the heap from ion_heap_type enum
+ * @id:		unique identifier for the heap; when allocating, heaps with
+ * 		lower ids will be allocated from first
+ * @name:	used for debug purposes
+ * @base:	base address of heap in physical memory if applicable
+ * @size:	size of the heap in bytes if applicable
+ * @memory_type: Memory type used for the heap
+ * @extra_data:	Extra data specific to each heap type
+ */
+struct ion_platform_heap {
+	enum ion_heap_type type;
+	unsigned int id;
+	const char *name;
+	ion_phys_addr_t base;
+	size_t size;
+	enum ion_memory_types memory_type;
+	void *extra_data;
+};
+
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type:	Memory ID used to identify the memory to TZ
+ * @align:		Alignment requirement for the memory
+ * @secure_base:	Base address for securing the heap.
+ *			Note: This might be different from actual base address
+ *			of this heap in the case of a shared heap.
+ * @secure_size:	Memory size for securing the heap.
+ *			Note: This might be different from actual size
+ *			of this heap in the case of a shared heap.
+ * @reusable		Flag indicating whether this heap is reusable or not.
+ *			(see FMEM)
+ * @mem_is_fmem		Flag indicating whether this memory is coming from fmem
+ *			or not.
+ * @fixed_position	If nonzero, position in the fixed area.
+ * @virt_addr:		Virtual address used when using fmem.
+ * @request_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_region:	function to be called upon ion registration
+ *
+ */
+struct ion_cp_heap_pdata {
+	enum ion_permission_type permission_type;
+	unsigned int align;
+	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+	size_t secure_size; /* Size used for securing heap when heap is shared*/
+	int reusable;
+	int mem_is_fmem;
+	enum ion_fixed_position fixed_position;
+	ion_virt_addr_t *virt_addr;
+	int (*request_region)(void *);
+	int (*release_region)(void *);
+	void *(*setup_region)(void);
+};
+
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
+ * @align:		Alignment requirement for the memory
+ * @mem_is_fmem		Flag indicating whether this memory is coming from fmem
+ *			or not.
+ * @fixed_position	If nonzero, position in the fixed area.
+ * @request_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_region:	function to be called upon ion registration
+ *
+ */
+struct ion_co_heap_pdata {
+	int adjacent_mem_id;
+	unsigned int align;
+	int mem_is_fmem;
+	enum ion_fixed_position fixed_position;
+	int (*request_region)(void *);
+	int (*release_region)(void *);
+	void *(*setup_region)(void);
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:    number of structures in the array
+ * @request_region: function to be called when the number of allocations goes
+ *						from 0 -> 1
+ * @release_region: function to be called when the number of allocations goes
+ *						from 1 -> 0
+ * @setup_region:   function to be called upon ion registration
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+	int nr;
+	int (*request_region)(void *);
+	int (*release_region)(void *);
+	void *(*setup_region)(void);
+	struct ion_platform_heap heaps[];
+};
+
+#ifdef CONFIG_ION
+
+/**
+ * ion_client_create() -  allocate a client and return it
+ * @dev:	the global ion device
+ * @heap_mask:	mask of heaps this client can allocate from
+ * @name:	used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     unsigned int heap_mask, const char *name);
+
+/**
+ *  msm_ion_client_create - allocate a client using the ion_device specified in
+ *				drivers/gpu/ion/msm/msm_ion.c
+ *
+ * heap_mask and name are the same as ion_client_create, return values
+ * are the same as ion_client_create.
+ */
+struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+					const char *name);
+
+/**
+ * ion_client_destroy() -  frees a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:	the client
+ * @len:	size of the allocation
+ * @align:	requested allocation alignment, lots of hardware blocks have
+ *		alignment requirements of some kind
+ * @flags:	mask of heaps to allocate from; if multiple bits are set,
+ *		heaps will be tried in order from lowest to highest order bit
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_map_dma should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ * @flags:	flags for this mapping
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address. If no flags are specified, this
+ * will return a non-secure uncached mapping.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+			unsigned long flags);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
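+
+/*
+ * Illustrative kernel-side flow (a sketch, not normative), assuming
+ * CONFIG_ION and a heap id such as ION_SF_HEAP_ID; the client name is
+ * hypothetical and error handling is omitted:
+ *
+ *   struct ion_client *client;
+ *   struct ion_handle *handle;
+ *   void *vaddr;
+ *
+ *   client = msm_ion_client_create(ION_HEAP(ION_SF_HEAP_ID), "my_driver");
+ *   handle = ion_alloc(client, 4096, 4096, ION_HEAP(ION_SF_HEAP_ID));
+ *   vaddr  = ion_map_kernel(client, handle, 0);
+ *   ...
+ *   ion_unmap_kernel(client, handle);
+ *   ion_free(client, handle);
+ *   ion_client_destroy(client);
+ */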
+
+/**
+ * ion_map_dma - create a dma mapping for a given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Return an sglist describing the given handle
+ */
+struct scatterlist *ion_map_dma(struct ion_client *client,
+				struct ion_handle *handle,
+				unsigned long flags);
+
+/**
+ * ion_unmap_dma() - destroy a dma mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share() - given a handle, obtain a buffer to pass to other clients
+ * @client:	the client
+ * @handle:	the handle to share
+ *
+ * Given a handle, return a buffer which exists in a global namespace
+ * and can be passed to other clients.  Should be passed into ion_import
+ * to obtain a new handle for this buffer.
+ *
+ * NOTE: This function does not take an extra reference.  The burden is on the
+ * caller to make sure the buffer doesn't go away while it's being passed to
+ * another client.  That is, ion_free should not be called on this handle until
+ * the buffer has been imported into the other client.
+ */
+struct ion_buffer *ion_share(struct ion_client *client,
+			     struct ion_handle *handle);
+
+/**
+ * ion_import() - given a buffer in another client, import it
+ * @client:	the client to import the buffer into
+ * @buffer:	the buffer to import (as obtained from ion_share)
+ *
+ * Given a buffer, add it to the client and return the handle to use to refer
+ * to it further.  This is called to share a handle from one kernel client to
+ * another.
+ */
+struct ion_handle *ion_import(struct ion_client *client,
+			      struct ion_buffer *buffer);
+
+/**
+ * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
+ * @client:	the client to import the buffer into
+ * @fd:		the fd
+ *
+ * A helper function for drivers that will be receiving ion buffers shared
+ * with them from userspace.  These buffers are represented by a file
+ * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
+ * This function converts that fd into the underlying buffer, and returns
+ * the handle to use to refer to it further.
+ */
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+				unsigned long *flags);
+
+
+/**
+ * ion_map_iommu - map the given handle into an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to map
+ * @domain_num - domain number to map to
+ * @partition_num - partition number to allocate iova from
+ * @align - alignment for the iova
+ * @iova_length - length of iova to map. If the iova length is
+ *		greater than the handle length, the remaining
+ *		address space will be mapped to a dummy buffer.
+ * @iova - pointer to store the iova address
+ * @buffer_size - pointer to store the size of the buffer
+ * @flags - flags for options to map
+ * @iommu_flags - flags specific to the iommu.
+ *
+ * Maps the handle into the iova space specified via domain number. Iova
+ * will be allocated from the partition specified via partition_num.
+ * Returns 0 on success, negative value on error.
+ */
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num, unsigned long align,
+			unsigned long iova_length, unsigned long *iova,
+			unsigned long *buffer_size,
+			unsigned long flags, unsigned long iommu_flags);
+
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * Gives the allocated size of a handle.  Returns 0 on success, negative
+ * value on error.
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			unsigned long *size);
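+
+/*
+ * Illustrative pairing (a sketch, not normative): fetch the size to pass
+ * as the iova length, then map; domain_num = 1 and partition_num = 0 below
+ * are hypothetical, platform-specific values:
+ *
+ *   unsigned long iova, mapped_size, len;
+ *   ion_handle_get_size(client, handle, &len);
+ *   ion_map_iommu(client, handle, 1, 0, 4096, len,
+ *                 &iova, &mapped_size, 0, 0);
+ */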
+
+/**
+ * ion_unmap_iommu - unmap the handle from an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to unmap
+ * @domain_num - domain to unmap from
+ * @partition_num - partition to unmap from
+ *
+ * Decrement the reference count on the iommu mapping. If the count is
+ * 0, the mapping will be removed from the iommu.
+ */
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+			int domain_num, int partition_num);
+
+
+/**
+ * ion_secure_heap - secure a heap
+ *
+ * @dev - the ion device the heap belongs to
+ * @heap_id - heap id to secure.
+ *
+ * Secure a heap
+ * Returns 0 on success
+ */
+int ion_secure_heap(struct ion_device *dev, int heap_id);
+
+/**
+ * ion_unsecure_heap - un-secure a heap
+ *
+ * @dev - the ion device the heap belongs to
+ * @heap_id - heap id to un-secure.
+ *
+ * Un-secure a heap
+ * Returns 0 on success
+ */
+int ion_unsecure_heap(struct ion_device *dev, int heap_id);
+
+/**
+ * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
+ *
+ * @heap_id - heap id to secure.
+ *
+ * Secure a heap
+ * Returns 0 on success
+ */
+int msm_ion_secure_heap(int heap_id);
+
+/**
+ * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
+ *
+ * @heap_id - heap id to un-secure.
+ *
+ * Un-secure a heap
+ * Returns 0 on success
+ */
+int msm_ion_unsecure_heap(int heap_id);
+
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr -  virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd);
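+
+/*
+ * Illustrative call (a sketch, not normative): clean the CPU cache for a
+ * mapped buffer after a CPU write, before handing it to hardware:
+ *
+ *   msm_ion_do_cache_op(client, handle, vaddr, len, ION_IOC_CLEAN_CACHES);
+ */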
+
+#else
+static inline struct ion_client *ion_client_create(struct ion_device *dev,
+				     unsigned int heap_mask, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+					const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_client_destroy(struct ion_client *client) { }
+
+static inline struct ion_handle *ion_alloc(struct ion_client *client,
+			size_t len, size_t align, unsigned int flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_free(struct ion_client *client,
+	struct ion_handle *handle) { }
+
+
+static inline int ion_phys(struct ion_client *client,
+	struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
+{
+	return -ENODEV;
+}
+
+static inline void *ion_map_kernel(struct ion_client *client,
+	struct ion_handle *handle, unsigned long flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_kernel(struct ion_client *client,
+	struct ion_handle *handle) { }
+
+static inline struct scatterlist *ion_map_dma(struct ion_client *client,
+	struct ion_handle *handle, unsigned long flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_dma(struct ion_client *client,
+	struct ion_handle *handle) { }
+
+static inline struct ion_buffer *ion_share(struct ion_client *client,
+	struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_handle *ion_import(struct ion_client *client,
+	struct ion_buffer *buffer)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_handle *ion_import_fd(struct ion_client *client,
+	int fd)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_flags(struct ion_client *client,
+	struct ion_handle *handle, unsigned long *flags)
+{
+	return -ENODEV;
+}
+
+static inline int ion_map_iommu(struct ion_client *client,
+			struct ion_handle *handle, int domain_num,
+			int partition_num, unsigned long align,
+			unsigned long iova_length, unsigned long *iova,
+			unsigned long *buffer_size,
+			unsigned long flags,
+			unsigned long iommu_flags)
+{
+	return -ENODEV;
+}
+
+static inline void ion_unmap_iommu(struct ion_client *client,
+			struct ion_handle *handle, int domain_num,
+			int partition_num)
+{
+	return;
+}
+
+static inline int ion_secure_heap(struct ion_device *dev, int heap_id)
+{
+	return -ENODEV;
+
+}
+
+static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_secure_heap(int heap_id)
+{
+	return -ENODEV;
+
+}
+
+static inline int msm_ion_unsecure_heap(int heap_id)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_op(struct ion_client *client,
+			struct ion_handle *handle, void *vaddr,
+			unsigned long len, unsigned int cmd)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion.
+ * Most operations are handled via the following ioctls.
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:	size of the allocation
+ * @align:	required alignment of the allocation
+ * @flags:	flags passed to heap
+ * @handle:	pointer that will be populated with a cookie to use to refer
+ *		to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int flags;
+	struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	struct ion_handle *handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle:	handle with data to flush
+ * @fd:		fd to flush
+ * @vaddr:	userspace virtual address mapped with mmap
+ * @offset:	offset into the handle to flush
+ * @length:	length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed
+ */
+struct ion_flush_data {
+	struct ion_handle *handle;
+	int fd;
+	void *vaddr;
+	unsigned int offset;
+	unsigned int length;
+};
+
+/* struct ion_flag_data - information about flags for this buffer
+ *
+ * @handle:	handle to get flags from
+ * @flags:	flags of this handle
+ *
+ * Takes handle as an input and outputs the flags from the handle
+ * in the flag field.
+ */
+struct ion_flag_data {
+	struct ion_handle *handle;
+	unsigned long flags;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
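+
+/*
+ * Illustrative userspace flow (a sketch, not normative), assuming a heap
+ * id such as ION_SF_HEAP_ID from the enum earlier in this header; error
+ * handling omitted:
+ *
+ *   int ion_fd = open("/dev/ion", O_RDONLY);
+ *   struct ion_allocation_data alloc_data;
+ *   alloc_data.len   = 4096;
+ *   alloc_data.align = 4096;
+ *   alloc_data.flags = ION_HEAP(ION_SF_HEAP_ID);
+ *   ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
+ *
+ *   struct ion_fd_data fd_data;
+ *   fd_data.handle = alloc_data.handle;
+ *   ioctl(ion_fd, ION_IOC_MAP, &fd_data);
+ *   void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                    fd_data.fd, 0);
+ */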
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, int)
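+
+/*
+ * Illustrative sharing flow (a sketch, not normative): process A obtains a
+ * shareable fd and passes it to process B over an fd-passing channel (e.g.
+ * a unix domain socket); B, using its own /dev/ion fd, imports it:
+ *
+ *   // process A
+ *   struct ion_fd_data share_data;
+ *   share_data.handle = alloc_data.handle;
+ *   ioctl(ion_fd, ION_IOC_SHARE, &share_data);    // pass share_data.fd to B
+ *
+ *   // process B
+ *   struct ion_fd_data import_data;
+ *   import_data.fd = received_fd;                 // fd received from A
+ *   ioctl(ion_fd, ION_IOC_IMPORT, &import_data);  // handle is now valid
+ */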
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MAGIC, 7, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MAGIC, 8, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MAGIC, 9, \
+						struct ion_flush_data)
+
+/**
+ * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
+ *
+ * Gets the flags of the current handle, which indicate cacheability,
+ * secure state, etc.
+ */
+#define ION_IOC_GET_FLAGS		_IOWR(ION_IOC_MAGIC, 10, \
+						struct ion_flag_data)
+#endif /* _LINUX_ION_H */
diff --git a/libgralloc/ionalloc.cpp b/libgralloc/ionalloc.cpp
new file mode 100644
index 0000000..9ff0a5e
--- /dev/null
+++ b/libgralloc/ionalloc.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/ioctl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <cutils/log.h>
+#include <errno.h>
+#include "gralloc_priv.h"
+#include "ionalloc.h"
+
+using gralloc::IonAlloc;
+
+#define ION_DEVICE "/dev/ion"
+
+int IonAlloc::open_device()
+{
+    if(mIonFd == FD_INIT)
+        mIonFd = open(ION_DEVICE, O_RDONLY);
+
+    if(mIonFd < 0) {
+        ALOGE("%s: Failed to open ion device - %s",
+                __FUNCTION__, strerror(errno));
+        mIonFd = FD_INIT;
+        return -errno;
+    }
+    return 0;
+}
+
+void IonAlloc::close_device()
+{
+    if(mIonFd >= 0)
+        close(mIonFd);
+    mIonFd = FD_INIT;
+}
+
+int IonAlloc::alloc_buffer(alloc_data& data)
+{
+    int err = 0;
+    int ionSyncFd = FD_INIT;
+    int iFd = FD_INIT;
+    struct ion_handle_data handle_data;
+    struct ion_fd_data fd_data;
+    struct ion_allocation_data ionAllocData;
+
+    void *base = 0;
+
+    ionAllocData.len = data.size;
+    ionAllocData.align = data.align;
+    ionAllocData.flags = data.flags;
+
+    err = open_device();
+    if (err)
+        return err;
+
+    if(data.uncached) {
+        // Use the sync FD to alloc and map
+        // when we need uncached memory
+        // FIXME: O_DSYNC is defined to open uncached - add that in the kernel
+        //ionSyncFd = open(ION_DEVICE, O_RDONLY|O_DSYNC);
+        ionSyncFd = open(ION_DEVICE, O_RDONLY);
+        if(ionSyncFd < 0) {
+            ALOGE("%s: Failed to open ion device - %s",
+                    __FUNCTION__, strerror(errno));
+            return -errno;
+        }
+        iFd = ionSyncFd;
+    } else {
+        iFd = mIonFd;
+    }
+
+    if(ioctl(iFd, ION_IOC_ALLOC, &ionAllocData)) {
+        err = -errno;
+        ALOGE("ION_IOC_ALLOC failed with error - %s", strerror(errno));
+        if(ionSyncFd >= 0)
+            close(ionSyncFd);
+        ionSyncFd = FD_INIT;
+        return err;
+    }
+
+    fd_data.handle = ionAllocData.handle;
+    handle_data.handle = ionAllocData.handle;
+    if(ioctl(iFd, ION_IOC_MAP, &fd_data)) {
+        err = -errno;
+        ALOGE("%s: ION_IOC_MAP failed with error - %s",
+                __FUNCTION__, strerror(errno));
+        // Free on the same client (fd) the handle was allocated from
+        ioctl(iFd, ION_IOC_FREE, &handle_data);
+        if(ionSyncFd >= 0)
+            close(ionSyncFd);
+        ionSyncFd = FD_INIT;
+        return err;
+    }
+
+    //if(!(data.flags & ION_SECURE) &&
+    if(!(data.allocType & private_handle_t::PRIV_FLAGS_NOT_MAPPED)) {
+
+        base = mmap(0, ionAllocData.len, PROT_READ|PROT_WRITE,
+                                MAP_SHARED, fd_data.fd, 0);
+        if(base == MAP_FAILED) {
+            err = -errno;
+            ALOGE("%s: Failed to map the allocated memory: %s",
+                                    __FUNCTION__, strerror(errno));
+            ioctl(iFd, ION_IOC_FREE, &handle_data);
+            if(ionSyncFd >= 0)
+                close(ionSyncFd);
+            ionSyncFd = FD_INIT;
+            return err;
+        }
+        memset(base, 0, ionAllocData.len);
+        // Clean cache after memset
+        clean_buffer(base, data.size, data.offset, fd_data.fd);
+    }
+
+    data.base = base;
+    data.fd = fd_data.fd;
+    // Free the handle on the same client (fd) it was allocated from; the
+    // buffer itself stays alive through the shared fd in data.fd
+    ioctl(iFd, ION_IOC_FREE, &handle_data);
+
+    // Close the uncached FD since we no longer need it
+    if(ionSyncFd >= 0)
+        close(ionSyncFd);
+    ionSyncFd = FD_INIT;
+
+    ALOGD("ion: Allocated buffer base:%p size:%d fd:%d",
+                            data.base, ionAllocData.len, data.fd);
+    return 0;
+}
+
+
+int IonAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+    ALOGD("ion: Freeing buffer base:%p size:%d fd:%d",
+            base, size, fd);
+    int err = 0;
+    err = open_device();
+    if (err)
+        return err;
+
+    if(base)
+        err = unmap_buffer(base, size, offset);
+    close(fd);
+    return err;
+}
+
+int IonAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+    int err = 0;
+    void *base = 0;
+    // It is a (quirky) requirement of ION to have opened the
+    // ion fd in the process that is doing the mapping
+    err = open_device();
+    if (err)
+        return err;
+
+    base = mmap(0, size, PROT_READ| PROT_WRITE,
+            MAP_SHARED, fd, 0);
+    *pBase = base;
+    if(base == MAP_FAILED) {
+        err = -errno;
+        ALOGD("ion: Failed to map memory in the client: %s",
+                                strerror(errno));
+    } else {
+        ALOGD("ion: Mapped buffer base:%p size:%d offset:%d fd:%d",
+                                base, size, offset, fd);
+    }
+    return err;
+}
+
+int IonAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    ALOGD("ion: Unmapping buffer  base:%p size:%d", base, size);
+    int err = 0;
+    if(munmap(base, size)) {
+        err = -errno;
+        ALOGE("ion: Failed to unmap memory at %p : %s",
+                 base, strerror(errno));
+    }
+    return err;
+}
+
+int IonAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+    struct ion_flush_data flush_data;
+    struct ion_fd_data fd_data;
+    struct ion_handle_data handle_data;
+    int err = 0;
+
+    err = open_device();
+    if (err)
+        return err;
+
+    fd_data.fd = fd;
+    if (ioctl(mIonFd, ION_IOC_IMPORT, &fd_data)) {
+        err = -errno;
+        ALOGE("%s: ION_IOC_IMPORT failed with error - %s",
+                __FUNCTION__, strerror(errno));
+        return err;
+    }
+
+    handle_data.handle = fd_data.handle;
+    flush_data.handle  = fd_data.handle;
+    flush_data.vaddr   = base;
+    flush_data.offset  = offset;
+    flush_data.length  = size;
+    if(ioctl(mIonFd, ION_IOC_CLEAN_INV_CACHES, &flush_data)) {
+        err = -errno;
+        ALOGE("%s: ION_IOC_CLEAN_INV_CACHES failed with error - %s",
+                __FUNCTION__, strerror(errno));
+        ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+        return err;
+    }
+    ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+    return 0;
+}
+
diff --git a/libgralloc/ionalloc.h b/libgralloc/ionalloc.h
new file mode 100644
index 0000000..be26cd7
--- /dev/null
+++ b/libgralloc/ionalloc.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_IONALLOC_H
+#define GRALLOC_IONALLOC_H
+
+#include "memalloc.h"
+#include "ion_msm.h"
+
+namespace gralloc {
+
+    class IonAlloc : public IMemAlloc  {
+
+        public:
+            virtual int alloc_buffer(alloc_data& data);
+
+            virtual int free_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            virtual int map_buffer(void **pBase, size_t size,
+                    int offset, int fd);
+
+            virtual int unmap_buffer(void *base, size_t size,
+                    int offset);
+
+            virtual int clean_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            IonAlloc() { mIonFd = FD_INIT; }
+
+            ~IonAlloc() { close_device(); }
+
+        private:
+            int mIonFd;
+
+            int open_device();
+
+            void close_device();
+
+    };
+
+}
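+
+// Illustrative use (a sketch, not normative): a one-page ION allocation
+// through this wrapper; error handling mostly omitted.
+//
+//   android::sp<gralloc::IonAlloc> ionalloc = new gralloc::IonAlloc();
+//   gralloc::alloc_data data;
+//   data.base = 0;         data.fd = -1;       data.offset = 0;
+//   data.size = 4096;      data.align = 4096;
+//   data.uncached = false; data.flags = 0;     data.allocType = 0;
+//   if (ionalloc->alloc_buffer(data) == 0)
+//       ionalloc->free_buffer(data.base, data.size, data.offset, data.fd);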
+
+#endif /* GRALLOC_IONALLOC_H */
+
diff --git a/libgralloc/mapper.cpp b/libgralloc/mapper.cpp
new file mode 100755
index 0000000..c7ee7d4
--- /dev/null
+++ b/libgralloc/mapper.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+#include <errno.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <linux/ashmem.h>
+
+#include <cutils/log.h>
+#include <cutils/atomic.h>
+#include <cutils/ashmem.h>
+
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+#include <genlock.h>
+
+#include <linux/android_pmem.h>
+
+#include "gralloc_priv.h"
+#include "gr.h"
+#include "alloc_controller.h"
+#include "memalloc.h"
+
+using namespace gralloc;
+using android::sp;
+/*****************************************************************************/
+
+// Return the type of allocator -
+// these are used for mapping/unmapping
+static sp<IMemAlloc> getAllocator(int flags)
+{
+    sp<IMemAlloc> memalloc;
+    sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
+    memalloc = alloc_ctrl->getAllocator(flags);
+    return memalloc;
+}
+
+static int gralloc_map(gralloc_module_t const* module,
+        buffer_handle_t handle,
+        void** vaddr)
+{
+    private_handle_t* hnd = (private_handle_t*)handle;
+    void *mappedAddress;
+    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
+        !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
+        size_t size = hnd->size;
+        sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+        int err = memalloc->map_buffer(&mappedAddress, size,
+                hnd->offset, hnd->fd);
+        if(err) {
+            ALOGE("Could not mmap handle %p, fd=%d (%s)",
+                    handle, hnd->fd, strerror(errno));
+            hnd->base = 0;
+            return err; // map_buffer already returned a negative errno
+        }
+
+        if (mappedAddress == MAP_FAILED) {
+            ALOGE("Could not mmap handle %p, fd=%d (%s)",
+                    handle, hnd->fd, strerror(errno));
+            hnd->base = 0;
+            return -errno;
+        }
+        hnd->base = intptr_t(mappedAddress) + hnd->offset;
+        //ALOGD("gralloc_map() succeeded fd=%d, off=%d, size=%d, vaddr=%p",
+        //        hnd->fd, hnd->offset, hnd->size, mappedAddress);
+    }
+    *vaddr = (void*)hnd->base;
+    return 0;
+}
+
+static int gralloc_unmap(gralloc_module_t const* module,
+        buffer_handle_t handle)
+{
+    private_handle_t* hnd = (private_handle_t*)handle;
+    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
+        int err = -EINVAL;
+        void* base = (void*)hnd->base;
+        size_t size = hnd->size;
+        sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+        if(memalloc != NULL)
+            err = memalloc->unmap_buffer(base, size, hnd->offset);
+        if (err) {
+            ALOGE("Could not unmap memory at address %p", base);
+        }
+    }
+    hnd->base = 0;
+    return 0;
+}
+
+/*****************************************************************************/
+
+static pthread_mutex_t sMapLock = PTHREAD_MUTEX_INITIALIZER;
+
+/*****************************************************************************/
+
+int gralloc_register_buffer(gralloc_module_t const* module,
+        buffer_handle_t handle)
+{
+    if (private_handle_t::validate(handle) < 0)
+        return -EINVAL;
+
+    // In this implementation, we don't need to do anything here
+
+    /* NOTE: we need to initialize the buffer as not mapped/not locked
+     * because it shouldn't be either when this function is called the
+     * first time in a new process. Ideally these flags shouldn't be part of the
+     * handle, but instead maintained in the kernel or at least
+     * out-of-line
+     */
+
+    // if this handle was created in this process, then we keep it as is.
+    private_handle_t* hnd = (private_handle_t*)handle;
+    if (hnd->pid != getpid()) {
+        hnd->base = 0;
+        void *vaddr;
+        int err = gralloc_map(module, handle, &vaddr);
+        if (err) {
+            ALOGE("%s: gralloc_map failed", __FUNCTION__);
+            return err;
+        }
+
+        // Reset the genlock private fd flag in the handle
+        hnd->genlockPrivFd = -1;
+
+        // Check if there is a valid lock attached to the handle.
+        if (-1 == hnd->genlockHandle) {
+            ALOGE("%s: the lock is invalid.", __FUNCTION__);
+            gralloc_unmap(module, handle);
+            hnd->base = 0;
+            return -EINVAL;
+        }
+
+        // Attach the genlock handle
+        if (GENLOCK_NO_ERROR != genlock_attach_lock((native_handle_t *)handle)) {
+            ALOGE("%s: genlock_attach_lock failed", __FUNCTION__);
+            gralloc_unmap(module, handle);
+            hnd->base = 0;
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
+
+int gralloc_unregister_buffer(gralloc_module_t const* module,
+        buffer_handle_t handle)
+{
+    if (private_handle_t::validate(handle) < 0)
+        return -EINVAL;
+
+    /*
+     * If the buffer has been mapped during a lock operation, it's time
+     * to un-map it. It's an error to be here with a locked buffer.
+     * NOTE: the framebuffer is handled differently and is never unmapped.
+     */
+
+    private_handle_t* hnd = (private_handle_t*)handle;
+
+    // never unmap buffers that were created in this process
+    if (hnd->pid != getpid()) {
+        if (hnd->base != 0) {
+            gralloc_unmap(module, handle);
+        }
+        hnd->base = 0;
+        // Release the genlock
+        if (-1 != hnd->genlockHandle) {
+            return genlock_release_lock((native_handle_t *)handle);
+        } else {
+            ALOGE("%s: there was no genlock attached to this buffer", __FUNCTION__);
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
+
+int terminateBuffer(gralloc_module_t const* module,
+        private_handle_t* hnd)
+{
+    /*
+     * If the buffer has been mapped during a lock operation, it's time
+     * to un-map it. It's an error to be here with a locked buffer.
+     */
+
+    if (hnd->base != 0) {
+        // this buffer was mapped, unmap it now
+        if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
+                          private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP |
+                          private_handle_t::PRIV_FLAGS_USES_ASHMEM |
+                          private_handle_t::PRIV_FLAGS_USES_ION)) {
+            if (hnd->pid != getpid()) {
+                // ... unless it's a "master" pmem buffer, that is, a buffer
+                // mapped in the process in which it was allocated
+                // (see gralloc_alloc_buffer())
+                gralloc_unmap(module, hnd);
+            }
+        } else {
+            ALOGE("terminateBuffer: unmapping a non pmem/ashmem buffer flags = 0x%x", hnd->flags);
+            gralloc_unmap(module, hnd);
+        }
+    }
+
+    return 0;
+}
+
+int gralloc_lock(gralloc_module_t const* module,
+        buffer_handle_t handle, int usage,
+        int l, int t, int w, int h,
+        void** vaddr)
+{
+    if (private_handle_t::validate(handle) < 0)
+        return -EINVAL;
+
+    int err = 0;
+    private_handle_t* hnd = (private_handle_t*)handle;
+    if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
+        if (hnd->base == 0) {
+            // we need to map for real
+            pthread_mutex_t* const lock = &sMapLock;
+            pthread_mutex_lock(lock);
+            err = gralloc_map(module, handle, vaddr);
+            pthread_mutex_unlock(lock);
+        }
+        *vaddr = (void*)hnd->base;
+
+        // Lock the buffer for read/write operation as specified. Write lock
+        // has a higher priority over read lock.
+        int lockType = 0;
+        if (usage & GRALLOC_USAGE_SW_WRITE_MASK) {
+            lockType = GENLOCK_WRITE_LOCK;
+        } else if (usage & GRALLOC_USAGE_SW_READ_MASK) {
+            lockType = GENLOCK_READ_LOCK;
+        }
+
+        int timeout = GENLOCK_MAX_TIMEOUT;
+        if (GENLOCK_NO_ERROR != genlock_lock_buffer((native_handle_t *)handle,
+                                                   (genlock_lock_type)lockType,
+                                                   timeout)) {
+            ALOGE("%s: genlock_lock_buffer (lockType=0x%x) failed", __FUNCTION__,
+                lockType);
+            return -EINVAL;
+        } else {
+            // Mark this buffer as locked for SW read/write operation.
+            hnd->flags |= private_handle_t::PRIV_FLAGS_SW_LOCK;
+        }
+
+        if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) &&
+            !(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
+            // Mark the buffer to be flushed after cpu read/write
+            hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
+        }
+    }
+    return err;
+}
+
+int gralloc_unlock(gralloc_module_t const* module,
+        buffer_handle_t handle)
+{
+    if (private_handle_t::validate(handle) < 0)
+        return -EINVAL;
+
+    private_handle_t* hnd = (private_handle_t*)handle;
+
+    if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
+        int err;
+        sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+        err = memalloc->clean_buffer((void*)hnd->base,
+                hnd->size, hnd->offset, hnd->fd);
+        ALOGE_IF(err < 0, "cannot flush handle %p (offs=%x len=%x, flags = 0x%x) err=%s\n",
+                hnd, hnd->offset, hnd->size, hnd->flags, strerror(errno));
+        hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
+    }
+
+    if ((hnd->flags & private_handle_t::PRIV_FLAGS_SW_LOCK)) {
+        // Unlock the buffer.
+        if (GENLOCK_NO_ERROR != genlock_unlock_buffer((native_handle_t *)handle)) {
+            ALOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+            return -EINVAL;
+        } else
+            hnd->flags &= ~private_handle_t::PRIV_FLAGS_SW_LOCK;
+    }
+    return 0;
+}
+
+/*****************************************************************************/
+
+int gralloc_perform(struct gralloc_module_t const* module,
+        int operation, ... )
+{
+    int res = -EINVAL;
+    va_list args;
+    va_start(args, operation);
+    switch (operation) {
+        case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
+            {
+                int fd = va_arg(args, int);
+                size_t size = va_arg(args, size_t);
+                size_t offset = va_arg(args, size_t);
+                void* base = va_arg(args, void*);
+                int width = va_arg(args, int);
+                int height = va_arg(args, int);
+                int format = va_arg(args, int);
+
+                native_handle_t** handle = va_arg(args, native_handle_t**);
+                int memoryFlags = va_arg(args, int);
+                private_handle_t* hnd = (private_handle_t*)native_handle_create(
+                        private_handle_t::sNumFds, private_handle_t::sNumInts);
+                hnd->magic = private_handle_t::sMagic;
+                hnd->fd = fd;
+                unsigned int contigFlags = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+                                  GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
+                                  GRALLOC_USAGE_PRIVATE_SMI_HEAP;
+
+                if (memoryFlags & contigFlags) {
+                    // check if the buffer is a pmem buffer
+                    pmem_region region;
+                    if (ioctl(fd, PMEM_GET_SIZE, &region) < 0)
+                        hnd->flags =  private_handle_t::PRIV_FLAGS_USES_ION;
+                    else
+                        hnd->flags =  private_handle_t::PRIV_FLAGS_USES_PMEM |
+                                      private_handle_t::PRIV_FLAGS_DO_NOT_FLUSH;
+                } else {
+                    if (memoryFlags & GRALLOC_USAGE_PRIVATE_ION)
+                        hnd->flags =  private_handle_t::PRIV_FLAGS_USES_ION;
+                    else
+                        hnd->flags =  private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+                }
+
+                hnd->size = size;
+                hnd->offset = offset;
+                hnd->base = intptr_t(base) + offset;
+                hnd->gpuaddr = 0;
+                hnd->width = width;
+                hnd->height = height;
+                hnd->format = format;
+                *handle = (native_handle_t *)hnd;
+                res = 0;
+                break;
+
+            }
+        default:
+            break;
+    }
+    va_end(args);
+    return res;
+}
diff --git a/libgralloc/memalloc.h b/libgralloc/memalloc.h
new file mode 100644
index 0000000..13a54e7
--- /dev/null
+++ b/libgralloc/memalloc.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_MEMALLOC_H
+#define GRALLOC_MEMALLOC_H
+
+#include <stdlib.h>
+#include <utils/RefBase.h>
+
+namespace gralloc {
+
+    struct alloc_data {
+        void           *base;
+        int            fd;
+        int            offset;
+        size_t         size;
+        size_t         align;
+        unsigned int   pHandle;
+        bool           uncached;
+        unsigned int   flags;
+        int            allocType;
+    };
+
+    class IMemAlloc : public android::RefBase {
+
+        public:
+            // Allocate buffer - fill in the alloc_data
+            // structure and pass it in. Mapped address
+            // and fd are returned in the alloc_data struct
+            virtual int alloc_buffer(alloc_data& data) = 0;
+
+            // Free buffer
+            virtual int free_buffer(void *base, size_t size,
+                    int offset, int fd) = 0;
+
+            // Map buffer
+            virtual int map_buffer(void **pBase, size_t size,
+                    int offset, int fd) = 0;
+
+            // Unmap buffer
+            virtual int unmap_buffer(void *base, size_t size,
+                    int offset) = 0;
+
+            // Clean and invalidate
+            virtual int clean_buffer(void *base, size_t size,
+                    int offset, int fd) = 0;
+
+            // Destructor
+            virtual ~IMemAlloc() {};
+
+            enum {
+                FD_INIT = -1,
+            };
+
+    };
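+
+    // Illustrative cross-process mapping (a sketch, not normative): given
+    // size, offset and fd from an existing allocation, an IMemAlloc
+    // implementation (here a hypothetical sp<IMemAlloc> memalloc) maps
+    // and later unmaps the buffer:
+    //
+    //   void *base = 0;
+    //   memalloc->map_buffer(&base, size, offset, fd);
+    //   ...
+    //   memalloc->unmap_buffer(base, size, offset);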
+
+} // end gralloc namespace
+#endif // GRALLOC_MEMALLOC_H
diff --git a/libgralloc/pmem_bestfit_alloc.cpp b/libgralloc/pmem_bestfit_alloc.cpp
new file mode 100644
index 0000000..e3875e9
--- /dev/null
+++ b/libgralloc/pmem_bestfit_alloc.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/log.h>
+
+#include "pmem_bestfit_alloc.h"
+
+
+// align all the memory blocks on a cache-line boundary
+const int SimpleBestFitAllocator::kMemoryAlign = 32;
+
+SimpleBestFitAllocator::SimpleBestFitAllocator()
+    : mHeapSize(0)
+{
+}
+
+SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
+    : mHeapSize(0)
+{
+    setSize(size);
+}
+
+SimpleBestFitAllocator::~SimpleBestFitAllocator()
+{
+    while(!mList.isEmpty()) {
+        delete mList.remove(mList.head());
+    }
+}
+
+ssize_t SimpleBestFitAllocator::setSize(size_t size)
+{
+    Locker::Autolock _l(mLock);
+    if (mHeapSize != 0) return -EINVAL;
+    size_t pagesize = getpagesize();
+    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
+    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
+    mList.insertHead(node);
+    return size;
+}
+
+size_t SimpleBestFitAllocator::size() const
+{
+    return mHeapSize;
+}
+
+ssize_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
+{
+    Locker::Autolock _l(mLock);
+    if (mHeapSize == 0) return -EINVAL;
+    ssize_t offset = alloc(size, flags);
+    return offset;
+}
+
+ssize_t SimpleBestFitAllocator::deallocate(size_t offset)
+{
+    Locker::Autolock _l(mLock);
+    if (mHeapSize == 0) return -EINVAL;
+    chunk_t const * const freed = dealloc(offset);
+    if (freed) {
+        return 0;
+    }
+    return -ENOENT;
+}
+
+ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
+{
+    if (size == 0) {
+        return 0;
+    }
+    size = (size + kMemoryAlign-1) / kMemoryAlign;
+    chunk_t* free_chunk = 0;
+    chunk_t* cur = mList.head();
+
+    size_t pagesize = getpagesize();
+    while (cur) {
+        int extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
+
+        // best fit
+        if (cur->free && (cur->size >= (size+extra))) {
+            if ((!free_chunk) || (cur->size < free_chunk->size)) {
+                free_chunk = cur;
+            }
+            if (cur->size == size) {
+                break;
+            }
+        }
+        cur = cur->next;
+    }
+
+    if (free_chunk) {
+        const size_t free_size = free_chunk->size;
+        free_chunk->free = 0;
+        free_chunk->size = size;
+        if (free_size > size) {
+            int extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
+            if (extra) {
+                chunk_t* split = new chunk_t(free_chunk->start, extra);
+                free_chunk->start += extra;
+                mList.insertBefore(free_chunk, split);
+            }
+
+            ALOGE_IF(((free_chunk->start*kMemoryAlign)&(pagesize-1)),
+                    "page is not aligned!!!");
+
+            const ssize_t tail_free = free_size - (size+extra);
+            if (tail_free > 0) {
+                chunk_t* split = new chunk_t(
+                        free_chunk->start + free_chunk->size, tail_free);
+                mList.insertAfter(free_chunk, split);
+            }
+        }
+        return (free_chunk->start)*kMemoryAlign;
+    }
+    // We are out of PMEM. Dump PMEM stats for debugging and
+    // check for leaks or fragmentation.
+
+    ALOGD(" Out of PMEM. Dumping PMEM stats for debugging");
+    ALOGD(" ------------- PRINT PMEM STATS --------------");
+
+    cur = mList.head();
+    static uint32_t node_count;
+    static uint64_t allocated, free_space;
+
+    while (cur) {
+      LOGD (" Node %d -> Start Address : %u Size %u Free info %d",\
+              node_count++, cur->start, cur->size, cur->free);
+
+      // if cur-> free is 1 , the node is free
+      // calculate the total allocated and total free stats also
+
+      if (cur->free)
+         free_space += cur->size;
+      else
+         allocated += cur->size;
+      // read next node
+      cur = cur->next;
+    }
+    LOGD (" Total Allocated: %l Total Free: %l", allocated, free_space );
+
+    node_count = 0;
+    allocated = 0;
+    free_space = 0;
+    LOGD ("----------------------------------------------");
+    return -ENOMEM;
+}
+
+SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
+{
+    start = start / kMemoryAlign;
+    chunk_t* cur = mList.head();
+    while (cur) {
+        if (cur->start == start) {
+            LOG_FATAL_IF(cur->free,
+                "block at offset 0x%08lX of size 0x%08lX already freed",
+                cur->start*kMemoryAlign, cur->size*kMemoryAlign);
+
+            // merge freed blocks together
+            chunk_t* freed = cur;
+            cur->free = 1;
+            do {
+                chunk_t* const p = cur->prev;
+                chunk_t* const n = cur->next;
+                if (p && (p->free || !cur->size)) {
+                    freed = p;
+                    p->size += cur->size;
+                    mList.remove(cur);
+                    delete cur;
+                }
+                cur = n;
+            } while (cur && cur->free);
+
+            LOG_FATAL_IF(!freed->free,
+                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
+                freed->start * kMemoryAlign, freed->size * kMemoryAlign);
+
+            return freed;
+        }
+        cur = cur->next;
+    }
+    return 0;
+}
diff --git a/libgralloc/pmem_bestfit_alloc.h b/libgralloc/pmem_bestfit_alloc.h
new file mode 100644
index 0000000..2ea8452
--- /dev/null
+++ b/libgralloc/pmem_bestfit_alloc.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef GRALLOC_ALLOCATOR_H_
+#define GRALLOC_ALLOCATOR_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "gr.h"
+#include "pmemalloc.h"
+
+// ----------------------------------------------------------------------------
+
+/*
+ * A simple templatized doubly linked-list implementation
+ */
+template <typename NODE>
+class LinkedList
+{
+    NODE*  mFirst;
+    NODE*  mLast;
+
+public:
+                LinkedList() : mFirst(0), mLast(0) { }
+    bool        isEmpty() const { return mFirst == 0; }
+    NODE const* head() const { return mFirst; }
+    NODE*       head() { return mFirst; }
+    NODE const* tail() const { return mLast; }
+    NODE*       tail() { return mLast; }
+
+    void insertAfter(NODE* node, NODE* newNode) {
+        newNode->prev = node;
+        newNode->next = node->next;
+        if (node->next == 0) mLast = newNode;
+        else                 node->next->prev = newNode;
+        node->next = newNode;
+    }
+
+    void insertBefore(NODE* node, NODE* newNode) {
+         newNode->prev = node->prev;
+         newNode->next = node;
+         if (node->prev == 0)   mFirst = newNode;
+         else                   node->prev->next = newNode;
+         node->prev = newNode;
+    }
+
+    void insertHead(NODE* newNode) {
+        if (mFirst == 0) {
+            mFirst = mLast = newNode;
+            newNode->prev = newNode->next = 0;
+        } else {
+            newNode->prev = 0;
+            newNode->next = mFirst;
+            mFirst->prev = newNode;
+            mFirst = newNode;
+        }
+    }
+
+    void insertTail(NODE* newNode) {
+        if (mLast == 0) {
+            insertHead(newNode);
+        } else {
+            newNode->prev = mLast;
+            newNode->next = 0;
+            mLast->next = newNode;
+            mLast = newNode;
+        }
+    }
+
+    NODE* remove(NODE* node) {
+        if (node->prev == 0)    mFirst = node->next;
+        else                    node->prev->next = node->next;
+        if (node->next == 0)    mLast = node->prev;
+        else                    node->next->prev = node->prev;
+        return node;
+    }
+};
+
+class SimpleBestFitAllocator : public gralloc::PmemUserspaceAlloc::Allocator
+{
+public:
+
+    SimpleBestFitAllocator();
+    SimpleBestFitAllocator(size_t size);
+    virtual ~SimpleBestFitAllocator();
+
+    virtual ssize_t setSize(size_t size);
+
+    virtual ssize_t allocate(size_t size, uint32_t flags = 0);
+    virtual ssize_t deallocate(size_t offset);
+    virtual size_t  size() const;
+
+private:
+    struct chunk_t {
+        chunk_t(size_t start, size_t size)
+            : start(start), size(size), free(1), prev(0), next(0) {
+        }
+        size_t              start;
+        size_t              size : 28;
+        int                 free : 4;
+        mutable chunk_t*    prev;
+        mutable chunk_t*    next;
+    };
+
+    ssize_t  alloc(size_t size, uint32_t flags);
+    chunk_t* dealloc(size_t start);
+
+    static const int    kMemoryAlign;
+    mutable Locker      mLock;
+    LinkedList<chunk_t> mList;
+    size_t              mHeapSize;
+};
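+
+// Illustrative use (a sketch, not normative): carve up a 1 MiB region and
+// sub-allocate from it; returned values are byte offsets into the region.
+//
+//   SimpleBestFitAllocator allocator;
+//   allocator.setSize(1024 * 1024);
+//   ssize_t offset = allocator.allocate(4096);   // offset or -ENOMEM
+//   if (offset >= 0)
+//       allocator.deallocate(offset);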
+#endif /* GRALLOC_ALLOCATOR_H_ */
diff --git a/libgralloc/pmemalloc.cpp b/libgralloc/pmemalloc.cpp
new file mode 100644
index 0000000..ccbf127
--- /dev/null
+++ b/libgralloc/pmemalloc.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdint.h>     // intptr_t casts below
+#include <string.h>     // memset(), strerror()
+#include <sys/mman.h>
+#include <sys/ioctl.h>  // pmem ioctls below
+#include <stdlib.h>
+#include <cutils/log.h>
+#include <errno.h>
+#include <linux/android_pmem.h>
+#include "gralloc_priv.h"
+#include "pmemalloc.h"
+#include "pmem_bestfit_alloc.h"
+
+using namespace gralloc;
+using android::sp;
+
+// Common functions shared by the userspace and kernel allocators
+static int getPmemTotalSize(int fd, size_t* size)
+{
+    //XXX: 7x27
+    int err = 0;
+    pmem_region region;
+    if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region)) {
+        err = -errno;
+    } else {
+        *size = region.len;
+    }
+    return err;
+}
+
+static int getOpenFlags(bool uncached)
+{
+    if(uncached)
+        return O_RDWR | O_SYNC;
+    else
+        return O_RDWR;
+}
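+
+// By pmem driver convention, opening the device with O_SYNC is what requests
+// an uncached mapping for the buffer.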
+
+static int connectPmem(int fd, int master_fd) {
+    if (ioctl(fd, PMEM_CONNECT, master_fd))
+        return -errno;
+    return 0;
+}
+
+static int mapSubRegion(int fd, int offset, size_t size) {
+    struct pmem_region sub = { offset, size };
+    if (ioctl(fd, PMEM_MAP, &sub))
+        return -errno;
+    return 0;
+}
+
+static int unmapSubRegion(int fd, int offset, size_t size) {
+    struct pmem_region sub = { offset, size };
+    if (ioctl(fd, PMEM_UNMAP, &sub))
+        return -errno;
+    return 0;
+}
+
+static int alignPmem(int fd, size_t size, int align) {
+    struct pmem_allocation allocation;
+    allocation.size = size;
+    allocation.align = align;
+    if (ioctl(fd, PMEM_ALLOCATE_ALIGNED, &allocation))
+        return -errno;
+    return 0;
+}
+
+static int cleanPmem(void *base, size_t size, int offset, int fd) {
+    struct pmem_addr pmem_addr;
+    pmem_addr.vaddr = (unsigned long) base;
+    pmem_addr.offset = offset;
+    pmem_addr.length = size;
+    if (ioctl(fd, PMEM_CLEAN_INV_CACHES, &pmem_addr))
+        return -errno;
+    return 0;
+}
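+
+// PMEM_CLEAN_INV_CACHES writes back and invalidates the CPU caches for the
+// given range so that devices reading pmem directly observe current data.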
+
+//-------------- PmemUserspaceAlloc-----------------------//
+PmemUserspaceAlloc::PmemUserspaceAlloc()
+{
+    mPmemDev = DEVICE_PMEM;
+    mMasterFd = FD_INIT;
+    mMasterBase = NULL;
+    mAllocator = new SimpleBestFitAllocator();
+    pthread_mutex_init(&mLock, NULL);
+}
+
+PmemUserspaceAlloc::~PmemUserspaceAlloc()
+{
+}
+
+int PmemUserspaceAlloc::init_pmem_area_locked()
+{
+    ALOGD("%s: Opening master pmem FD", __FUNCTION__);
+    int err = 0;
+    int fd = open(mPmemDev, O_RDWR, 0);
+    if (fd >= 0) {
+        size_t size = 0;
+        err = getPmemTotalSize(fd, &size);
+        ALOGD("%s: Total pmem size: %d", __FUNCTION__, size);
+        if (err < 0) {
+            ALOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", mPmemDev,
+                    err);
+            size = 8<<20;   // 8 MiB
+        }
+        mAllocator->setSize(size);
+
+        void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
+                0);
+        if (base == MAP_FAILED) {
+            err = -errno;
+            ALOGE("%s: Failed to map pmem master fd: %s", mPmemDev,
+                    strerror(errno));
+            base = 0;
+            close(fd);
+            fd = -1;
+        } else {
+            mMasterFd = fd;
+            mMasterBase = base;
+        }
+    } else {
+        err = -errno;
+        ALOGE("%s: Failed to open pmem device: %s", mPmemDev,
+                strerror(errno));
+    }
+    return err;
+}
+
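+// mMasterFd does triple duty: FD_INIT means pmem has never been probed,
+// a negative value caches a failed probe, and a valid fd means the master
+// heap is mapped and ready.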
+int PmemUserspaceAlloc::init_pmem_area()
+{
+    pthread_mutex_lock(&mLock);
+    int err = mMasterFd;
+    if (err == FD_INIT) {
+        // first time, try to initialize pmem
+        ALOGD("%s: Initializing pmem area", __FUNCTION__);
+        err = init_pmem_area_locked();
+        if (err) {
+            ALOGE("%s: failed to initialize pmem area", mPmemDev);
+            mMasterFd = err;
+        }
+    } else if (err < 0) {
+        // pmem couldn't be initialized, never use it
+    } else {
+        // pmem OK
+        err = 0;
+    }
+    pthread_mutex_unlock(&mLock);
+    return err;
+}
+
+int PmemUserspaceAlloc::alloc_buffer(alloc_data& data)
+{
+    int err = init_pmem_area();
+    if (err == 0) {
+        void* base = mMasterBase;
+        size_t size = data.size;
+        int offset = mAllocator->allocate(size);
+        if (offset < 0) {
+            // no more pmem memory
+            ALOGE("%s: No more pmem available", mPmemDev);
+            err = -ENOMEM;
+        } else {
+            int openFlags = getOpenFlags(data.uncached);
+
+            // now create the "sub-heap"
+            int fd = open(mPmemDev, openFlags, 0);
+            err = (fd < 0) ? -errno : 0;
+
+            // and connect to it
+            if (err == 0)
+                err = connectPmem(fd, mMasterFd);
+
+            // and make it available to the client process
+            if (err == 0)
+                err = mapSubRegion(fd, offset, size);
+
+            if (err < 0) {
+                ALOGE("%s: Failed to initialize pmem sub-heap: %d", mPmemDev,
+                        err);
+                close(fd);
+                mAllocator->deallocate(offset);
+                fd = -1;
+            } else {
+                ALOGD("%s: Allocated buffer base:%p size:%zu offset:%d fd:%d",
+                        mPmemDev, base, size, offset, fd);
+                memset((char*)base + offset, 0, size);
+                // Clean and invalidate the CPU caches so the zero-fill
+                // actually reaches pmem before the buffer is handed out
+                err = clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
+                if (err < 0) {
+                    ALOGE("cleanPmem failed: (%s)", strerror(errno));
+                }
+                cacheflush(intptr_t(base) + offset, intptr_t(base) + offset + size, 0);
+                data.base = base;
+                data.offset = offset;
+                data.fd = fd;
+            }
+        }
+    }
+    return err;
+}
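+
+// Illustrative call sequence (hypothetical caller; the alloc_data fields are
+// the ones used above):
+//   alloc_data data;
+//   data.size = bufferSizeInBytes;
+//   data.uncached = true;            // sub-heap opened with O_SYNC
+//   if (alloc->alloc_buffer(data) == 0) {
+//       // CPU address: (char*)data.base + data.offset
+//       // data.fd: per-buffer pmem fd, shareable across processes
+//   }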
+
+int PmemUserspaceAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+    ALOGD("%s: Freeing buffer base:%p size:%d offset:%d fd:%d",
+            mPmemDev, base, size, offset, fd);
+    int err = 0;
+    if (fd >= 0) {
+        err = unmapSubRegion(fd, offset, size);
+        ALOGE_IF(err < 0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%d, "
+                "sub.size=%zu", strerror(errno), fd, offset, size);
+        if (err == 0) {
+            // we can't deallocate the memory in case of UNMAP failure
+            // because it would give that process access to someone else's
+            // surfaces, which would be a security breach.
+            mAllocator->deallocate(offset);
+        }
+        close(fd);
+    }
+    return err;
+}
+
+int PmemUserspaceAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+    int err = 0;
+    // The sub-heap must be mapped from offset 0, so extend the length to
+    // cover the buffer; callers address it at *pBase + offset.
+    size += offset;
+    void *base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    *pBase = base;
+    if (base == MAP_FAILED) {
+        err = -errno;
+        ALOGE("%s: Failed to map buffer size:%zu offset:%d fd:%d Error: %s",
+                mPmemDev, size, offset, fd, strerror(errno));
+    } else {
+        ALOGD("%s: Mapped buffer base:%p size:%zu offset:%d fd:%d",
+                mPmemDev, base, size, offset, fd);
+    }
+    return err;
+}
+
+int PmemUserspaceAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    int err = 0;
+    // pmem hack: the caller's base points past the sub-heap offset, but the
+    // region was mapped from offset 0, so rewind before unmapping.
+    base = (void*)(intptr_t(base) - offset);
+    size += offset;
+    ALOGD("%s: Unmapping buffer base:%p size:%zu offset:%d",
+            mPmemDev, base, size, offset);
+    if (munmap(base, size) < 0) {
+        err = -errno;
+        ALOGE("%s: Failed to unmap memory at %p: %s",
+                mPmemDev, base, strerror(errno));
+    }
+    return err;
+
+int PmemUserspaceAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+    return cleanPmem(base, size, offset, fd);
+}
+
+
+//-------------- PmemKernelAlloc-----------------------//
+
+PmemKernelAlloc::PmemKernelAlloc(const char* pmemdev) :
+    mPmemDev(pmemdev)
+{
+}
+
+PmemKernelAlloc::~PmemKernelAlloc()
+{
+}
+
+int PmemKernelAlloc::alloc_buffer(alloc_data& data)
+{
+    int err, offset = 0;
+    int openFlags = getOpenFlags(data.uncached);
+    int size = data.size;
+
+    int fd = open(mPmemDev, openFlags, 0);
+    if (fd < 0) {
+        err = -errno;
+        ALOGE("%s: Error opening %s", __FUNCTION__, mPmemDev);
+        return err;
+    }
+
+    if (data.align == 8192) {
+        // Tile format buffers need physical alignment to 8K
+        // Default page size does not need this ioctl
+        err = alignPmem(fd, size, 8192);
+        if (err < 0) {
+            ALOGE("alignPmem failed");
+        }
+    }
+    void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (base == MAP_FAILED) {
+        err = -errno;
+        ALOGE("%s: failed to map pmem fd: %s", mPmemDev,
+                strerror(errno));
+        close(fd);
+        return err;
+    }
+    memset(base, 0, size);
+    // Flush the zero-fill out of the CPU caches before handing the buffer over
+    clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
+    data.base = base;
+    data.offset = 0;
+    data.fd = fd;
+    ALOGD("%s: Allocated buffer base:%p size:%d fd:%d",
+                            mPmemDev, base, size, fd);
+    return 0;
+}
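+
+// Note the contrast with the userspace path: each buffer owns its fd and its
+// entire mapping, so data.offset is always 0 and nothing is sub-allocated.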
+
+int PmemKernelAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+    ALOGD("%s: Freeing buffer base:%p size:%d fd:%d",
+            mPmemDev, base, size, fd);
+
+    int err =  unmap_buffer(base, size, offset);
+    close(fd);
+    return err;
+}
+
+int PmemKernelAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+    int err = 0;
+    void *base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    *pBase = base;
+    if (base == MAP_FAILED) {
+        err = -errno;
+        ALOGE("%s: Failed to map memory in the client: %s",
+                mPmemDev, strerror(errno));
+    } else {
+        ALOGD("%s: Mapped buffer base:%p size:%zu fd:%d",
+                mPmemDev, base, size, fd);
+    }
+    return err;
+}
+
+int PmemKernelAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    int err = 0;
+    if (munmap(base, size)) {
+        err = -errno;
+        ALOGW("%s: Error unmapping memory at %p: %s",
+                mPmemDev, base, strerror(errno));
+    }
+    return err;
+}
+
+int PmemKernelAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+    return cleanPmem(base, size, offset, fd);
+}
+
diff --git a/libgralloc/pmemalloc.h b/libgralloc/pmemalloc.h
new file mode 100644
index 0000000..4aed0b1
--- /dev/null
+++ b/libgralloc/pmemalloc.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_PMEMALLOC_H
+#define GRALLOC_PMEMALLOC_H
+
+#include <linux/ion.h>
+#include <pthread.h>       // pthread_mutex_t member below
+#include <utils/RefBase.h>
+#include "memalloc.h"
+
+namespace gralloc {
+    class PmemUserspaceAlloc : public IMemAlloc  {
+
+        public:
+            class Allocator: public android::RefBase {
+                public:
+                    virtual ~Allocator() {}
+                    virtual ssize_t setSize(size_t size) = 0;
+                    virtual size_t  size() const = 0;
+                    virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
+                    virtual ssize_t deallocate(size_t offset) = 0;
+            };
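+            // SimpleBestFitAllocator (pmem_bestfit_alloc.h) is the concrete
+            // Allocator installed by the default constructor.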
+
+            virtual int alloc_buffer(alloc_data& data);
+
+            virtual int free_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            virtual int map_buffer(void **pBase, size_t size,
+                    int offset, int fd);
+
+            virtual int unmap_buffer(void *base, size_t size,
+                    int offset);
+
+            virtual int clean_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            PmemUserspaceAlloc();
+
+            ~PmemUserspaceAlloc();
+
+        private:
+            int mMasterFd;
+            void* mMasterBase;
+            const char* mPmemDev;
+            android::sp<Allocator> mAllocator;
+            pthread_mutex_t mLock;
+            int init_pmem_area();
+            int init_pmem_area_locked();
+
+    };
+
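+    // Unlike PmemUserspaceAlloc, which carves sub-allocations out of one
+    // mmap'd master heap, PmemKernelAlloc opens a fresh fd on its device for
+    // every buffer and lets the kernel driver allocate, so buffers always
+    // start at offset 0.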
+    class PmemKernelAlloc : public IMemAlloc  {
+
+        public:
+            virtual int alloc_buffer(alloc_data& data);
+
+            virtual int free_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            virtual int map_buffer(void **pBase, size_t size,
+                    int offset, int fd);
+
+            virtual int unmap_buffer(void *base, size_t size,
+                    int offset);
+
+            virtual int clean_buffer(void *base, size_t size,
+                    int offset, int fd);
+
+            PmemKernelAlloc(const char* device);
+
+            ~PmemKernelAlloc();
+        private:
+            const char* mPmemDev;
+    };
+
+}
+#endif /* GRALLOC_PMEMALLOC_H */