initial GL libraries for msm8960
Change-Id: I16451c70a079894ac326d3564d96f1fbafcd4f1b
Signed-off-by: Iliyan Malchev <malchev@google.com>
diff --git a/Android.mk b/Android.mk
new file mode 100644
index 0000000..127a6de
--- /dev/null
+++ b/Android.mk
@@ -0,0 +1,11 @@
+#Enables the listed display HAL modules
+
+#Libs to be built for all targets (including SDK)
+display-hals := libqcomui
+
+#Libs for QCOM targets only — NOTE(review): guard below is commented out, so these currently build unconditionally
+#ifeq ($(call is-vendor-board-platform,QCOM),true)
+display-hals += libgralloc libgenlock libcopybit
+#endif
+
+include $(call all-named-subdir-makefiles,$(display-hals))
diff --git a/libgenlock/Android.mk b/libgenlock/Android.mk
new file mode 100644
index 0000000..740d6ce
--- /dev/null
+++ b/libgenlock/Android.mk
@@ -0,0 +1,15 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
+LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INCLUDES :=
+LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
+LOCAL_ADDITIONAL_DEPENDENCIES :=
+LOCAL_SRC_FILES := genlock.cpp
+LOCAL_CFLAGS:= -DLOG_TAG=\"libgenlock\"
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := libgenlock
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/libgenlock/genlock.cpp b/libgenlock/genlock.cpp
new file mode 100644
index 0000000..5d5536b
--- /dev/null
+++ b/libgenlock/genlock.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cutils/log.h>
+#include <cutils/native_handle.h>
+#include <gralloc_priv.h>
+#include <linux/genlock.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+
+#include "genlock.h"
+
+#define GENLOCK_DEVICE "/dev/genlock"
+
+#ifndef USE_GENLOCK
+#define USE_GENLOCK
+#endif
+
+namespace {
+ /* Internal function to map the userspace locks to the kernel lock types */
+ int get_kernel_lock_type(genlock_lock_type lockType)
+ { // Maps userspace genlock_lock_type bit flags to a kernel GENLOCK_* op code; returns -1 for invalid input.
+ int kLockType = 0;
+ // If the user sets both a read and write lock, higher preference is
+ // given to the write lock.
+ if (lockType & GENLOCK_WRITE_LOCK) {
+ kLockType = GENLOCK_WRLOCK;
+ } else if (lockType & GENLOCK_READ_LOCK) {
+ kLockType = GENLOCK_RDLOCK;
+ } else { // Neither the read nor the write bit is set: reject.
+ ALOGE("%s: invalid lockType (lockType = %d)", __FUNCTION__, lockType);
+ return -1;
+ }
+ return kLockType;
+ }
+
+ /* Internal function to perform the actual lock/unlock operations */
+ genlock_status_t perform_lock_unlock_operation(native_handle_t *buffer_handle,
+ int lockType, int timeout)
+ { // Issues GENLOCK_IOC_LOCK on the handle's private genlock fd; lockType is a kernel op code (GENLOCK_WRLOCK/RDLOCK/UNLOCK).
+ if (private_handle_t::validate(buffer_handle)) { // Non-zero: not a valid gralloc private_handle_t.
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) { // Unsynchronized buffers skip genlock entirely (treated as success).
+ if (hnd->genlockPrivFd < 0) { // Lock was never created (genlock_create_lock) or attached (genlock_attach_lock).
+ ALOGE("%s: the lock has not been created, or has not been attached",
+ __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ genlock_lock lock;
+ lock.op = lockType;
+ lock.flags = 0;
+ lock.timeout = timeout; // Timeout in ms; behavior of 0 is kernel-defined — callers warn when passing 0.
+ lock.fd = hnd->genlockHandle; // The exported lock handle shared across processes.
+
+ if (ioctl(hnd->genlockPrivFd, GENLOCK_IOC_LOCK, &lock)) { // NOTE(review): format string reads "lockType0x%x" (missing '='); relies on transitive <errno.h>/<string.h> includes.
+ ALOGE("%s: GENLOCK_IOC_LOCK failed (lockType0x%x, err=%s fd=%d)", __FUNCTION__,
+ lockType, strerror(errno), hnd->fd);
+ if (ETIMEDOUT == errno) // Report timeouts distinctly so callers can retry.
+ return GENLOCK_TIMEDOUT;
+
+ return GENLOCK_FAILURE;
+ }
+ }
+ return GENLOCK_NO_ERROR; // Also returned for unsynchronized buffers (no-op path).
+ }
+
+ /* Internal function to close the fd and release the handle */
+ void close_genlock_fd_and_handle(int& fd, int& handle)
+ { // Closes both descriptors (if open) and resets them to -1 so callers cannot reuse stale fds.
+ if (fd >=0 ) {
+ close(fd);
+ fd = -1;
+ }
+
+ if (handle >= 0) { // The exported lock handle is itself an fd and must also be closed.
+ close(handle);
+ handle = -1;
+ }
+ }
+
+}
+/*
+ * Create a genlock lock. The genlock lock file descriptor and the lock
+ * handle are stored in the buffer_handle.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_create_lock(native_handle_t *buffer_handle)
+{ // Creates a new kernel genlock for this buffer and stores fd + exported handle in it.
+ genlock_status_t ret = GENLOCK_NO_ERROR;
+ if (private_handle_t::validate(buffer_handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+#ifdef USE_GENLOCK
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
+ // Open the genlock device
+ int fd = open(GENLOCK_DEVICE, O_RDWR);
+ if (fd < 0) {
+ ALOGE("%s: open genlock device failed (err=%s)", __FUNCTION__,
+ strerror(errno));
+ return GENLOCK_FAILURE;
+ }
+
+ // Create a new lock
+ genlock_lock lock; // NOTE(review): lock.fd stays uninitialized until GENLOCK_IOC_EXPORT fills it in.
+ if (ioctl(fd, GENLOCK_IOC_NEW, NULL)) {
+ ALOGE("%s: GENLOCK_IOC_NEW failed (error=%s)", __FUNCTION__,
+ strerror(errno));
+ close_genlock_fd_and_handle(fd, lock.fd); // NOTE(review): passes uninitialized lock.fd — may close an arbitrary fd; initialize lock.fd to -1 first.
+ ret = GENLOCK_FAILURE;
+ }
+
+ // Export the lock for other processes to be able to use it.
+ if (GENLOCK_FAILURE != ret) {
+ if (ioctl(fd, GENLOCK_IOC_EXPORT, &lock)) { // On success, lock.fd receives the shareable handle.
+ ALOGE("%s: GENLOCK_IOC_EXPORT failed (error=%s)", __FUNCTION__,
+ strerror(errno));
+ close_genlock_fd_and_handle(fd, lock.fd);
+ ret = GENLOCK_FAILURE;
+ }
+ }
+
+ // Store the lock params in the handle.
+ hnd->genlockPrivFd = fd; // NOTE(review): stored even when ret == GENLOCK_FAILURE (fd is -1 after cleanup above).
+ hnd->genlockHandle = lock.fd;
+ } else {
+ hnd->genlockHandle = 0; // Unsynchronized buffer: no lock needed.
+ }
+#else
+ hnd->genlockHandle = 0; // Genlock compiled out: mark handle as lock-less.
+#endif
+ return ret;
+}
+
+
+/*
+ * Release a genlock lock associated with the handle.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_release_lock(native_handle_t *buffer_handle)
+{ // Releases the genlock owned by this handle by closing its fd and exported handle.
+ genlock_status_t ret = GENLOCK_NO_ERROR;
+#ifdef USE_GENLOCK
+ if (private_handle_t::validate(buffer_handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) { // Unsynchronized buffers never held a lock; nothing to release.
+ if (hnd->genlockPrivFd < 0) { // No lock was ever created/attached on this handle.
+ ALOGE("%s: the lock is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ // Close the fd and reset the parameters.
+ close_genlock_fd_and_handle(hnd->genlockPrivFd, hnd->genlockHandle);
+ }
+#endif
+ return ret;
+}
+
+
+/*
+ * Attach a lock to the buffer handle passed via an IPC.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_attach_lock(native_handle_t *buffer_handle)
+{ // Attaches this process to a lock created elsewhere; genlockHandle must already hold the exported handle received over IPC.
+ genlock_status_t ret = GENLOCK_NO_ERROR;
+#ifdef USE_GENLOCK
+ if (private_handle_t::validate(buffer_handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) { // Unsynchronized buffers need no lock.
+ // Open the genlock device
+ int fd = open(GENLOCK_DEVICE, O_RDWR);
+ if (fd < 0) {
+ ALOGE("%s: open genlock device failed (err=%s)", __FUNCTION__,
+ strerror(errno));
+ return GENLOCK_FAILURE;
+ }
+
+ // Attach the local handle to an existing lock
+ genlock_lock lock;
+ lock.fd = hnd->genlockHandle; // Exported handle from the creating process.
+ if (ioctl(fd, GENLOCK_IOC_ATTACH, &lock)) {
+ ALOGE("%s: GENLOCK_IOC_ATTACH failed (err=%s)", __FUNCTION__,
+ strerror(errno));
+ close_genlock_fd_and_handle(fd, lock.fd);
+ ret = GENLOCK_FAILURE;
+ }
+
+ // Store the relevant information in the handle
+ hnd->genlockPrivFd = fd; // NOTE(review): on ATTACH failure fd is -1 here, marking the lock invalid for later ops.
+ }
+#endif
+ return ret;
+}
+
+/*
+ * Lock the buffer specified by the buffer handle. The lock held by the buffer
+ * is specified by the lockType. This function will block if a write lock is
+ * requested on the buffer which has previously been locked for a read or write
+ * operation. A buffer can be locked by multiple clients for read. An optional
+ * timeout value can be specified. By default, there is no timeout.
+ *
+ * @param: handle of the buffer
+ * @param: type of lock to be acquired by the buffer.
+ * @param: timeout value in ms. GENLOCK_MAX_TIMEOUT is the maximum timeout value.
+ * @return error status.
+ */
+genlock_status_t genlock_lock_buffer(native_handle_t *buffer_handle,
+ genlock_lock_type_t lockType,
+ int timeout)
+{ // Acquires a read or write genlock on the buffer; may block until the lock is granted or the timeout expires.
+ genlock_status_t ret = GENLOCK_NO_ERROR;
+#ifdef USE_GENLOCK
+ // Translate the locktype
+ int kLockType = get_kernel_lock_type(lockType);
+ if (-1 == kLockType) { // lockType had neither GENLOCK_READ_LOCK nor GENLOCK_WRITE_LOCK set.
+ ALOGE("%s: invalid lockType", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ if (0 == timeout) { // Warn only; the call still proceeds with timeout 0.
+ ALOGW("%s: trying to lock a buffer with timeout = 0", __FUNCTION__);
+ }
+ // Call the private function to perform the lock operation specified.
+ ret = perform_lock_unlock_operation(buffer_handle, kLockType, timeout);
+#endif
+ return ret;
+}
+
+
+/*
+ * Unlocks a buffer that has previously been locked by the client.
+ *
+ * @param: handle of the buffer to be unlocked.
+ * @return: error status.
+*/
+genlock_status_t genlock_unlock_buffer(native_handle_t *buffer_handle)
+{ // Releases a previously acquired read/write lock via the GENLOCK_UNLOCK op.
+ genlock_status_t ret = GENLOCK_NO_ERROR;
+#ifdef USE_GENLOCK
+ // Do the unlock operation by setting the unlock flag. Timeout is always
+ // 0 in this case.
+ ret = perform_lock_unlock_operation(buffer_handle, GENLOCK_UNLOCK, 0);
+#endif
+ return ret;
+}
+
+/*
+ * Blocks the calling process until the lock held on the handle is unlocked.
+ *
+ * @param: handle of the buffer
+ * @param: timeout value for the wait.
+ * return: error status.
+ */
+genlock_status_t genlock_wait(native_handle_t *buffer_handle, int timeout) { // Blocks until the lock on the buffer is released, or the timeout expires.
+#ifdef USE_GENLOCK
+ if (private_handle_t::validate(buffer_handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) { // Unsynchronized buffers: nothing to wait on.
+ if (hnd->genlockPrivFd < 0) { // Lock never created/attached on this handle.
+ ALOGE("%s: the lock is invalid", __FUNCTION__);
+ return GENLOCK_FAILURE;
+ }
+
+ if (0 == timeout) // Warn only; the wait still proceeds.
+ ALOGW("%s: timeout = 0", __FUNCTION__);
+
+ genlock_lock lock; // NOTE(review): lock.op/lock.flags left unset — presumably ignored by GENLOCK_IOC_WAIT; confirm against the driver.
+ lock.fd = hnd->genlockHandle;
+ lock.timeout = timeout;
+ if (ioctl(hnd->genlockPrivFd, GENLOCK_IOC_WAIT, &lock)) {
+ ALOGE("%s: GENLOCK_IOC_WAIT failed (err=%s)", __FUNCTION__, strerror(errno));
+ return GENLOCK_FAILURE;
+ }
+ }
+#endif
+ return GENLOCK_NO_ERROR;
+}
diff --git a/libgenlock/genlock.h b/libgenlock/genlock.h
new file mode 100644
index 0000000..b394410
--- /dev/null
+++ b/libgenlock/genlock.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INCLUDE_LIBGENLOCK
+#define INCLUDE_LIBGENLOCK
+
+#include <cutils/native_handle.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Genlock lock types */
+typedef enum genlock_lock_type{
+ GENLOCK_READ_LOCK = 1<<0, // Read lock: multiple clients may hold it concurrently.
+ GENLOCK_WRITE_LOCK = 1<<1, // Write lock: takes precedence when both bits are set.
+}genlock_lock_type_t;
+
+/* Genlock return values */
+typedef enum genlock_status{
+ GENLOCK_NO_ERROR = 0, // Operation succeeded (or was a no-op for unsynchronized buffers).
+ GENLOCK_TIMEDOUT, // Lock attempt exceeded its timeout.
+ GENLOCK_FAILURE, // Any other error: bad handle, device open failure, ioctl failure.
+} genlock_status_t;
+
+/* Genlock defines */
+#define GENLOCK_MAX_TIMEOUT 1000 // Max 1s timeout
+
+/*
+ * Create a genlock lock. The genlock lock file descriptor and the lock
+ * handle are stored in the buffer_handle.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_create_lock(native_handle_t *buffer_handle);
+
+
+/*
+ * Release a genlock lock associated with the handle.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_release_lock(native_handle_t *buffer_handle);
+
+/*
+ * Attach a lock to the buffer handle passed via an IPC.
+ *
+ * @param: handle of the buffer
+ * @return error status.
+ */
+genlock_status_t genlock_attach_lock(native_handle_t *buffer_handle);
+
+/*
+ * Lock the buffer specified by the buffer handle. The lock held by the buffer
+ * is specified by the lockType. This function will block if a write lock is
+ * requested on the buffer which has previously been locked for a read or write
+ * operation. A buffer can be locked by multiple clients for read. An optional
+ * timeout value can be specified. By default, there is no timeout.
+ *
+ * @param: handle of the buffer
+ * @param: type of lock to be acquired by the buffer.
+ * @param: timeout value in ms. GENLOCK_MAX_TIMEOUT is the maximum timeout value.
+ * @return error status.
+ */
+genlock_status_t genlock_lock_buffer(native_handle_t *buffer_handle,
+ genlock_lock_type_t lockType,
+ int timeout);
+
+/*
+ * Unlocks a buffer that has previously been locked by the client.
+ *
+ * @param: handle of the buffer to be unlocked.
+ * @return: error status.
+*/
+genlock_status_t genlock_unlock_buffer(native_handle_t *buffer_handle);
+
+/*
+ * Blocks the calling process until the lock held on the handle is unlocked.
+ *
+ * @param: handle of the buffer
+ * @param: timeout value for the wait.
+ * return: error status.
+ */
+genlock_status_t genlock_wait(native_handle_t *buffer_handle, int timeout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libgralloc/Android.mk b/libgralloc/Android.mk
new file mode 100644
index 0000000..5377d86
--- /dev/null
+++ b/libgralloc/Android.mk
@@ -0,0 +1,75 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use this flag until pmem/ashmem is implemented in the new gralloc
+LOCAL_PATH := $(call my-dir)
+
+# HAL module implementation, not prelinked and stored in
+# hw/<OVERLAY_HARDWARE_MODULE_ID>.<ro.product.board>.so
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
+LOCAL_SHARED_LIBRARIES := liblog libcutils libGLESv1_CM libutils libmemalloc libQcomUI
+LOCAL_SHARED_LIBRARIES += libgenlock
+
+LOCAL_C_INCLUDES += hardware/qcom/display/libgenlock
+LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
+LOCAL_ADDITIONAL_DEPENDENCIES +=
+LOCAL_SRC_FILES := framebuffer.cpp \
+ gpu.cpp \
+ gralloc.cpp \
+ mapper.cpp
+
+LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\" -DHOST -DDEBUG_CALC_FPS
+
+ifeq ($(call is-board-platform,msm7627_surf msm7627_6x),true)
+ LOCAL_CFLAGS += -DTARGET_MSM7x27
+endif
+
+ifeq ($(TARGET_HAVE_HDMI_OUT),true)
+ LOCAL_CFLAGS += -DHDMI_DUAL_DISPLAY
+ LOCAL_C_INCLUDES += hardware/qcom/display/liboverlay
+ LOCAL_SHARED_LIBRARIES += liboverlay
+endif
+
+ifeq ($(TARGET_USES_SF_BYPASS),true)
+ LOCAL_CFLAGS += -DSF_BYPASS
+endif
+
+ifeq ($(TARGET_GRALLOC_USES_ASHMEM),true)
+ LOCAL_CFLAGS += -DUSE_ASHMEM
+endif
+
+include $(BUILD_SHARED_LIBRARY)
+
+#MemAlloc Library
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
+LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
+LOCAL_ADDITIONAL_DEPENDENCIES +=
+LOCAL_SHARED_LIBRARIES := liblog libcutils libutils
+LOCAL_SRC_FILES := ionalloc.cpp \
+ alloc_controller.cpp
+LOCAL_CFLAGS:= -DLOG_TAG=\"memalloc\"
+
+ifeq ($(TARGET_USES_ION),true)
+ LOCAL_CFLAGS += -DUSE_ION
+endif
+
+LOCAL_MODULE := libmemalloc
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
diff --git a/libgralloc/MODULE_LICENSE_APACHE2 b/libgralloc/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libgralloc/MODULE_LICENSE_APACHE2
diff --git a/libgralloc/NOTICE b/libgralloc/NOTICE
new file mode 100644
index 0000000..3237da6
--- /dev/null
+++ b/libgralloc/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2008-2009, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/libgralloc/alloc_controller.cpp b/libgralloc/alloc_controller.cpp
new file mode 100644
index 0000000..47cdc68
--- /dev/null
+++ b/libgralloc/alloc_controller.cpp
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cutils/log.h>
+#include <utils/RefBase.h>
+#include <fcntl.h>
+#include "gralloc_priv.h"
+#include "alloc_controller.h"
+#include "memalloc.h"
+#include "ionalloc.h"
+#include "ashmemalloc.h"
+#include "gr.h"
+
+using namespace gralloc;
+using android::sp;
+
+const int GRALLOC_HEAP_MASK = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+ GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
+ GRALLOC_USAGE_PRIVATE_SMI_HEAP |
+ GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP |
+ GRALLOC_USAGE_PRIVATE_IOMMU_HEAP |
+ GRALLOC_USAGE_PRIVATE_MM_HEAP |
+ GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP |
+ GRALLOC_USAGE_PRIVATE_CAMERA_HEAP;
+
+
+//Common functions
+
+// Decide whether a failed allocation may be retried from the system heap.
+// Fallback is disallowed when:
+// 1. Composition type is MDP
+// 2. Alloc from system heap was already tried
+// 3. A specific heap type is requested explicitly
+// 4. The heap type is protected
+// 5. The buffer is meant for external display only
+static bool canFallback(int compositionType, int usage, bool triedSystem)
+{
+    if (compositionType == MDP_COMPOSITION)
+        return false;
+    if (triedSystem)
+        return false;
+    if (usage & (GRALLOC_HEAP_MASK | GRALLOC_USAGE_PROTECTED))
+        return false;
+    // BUGFIX: the external-display check accidentally OR'd the whole heap
+    // mask in again (already rejected above); only the EXTERNAL_ONLY bit
+    // matters here. Net behavior is unchanged, intent is now explicit.
+    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY)
+        return false;
+    // Return true by default
+    return true;
+}
+
+// A buffer is mapped uncached only when explicitly requested, and never
+// for system/IOMMU heap allocations (those heaps must remain cached).
+static bool useUncached(int usage)
+{
+    const bool cachedOnlyHeap = (usage & (GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP |
+                                          GRALLOC_USAGE_PRIVATE_IOMMU_HEAP)) != 0;
+    const bool wantsUncached = (usage & GRALLOC_USAGE_PRIVATE_UNCACHED) != 0;
+    return !cachedOnlyHeap && wantsUncached;
+}
+
+// Process-wide allocation-controller singleton.
+// NOTE(review): lazy initialization is not thread-safe; presumably first
+// called while the gralloc module is still single-threaded — confirm.
+sp<IAllocController> IAllocController::sController = NULL;
+sp<IAllocController> IAllocController::getInstance(bool useMasterHeap)
+{
+ if(sController == NULL) {
+#ifdef USE_ION
+ // ION builds always use the ION controller; useMasterHeap is ignored.
+ sController = new IonController();
+#else
+ if(useMasterHeap)
+ sController = new PmemAshmemController();
+ else
+ sController = new PmemKernelController();
+#endif
+ }
+ return sController;
+}
+
+
+//-------------- IonController-----------------------//
+// Creates the single ION allocator used for every ION-backed buffer.
+IonController::IonController()
+{
+ mIonAlloc = new IonAlloc();
+}
+
+// Translates gralloc usage bits into ION heap flags, allocates from ION,
+// and records the allocation type (ION / non-contiguous / secure / not
+// mapped) in data.allocType. Returns the allocator's result code (<0 on
+// failure). compositionType only affects the system-heap fallback policy.
+int IonController::allocate(alloc_data& data, int usage,
+                            int compositionType)
+{
+    int ionFlags = 0;
+    int ret;
+    bool noncontig = false;
+    bool notMapped = false;
+
+    data.uncached = useUncached(usage);
+    if (usage & GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP)
+        ionFlags |= ION_HEAP(ION_SF_HEAP_ID);
+
+    if (usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
+        ionFlags |= ION_HEAP(ION_SYSTEM_HEAP_ID);
+        noncontig = true;
+    }
+
+    if (usage & GRALLOC_USAGE_PRIVATE_IOMMU_HEAP)
+        ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
+
+    if (usage & GRALLOC_USAGE_PRIVATE_MM_HEAP)
+        ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
+
+    if (usage & GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP)
+        ionFlags |= ION_HEAP(ION_CP_WB_HEAP_ID);
+
+    if (usage & GRALLOC_USAGE_PRIVATE_CAMERA_HEAP)
+        ionFlags |= ION_HEAP(ION_CAMERA_HEAP_ID);
+
+    if (usage & GRALLOC_USAGE_PROTECTED)
+        ionFlags |= ION_SECURE;
+
+    if (usage & GRALLOC_USAGE_PRIVATE_DO_NOT_MAP) {
+        data.allocType = private_handle_t::PRIV_FLAGS_NOT_MAPPED;
+        notMapped = true;
+    }
+
+    // if no flags are set, default to
+    // SF + IOMMU heaps, so that bypass can work
+    // we can fall back to system heap if
+    // we run out.
+    if (!ionFlags)
+        ionFlags = ION_HEAP(ION_SF_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID);
+
+    data.flags = ionFlags;
+    ret = mIonAlloc->alloc_buffer(data);
+
+    // Fallback unless the system heap was already part of the first try.
+    // BUGFIX: heap bits are stored as ION_HEAP(id) masks, so the
+    // "already tried system heap" test must use ION_HEAP(ION_SYSTEM_HEAP_ID)
+    // rather than the raw heap id.
+    if (ret < 0 && canFallback(compositionType,
+                               usage,
+                               (ionFlags & ION_HEAP(ION_SYSTEM_HEAP_ID))))
+    {
+        ALOGW("Falling back to system heap");
+        data.flags = ION_HEAP(ION_SYSTEM_HEAP_ID);
+        noncontig = true;
+        ret = mIonAlloc->alloc_buffer(data);
+    }
+
+    if (ret >= 0) {
+        data.allocType = private_handle_t::PRIV_FLAGS_USES_ION;
+        // BUGFIX: preserve the NOT_MAPPED flag requested above instead of
+        // clobbering it with the plain assignment.
+        if (notMapped)
+            data.allocType |= private_handle_t::PRIV_FLAGS_NOT_MAPPED;
+        if (noncontig)
+            data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+        if (ionFlags & ION_SECURE)
+            data.allocType |= private_handle_t::PRIV_FLAGS_SECURE_BUFFER;
+    }
+
+    return ret;
+}
+
+// Returns the ION allocator for ION-backed handles; NULL for anything else.
+sp<IMemAlloc> IonController::getAllocator(int flags)
+{
+    // This controller only manages ION-backed buffers.
+    if (flags & private_handle_t::PRIV_FLAGS_USES_ION)
+        return mIonAlloc;
+
+    ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+    return NULL;
+}
+
+#if 0
+// NOTE(review): this whole pmem/ashmem controller implementation is
+// compiled out, yet PmemKernelController/PmemAshmemController are still
+// declared in alloc_controller.h and instantiated by getInstance() when
+// USE_ION is not defined — a non-ION build would fail to link. Confirm
+// that every supported target defines USE_ION.
+//-------------- PmemKernelController-----------------------//
+
+PmemKernelController::PmemKernelController()
+{
+ mPmemAdspAlloc = new PmemKernelAlloc(DEVICE_PMEM_ADSP);
+ // XXX: Right now, there is no need to maintain an instance
+ // of the SMI allocator as we need it only in a few cases
+}
+
+PmemKernelController::~PmemKernelController()
+{
+}
+
+// Allocates from the SMI pmem pool when requested (or for external/
+// protected buffers), falling back to the ADSP pmem heap otherwise.
+int PmemKernelController::allocate(alloc_data& data, int usage,
+ int compositionType)
+{
+ int ret = 0;
+ bool adspFallback = false;
+ if (!(usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP))
+ adspFallback = true;
+
+ // Try SMI first
+ if ((usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP) ||
+ (usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
+ (usage & GRALLOC_USAGE_PROTECTED))
+ {
+ // Probe the SMI pool device before constructing an allocator for it.
+ int tempFd = open(DEVICE_PMEM_SMIPOOL, O_RDWR, 0);
+ if(tempFd > 0) {
+ close(tempFd);
+ sp<IMemAlloc> memalloc;
+ memalloc = new PmemKernelAlloc(DEVICE_PMEM_SMIPOOL);
+ ret = memalloc->alloc_buffer(data);
+ if(ret >= 0)
+ return ret;
+ else {
+ if(adspFallback)
+ ALOGW("Allocation from SMI failed, trying ADSP");
+ }
+ }
+ }
+
+ if ((usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) || adspFallback) {
+ ret = mPmemAdspAlloc->alloc_buffer(data);
+ }
+ return ret;
+}
+
+// Maps handle flags back to the allocator that produced the buffer.
+sp<IMemAlloc> PmemKernelController::getAllocator(int flags)
+{
+ sp<IMemAlloc> memalloc;
+ if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
+ memalloc = mPmemAdspAlloc;
+ else {
+ ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+ memalloc = NULL;
+ }
+
+ return memalloc;
+}
+
+//-------------- PmemAshmemController-----------------------//
+
+PmemAshmemController::PmemAshmemController()
+{
+ mPmemUserspaceAlloc = new PmemUserspaceAlloc();
+ mAshmemAlloc = new AshmemAlloc();
+ mPmemKernelCtrl = new PmemKernelController();
+}
+
+PmemAshmemController::~PmemAshmemController()
+{
+}
+
+// Routes allocations between pmem (userspace/kernel) and ashmem based on
+// the usage bits, with an ashmem fallback when pmem runs out.
+int PmemAshmemController::allocate(alloc_data& data, int usage,
+ int compositionType)
+{
+ int ret = 0;
+
+ // Make buffers cacheable by default
+ data.uncached = false;
+
+ // Override if we explicitly need uncached buffers
+ if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
+ data.uncached = true;
+
+ // If ADSP or SMI is requested use the kernel controller
+ if(usage & (GRALLOC_USAGE_PRIVATE_ADSP_HEAP|
+ GRALLOC_USAGE_PRIVATE_SMI_HEAP)) {
+ ret = mPmemKernelCtrl->allocate(data, usage, compositionType);
+ if(ret < 0)
+ ALOGE("%s: Failed to allocate ADSP/SMI memory", __func__);
+ else
+ data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
+ return ret;
+ }
+
+ if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
+ ret = mAshmemAlloc->alloc_buffer(data);
+ if(ret >= 0) {
+ data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+ data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+ }
+ return ret;
+ }
+
+ // if no memory specific flags are set,
+ // default to EBI heap, so that bypass
+ // can work. We can fall back to system
+ // heap if we run out.
+ ret = mPmemUserspaceAlloc->alloc_buffer(data);
+
+ // Fallback
+ if(ret >= 0 ) {
+ data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM;
+ } else if(ret < 0 && canFallback(compositionType, usage, false)) {
+ ALOGW("Falling back to ashmem");
+ ret = mAshmemAlloc->alloc_buffer(data);
+ if(ret >= 0) {
+ data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+ data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
+ }
+ }
+
+ return ret;
+}
+
+// Maps handle flags back to the allocator that produced the buffer.
+sp<IMemAlloc> PmemAshmemController::getAllocator(int flags)
+{
+ sp<IMemAlloc> memalloc;
+ if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM)
+ memalloc = mPmemUserspaceAlloc;
+ else if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
+ memalloc = mPmemKernelCtrl->getAllocator(flags);
+ else if (flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM)
+ memalloc = mAshmemAlloc;
+ else {
+ ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
+ memalloc = NULL;
+ }
+
+ return memalloc;
+}
+#endif
+
+// Computes the backing-store size in bytes for a (width, height, format)
+// buffer and reports the HW-aligned width/height through alignedw/alignedh.
+// NOTE(review): on unsupported formats (or odd-sized YV12) this returns
+// -EINVAL through a size_t, i.e. a huge positive value — callers must
+// compare against (size_t)-EINVAL rather than testing < 0.
+size_t getBufferSizeAndDimensions(int width, int height, int format,
+ int& alignedw, int &alignedh)
+{
+ size_t size;
+
+ // Default alignment: 32 pixels in both axes (overridden per-format below).
+ alignedw = ALIGN(width, 32);
+ alignedh = ALIGN(height, 32);
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ size = alignedw * alignedh * 4; // 4 bytes per pixel
+ break;
+ case HAL_PIXEL_FORMAT_RGB_888:
+ size = alignedw * alignedh * 3;
+ break;
+ case HAL_PIXEL_FORMAT_RGB_565:
+ case HAL_PIXEL_FORMAT_RGBA_5551:
+ case HAL_PIXEL_FORMAT_RGBA_4444:
+ size = alignedw * alignedh * 2;
+ break;
+
+ // adreno formats
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO: // NV21
+ // Page-aligned luma plane followed by a page-aligned chroma plane.
+ size = ALIGN(alignedw*alignedh, 4096);
+ size += ALIGN(2 * ALIGN(width/2, 32) * ALIGN(height/2, 32), 4096);
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: // NV12
+ // The chroma plane is subsampled,
+ // but the pitch in bytes is unchanged
+ // The GPU needs 4K alignment, but the video decoder needs 8K
+ alignedw = ALIGN(width, 128);
+ size = ALIGN( alignedw * alignedh, 8192);
+ size += ALIGN( alignedw * ALIGN(height/2, 32), 8192);
+ break;
+ case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ case HAL_PIXEL_FORMAT_YV12:
+ if ((format == HAL_PIXEL_FORMAT_YV12) && ((width&1) || (height&1))) {
+ ALOGE("w or h is odd for the YV12 format");
+ return -EINVAL;
+ }
+ alignedw = ALIGN(width, 16);
+ alignedh = height;
+ if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
+ // The encoder requires a 2K aligned chroma offset.
+ size = ALIGN(alignedw*alignedh, 2048) +
+ (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
+ } else {
+ size = alignedw*alignedh +
+ (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
+ }
+ size = ALIGN(size, 4096); // total allocation is page-aligned
+ break;
+
+ default:
+ ALOGE("unrecognized pixel format: %d", format);
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+// Allocate buffer from width, height and format into a
+// private_handle_t. It is the responsibility of the caller
+// to free the buffer using the free_buffer function.
+// Returns 0 on success, a negative errno-style code otherwise.
+int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage)
+{
+    alloc_data data;
+    int alignedw, alignedh;
+    android::sp<gralloc::IAllocController> sAlloc =
+        gralloc::IAllocController::getInstance(false);
+    data.base = 0;
+    data.fd = -1;
+    data.offset = 0;
+    data.size = getBufferSizeAndDimensions(w, h, format, alignedw, alignedh);
+    // BUGFIX: getBufferSizeAndDimensions() reports bad formats/dimensions
+    // by returning -EINVAL wrapped into a size_t; bail out early instead
+    // of asking the allocator for a nonsensical size.
+    if (data.size == (size_t)-EINVAL) {
+        ALOGE("%s: unsupported format %d or dimensions %dx%d",
+              __FUNCTION__, format, w, h);
+        return -EINVAL;
+    }
+    data.align = getpagesize();
+    data.uncached = useUncached(usage);
+    int allocFlags = usage;
+
+    int err = sAlloc->allocate(data, allocFlags, 0);
+    if (0 != err) {
+        ALOGE("%s: allocate failed", __FUNCTION__);
+        return -ENOMEM;
+    }
+
+    private_handle_t* hnd = new private_handle_t(data.fd, data.size,
+                                                 data.allocType, 0, format,
+                                                 alignedw, alignedh);
+    hnd->base = (int) data.base;
+    hnd->offset = data.offset;
+    hnd->gpuaddr = 0;
+    *pHnd = hnd;
+    return 0;
+}
+
+// Releases a buffer created by alloc_buffer(): frees the underlying
+// memory through the matching allocator, then deletes the handle.
+void free_buffer(private_handle_t *hnd)
+{
+    android::sp<gralloc::IAllocController> sAlloc =
+        gralloc::IAllocController::getInstance(false);
+    if (hnd && hnd->fd > 0) {
+        sp<IMemAlloc> memalloc = sAlloc->getAllocator(hnd->flags);
+        // BUGFIX: getAllocator() returns NULL for unrecognized flags;
+        // guard against dereferencing a null allocator.
+        if (memalloc != NULL)
+            memalloc->free_buffer((void*)hnd->base, hnd->size,
+                                  hnd->offset, hnd->fd);
+        else
+            ALOGE("%s: no allocator for flags 0x%x", __FUNCTION__,
+                  hnd->flags);
+    }
+    if (hnd)
+        delete hnd;
+}
diff --git a/libgralloc/alloc_controller.h b/libgralloc/alloc_controller.h
new file mode 100644
index 0000000..6c907d1
--- /dev/null
+++ b/libgralloc/alloc_controller.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef GRALLOC_ALLOCCONTROLLER_H
+#define GRALLOC_ALLOCCONTROLLER_H
+
+#include <utils/RefBase.h>
+
+namespace gralloc {
+
+ struct alloc_data;
+ class IMemAlloc;
+ class IonAlloc;
+
+ // Abstract controller that picks a concrete allocator (ION, pmem,
+ // ashmem) for each gralloc buffer request. Obtain via getInstance().
+ class IAllocController : public android::RefBase {
+
+ public:
+ /* Allocate using a suitable method
+ * Returns the type of buffer allocated
+ */
+ virtual int allocate(alloc_data& data, int usage,
+ int compositionType) = 0;
+
+ // Returns the allocator matching the PRIV_FLAGS_* bits in flags,
+ // or NULL when the flags are unrecognized.
+ virtual android::sp<IMemAlloc> getAllocator(int flags) = 0;
+
+ virtual ~IAllocController() {};
+
+ // Lazily-created process-wide singleton. useMasterHeap is only
+ // meaningful for non-ION (pmem) builds.
+ static android::sp<IAllocController> getInstance(bool useMasterHeap);
+
+ private:
+ static android::sp<IAllocController> sController;
+
+ };
+
+ // Controller backed entirely by the ION kernel allocator.
+ class IonController : public IAllocController {
+
+ public:
+ virtual int allocate(alloc_data& data, int usage,
+ int compositionType);
+
+ virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+ IonController();
+
+ private:
+ android::sp<IonAlloc> mIonAlloc;
+
+ };
+
+ // Controller for kernel pmem heaps (SMI/ADSP).
+ // NOTE(review): the implementation is currently #if 0'd out in
+ // alloc_controller.cpp — confirm non-ION builds are not expected.
+ class PmemKernelController : public IAllocController {
+
+ public:
+ virtual int allocate(alloc_data& data, int usage,
+ int compositionType);
+
+ virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+ PmemKernelController ();
+
+ ~PmemKernelController ();
+
+ private:
+ android::sp<IMemAlloc> mPmemAdspAlloc;
+
+ };
+
+ // Main pmem controller - this should only
+ // be used within gralloc
+ class PmemAshmemController : public IAllocController {
+
+ public:
+ virtual int allocate(alloc_data& data, int usage,
+ int compositionType);
+
+ virtual android::sp<IMemAlloc> getAllocator(int flags);
+
+ PmemAshmemController();
+
+ ~PmemAshmemController();
+
+ private:
+ android::sp<IMemAlloc> mPmemUserspaceAlloc;
+ android::sp<IMemAlloc> mAshmemAlloc;
+ android::sp<IAllocController> mPmemKernelCtrl;
+
+ };
+
+} //end namespace gralloc
+#endif // GRALLOC_ALLOCCONTROLLER_H
diff --git a/libgralloc/ashmemalloc.cpp b/libgralloc/ashmemalloc.cpp
new file mode 100644
index 0000000..8397e21
--- /dev/null
+++ b/libgralloc/ashmemalloc.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <cutils/log.h>
+#include <linux/ashmem.h>
+#include <cutils/ashmem.h>
+#include <errno.h>
+#include "ashmemalloc.h"
+
+using gralloc::AshmemAlloc;
+// Creates an ashmem region of data.size bytes, maps it into this process,
+// zero-fills it and flushes the CPU cache. On success fills in
+// data.fd/base/offset and returns 0; on failure returns -errno.
+int AshmemAlloc::alloc_buffer(alloc_data& data)
+{
+ int err = 0;
+ int fd = -1;
+ void* base = 0;
+ int offset = 0;
+ char name[ASHMEM_NAME_LEN];
+ // NOTE(review): "%x" expects unsigned int; if data.pHandle is a
+ // pointer-sized value this is a format mismatch — confirm the field's
+ // type in memalloc.h (use "%p" with a cast if it is a pointer).
+ snprintf(name, ASHMEM_NAME_LEN, "gralloc-buffer-%x", data.pHandle);
+ int prot = PROT_READ | PROT_WRITE;
+ fd = ashmem_create_region(name, data.size);
+ if (fd < 0) {
+ ALOGE("couldn't create ashmem (%s)", strerror(errno));
+ err = -errno;
+ } else {
+ if (ashmem_set_prot_region(fd, prot) < 0) {
+ ALOGE("ashmem_set_prot_region(fd=%d, prot=%x) failed (%s)",
+ fd, prot, strerror(errno));
+ close(fd);
+ err = -errno;
+ } else {
+ // MAP_POPULATE|MAP_LOCKED pre-faults and pins the pages.
+ base = mmap(0, data.size, prot, MAP_SHARED|MAP_POPULATE|MAP_LOCKED, fd, 0);
+ if (base == MAP_FAILED) {
+ ALOGE("alloc mmap(fd=%d, size=%d, prot=%x) failed (%s)",
+ fd, data.size, prot, strerror(errno));
+ close(fd);
+ err = -errno;
+ } else {
+ // Zero the buffer so clients never observe stale data.
+ memset((char*)base + offset, 0, data.size);
+ }
+ }
+ }
+ if(err == 0) {
+ data.fd = fd;
+ data.base = base;
+ data.offset = offset;
+ clean_buffer(base, data.size, offset, fd);
+ ALOGD("ashmem: Allocated buffer base:%p size:%d fd:%d",
+ base, data.size, fd);
+
+ }
+ return err;
+
+}
+
+// Unmaps and closes an ashmem buffer previously returned by alloc_buffer().
+// NOTE(review): when base is NULL the fd is NOT closed before returning
+// -EINVAL — potential fd leak if a caller passes a valid fd with no
+// mapping; confirm intended.
+int AshmemAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+ ALOGD("ashmem: Freeing buffer base:%p size:%d fd:%d",
+ base, size, fd);
+ int err = 0;
+
+ if(!base) {
+ ALOGE("Invalid free");
+ return -EINVAL;
+ }
+ err = unmap_buffer(base, size, offset);
+ close(fd);
+ return err;
+}
+
+// Maps an existing ashmem buffer (identified by fd) into the caller's
+// address space; *pBase receives the mapping (MAP_FAILED on error).
+int AshmemAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+    void* mapped = mmap(0, size, PROT_READ| PROT_WRITE,
+                        MAP_SHARED|MAP_POPULATE, fd, 0);
+    *pBase = mapped;
+    if (mapped == MAP_FAILED) {
+        ALOGE("ashmem: Failed to map memory in the client: %s",
+              strerror(errno));
+        return -errno;
+    }
+    ALOGD("ashmem: Mapped buffer base:%p size:%d fd:%d",
+          mapped, size, fd);
+    return 0;
+}
+
+// Removes a mapping created by map_buffer()/alloc_buffer(); returns the
+// munmap() result (0 on success).
+int AshmemAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    ALOGD("ashmem: Unmapping buffer base: %p size: %d", base, size);
+    const int status = munmap(base, size);
+    if (status != 0) {
+        ALOGE("ashmem: Failed to unmap memory at %p: %s",
+              base, strerror(errno));
+    }
+    return status;
+}
+// Flushes the CPU cache for the whole ashmem region. Best-effort: an
+// ioctl failure is only logged and err stays 0, so callers still succeed.
+int AshmemAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+ int err = 0;
+ if (ioctl(fd, ASHMEM_CACHE_FLUSH_RANGE, NULL)) {
+ ALOGE("ashmem: ASHMEM_CACHE_FLUSH_RANGE failed fd = %d", fd);
+ }
+
+ return err;
+}
+
diff --git a/libgralloc/ashmemalloc.h b/libgralloc/ashmemalloc.h
new file mode 100644
index 0000000..051dcd1
--- /dev/null
+++ b/libgralloc/ashmemalloc.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_ASHMEMALLOC_H
+#define GRALLOC_ASHMEMALLOC_H
+
+#include "memalloc.h"
+#include <linux/ion.h>
+
+namespace gralloc {
+ // IMemAlloc implementation backed by anonymous shared memory (ashmem).
+ // Used for non-contiguous system-heap buffers and as a fallback when
+ // contiguous heap allocations fail.
+ class AshmemAlloc : public IMemAlloc {
+
+ public:
+ virtual int alloc_buffer(alloc_data& data);
+
+ virtual int free_buffer(void *base, size_t size,
+ int offset, int fd);
+
+ virtual int map_buffer(void **pBase, size_t size,
+ int offset, int fd);
+
+ virtual int unmap_buffer(void *base, size_t size,
+ int offset);
+
+ // Flushes the CPU cache for the region; offset/size are currently
+ // unused by the underlying ashmem ioctl.
+ virtual int clean_buffer(void*base, size_t size,
+ int offset, int fd);
+
+ };
+}
+#endif /* GRALLOC_ASHMEMALLOC_H */
diff --git a/libgralloc/framebuffer.cpp b/libgralloc/framebuffer.cpp
new file mode 100644
index 0000000..b6b4a8f
--- /dev/null
+++ b/libgralloc/framebuffer.cpp
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+* Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/mman.h>
+
+#include <dlfcn.h>
+
+#include <cutils/ashmem.h>
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <utils/Timers.h>
+
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <utils/Timers.h>
+
+#include <cutils/log.h>
+#include <cutils/atomic.h>
+
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+
+#include <GLES/gl.h>
+
+#include "gralloc_priv.h"
+#include "gr.h"
+#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
+#include <cutils/properties.h>
+#endif
+
+#include <qcom_ui.h>
+
+#define FB_DEBUG 0
+
+#if defined(HDMI_DUAL_DISPLAY)
+#define EVEN_OUT(x) if (x & 0x0001) {x--;}
+using overlay::Overlay;
+/** min of int a, b */
+static inline int min(int a, int b) {
+    return (b < a) ? b : a;
+}
+/** max of int a, b */
+static inline int max(int a, int b) {
+    return (b > a) ? b : a;
+}
+#endif
+
+char framebufferStateName[] = {'S', 'R', 'A'};
+
+/*****************************************************************************/
+
+enum {
+ MDDI_PANEL = '1',
+ EBI2_PANEL = '2',
+ LCDC_PANEL = '3',
+ EXT_MDDI_PANEL = '4',
+ TV_PANEL = '5'
+};
+
+enum {
+ PAGE_FLIP = 0x00000001,
+ LOCKED = 0x00000002
+};
+
+struct fb_context_t {
+ framebuffer_device_t device;
+};
+
+static int neworientation;
+
+/*****************************************************************************/
+
+static void
+msm_copy_buffer(buffer_handle_t handle, int fd,
+ int width, int height, int format,
+ int x, int y, int w, int h);
+
+// Sets the framebuffer swap interval, clamped to the device's advertised
+// [minSwapInterval, maxSwapInterval] range. The debug.gr.swapinterval
+// property (default -1 = no override) lets developers force a value.
+static int fb_setSwapInterval(struct framebuffer_device_t* dev,
+                              int interval)
+{
+    char pval[PROPERTY_VALUE_MAX];
+    property_get("debug.gr.swapinterval", pval, "-1");
+    int property_interval = atoi(pval);
+    if (property_interval >= 0)
+        interval = property_interval;
+
+    // CLEANUP: removed unused local fb_context_t* ctx.
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+        dev->common.module);
+    if (interval < dev->minSwapInterval || interval > dev->maxSwapInterval)
+        return -EINVAL;
+
+    m->swapInterval = interval;
+    return 0;
+}
+
+// Records the dirty rectangle for a partial-update panel by encoding it
+// into the reserved words of fb_var_screeninfo ("UPDT" magic), which the
+// MDP driver consumes on the next flip. Rejects empty/negative regions.
+static int fb_setUpdateRect(struct framebuffer_device_t* dev,
+                            int l, int t, int w, int h)
+{
+    if (((w|h) <= 0) || ((l|t) < 0))
+        return -EINVAL;
+    // CLEANUP: removed unused local fb_context_t* ctx.
+    private_module_t* m = reinterpret_cast<private_module_t*>(
+        dev->common.module);
+    m->info.reserved[0] = 0x54445055; // "UPDT";
+    m->info.reserved[1] = (uint16_t)l | ((uint32_t)t << 16);
+    m->info.reserved[2] = (uint16_t)(l+w) | ((uint32_t)(t+h) << 16);
+    return 0;
+}
+
+// Display thread: dequeues posted framebuffers and flips them to the panel
+// via FBIOPUT_VSCREENINFO, then recycles the previously-displayed buffer
+// (SUB -> REF for the new frame, REF -> AVL for the old one). Runs
+// forever; ptr is the private_module_t of this framebuffer device.
+static void *disp_loop(void *ptr)
+{
+ struct qbuf_t nxtBuf;
+ static int cur_buf=-1; // index of the buffer currently on screen
+ private_module_t *m = reinterpret_cast<private_module_t*>(ptr);
+
+ while (1) {
+ pthread_mutex_lock(&(m->qlock));
+
+ // wait (sleep) while display queue is empty;
+ if (m->disp.isEmpty()) {
+ pthread_cond_wait(&(m->qpost),&(m->qlock));
+ }
+
+ // dequeue next buff to display and lock it
+ nxtBuf = m->disp.getHeadValue();
+ m->disp.pop();
+ pthread_mutex_unlock(&(m->qlock));
+
+ // post buf out to display synchronously
+ private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>
+ (nxtBuf.buf);
+ const size_t offset = hnd->base - m->framebuffer->base;
+ m->info.activate = FB_ACTIVATE_VBL;
+ m->info.yoffset = offset / m->finfo.line_length;
+
+#if defined(HDMI_DUAL_DISPLAY)
+ // Wake the HDMI mirroring thread with the new front-buffer offset.
+ pthread_mutex_lock(&m->overlayLock);
+ m->orientation = neworientation;
+ m->currentOffset = offset;
+ m->hdmiStateChanged = true;
+ pthread_cond_signal(&(m->overlayPost));
+ pthread_mutex_unlock(&m->overlayLock);
+#endif
+ if (ioctl(m->framebuffer->fd, FBIOPUT_VSCREENINFO, &m->info) == -1) {
+ ALOGE("ERROR FBIOPUT_VSCREENINFO failed; frame not displayed");
+ }
+
+ CALC_FPS();
+
+ if (cur_buf == -1) {
+ // First frame: nothing was on screen, just mark the new buffer
+ // as referenced by the display.
+ int nxtAvail = ((nxtBuf.idx + 1) % m->numBuffers); // NOTE(review): unused
+ pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
+ m->avail[nxtBuf.idx].is_avail = true;
+ m->avail[nxtBuf.idx].state = REF;
+ pthread_cond_broadcast(&(m->avail[nxtBuf.idx].cond));
+ pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
+ } else {
+ // Promote the posted buffer and release the previous front
+ // buffer so clients can draw into it again.
+ pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
+ if (m->avail[nxtBuf.idx].state != SUB) {
+ ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", nxtBuf.idx,
+ framebufferStateName[m->avail[nxtBuf.idx].state],
+ framebufferStateName[SUB]);
+ }
+ m->avail[nxtBuf.idx].state = REF;
+ pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
+
+ pthread_mutex_lock(&(m->avail[cur_buf].lock));
+ m->avail[cur_buf].is_avail = true;
+ if (m->avail[cur_buf].state != REF) {
+ ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", cur_buf,
+ framebufferStateName[m->avail[cur_buf].state],
+ framebufferStateName[REF]);
+ }
+ m->avail[cur_buf].state = AVL;
+ pthread_cond_broadcast(&(m->avail[cur_buf].cond));
+ pthread_mutex_unlock(&(m->avail[cur_buf].lock));
+ }
+ cur_buf = nxtBuf.idx;
+ }
+ return NULL;
+}
+
+#if defined(HDMI_DUAL_DISPLAY)
+// Tears down the overlay channel used for HDMI mirroring, if one exists.
+static int closeHDMIChannel(private_module_t* m)
+{
+    Overlay* ov = m->pobjOverlay;
+    if (ov != NULL)
+        ov->closeChannel();
+    return 0;
+}
+
+// Computes the destination rectangle (in HDMI panel coordinates) and the
+// overlay rotation needed to mirror the primary framebuffer onto the
+// secondary display while preserving aspect ratio.
+// NOTE(review): flip transforms (or any rot outside the four cases) leave
+// 'orientation' unmodified — callers must pre-initialize it (hdmi_ui_loop
+// passes rot = 0).
+static void getSecondaryDisplayDestinationInfo(private_module_t* m, overlay_rect&
+ rect, int& orientation)
+{
+ Overlay* pTemp = m->pobjOverlay;
+ int width = pTemp->getFBWidth(); // HDMI panel dimensions
+ int height = pTemp->getFBHeight();
+ int fbwidth = m->info.xres, fbheight = m->info.yres;
+ rect.x = 0; rect.y = 0;
+ rect.w = width; rect.h = height;
+ int rot = m->orientation;
+ switch(rot) {
+ // ROT_0
+ case 0:
+ // ROT_180
+ case HAL_TRANSFORM_ROT_180:
+ pTemp->getAspectRatioPosition(fbwidth, fbheight,
+ &rect);
+ if(rot == HAL_TRANSFORM_ROT_180)
+ orientation = HAL_TRANSFORM_ROT_180;
+ else
+ orientation = 0;
+ break;
+ // ROT_90
+ case HAL_TRANSFORM_ROT_90:
+ // ROT_270
+ case HAL_TRANSFORM_ROT_270:
+ //Calculate the Aspectratio for the UI
+ //in the landscape mode
+ //Width and height will be swapped as there
+ //is rotation
+ pTemp->getAspectRatioPosition(fbheight, fbwidth,
+ &rect);
+
+ // The mirror rotation is the inverse of the panel rotation.
+ if(rot == HAL_TRANSFORM_ROT_90)
+ orientation = HAL_TRANSFORM_ROT_270;
+ else if(rot == HAL_TRANSFORM_ROT_270)
+ orientation = HAL_TRANSFORM_ROT_90;
+ break;
+ }
+ return;
+}
+
+// HDMI mirroring thread: sleeps until disp_loop or one of the fb_* hooks
+// signals a state change, then keeps the UI overlay channel on the
+// external display in sync (open/close, crop, rotation, position, and the
+// front-buffer offset queued for display). Exits when exitHDMIUILoop is set.
+static void *hdmi_ui_loop(void *ptr)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ ptr);
+ while (1) {
+ pthread_mutex_lock(&m->overlayLock);
+ while(!(m->hdmiStateChanged))
+ pthread_cond_wait(&(m->overlayPost), &(m->overlayLock));
+ m->hdmiStateChanged = false;
+ if (m->exitHDMIUILoop) {
+ pthread_mutex_unlock(&m->overlayLock);
+ return NULL;
+ }
+ bool waitForVsync = true; // NOTE(review): unused — 'flags' below carries the vsync choice
+ int flags = WAIT_FOR_VSYNC;
+ if (m->pobjOverlay) {
+ Overlay* pTemp = m->pobjOverlay;
+ if (m->hdmiMirroringState == HDMI_NO_MIRRORING)
+ closeHDMIChannel(m);
+ else if(m->hdmiMirroringState == HDMI_UI_MIRRORING) {
+ if (!pTemp->isChannelUP()) {
+ int alignedW = ALIGN(m->info.xres, 32);
+
+ private_handle_t const* hnd =
+ reinterpret_cast<private_handle_t const*>(m->framebuffer);
+ overlay_buffer_info info;
+ info.width = alignedW;
+ info.height = hnd->height;
+ info.format = hnd->format;
+ info.size = hnd->size;
+
+ if (m->trueMirrorSupport)
+ flags &= ~WAIT_FOR_VSYNC;
+ // start the overlay Channel for mirroring
+ // m->enableHDMIOutput corresponds to the fbnum
+ if (pTemp->startChannel(info, m->enableHDMIOutput,
+ false, true, 0, VG0_PIPE, flags)) {
+ pTemp->setFd(m->framebuffer->fd);
+ pTemp->setCrop(0, 0, m->info.xres, m->info.yres);
+ } else
+ closeHDMIChannel(m);
+ }
+
+ if (pTemp->isChannelUP()) {
+ // Recompute destination geometry/rotation and apply only
+ // the pieces that actually changed.
+ overlay_rect destRect;
+ int rot = 0;
+ int currOrientation = 0;
+ getSecondaryDisplayDestinationInfo(m, destRect, rot);
+ pTemp->getOrientation(currOrientation);
+ if(rot != currOrientation) {
+ pTemp->setTransform(rot);
+ }
+ EVEN_OUT(destRect.x);
+ EVEN_OUT(destRect.y);
+ EVEN_OUT(destRect.w);
+ EVEN_OUT(destRect.h);
+ int currentX = 0, currentY = 0;
+ uint32_t currentW = 0, currentH = 0;
+ if (pTemp->getPosition(currentX, currentY, currentW, currentH)) {
+ if ((currentX != destRect.x) || (currentY != destRect.y) ||
+ (currentW != destRect.w) || (currentH != destRect.h)) {
+ pTemp->setPosition(destRect.x, destRect.y, destRect.w,
+ destRect.h);
+ }
+ }
+ if (m->trueMirrorSupport) {
+ // if video is started the UI channel should be NO_WAIT.
+ flags = !m->videoOverlay ? WAIT_FOR_VSYNC : 0;
+ pTemp->updateOverlayFlags(flags);
+ }
+ pTemp->queueBuffer(m->currentOffset);
+ }
+ }
+ else
+ closeHDMIChannel(m);
+ }
+ pthread_mutex_unlock(&m->overlayLock);
+ }
+ return NULL;
+}
+
+static int fb_videoOverlayStarted(struct framebuffer_device_t* dev, int started)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ dev->common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ Overlay* pTemp = m->pobjOverlay;
+ if(started != m->videoOverlay) {
+ m->videoOverlay = started;
+ if (!m->trueMirrorSupport) {
+ m->hdmiStateChanged = true;
+ if (started && pTemp) {
+ m->hdmiMirroringState = HDMI_NO_MIRRORING;
+ closeHDMIChannel(m);
+ } else if (m->enableHDMIOutput)
+ m->hdmiMirroringState = HDMI_UI_MIRRORING;
+ pthread_cond_signal(&(m->overlayPost));
+ }
+ }
+ pthread_mutex_unlock(&m->overlayLock);
+ return 0;
+}
+
+static int fb_enableHDMIOutput(struct framebuffer_device_t* dev, int externaltype)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ dev->common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ Overlay* pTemp = m->pobjOverlay;
+ //Check if true mirroring can be supported
+ m->trueMirrorSupport = FrameBufferInfo::getInstance()->canSupportTrueMirroring();
+ m->enableHDMIOutput = externaltype;
+ ALOGE("In fb_enableHDMIOutput: externaltype = %d", m->enableHDMIOutput);
+ if(externaltype) {
+ if (m->trueMirrorSupport) {
+ m->hdmiMirroringState = HDMI_UI_MIRRORING;
+ } else {
+ if(!m->videoOverlay)
+ m->hdmiMirroringState = HDMI_UI_MIRRORING;
+ }
+ } else if (!externaltype && pTemp) {
+ m->hdmiMirroringState = HDMI_NO_MIRRORING;
+ closeHDMIChannel(m);
+ }
+ m->hdmiStateChanged = true;
+ pthread_cond_signal(&(m->overlayPost));
+ pthread_mutex_unlock(&m->overlayLock);
+ return 0;
+}
+
+
+static int fb_setActionSafeWidthRatio(struct framebuffer_device_t* dev, float asWidthRatio)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ dev->common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ m->actionsafeWidthRatio = asWidthRatio;
+ pthread_mutex_unlock(&m->overlayLock);
+ return 0;
+}
+
+static int fb_setActionSafeHeightRatio(struct framebuffer_device_t* dev, float asHeightRatio)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ dev->common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ m->actionsafeHeightRatio = asHeightRatio;
+ pthread_mutex_unlock(&m->overlayLock);
+ return 0;
+}
+
+static int fb_orientationChanged(struct framebuffer_device_t* dev, int orientation)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ dev->common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ neworientation = orientation;
+ pthread_mutex_unlock(&m->overlayLock);
+ return 0;
+}
+#endif
+
/* framebuffer_device_t::post implementation.
 *
 * For buffers carrying PRIV_FLAGS_FRAMEBUFFER the buffer index is handed
 * to the display thread (disp_loop) through m->disp; with swapInterval==0
 * and no free slot the current buffer is reused instead.  Any other buffer
 * is copied into the framebuffer via msm_copy_buffer().
 * Returns 0 on success, -EINVAL for an invalid handle. */
static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
{
    if (private_handle_t::validate(buffer) < 0)
        return -EINVAL;

    // NOTE(review): 'futureIdx' and 'ctx' are computed/declared but unused.
    int nxtIdx, futureIdx = -1;
    bool reuse;
    struct qbuf_t qb;
    fb_context_t* ctx = (fb_context_t*)dev;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(buffer);
    private_module_t* m = reinterpret_cast<private_module_t*>(
        dev->common.module);

    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {

        reuse = false;
        nxtIdx = (m->currentIdx + 1) % m->numBuffers;
        futureIdx = (nxtIdx + 1) % m->numBuffers;

        if (m->swapInterval == 0) {
            // if SwapInterval = 0 and no buffers available then reuse
            // current buf for next rendering so don't post new buffer
            if (pthread_mutex_trylock(&(m->avail[nxtIdx].lock))) {
                reuse = true;
            } else {
                if (! m->avail[nxtIdx].is_avail)
                    reuse = true;
                pthread_mutex_unlock(&(m->avail[nxtIdx].lock));
            }
        }

        if(!reuse){
            // unlock previous ("current") Buffer and lock the new buffer
            m->base.lock(&m->base, buffer,
                         private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
                         0,0, m->info.xres, m->info.yres, NULL);

            // post/queue the new buffer
            pthread_mutex_lock(&(m->avail[nxtIdx].lock));
            if (m->avail[nxtIdx].is_avail != true) {
                ALOGE_IF(m->swapInterval != 0, "Found %d buf to be not avail", nxtIdx);
            }

            m->avail[nxtIdx].is_avail = false;

            // Buffer should be AVL before submission; a mismatch means the
            // post/display state machine got out of step.
            if (m->avail[nxtIdx].state != AVL) {
                ALOGD("[%d] state %c, expected %c", nxtIdx,
                      framebufferStateName[m->avail[nxtIdx].state],
                      framebufferStateName[AVL]);
            }

            m->avail[nxtIdx].state = SUB;
            pthread_mutex_unlock(&(m->avail[nxtIdx].lock));

            // Hand the buffer to disp_loop.
            qb.idx = nxtIdx;
            qb.buf = buffer;
            pthread_mutex_lock(&(m->qlock));
            m->disp.push(qb);
            pthread_cond_signal(&(m->qpost));
            pthread_mutex_unlock(&(m->qlock));

            if (m->currentBuffer)
                m->base.unlock(&m->base, m->currentBuffer);

            m->currentBuffer = buffer;
            m->currentIdx = nxtIdx;
        } else {
            // Reuse path: move the gralloc post-lock over to this buffer
            // without queueing anything.
            if (m->currentBuffer)
                m->base.unlock(&m->base, m->currentBuffer);
            m->base.lock(&m->base, buffer,
                         private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
                         0,0, m->info.xres, m->info.yres, NULL);
            m->currentBuffer = buffer;
        }

    } else {
        // Non-framebuffer buffer: blit its contents to the display.
        void* fb_vaddr;
        void* buffer_vaddr;
        m->base.lock(&m->base, m->framebuffer,
                     GRALLOC_USAGE_SW_WRITE_RARELY,
                     0, 0, m->info.xres, m->info.yres,
                     &fb_vaddr);

        m->base.lock(&m->base, buffer,
                     GRALLOC_USAGE_SW_READ_RARELY,
                     0, 0, m->info.xres, m->info.yres,
                     &buffer_vaddr);

        //memcpy(fb_vaddr, buffer_vaddr, m->finfo.line_length * m->info.yres);

        // NOTE(review): the blit sources m->framebuffer rather than
        // 'buffer' and targets the framebuffer fd — confirm this path is
        // intended/exercised.
        msm_copy_buffer(
            m->framebuffer, m->framebuffer->fd,
            m->info.xres, m->info.yres, m->fbFormat,
            m->info.xoffset, m->info.yoffset,
            m->info.width, m->info.height);

        m->base.unlock(&m->base, buffer);
        m->base.unlock(&m->base, m->framebuffer);
    }

    ALOGD_IF(FB_DEBUG, "Framebuffer state: [0] = %c [1] = %c [2] = %c",
             framebufferStateName[m->avail[0].state],
             framebufferStateName[m->avail[1].state],
             framebufferStateName[m->avail[2].state]);
    return 0;
}
+
/* Called when GL composition of a frame is complete.  glFinish() blocks
 * until the GPU has drained all queued work so the buffer is safe to
 * post/read. */
static int fb_compositionComplete(struct framebuffer_device_t* dev)
{
    // TODO: Properly implement composition complete callback
    glFinish();

    return 0;
}
+
/* Blocks until framebuffer slot 'index' has been displayed and returned to
 * the AVL state, i.e. is free for rendering again.  With swapInterval == 0
 * the caller never waits. */
static int fb_lockBuffer(struct framebuffer_device_t* dev, int index)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(
        dev->common.module);

    // Return immediately if the buffer is available
    // NOTE(review): this fast path reads avail[index].state without the
    // slot lock; presumably a benign race since the slow path re-checks
    // under the lock — confirm.
    if ((m->avail[index].state == AVL) || (m->swapInterval == 0))
        return 0;

    pthread_mutex_lock(&(m->avail[index].lock));
    while (m->avail[index].state != AVL) {
        pthread_cond_wait(&(m->avail[index].cond),
                          &(m->avail[index].lock));
    }
    pthread_mutex_unlock(&(m->avail[index].lock));

    return 0;
}
+
+/*****************************************************************************/
+
/* Opens and configures the primary framebuffer device node and publishes
 * its geometry, format, dpi and buffering parameters into 'module'.
 * Idempotent; caller must hold module->lock (see mapFrameBuffer).
 * Returns 0 on success or a negative errno. */
int mapFrameBufferLocked(struct private_module_t* module)
{
    // already initialized...
    if (module->framebuffer) {
        return 0;
    }
    char const * const device_template[] = {
        "/dev/graphics/fb%u",
        "/dev/fb%u",
        0 };

    int fd = -1;
    int i=0;
    char name[64];
    char property[PROPERTY_VALUE_MAX];

    // Probe the candidate device nodes for fb0.
    while ((fd==-1) && device_template[i]) {
        snprintf(name, 64, device_template[i], 0);
        fd = open(name, O_RDWR, 0);
        i++;
    }
    if (fd < 0)
        return -errno;

    struct fb_fix_screeninfo finfo;
    if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
        return -errno;

    struct fb_var_screeninfo info;
    if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
        return -errno;

    info.reserved[0] = 0;
    info.reserved[1] = 0;
    info.reserved[2] = 0;
    info.xoffset = 0;
    info.yoffset = 0;
    info.activate = FB_ACTIVATE_NOW;

    /* Interpretation of offset for color fields: All offsets are from the right,
     * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
     * can use the offset as right argument to <<). A pixel afterwards is a bit
     * stream and is written to video memory as that unmodified. This implies
     * big-endian byte order if bits_per_pixel is greater than 8.
     */

    if(info.bits_per_pixel == 32) {
        /*
         * Explicitly request RGBA_8888
         */
        info.bits_per_pixel = 32;
        info.red.offset = 24;
        info.red.length = 8;
        info.green.offset = 16;
        info.green.length = 8;
        info.blue.offset = 8;
        info.blue.length = 8;
        info.transp.offset = 0;
        info.transp.length = 8;

        /* Note: the GL driver does not have a r=8 g=8 b=8 a=0 config, so if we do
         * not use the MDP for composition (i.e. hw composition == 0), ask for
         * RGBA instead of RGBX. */
        if (property_get("debug.sf.hw", property, NULL) > 0 && atoi(property) == 0)
            module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
        else if(property_get("debug.composition.type", property, NULL) > 0 && (strncmp(property, "mdp", 3) == 0))
            module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
        else
            module->fbFormat = HAL_PIXEL_FORMAT_RGBA_8888;
    } else {
        /*
         * Explicitly request 5/6/5
         */
        info.bits_per_pixel = 16;
        info.red.offset = 11;
        info.red.length = 5;
        info.green.offset = 5;
        info.green.length = 6;
        info.blue.offset = 0;
        info.blue.length = 5;
        info.transp.offset = 0;
        info.transp.length = 0;
        module->fbFormat = HAL_PIXEL_FORMAT_RGB_565;
    }

    //adreno needs 4k aligned offsets. Max hole size is 4096-1
    int size = roundUpToPageSize(info.yres * info.xres * (info.bits_per_pixel/8));

    /*
     * Request NUM_BUFFERS screens (at lest 2 for page flipping)
     */
    int numberOfBuffers = (int)(finfo.smem_len/size);
    ALOGV("num supported framebuffers in kernel = %d", numberOfBuffers);

    // Optional override via debug.gr.numframebuffers, clamped below to the
    // supported maximum.
    if (property_get("debug.gr.numframebuffers", property, NULL) > 0) {
        int num = atoi(property);
        if ((num >= NUM_FRAMEBUFFERS_MIN) && (num <= NUM_FRAMEBUFFERS_MAX)) {
            numberOfBuffers = num;
        }
    }
    if (numberOfBuffers > NUM_FRAMEBUFFERS_MAX)
        numberOfBuffers = NUM_FRAMEBUFFERS_MAX;

    ALOGV("We support %d buffers", numberOfBuffers);

    //consider the included hole by 4k alignment
    uint32_t line_length = (info.xres * info.bits_per_pixel / 8);
    info.yres_virtual = (size * numberOfBuffers) / line_length;

    uint32_t flags = PAGE_FLIP;
    if (ioctl(fd, FBIOPUT_VSCREENINFO, &info) == -1) {
        info.yres_virtual = size / line_length;
        flags &= ~PAGE_FLIP;
        ALOGW("FBIOPUT_VSCREENINFO failed, page flipping not supported");
    }

    if (info.yres_virtual < ((size * 2) / line_length) ) {
        // we need at least 2 for page-flipping
        info.yres_virtual = size / line_length;
        flags &= ~PAGE_FLIP;
        ALOGW("page flipping not supported (yres_virtual=%d, requested=%d)",
              info.yres_virtual, info.yres*2);
    }

    // Re-read what the driver actually accepted.
    if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
        return -errno;

    if (int(info.width) <= 0 || int(info.height) <= 0) {
        // the driver doesn't return that information
        // default to 160 dpi
        info.width = ((info.xres * 25.4f)/160.0f + 0.5f);
        info.height = ((info.yres * 25.4f)/160.0f + 0.5f);
    }

    float xdpi = (info.xres * 25.4f) / info.width;
    float ydpi = (info.yres * 25.4f) / info.height;
    //The reserved[4] field is used to store FPS by the driver.
    float fps = info.reserved[4];

    ALOGI( "using (fd=%d)\n"
           "id = %s\n"
           "xres = %d px\n"
           "yres = %d px\n"
           "xres_virtual = %d px\n"
           "yres_virtual = %d px\n"
           "bpp = %d\n"
           "r = %2u:%u\n"
           "g = %2u:%u\n"
           "b = %2u:%u\n",
           fd,
           finfo.id,
           info.xres,
           info.yres,
           info.xres_virtual,
           info.yres_virtual,
           info.bits_per_pixel,
           info.red.offset, info.red.length,
           info.green.offset, info.green.length,
           info.blue.offset, info.blue.length
        );

    ALOGI( "width = %d mm (%f dpi)\n"
           "height = %d mm (%f dpi)\n"
           "refresh rate = %.2f Hz\n",
           info.width, xdpi,
           info.height, ydpi,
           fps
        );


    if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
        return -errno;

    // NOTE(review): errno may be 0 at this point, making this a success
    // return for a zero-length smem — consider a fixed error code.
    if (finfo.smem_len <= 0)
        return -errno;

    module->flags = flags;
    module->info = info;
    module->finfo = finfo;
    module->xdpi = xdpi;
    module->ydpi = ydpi;
    module->fps = fps;

#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
    // Swap interval comes from a debug property; clamp to the valid range.
    char pval[PROPERTY_VALUE_MAX];
    property_get("debug.gr.swapinterval", pval, "1");
    module->swapInterval = atoi(pval);
    if (module->swapInterval < private_module_t::PRIV_MIN_SWAP_INTERVAL ||
        module->swapInterval > private_module_t::PRIV_MAX_SWAP_INTERVAL) {
        module->swapInterval = 1;
        ALOGW("Out of range (%d to %d) value for debug.gr.swapinterval, using 1",
              private_module_t::PRIV_MIN_SWAP_INTERVAL,
              private_module_t::PRIV_MAX_SWAP_INTERVAL);
    }

#else
    /* when surfaceflinger supports swapInterval then can just do this */
    module->swapInterval = 1;
#endif

    CALC_INIT();

    // Initialize per-slot availability state used by fb_post/fb_lockBuffer.
    module->currentIdx = -1;
    pthread_cond_init(&(module->qpost), NULL);
    pthread_mutex_init(&(module->qlock), NULL);
    for (i = 0; i < NUM_FRAMEBUFFERS_MAX; i++) {
        pthread_mutex_init(&(module->avail[i].lock), NULL);
        pthread_cond_init(&(module->avail[i].cond), NULL);
        module->avail[i].is_avail = true;
        module->avail[i].state = AVL;
    }

    /* create display update thread */
    pthread_t thread1;
    if (pthread_create(&thread1, NULL, &disp_loop, (void *) module)) {
        return -errno;
    }

    /*
     * map the framebuffer
     */

    int err;  // NOTE(review): unused
    module->numBuffers = info.yres_virtual / info.yres;
    module->bufferMask = 0;
    //adreno needs page aligned offsets. Align the fbsize to pagesize.
    size_t fbSize = roundUpToPageSize(finfo.line_length * info.yres) * module->numBuffers;
    module->framebuffer = new private_handle_t(fd, fbSize,
                                               private_handle_t::PRIV_FLAGS_USES_PMEM, BUFFER_TYPE_UI,
                                               module->fbFormat, info.xres, info.yres);
    void* vaddr = mmap(0, fbSize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (vaddr == MAP_FAILED) {
        // NOTE(review): fd and module->framebuffer are not released on this
        // failure path.
        ALOGE("Error mapping the framebuffer (%s)", strerror(errno));
        return -errno;
    }
    module->framebuffer->base = intptr_t(vaddr);
    memset(vaddr, 0, fbSize);

#if defined(HDMI_DUAL_DISPLAY)
    /* Overlay for HDMI*/
    pthread_mutex_init(&(module->overlayLock), NULL);
    pthread_cond_init(&(module->overlayPost), NULL);
    module->pobjOverlay = new Overlay();
    module->currentOffset = 0;
    module->exitHDMIUILoop = false;
    module->hdmiStateChanged = false;
    pthread_t hdmiUIThread;
    pthread_create(&hdmiUIThread, NULL, &hdmi_ui_loop, (void *) module);
    module->hdmiMirroringState = HDMI_NO_MIRRORING;
    module->trueMirrorSupport = false;
#endif

    return 0;
}
+
+static int mapFrameBuffer(struct private_module_t* module)
+{
+ pthread_mutex_lock(&module->lock);
+ int err = mapFrameBufferLocked(module);
+ pthread_mutex_unlock(&module->lock);
+ return err;
+}
+
+/*****************************************************************************/
+
+static int fb_close(struct hw_device_t *dev)
+{
+ fb_context_t* ctx = (fb_context_t*)dev;
+#if defined(HDMI_DUAL_DISPLAY)
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ ctx->device.common.module);
+ pthread_mutex_lock(&m->overlayLock);
+ m->exitHDMIUILoop = true;
+ pthread_cond_signal(&(m->overlayPost));
+ pthread_mutex_unlock(&m->overlayLock);
+#endif
+ if (ctx) {
+ free(ctx);
+ }
+ return 0;
+}
+
+int fb_device_open(hw_module_t const* module, const char* name,
+ hw_device_t** device)
+{
+ int status = -EINVAL;
+ if (!strcmp(name, GRALLOC_HARDWARE_FB0)) {
+ alloc_device_t* gralloc_device;
+ status = gralloc_open(module, &gralloc_device);
+ if (status < 0)
+ return status;
+
+ /* initialize our state here */
+ fb_context_t *dev = (fb_context_t*)malloc(sizeof(*dev));
+ memset(dev, 0, sizeof(*dev));
+
+ /* initialize the procs */
+ dev->device.common.tag = HARDWARE_DEVICE_TAG;
+ dev->device.common.version = 0;
+ dev->device.common.module = const_cast<hw_module_t*>(module);
+ dev->device.common.close = fb_close;
+ dev->device.setSwapInterval = fb_setSwapInterval;
+ dev->device.post = fb_post;
+ dev->device.setUpdateRect = 0;
+ dev->device.compositionComplete = fb_compositionComplete;
+ //dev->device.lockBuffer = fb_lockBuffer;
+#if defined(HDMI_DUAL_DISPLAY)
+ dev->device.orientationChanged = fb_orientationChanged;
+ dev->device.videoOverlayStarted = fb_videoOverlayStarted;
+ dev->device.enableHDMIOutput = fb_enableHDMIOutput;
+ dev->device.setActionSafeWidthRatio = fb_setActionSafeWidthRatio;
+ dev->device.setActionSafeHeightRatio = fb_setActionSafeHeightRatio;
+#endif
+
+ private_module_t* m = (private_module_t*)module;
+ status = mapFrameBuffer(m);
+ if (status >= 0) {
+ int stride = m->finfo.line_length / (m->info.bits_per_pixel >> 3);
+ const_cast<uint32_t&>(dev->device.flags) = 0;
+ const_cast<uint32_t&>(dev->device.width) = m->info.xres;
+ const_cast<uint32_t&>(dev->device.height) = m->info.yres;
+ const_cast<int&>(dev->device.stride) = stride;
+ const_cast<int&>(dev->device.format) = m->fbFormat;
+ const_cast<float&>(dev->device.xdpi) = m->xdpi;
+ const_cast<float&>(dev->device.ydpi) = m->ydpi;
+ const_cast<float&>(dev->device.fps) = m->fps;
+ const_cast<int&>(dev->device.minSwapInterval) = private_module_t::PRIV_MIN_SWAP_INTERVAL;
+ const_cast<int&>(dev->device.maxSwapInterval) = private_module_t::PRIV_MAX_SWAP_INTERVAL;
+ //const_cast<int&>(dev->device.numFramebuffers) = m->numBuffers;
+ if (m->finfo.reserved[0] == 0x5444 &&
+ m->finfo.reserved[1] == 0x5055) {
+ dev->device.setUpdateRect = fb_setUpdateRect;
+ ALOGD("UPDATE_ON_DEMAND supported");
+ }
+
+ *device = &dev->device.common;
+ }
+
+ // Close the gralloc module
+ gralloc_close(gralloc_device);
+ }
+ return status;
+}
+
/* Copy a pmem buffer to the framebuffer */

/* Issues a single MSMFB_BLIT from the pmem buffer backing 'handle' to the
 * framebuffer described by 'fd', copying the (x, y, w, h) rectangle.
 * Errors are logged but not propagated. */
static void
msm_copy_buffer(buffer_handle_t handle, int fd,
                int width, int height, int format,
                int x, int y, int w, int h)
{
    struct {
        unsigned int count;   // number of blit requests that follow
        mdp_blit_req req;
    } blit;
    private_handle_t *priv = (private_handle_t*) handle;

    memset(&blit, 0, sizeof(blit));
    blit.count = 1;

    blit.req.flags = 0;
    blit.req.alpha = 0xff;            // fully opaque
    blit.req.transp_mask = 0xffffffff; // no transparency color key

    // NOTE(review): blit.req.src.format is never set (left 0 by the
    // memset) while dst.format is — confirm this is intended.
    blit.req.src.width = width;
    blit.req.src.height = height;
    blit.req.src.offset = 0;
    blit.req.src.memory_id = priv->fd;

    blit.req.dst.width = width;
    blit.req.dst.height = height;
    blit.req.dst.offset = 0;
    blit.req.dst.memory_id = fd;
    blit.req.dst.format = format;

    // Same rectangle on both sides: a straight 1:1 copy.
    blit.req.src_rect.x = blit.req.dst_rect.x = x;
    blit.req.src_rect.y = blit.req.dst_rect.y = y;
    blit.req.src_rect.w = blit.req.dst_rect.w = w;
    blit.req.src_rect.h = blit.req.dst_rect.h = h;

    if (ioctl(fd, MSMFB_BLIT, &blit))
        ALOGE("MSMFB_BLIT failed = %d", -errno);
}
diff --git a/libgralloc/gpu.cpp b/libgralloc/gpu.cpp
new file mode 100755
index 0000000..77ad174
--- /dev/null
+++ b/libgralloc/gpu.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <cutils/properties.h>
+#include <sys/mman.h>
+
+#include <genlock.h>
+
+#include "gr.h"
+#include "gpu.h"
+#include "memalloc.h"
+#include "alloc_controller.h"
+
+using namespace gralloc;
+using android::sp;
+
+gpu_context_t::gpu_context_t(const private_module_t* module,
+ sp<IAllocController> alloc_ctrl ) :
+ mAllocCtrl(alloc_ctrl)
+{
+ // Zero out the alloc_device_t
+ memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
+
+ char property[PROPERTY_VALUE_MAX];
+ if (property_get("debug.sf.hw", property, NULL) > 0) {
+ if(atoi(property) == 0) {
+ //debug.sf.hw = 0
+ compositionType = CPU_COMPOSITION;
+ } else { //debug.sf.hw = 1
+ // Get the composition type
+ property_get("debug.composition.type", property, NULL);
+ if (property == NULL) {
+ compositionType = GPU_COMPOSITION;
+ } else if ((strncmp(property, "mdp", 3)) == 0) {
+ compositionType = MDP_COMPOSITION;
+ } else if ((strncmp(property, "c2d", 3)) == 0) {
+ compositionType = C2D_COMPOSITION;
+ } else {
+ compositionType = GPU_COMPOSITION;
+ }
+ }
+ } else { //debug.sf.hw is not set. Use cpu composition
+ compositionType = CPU_COMPOSITION;
+ }
+
+ // Initialize the procs
+ common.tag = HARDWARE_DEVICE_TAG;
+ common.version = 0;
+ common.module = const_cast<hw_module_t*>(&module->base.common);
+ common.close = gralloc_close;
+ alloc = gralloc_alloc;
+#if 0
+ allocSize = gralloc_alloc_size;
+#endif
+ free = gralloc_free;
+
+}
+
/* Carves one buffer out of the already-mapped framebuffer memory.
 * Caller must hold private_module_t::lock (see gralloc_alloc_framebuffer).
 * With a single buffer a regular (non-FB) buffer is allocated instead,
 * since page-flipping is impossible.  Returns 0, -EINVAL or -ENOMEM. */
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // we don't support allocations with both the FB and PMEM_ADSP flags
    if (usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) {
        return -EINVAL;
    }

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    size_t bufferSize = m->finfo.line_length * m->info.yres;

    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handles for it
    // Set the PMEM flag as well, since adreno
    // treats the FB memory as pmem
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
                                                 m->info.yres);

    // find a free slot
    // (one is guaranteed by the bufferMask check above; vaddr advances to
    // the base address of the chosen slot)
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
+
+
+int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
+ buffer_handle_t* pHandle)
+{
+ private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
+ pthread_mutex_lock(&m->lock);
+ int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
+ pthread_mutex_unlock(&m->lock);
+ return err;
+}
+
/* Allocates a buffer from the appropriate memory pool via mAllocCtrl and
 * wraps it in a new private_handle_t.  'size' is rounded up to a whole
 * page.  Returns 0 on success or the allocator's negative errno. */
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    data.size = size;
    // Tiled YUV needs 8K alignment; everything else a page.
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();
    // NOTE(review): pointer truncated to unsigned int — not 64-bit safe.
    data.pHandle = (unsigned int) pHandle;
    err = mAllocCtrl->allocate(data, usage, compositionType);

    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
    }

    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY) {
        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        //The EXTERNAL_BLOCK flag is always an add-on
        if (usage & GRALLOC_USAGE_EXTERNAL_BLOCK) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
        }
    }

    if (err == 0) {
        // data.allocType is only meaningful when allocate() succeeded.
        flags |= data.allocType;
        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
                                                     bufferType, format, width, height);

        hnd->offset = data.offset;
        hnd->base = int(data.base) + data.offset;
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}
+
+void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
+ int *colorFormat,
+ int *bufferType)
+{
+ *bufferType = BUFFER_TYPE_VIDEO;
+ *colorFormat = inputFormat;
+
+ if (inputFormat == HAL_PIXEL_FORMAT_YV12) {
+ *bufferType = BUFFER_TYPE_VIDEO;
+ } else if (inputFormat & S3D_FORMAT_MASK) {
+ // S3D format
+ *colorFormat = COLOR_FORMAT(inputFormat);
+ } else if (inputFormat & INTERLACE_MASK) {
+ // Interlaced
+ *colorFormat = inputFormat ^ HAL_PIXEL_FORMAT_INTERLACE;
+ } else if (inputFormat < 0x7) {
+ // RGB formats
+ *colorFormat = inputFormat;
+ *bufferType = BUFFER_TYPE_UI;
+ } else if ((inputFormat == HAL_PIXEL_FORMAT_R_8) ||
+ (inputFormat == HAL_PIXEL_FORMAT_RG_88)) {
+ *colorFormat = inputFormat;
+ *bufferType = BUFFER_TYPE_UI;
+ }
+}
+
+int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
+ buffer_handle_t* pHandle, int* pStride, size_t bufferSize) {
+ if (!pHandle || !pStride)
+ return -EINVAL;
+
+ size_t size;
+ int alignedw, alignedh;
+ int colorFormat, bufferType;
+ getGrallocInformationFromFormat(format, &colorFormat, &bufferType);
+ size = getBufferSizeAndDimensions(w, h, colorFormat, alignedw, alignedh);
+
+ if ((ssize_t)size <= 0)
+ return -EINVAL;
+ size = (bufferSize >= size)? bufferSize : size;
+
+ // All buffers marked as protected or for external
+ // display need to go to overlay
+ if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
+ (usage & GRALLOC_USAGE_PROTECTED)) {
+ bufferType = BUFFER_TYPE_VIDEO;
+ }
+ int err;
+ if (usage & GRALLOC_USAGE_HW_FB) {
+ err = gralloc_alloc_framebuffer(size, usage, pHandle);
+ } else {
+ err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
+ format, alignedw, alignedh);
+ }
+
+ if (err < 0) {
+ return err;
+ }
+
+ // Create a genlock lock for this buffer handle.
+ err = genlock_create_lock((native_handle_t*)(*pHandle));
+ if (err) {
+ ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
+ free_impl(reinterpret_cast<private_handle_t*>(pHandle));
+ return err;
+ }
+ *pStride = alignedw;
+ return 0;
+}
+
/* Releases a buffer previously produced by alloc_impl().
 * Framebuffer-backed handles just release their slot in bufferMask; pool
 * buffers are returned to their allocator and unmapped.  The genlock is
 * released and the handle deleted in both cases. */
int gpu_context_t::free_impl(private_handle_t const* hnd) {
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1<<index);
    } else {
        sp<IMemAlloc> memalloc = mAllocCtrl->getAllocator(hnd->flags);
        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
                                        hnd->offset, hnd->fd);
        // NOTE(review): on failure this returns without releasing the
        // genlock or deleting hnd — confirm the leak is intentional.
        if(err)
            return err;
        terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
    }

    // Release the genlock
    int err = genlock_release_lock((native_handle_t*)hnd);
    if (err) {
        ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
    }

    delete hnd;
    return 0;
}
+
+int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
+ int usage, buffer_handle_t* pHandle, int* pStride)
+{
+ if (!dev) {
+ return -EINVAL;
+ }
+ gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+ return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
+}
+int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h, int format,
+ int usage, buffer_handle_t* pHandle, int* pStride, int bufferSize)
+{
+ if (!dev) {
+ return -EINVAL;
+ }
+ gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+ return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
+}
+
+
+int gpu_context_t::gralloc_free(alloc_device_t* dev,
+ buffer_handle_t handle)
+{
+ if (private_handle_t::validate(handle) < 0)
+ return -EINVAL;
+
+ private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
+ gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
+ return gpu->free_impl(hnd);
+}
+
+/*****************************************************************************/
+
+int gpu_context_t::gralloc_close(struct hw_device_t *dev)
+{
+ gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
+ if (ctx) {
+ /* TODO: keep a list of all buffer_handle_t created, and free them
+ * all here.
+ */
+ delete ctx;
+ }
+ return 0;
+}
+
diff --git a/libgralloc/gpu.h b/libgralloc/gpu.h
new file mode 100644
index 0000000..301c411
--- /dev/null
+++ b/libgralloc/gpu.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRALLOC_GPU_H_
+#define GRALLOC_GPU_H_
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cutils/log.h>
+#include <cutils/ashmem.h>
+#include <utils/RefBase.h>
+
+#include "gralloc_priv.h"
+
namespace gralloc {
    class IAllocController;
    /* gralloc's alloc_device_t implementation.  Allocation requests are
     * delegated to an IAllocController, which selects the backing memory
     * pool; framebuffer requests are carved out of the mapped framebuffer.
     * Created by gralloc_open(); destroyed in gralloc_close(). */
    class gpu_context_t : public alloc_device_t {
    public:
        gpu_context_t(const private_module_t* module,
                      android::sp<IAllocController>alloc_ctrl);

        // Carves one buffer out of the mapped framebuffer.  Caller must
        // hold private_module_t::lock.
        int gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                             buffer_handle_t* pHandle);

        // Locking wrapper around gralloc_alloc_framebuffer_locked().
        int gralloc_alloc_framebuffer(size_t size, int usage,
                                      buffer_handle_t* pHandle);

        // Allocates a pool-backed buffer wrapped in a private_handle_t.
        int gralloc_alloc_buffer(size_t size, int usage,
                                 buffer_handle_t* pHandle,
                                 int bufferType, int format,
                                 int width, int height);

        // Frees a handle produced by alloc_impl().
        int free_impl(private_handle_t const* hnd);

        // Common allocation path; bufferSize == 0 derives the size from
        // w/h/format.
        int alloc_impl(int w, int h, int format, int usage,
                       buffer_handle_t* pHandle, int* pStride,
                       size_t bufferSize = 0);

        // alloc_device_t entry points stored in the device vtable.
        static int gralloc_alloc(alloc_device_t* dev, int w, int h,
                                 int format, int usage,
                                 buffer_handle_t* pHandle,
                                 int* pStride);

        static int gralloc_free(alloc_device_t* dev, buffer_handle_t handle);

        static int gralloc_alloc_size(alloc_device_t* dev,
                                      int w, int h, int format,
                                      int usage, buffer_handle_t* pHandle,
                                      int* pStride, int bufferSize);

        static int gralloc_close(struct hw_device_t *dev);

        // Composition mode chosen from debug.sf.hw/debug.composition.type.
        int get_composition_type() const { return compositionType; }


    private:
        android::sp<IAllocController> mAllocCtrl;
        int compositionType;
        void getGrallocInformationFromFormat(int inputFormat,
                                             int *colorFormat,
                                             int *bufferType);
    };
}
#endif // GRALLOC_GPU_H_
diff --git a/libgralloc/gr.h b/libgralloc/gr.h
new file mode 100644
index 0000000..cc36d9a
--- /dev/null
+++ b/libgralloc/gr.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GR_H_
+#define GR_H_
+
+#include <stdint.h>
+#ifdef HAVE_ANDROID_OS // just want PAGE_SIZE define
+# include <asm/page.h>
+#else
+# include <sys/user.h>
+#endif
+#include <limits.h>
+#include <sys/cdefs.h>
+#include <hardware/gralloc.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include <cutils/native_handle.h>
+
+/*****************************************************************************/
+
+struct private_module_t;
+struct private_handle_t;
+
/* Rounds x up to the next multiple of the system page size
 * (PAGE_SIZE is assumed to be a power of two). */
inline size_t roundUpToPageSize(size_t x) {
    const size_t mask = PAGE_SIZE - 1;
    return (x + mask) & ~mask;
}
+
/* Rounds x up to the next multiple of 'align' (must be a power of two). */
inline size_t ALIGN(size_t x, size_t align) {
    const size_t mask = align - 1;
    return (x + mask) & ~mask;
}
+
+#define FALSE 0
+#define TRUE 1
+
+int mapFrameBufferLocked(struct private_module_t* module);
+int terminateBuffer(gralloc_module_t const* module, private_handle_t* hnd);
+size_t getBufferSizeAndDimensions(int width, int height, int format,
+ int& alignedw, int &alignedh);
+
+int decideBufferHandlingMechanism(int format, const char *compositionUsed,
+ int hasBlitEngine, int *needConversion,
+ int *useBufferDirectly);
+
+// Allocate buffer from width, height, format into a private_handle_t
+// It is the responsibility of the caller to free the buffer
+int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage);
+void free_buffer(private_handle_t *hnd);
+
+/*****************************************************************************/
+
/* Tiny RAII wrapper around a pthread mutex. */
class Locker {
    pthread_mutex_t mMutex;

public:
    inline Locker() { pthread_mutex_init(&mMutex, 0); }
    inline ~Locker() { pthread_mutex_destroy(&mMutex); }
    inline void lock() { pthread_mutex_lock(&mMutex); }
    inline void unlock() { pthread_mutex_unlock(&mMutex); }

    /* Scoped lock: acquires in the constructor, releases in the
     * destructor. */
    class Autolock {
        Locker& mLocker;
    public:
        inline Autolock(Locker& l) : mLocker(l) { mLocker.lock(); }
        inline ~Autolock() { mLocker.unlock(); }
    };
};
+
+#endif /* GR_H_ */
diff --git a/libgralloc/gralloc.cpp b/libgralloc/gralloc.cpp
new file mode 100644
index 0000000..a98baf8
--- /dev/null
+++ b/libgralloc/gralloc.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008, The Android Open Source Project
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <cutils/properties.h>
+#include <utils/RefBase.h>
+
+#include <linux/android_pmem.h>
+
+#include "gr.h"
+#include "gpu.h"
+#include "memalloc.h"
+#include "alloc_controller.h"
+
+using namespace gralloc;
+using android::sp;
+
+int fb_device_open(const hw_module_t* module, const char* name,
+ hw_device_t** device);
+
+static int gralloc_device_open(const hw_module_t* module, const char* name,
+ hw_device_t** device);
+
+extern int gralloc_lock(gralloc_module_t const* module,
+ buffer_handle_t handle, int usage,
+ int l, int t, int w, int h,
+ void** vaddr);
+
+extern int gralloc_unlock(gralloc_module_t const* module,
+ buffer_handle_t handle);
+
+extern int gralloc_register_buffer(gralloc_module_t const* module,
+ buffer_handle_t handle);
+
+extern int gralloc_unregister_buffer(gralloc_module_t const* module,
+ buffer_handle_t handle);
+
+extern int gralloc_perform(struct gralloc_module_t const* module,
+ int operation, ... );
+
+// HAL module methods
+static struct hw_module_methods_t gralloc_module_methods = {
+ open: gralloc_device_open
+};
+
+// HAL module initialize
+struct private_module_t HAL_MODULE_INFO_SYM = {
+ base: {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ version_major: 1,
+ version_minor: 0,
+ id: GRALLOC_HARDWARE_MODULE_ID,
+ name: "Graphics Memory Allocator Module",
+ author: "The Android Open Source Project",
+ methods: &gralloc_module_methods,
+ dso: 0,
+ reserved: {0},
+ },
+ registerBuffer: gralloc_register_buffer,
+ unregisterBuffer: gralloc_unregister_buffer,
+ lock: gralloc_lock,
+ unlock: gralloc_unlock,
+ perform: gralloc_perform,
+ reserved_proc: {0},
+ },
+ framebuffer: 0,
+ fbFormat: 0,
+ flags: 0,
+ numBuffers: 0,
+ bufferMask: 0,
+ lock: PTHREAD_MUTEX_INITIALIZER,
+ currentBuffer: 0,
+};
+
+// Open Gralloc device
+int gralloc_device_open(const hw_module_t* module, const char* name,
+ hw_device_t** device)
+{
+ int status = -EINVAL;
+ if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
+ const private_module_t* m = reinterpret_cast<const private_module_t*>(
+ module);
+ gpu_context_t *dev;
+ sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
+ dev = new gpu_context_t(m, alloc_ctrl);
+ *device = &dev->common;
+ status = 0;
+ } else {
+ status = fb_device_open(module, name, device);
+ }
+ return status;
+}
diff --git a/libgralloc/gralloc_priv.h b/libgralloc/gralloc_priv.h
new file mode 100644
index 0000000..0679621
--- /dev/null
+++ b/libgralloc/gralloc_priv.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRALLOC_PRIV_H_
+#define GRALLOC_PRIV_H_
+
+#include <stdint.h>
+#include <limits.h>
+#include <sys/cdefs.h>
+#include <hardware/gralloc.h>
+#include <pthread.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <cutils/native_handle.h>
+
+#include <linux/fb.h>
+
+#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
+#include "overlayLib.h"
+using namespace overlay;
+#endif
+
+#include <cutils/log.h>
+
+enum {
+ /* gralloc usage bits indicating the type
+ * of allocation that should be used */
+
+ /* ADSP heap is deprecated, use only if using pmem */
+ GRALLOC_USAGE_PRIVATE_ADSP_HEAP = GRALLOC_USAGE_PRIVATE_0,
+ /* SF heap is used for application buffers, is not secured */
+ GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP = GRALLOC_USAGE_PRIVATE_1,
+ /* SMI heap is deprecated, use only if using pmem */
+ GRALLOC_USAGE_PRIVATE_SMI_HEAP = GRALLOC_USAGE_PRIVATE_2,
+ /* SYSTEM heap comes from kernel vmalloc,
+ * can never be uncached, is not secured*/
+ GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP = GRALLOC_USAGE_PRIVATE_3,
+ /* IOMMU heap comes from manually allocated pages,
+ * can be cached/uncached, is not secured */
+ GRALLOC_USAGE_PRIVATE_IOMMU_HEAP = 0x01000000,
+ /* MM heap is a carveout heap for video, can be secured*/
+ GRALLOC_USAGE_PRIVATE_MM_HEAP = 0x02000000,
+ /* WRITEBACK heap is a carveout heap for writeback, can be secured*/
+ GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP = 0x04000000,
+ /* CAMERA heap is a carveout heap for camera, is not secured*/
+ GRALLOC_USAGE_PRIVATE_CAMERA_HEAP = 0x08000000,
+
+ /* Set this for allocating uncached memory (using O_DSYNC)
+ * cannot be used with noncontiguous heaps */
+ GRALLOC_USAGE_PRIVATE_UNCACHED = 0x00100000,
+
+ /* This flag needs to be set when using a non-contiguous heap from ION.
+ * If not set, the system heap is assumed to be coming from ashmem
+ */
+ GRALLOC_USAGE_PRIVATE_ION = 0x00200000,
+
+ /* This flag can be set to disable genlock synchronization
+ * for the gralloc buffer. If this flag is set the caller
+ * is required to perform explicit synchronization.
+ * WARNING - flag is outside the standard PRIVATE region
+ * and may need to be moved if the gralloc API changes
+ */
+ GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED = 0X00400000,
+
+ /* Set this flag when you need to avoid mapping the memory in userspace */
+ GRALLOC_USAGE_PRIVATE_DO_NOT_MAP = 0X00800000,
+
+ /* Buffer content should be displayed on an external display only */
+ GRALLOC_USAGE_EXTERNAL_ONLY = 0x00010000,
+
+ /* Only this buffer content should be displayed on external, even if
+ * other EXTERNAL_ONLY buffers are available. Used during suspend.
+ */
+ GRALLOC_USAGE_EXTERNAL_BLOCK = 0x00020000,
+};
+
+enum {
+ /* Gralloc perform enums
+ */
+ GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER = 0x080000001,
+};
+
+
+enum {
+ GPU_COMPOSITION,
+ C2D_COMPOSITION,
+ MDP_COMPOSITION,
+ CPU_COMPOSITION,
+};
+
+/* numbers of max buffers for page flipping */
+#define NUM_FRAMEBUFFERS_MIN 2
+#define NUM_FRAMEBUFFERS_MAX 3
+
+/* number of default buffers for page flipping */
+#define NUM_DEF_FRAME_BUFFERS 2
+#define NO_SURFACEFLINGER_SWAPINTERVAL
+#define INTERLACE_MASK 0x80
+#define S3D_FORMAT_MASK 0xFF000
+#define COLOR_FORMAT(x) (x & 0xFFF) // Max range for colorFormats is 0 - FFF
+#define DEVICE_PMEM "/dev/pmem"
+#define DEVICE_PMEM_ADSP "/dev/pmem_adsp"
+#define DEVICE_PMEM_SMIPOOL "/dev/pmem_smipool"
+/*****************************************************************************/
+#ifdef __cplusplus
+
+//XXX: Remove framebuffer specific classes and defines to a different header
+template <class T>
+struct Node
+{
+ T data;
+ Node<T> *next;
+};
+
+template <class T>
+class Queue
+{
+public:
+ Queue(): front(NULL), back(NULL), len(0) {dummy = new T;}
+ ~Queue()
+ {
+ clear();
+ delete dummy;
+ }
+ void push(const T& item) //add an item to the back of the queue
+ {
+ if(len != 0) { //if the queue is not empty
+ back->next = new Node<T>; //create a new node
+ back = back->next; //set the new node as the back node
+ back->data = item;
+ back->next = NULL;
+ } else {
+ back = new Node<T>;
+ back->data = item;
+ back->next = NULL;
+ front = back;
+ }
+ len++;
+ }
+ void pop() //remove the first item from the queue
+ {
+ if (isEmpty())
+ return; //if the queue is empty, no node to dequeue
+ T item = front->data;
+ Node<T> *tmp = front;
+ front = front->next;
+ delete tmp;
+ if(front == NULL) //if the queue is empty, update the back pointer
+ back = NULL;
+ len--;
+ return;
+ }
+ T& getHeadValue() const //return the value of the first item in the queue
+ { //without modification to the structure
+ if (isEmpty()) {
+ ALOGE("Error can't get head of empty queue");
+ return *dummy;
+ }
+ return front->data;
+ }
+
+ bool isEmpty() const //returns true if no elements are in the queue
+ {
+ return (front == NULL);
+ }
+
+ size_t size() const //returns the amount of elements in the queue
+ {
+ return len;
+ }
+
+private:
+ Node<T> *front;
+ Node<T> *back;
+ size_t len;
+ void clear()
+ {
+ while (!isEmpty())
+ pop();
+ }
+ T *dummy;
+};
+#endif
+
+enum {
+ /* OEM specific HAL formats */
+ HAL_PIXEL_FORMAT_NV12_ENCODEABLE = 0x102,
+ HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED = 0x108,
+ HAL_PIXEL_FORMAT_YCbCr_420_SP = 0x109,
+ HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO = 0x10A,
+ HAL_PIXEL_FORMAT_YCrCb_422_SP = 0x10B,
+ HAL_PIXEL_FORMAT_R_8 = 0x10D,
+ HAL_PIXEL_FORMAT_RG_88 = 0x10E,
+ HAL_PIXEL_FORMAT_INTERLACE = 0x180,
+
+};
+
+/* possible formats for 3D content*/
+enum {
+ HAL_NO_3D = 0x0000,
+ HAL_3D_IN_SIDE_BY_SIDE_L_R = 0x10000,
+ HAL_3D_IN_TOP_BOTTOM = 0x20000,
+ HAL_3D_IN_INTERLEAVE = 0x40000,
+ HAL_3D_IN_SIDE_BY_SIDE_R_L = 0x80000,
+ HAL_3D_OUT_SIDE_BY_SIDE = 0x1000,
+ HAL_3D_OUT_TOP_BOTTOM = 0x2000,
+ HAL_3D_OUT_INTERLEAVE = 0x4000,
+ HAL_3D_OUT_MONOSCOPIC = 0x8000
+};
+
+enum {
+ BUFFER_TYPE_UI = 0,
+ BUFFER_TYPE_VIDEO
+};
+
+#if defined(HDMI_DUAL_DISPLAY)
+enum hdmi_mirroring_state {
+ HDMI_NO_MIRRORING,
+ HDMI_UI_MIRRORING,
+ HDMI_ORIGINAL_RESOLUTION_MIRRORING
+};
+#endif
+/*****************************************************************************/
+
+struct private_module_t;
+struct private_handle_t;
+struct PmemAllocator;
+
+struct qbuf_t {
+ buffer_handle_t buf;
+ int idx;
+};
+
+enum buf_state {
+ SUB,
+ REF,
+ AVL
+};
+
+struct avail_t {
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+#ifdef __cplusplus
+ bool is_avail;
+ buf_state state;
+#endif
+};
+
+struct private_module_t {
+ gralloc_module_t base;
+
+ struct private_handle_t* framebuffer;
+ uint32_t fbFormat;
+ uint32_t flags;
+ uint32_t numBuffers;
+ uint32_t bufferMask;
+ pthread_mutex_t lock;
+ buffer_handle_t currentBuffer;
+
+ struct fb_var_screeninfo info;
+ struct fb_fix_screeninfo finfo;
+ float xdpi;
+ float ydpi;
+ float fps;
+ int swapInterval;
+#ifdef __cplusplus
+ Queue<struct qbuf_t> disp; // non-empty when buffer is ready for display
+#endif
+ int currentIdx;
+ struct avail_t avail[NUM_FRAMEBUFFERS_MAX];
+ pthread_mutex_t qlock;
+ pthread_cond_t qpost;
+
+ enum {
+ // flag to indicate we'll post this buffer
+ PRIV_USAGE_LOCKED_FOR_POST = 0x80000000,
+ PRIV_MIN_SWAP_INTERVAL = 0,
+ PRIV_MAX_SWAP_INTERVAL = 1,
+ };
+#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
+ Overlay* pobjOverlay;
+ int orientation;
+ bool videoOverlay;
+ uint32_t currentOffset;
+ int enableHDMIOutput; // holds the type of external display
+ bool trueMirrorSupport;
+ bool exitHDMIUILoop;
+ float actionsafeWidthRatio;
+ float actionsafeHeightRatio;
+ bool hdmiStateChanged;
+ hdmi_mirroring_state hdmiMirroringState;
+ pthread_mutex_t overlayLock;
+ pthread_cond_t overlayPost;
+#endif
+};
+
+/*****************************************************************************/
+
+#ifdef __cplusplus
+struct private_handle_t : public native_handle {
+#else
+struct private_handle_t {
+ native_handle_t nativeHandle;
+#endif
+ enum {
+ PRIV_FLAGS_FRAMEBUFFER = 0x00000001,
+ PRIV_FLAGS_USES_PMEM = 0x00000002,
+ PRIV_FLAGS_USES_PMEM_ADSP = 0x00000004,
+ PRIV_FLAGS_USES_ION = 0x00000008,
+ PRIV_FLAGS_USES_ASHMEM = 0x00000010,
+ PRIV_FLAGS_NEEDS_FLUSH = 0x00000020,
+ PRIV_FLAGS_DO_NOT_FLUSH = 0x00000040,
+ PRIV_FLAGS_SW_LOCK = 0x00000080,
+ PRIV_FLAGS_NONCONTIGUOUS_MEM = 0x00000100,
+ PRIV_FLAGS_HWC_LOCK = 0x00000200, // Set by HWC when storing the handle
+ PRIV_FLAGS_SECURE_BUFFER = 0x00000400,
+ PRIV_FLAGS_UNSYNCHRONIZED = 0x00000800, // For explicit synchronization
+ PRIV_FLAGS_NOT_MAPPED = 0x00001000, // Not mapped in userspace
+ PRIV_FLAGS_EXTERNAL_ONLY = 0x00002000, // Display on external only
+ PRIV_FLAGS_EXTERNAL_BLOCK = 0x00004000, // Display only this buffer on external
+ };
+
+ // file-descriptors
+ int fd;
+ int genlockHandle; // genlock handle to be dup'd by the binder
+ // ints
+ int magic;
+ int flags;
+ int size;
+ int offset;
+ int bufferType;
+
+ // FIXME: the attributes below should be out-of-line
+ int base;
+ int gpuaddr; // The gpu address mapped into the mmu; set to 0 when using ashmem (unused in that case)
+ int pid;
+ int format;
+ int width;
+ int height;
+ int genlockPrivFd; // local fd of the genlock device.
+
+#ifdef __cplusplus
+ static const int sNumInts = 12;
+ static const int sNumFds = 2;
+ static const int sMagic = 'gmsm';
+
+ private_handle_t(int fd, int size, int flags, int bufferType, int format, int width, int height) :
+ fd(fd), genlockHandle(-1), magic(sMagic), flags(flags), size(size), offset(0),
+ bufferType(bufferType), base(0), gpuaddr(0), pid(getpid()), format(format),
+ width(width), height(height), genlockPrivFd(-1)
+ {
+ version = sizeof(native_handle);
+ numInts = sNumInts;
+ numFds = sNumFds;
+ }
+ ~private_handle_t() {
+ magic = 0;
+ }
+
+ bool usesPhysicallyContiguousMemory() {
+ return (flags & PRIV_FLAGS_USES_PMEM) != 0;
+ }
+
+ static int validate(const native_handle* h) {
+ const private_handle_t* hnd = (const private_handle_t*)h;
+ if (!h || h->version != sizeof(native_handle) ||
+ h->numInts != sNumInts || h->numFds != sNumFds ||
+ hnd->magic != sMagic)
+ {
+ ALOGE("invalid gralloc handle (at %p)", h);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ static private_handle_t* dynamicCast(const native_handle* in) {
+ if (validate(in) == 0) {
+ return (private_handle_t*) in;
+ }
+ return NULL;
+ }
+#endif
+};
+
+#endif /* GRALLOC_PRIV_H_ */
diff --git a/libgralloc/ion_msm.h b/libgralloc/ion_msm.h
new file mode 100644
index 0000000..ae49bce
--- /dev/null
+++ b/libgralloc/ion_msm.h
@@ -0,0 +1,836 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_IOMMU: IOMMU memory
+ * @ION_HEAP_TYPE_CP: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous. Used for content protection.
+ * @ION_HEAP_END: helper for iterating over heaps
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_IOMMU,
+ ION_HEAP_TYPE_CP,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP)
+
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * Id's are spaced on purpose to allow new Id's to be inserted in-between (for
+ * possible fallbacks)
+ */
+
+enum ion_heap_ids {
+ INVALID_HEAP_ID = -1,
+ ION_CP_MM_HEAP_ID = 8,
+ ION_CP_MFC_HEAP_ID = 12,
+ ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+ ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+ ION_SF_HEAP_ID = 24,
+ ION_IOMMU_HEAP_ID = 25,
+ ION_QSECOM_HEAP_ID = 27,
+ ION_AUDIO_HEAP_ID = 28,
+
+ ION_MM_FIRMWARE_HEAP_ID = 29,
+ ION_SYSTEM_HEAP_ID = 30,
+
+ ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
+};
+
+enum ion_fixed_position {
+ NOT_FIXED,
+ FIXED_LOW,
+ FIXED_MIDDLE,
+ FIXED_HIGH,
+};
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ */
+#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
+
+#define ION_VMALLOC_HEAP_NAME "vmalloc"
+#define ION_AUDIO_HEAP_NAME "audio"
+#define ION_SF_HEAP_NAME "sf"
+#define ION_MM_HEAP_NAME "mm"
+#define ION_CAMERA_HEAP_NAME "camera_preview"
+#define ION_IOMMU_HEAP_NAME "iommu"
+#define ION_MFC_HEAP_NAME "mfc"
+#define ION_WB_HEAP_NAME "wb"
+#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
+#define ION_QSECOM_HEAP_NAME "qsecom"
+#define ION_FMEM_HEAP_NAME "fmem"
+
+#define CACHED 1
+#define UNCACHED 0
+
+#define ION_CACHE_SHIFT 0
+
+#define ION_SET_CACHE(__cache) ((__cache) << ION_CACHE_SHIFT)
+
+#define ION_IS_CACHED(__flags) ((__flags) & (1 << ION_CACHE_SHIFT))
+
+/*
+ * This flag allows clients when mapping into the IOMMU to specify to
+ * defer un-mapping from the IOMMU until the buffer memory is freed.
+ */
+#define ION_IOMMU_UNMAP_DELAYED 1
+
+#ifdef __KERNEL__
+#include <linux/err.h>
+#include <mach/ion.h>
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t unsigned long
+#define ion_virt_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for heap. When allocating (lower numbers
+ * will be allocated from first)
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @memory_type:Memory type used for the heap
+ * @extra_data: Extra data specific to each heap type
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ enum ion_memory_types memory_type;
+ void *extra_data;
+};
+
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type: Memory ID used to identify the memory to TZ
+ * @align: Alignment requirement for the memory
+ * @secure_base: Base address for securing the heap.
+ * Note: This might be different from actual base address
+ * of this heap in the case of a shared heap.
+ * @secure_size: Memory size for securing the heap.
+ * Note: This might be different from actual size
+ * of this heap in the case of a shared heap.
+ * @reusable Flag indicating whether this heap is reusable or not.
+ * (see FMEM)
+ * @mem_is_fmem Flag indicating whether this memory is coming from fmem
+ * or not.
+ * @fixed_position If nonzero, position in the fixed area.
+ * @virt_addr: Virtual address used when using fmem.
+ * @request_region: function to be called when the number of allocations
+ * goes from 0 -> 1
+ * @release_region: function to be called when the number of allocations
+ * goes from 1 -> 0
+ * @setup_region: function to be called upon ion registration
+ *
+ */
+struct ion_cp_heap_pdata {
+ enum ion_permission_type permission_type;
+ unsigned int align;
+ ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+ size_t secure_size; /* Size used for securing heap when heap is shared*/
+ int reusable;
+ int mem_is_fmem;
+ enum ion_fixed_position fixed_position;
+ ion_virt_addr_t *virt_addr;
+ int (*request_region)(void *);
+ int (*release_region)(void *);
+ void *(*setup_region)(void);
+};
+
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id: Id of heap that this heap must be adjacent to.
+ * @align: Alignment requirement for the memory
+ * @mem_is_fmem Flag indicating whether this memory is coming from fmem
+ * or not.
+ * @fixed_position If nonzero, position in the fixed area.
+ * @request_region: function to be called when the number of allocations
+ * goes from 0 -> 1
+ * @release_region: function to be called when the number of allocations
+ * goes from 1 -> 0
+ * @setup_region: function to be called upon ion registration
+ *
+ */
+struct ion_co_heap_pdata {
+ int adjacent_mem_id;
+ unsigned int align;
+ int mem_is_fmem;
+ enum ion_fixed_position fixed_position;
+ int (*request_region)(void *);
+ int (*release_region)(void *);
+ void *(*setup_region)(void);
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @request_region: function to be called when the number of allocations goes
+ * from 0 -> 1
+ * @release_region: function to be called when the number of allocations goes
+ * from 1 -> 0
+ * @setup_region: function to be called upon ion registration
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ int (*request_region)(void *);
+ int (*release_region)(void *);
+ void *(*setup_region)(void);
+ struct ion_platform_heap heaps[];
+};
+
+#ifdef CONFIG_ION
+
+/**
+ * ion_client_create() - allocate a client and returns it
+ * @dev: the global ion device
+ * @heap_mask: mask of heaps this client can allocate from
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask, const char *name);
+
+/**
+ * msm_ion_client_create - allocate a client using the ion_device specified in
+ * drivers/gpu/ion/msm/msm_ion.c
+ *
+ * heap_mask and name are the same as ion_client_create, return values
+ * are the same as ion_client_create.
+ */
+
+struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks have
+ * alignment requirements of some kind
+ * @flags: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from lowest to highest order bit
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_map_dma should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ * @flags: flags for this mapping
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address. If no flags are specified, this
+ * will return a non-secure uncached mapping.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+ unsigned long flags);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_map_dma - create a dma mapping for a given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Return an sglist describing the given handle
+ */
+struct scatterlist *ion_map_dma(struct ion_client *client,
+ struct ion_handle *handle,
+ unsigned long flags);
+
+/**
+ * ion_unmap_dma() - destroy a dma mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share() - given a handle, obtain a buffer to pass to other clients
+ * @client: the client
+ * @handle: the handle to share
+ *
+ * Given a handle, return a buffer, which exists in a global name
+ * space, and can be passed to other clients. Should be passed into ion_import
+ * to obtain a new handle for this buffer.
+ *
+ * NOTE: This function does not take an extra reference. The burden is on the
+ * caller to make sure the buffer doesn't go away while it's being passed to
+ * another client. That is, ion_free should not be called on this handle until
+ * the buffer has been imported into the other client.
+ */
+struct ion_buffer *ion_share(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_import() - given an buffer in another client, import it
+ * @client: the client
+ * @buffer: the buffer to import (as obtained from ion_share)
+ *
+ * Given a buffer, add it to the client and return the handle to use to refer
+ * to it further. This is called to share a handle from one kernel client to
+ * another.
+ */
+struct ion_handle *ion_import(struct ion_client *client,
+ struct ion_buffer *buffer);
+
+/**
+ * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
+ * @client: the client
+ * @fd: the fd
+ *
+ * A helper function for drivers that will be receiving ion buffers shared
+ * with them from userspace. These buffers are represented by a file
+ * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
+ * This function converts that fd into the underlying buffer, and returns
+ * the handle to use to refer to it further.
+ */
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *flags);
+
+
+/**
+ * ion_map_iommu - map the given handle into an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to map
+ * @domain_num - domain number to map to
+ * @partition_num - partition number to allocate iova from
+ * @align - alignment for the iova
+ * @iova_length - length of iova to map. If the iova length is
+ * greater than the handle length, the remaining
+ * address space will be mapped to a dummy buffer.
+ * @iova - pointer to store the iova address
+ * @buffer_size - pointer to store the size of the buffer
+ * @flags - flags for options to map
+ * @iommu_flags - flags specific to the iommu.
+ *
+ * Maps the handle into the iova space specified via domain number. Iova
+ * will be allocated from the partition specified via partition_num.
+ * Returns 0 on success, negative value on error.
+ */
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * gives the allocated size of a handle. returns 0 on success, negative
+ * value on error
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *size);
+
+/**
+ * ion_unmap_iommu - unmap the handle from an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to unmap
+ * @domain_num - domain to unmap from
+ * @partition_num - partition to unmap from
+ *
+ * Decrement the reference count on the iommu mapping. If the count is
+ * 0, the mapping will be removed from the iommu.
+ */
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num);
+
+
+/**
+ * ion_secure_heap - secure a heap
+ *
+ * @client - a client that has allocated from the heap heap_id
+ * @heap_id - heap id to secure.
+ *
+ * Secure a heap
+ * Returns 0 on success
+ */
+int ion_secure_heap(struct ion_device *dev, int heap_id);
+
+/**
+ * ion_unsecure_heap - un-secure a heap
+ *
+ * @client - a client that has allocated from the heap heap_id
+ * @heap_id - heap id to un-secure.
+ *
+ * Un-secure a heap
+ * Returns 0 on success
+ */
+int ion_unsecure_heap(struct ion_device *dev, int heap_id);
+
+/**
+ * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
+ *
+ * @heap_id - heap id to secure.
+ *
+ * Secure a heap
+ * Returns 0 on success
+ */
+int msm_ion_secure_heap(int heap_id);
+
+/**
+ * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
+ *
+ * @heap_id - heap id to secure.
+ *
+ * Un-secure a heap
+ * Returns 0 on success
+ */
+int msm_ion_unsecure_heap(int heap_id);
+
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr - virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ * ION_IOC_CLEAN_CACHES
+ * ION_IOC_INV_CACHES
+ * ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+ void *vaddr, unsigned long len, unsigned int cmd);
+
+#else
+static inline struct ion_client *ion_client_create(struct ion_device *dev,
+ unsigned int heap_mask, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
+ const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_client_destroy(struct ion_client *client) { }
+
+static inline struct ion_handle *ion_alloc(struct ion_client *client,
+ size_t len, size_t align, unsigned int flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_free(struct ion_client *client,
+ struct ion_handle *handle) { }
+
+
+static inline int ion_phys(struct ion_client *client,
+ struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
+{
+ return -ENODEV;
+}
+
+static inline void *ion_map_kernel(struct ion_client *client,
+ struct ion_handle *handle, unsigned long flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_kernel(struct ion_client *client,
+ struct ion_handle *handle) { }
+
+static inline struct scatterlist *ion_map_dma(struct ion_client *client,
+ struct ion_handle *handle, unsigned long flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_dma(struct ion_client *client,
+ struct ion_handle *handle) { }
+
+static inline struct ion_buffer *ion_share(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_handle *ion_import(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_handle *ion_import_fd(struct ion_client *client,
+ int fd)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_flags(struct ion_client *client,
+ struct ion_handle *handle, unsigned long *flags)
+{
+ return -ENODEV;
+}
+
+static inline int ion_map_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline void ion_unmap_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num)
+{
+ return;
+}
+
+static inline int ion_secure_heap(struct ion_device *dev, int heap_id)
+{
+ return -ENODEV;
+
+}
+
+static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id)
+{
+ return -ENODEV;
+}
+
+static inline int msm_ion_secure_heap(int heap_id)
+{
+ return -ENODEV;
+
+}
+
+static inline int msm_ion_unsecure_heap(int heap_id)
+{
+ return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_op(struct ion_client *client,
+ struct ion_handle *handle, void *vaddr,
+ unsigned long len, unsigned int cmd)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to refer
+ * to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int flags;
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ struct ion_handle *handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle: handle with data to flush
+ * @fd: fd to flush
+ * @vaddr: userspace virtual address mapped with mmap
+ * @offset: offset into the handle to flush
+ * @length: length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed
+ */
+struct ion_flush_data {
+ struct ion_handle *handle;
+ int fd;
+ void *vaddr;
+ unsigned int offset;
+ unsigned int length;
+};
+
+/* struct ion_flag_data - information about flags for this buffer
+ *
+ * @handle: handle to get flags from
+ * @flags: flags of this handle
+ *
+ * Takes handle as an input and outputs the flags from the handle
+ * in the flag field.
+ */
+struct ion_flag_data {
+ struct ion_handle *handle;
+ unsigned long flags;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MAGIC, 7, \
+ struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MAGIC, 8, \
+ struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MAGIC, 9, \
+ struct ion_flush_data)
+
+/**
+ * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
+ *
+ * Gets the flags of the current handle which indicate cachability,
+ * secure state etc.
+ */
+#define ION_IOC_GET_FLAGS _IOWR(ION_IOC_MAGIC, 10, \
+ struct ion_flag_data)
+#endif /* _LINUX_ION_H */
diff --git a/libgralloc/ionalloc.cpp b/libgralloc/ionalloc.cpp
new file mode 100644
index 0000000..9ff0a5e
--- /dev/null
+++ b/libgralloc/ionalloc.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/ioctl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <cutils/log.h>
+#include <errno.h>
+#include "gralloc_priv.h"
+#include "ionalloc.h"
+
+using gralloc::IonAlloc;
+
+#define ION_DEVICE "/dev/ion"
+
+int IonAlloc::open_device()
+{
+ if(mIonFd == FD_INIT)
+ mIonFd = open(ION_DEVICE, O_RDONLY);
+
+ if(mIonFd < 0 ) {
+ ALOGE("%s: Failed to open ion device - %s",
+ __FUNCTION__, strerror(errno));
+ mIonFd = FD_INIT;
+ return -errno;
+ }
+ return 0;
+}
+
+void IonAlloc::close_device()
+{
+ if(mIonFd >= 0)
+ close(mIonFd);
+ mIonFd = FD_INIT;
+}
+
+int IonAlloc::alloc_buffer(alloc_data& data)
+{
+ int err = 0;
+ int ionSyncFd = FD_INIT;
+ int iFd = FD_INIT;
+ struct ion_handle_data handle_data;
+ struct ion_fd_data fd_data;
+ struct ion_allocation_data ionAllocData;
+
+ void *base = 0;
+
+ ionAllocData.len = data.size;
+ ionAllocData.align = data.align;
+ ionAllocData.flags = data.flags;
+
+ err = open_device();
+ if (err)
+ return err;
+
+ if(data.uncached) {
+ // Use the sync FD to alloc and map
+ // when we need uncached memory
+ // FIX: O_DSYNC defined to open uncached - add that in kernel
+ //ionSyncFd = open(ION_DEVICE, O_RDONLY|O_DSYNC);
+ ionSyncFd = open(ION_DEVICE, O_RDONLY);
+ if(ionSyncFd < 0) {
+ ALOGE("%s: Failed to open ion device - %s",
+ __FUNCTION__, strerror(errno));
+ return -errno;
+ }
+ iFd = ionSyncFd;
+ } else {
+ iFd = mIonFd;
+ }
+
+ if(ioctl(iFd, ION_IOC_ALLOC, &ionAllocData)) {
+ err = -errno;
+ ALOGE("ION_IOC_ALLOC failed with error - %s", strerror(errno));
+ if(ionSyncFd >= 0)
+ close(ionSyncFd);
+ ionSyncFd = FD_INIT;
+ return err;
+ }
+
+ fd_data.handle = ionAllocData.handle;
+ handle_data.handle = ionAllocData.handle;
+ if(ioctl(iFd, ION_IOC_MAP, &fd_data)) {
+ err = -errno;
+ ALOGE("%s: ION_IOC_MAP failed with error - %s",
+ __FUNCTION__, strerror(errno));
+ ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+ if(ionSyncFd >= 0)
+ close(ionSyncFd);
+ ionSyncFd = FD_INIT;
+ return err;
+ }
+
+ //if(!(data.flags & ION_SECURE) &&
+ if(!(data.allocType & private_handle_t::PRIV_FLAGS_NOT_MAPPED)) {
+
+ base = mmap(0, ionAllocData.len, PROT_READ|PROT_WRITE,
+ MAP_SHARED, fd_data.fd, 0);
+ if(base == MAP_FAILED) {
+ err = -errno;
+ ALOGE("%s: Failed to map the allocated memory: %s",
+ __FUNCTION__, strerror(errno));
+ ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+ ionSyncFd = FD_INIT;
+ return err;
+ }
+ memset(base, 0, ionAllocData.len);
+ // Clean cache after memset
+ clean_buffer(base, data.size, data.offset, fd_data.fd);
+ }
+
+ //Close the uncached FD since we no longer need it;
+ if(ionSyncFd >= 0)
+ close(ionSyncFd);
+ ionSyncFd = FD_INIT;
+
+ data.base = base;
+ data.fd = fd_data.fd;
+ ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+ ALOGD("ion: Allocated buffer base:%p size:%d fd:%d",
+ data.base, ionAllocData.len, data.fd);
+ return 0;
+}
+
+
+int IonAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+ ALOGD("ion: Freeing buffer base:%p size:%d fd:%d",
+ base, size, fd);
+ int err = 0;
+ err = open_device();
+ if (err)
+ return err;
+
+ if(base)
+ err = unmap_buffer(base, size, offset);
+ close(fd);
+ return err;
+}
+
+int IonAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+ int err = 0;
+ void *base = 0;
+ // It is a (quirky) requirement of ION to have opened the
+ // ion fd in the process that is doing the mapping
+ err = open_device();
+ if (err)
+ return err;
+
+ base = mmap(0, size, PROT_READ| PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ *pBase = base;
+ if(base == MAP_FAILED) {
+ err = -errno;
+ ALOGD("ion: Failed to map memory in the client: %s",
+ strerror(errno));
+ } else {
+ ALOGD("ion: Mapped buffer base:%p size:%d offset:%d fd:%d",
+ base, size, offset, fd);
+ }
+ return err;
+}
+
+int IonAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+ ALOGD("ion: Unmapping buffer base:%p size:%d", base, size);
+ int err = 0;
+ if(munmap(base, size)) {
+ err = -errno;
+ ALOGE("ion: Failed to unmap memory at %p : %s",
+ base, strerror(errno));
+ }
+ return err;
+
+}
+int IonAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+ struct ion_flush_data flush_data;
+ struct ion_fd_data fd_data;
+ struct ion_handle_data handle_data;
+ struct ion_handle* handle;
+ int err = 0;
+
+ err = open_device();
+ if (err)
+ return err;
+
+ fd_data.fd = fd;
+ if (ioctl(mIonFd, ION_IOC_IMPORT, &fd_data)) {
+ err = -errno;
+ ALOGE("%s: ION_IOC_IMPORT failed with error - %s",
+ __FUNCTION__, strerror(errno));
+ return err;
+ }
+
+ handle_data.handle = fd_data.handle;
+ flush_data.handle = fd_data.handle;
+ flush_data.vaddr = base;
+ flush_data.offset = offset;
+ flush_data.length = size;
+ if(ioctl(mIonFd, ION_IOC_CLEAN_INV_CACHES, &flush_data)) {
+ err = -errno;
+ ALOGE("%s: ION_IOC_CLEAN_INV_CACHES failed with error - %s",
+ __FUNCTION__, strerror(errno));
+ ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+ return err;
+ }
+ ioctl(mIonFd, ION_IOC_FREE, &handle_data);
+ return 0;
+}
+
diff --git a/libgralloc/ionalloc.h b/libgralloc/ionalloc.h
new file mode 100644
index 0000000..be26cd7
--- /dev/null
+++ b/libgralloc/ionalloc.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_IONALLOC_H
+#define GRALLOC_IONALLOC_H
+
+#include "memalloc.h"
+#include "ion_msm.h"
+
+namespace gralloc {
+
+ class IonAlloc : public IMemAlloc {
+
+ public:
+ virtual int alloc_buffer(alloc_data& data);
+
+ virtual int free_buffer(void *base, size_t size,
+ int offset, int fd);
+
+ virtual int map_buffer(void **pBase, size_t size,
+ int offset, int fd);
+
+ virtual int unmap_buffer(void *base, size_t size,
+ int offset);
+
+ virtual int clean_buffer(void*base, size_t size,
+ int offset, int fd);
+
+ IonAlloc() { mIonFd = FD_INIT; }
+
+ ~IonAlloc() { close_device(); }
+
+ private:
+ int mIonFd;
+
+ int open_device();
+
+ void close_device();
+
+ };
+
+}
+
+#endif /* GRALLOC_IONALLOC_H */
+
diff --git a/libgralloc/mapper.cpp b/libgralloc/mapper.cpp
new file mode 100755
index 0000000..c7ee7d4
--- /dev/null
+++ b/libgralloc/mapper.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits.h>
+#include <errno.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <linux/ashmem.h>
+
+#include <cutils/log.h>
+#include <cutils/atomic.h>
+#include <cutils/ashmem.h>
+
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+#include <genlock.h>
+
+#include <linux/android_pmem.h>
+
+#include "gralloc_priv.h"
+#include "gr.h"
+#include "alloc_controller.h"
+#include "memalloc.h"
+
+using namespace gralloc;
+using android::sp;
+/*****************************************************************************/
+
+// Return the type of allocator -
+// these are used for mapping/unmapping
+static sp<IMemAlloc> getAllocator(int flags)
+{
+ sp<IMemAlloc> memalloc;
+ sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
+ memalloc = alloc_ctrl->getAllocator(flags);
+ return memalloc;
+}
+
+static int gralloc_map(gralloc_module_t const* module,
+ buffer_handle_t handle,
+ void** vaddr)
+{
+ private_handle_t* hnd = (private_handle_t*)handle;
+ void *mappedAddress;
+ if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
+ !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
+ size_t size = hnd->size;
+ sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+ int err = memalloc->map_buffer(&mappedAddress, size,
+ hnd->offset, hnd->fd);
+ if(err) {
+ ALOGE("Could not mmap handle %p, fd=%d (%s)",
+ handle, hnd->fd, strerror(errno));
+ hnd->base = 0;
+ return -errno;
+ }
+
+ if (mappedAddress == MAP_FAILED) {
+ ALOGE("Could not mmap handle %p, fd=%d (%s)",
+ handle, hnd->fd, strerror(errno));
+ hnd->base = 0;
+ return -errno;
+ }
+ hnd->base = intptr_t(mappedAddress) + hnd->offset;
+ //ALOGD("gralloc_map() succeeded fd=%d, off=%d, size=%d, vaddr=%p",
+ // hnd->fd, hnd->offset, hnd->size, mappedAddress);
+ }
+ *vaddr = (void*)hnd->base;
+ return 0;
+}
+
+static int gralloc_unmap(gralloc_module_t const* module,
+ buffer_handle_t handle)
+{
+ private_handle_t* hnd = (private_handle_t*)handle;
+ if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
+ int err = -EINVAL;
+ void* base = (void*)hnd->base;
+ size_t size = hnd->size;
+ sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+ if(memalloc != NULL)
+ err = memalloc->unmap_buffer(base, size, hnd->offset);
+ if (err) {
+ ALOGE("Could not unmap memory at address %p", base);
+ }
+ }
+ hnd->base = 0;
+ return 0;
+}
+
+/*****************************************************************************/
+
+static pthread_mutex_t sMapLock = PTHREAD_MUTEX_INITIALIZER;
+
+/*****************************************************************************/
+
+int gralloc_register_buffer(gralloc_module_t const* module,
+ buffer_handle_t handle)
+{
+ if (private_handle_t::validate(handle) < 0)
+ return -EINVAL;
+
+ // In this implementation, we don't need to do anything here
+
+ /* NOTE: we need to initialize the buffer as not mapped/not locked
+ * because it shouldn't when this function is called the first time
+ * in a new process. Ideally these flags shouldn't be part of the
+ * handle, but instead maintained in the kernel or at least
+ * out-of-line
+ */
+
+ // if this handle was created in this process, then we keep it as is.
+ private_handle_t* hnd = (private_handle_t*)handle;
+ if (hnd->pid != getpid()) {
+ hnd->base = 0;
+ void *vaddr;
+ int err = gralloc_map(module, handle, &vaddr);
+ if (err) {
+ ALOGE("%s: gralloc_map failed", __FUNCTION__);
+ return err;
+ }
+
+ // Reset the genlock private fd flag in the handle
+ hnd->genlockPrivFd = -1;
+
+ // Check if there is a valid lock attached to the handle.
+ if (-1 == hnd->genlockHandle) {
+ ALOGE("%s: the lock is invalid.", __FUNCTION__);
+ gralloc_unmap(module, handle);
+ hnd->base = 0;
+ return -EINVAL;
+ }
+
+ // Attach the genlock handle
+ if (GENLOCK_NO_ERROR != genlock_attach_lock((native_handle_t *)handle)) {
+ ALOGE("%s: genlock_attach_lock failed", __FUNCTION__);
+ gralloc_unmap(module, handle);
+ hnd->base = 0;
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int gralloc_unregister_buffer(gralloc_module_t const* module,
+ buffer_handle_t handle)
+{
+ if (private_handle_t::validate(handle) < 0)
+ return -EINVAL;
+
+ /*
+ * If the buffer has been mapped during a lock operation, it's time
+ * to un-map it. It's an error to be here with a locked buffer.
+ * NOTE: the framebuffer is handled differently and is never unmapped.
+ */
+
+ private_handle_t* hnd = (private_handle_t*)handle;
+
+ // never unmap buffers that were created in this process
+ if (hnd->pid != getpid()) {
+ if (hnd->base != 0) {
+ gralloc_unmap(module, handle);
+ }
+ hnd->base = 0;
+ // Release the genlock
+ if (-1 != hnd->genlockHandle) {
+ return genlock_release_lock((native_handle_t *)handle);
+ } else {
+ ALOGE("%s: there was no genlock attached to this buffer", __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int terminateBuffer(gralloc_module_t const* module,
+ private_handle_t* hnd)
+{
+ /*
+ * If the buffer has been mapped during a lock operation, it's time
+ * to un-map it. It's an error to be here with a locked buffer.
+ */
+
+ if (hnd->base != 0) {
+ // this buffer was mapped, unmap it now
+ if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
+ private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP |
+ private_handle_t::PRIV_FLAGS_USES_ASHMEM |
+ private_handle_t::PRIV_FLAGS_USES_ION)) {
+ if (hnd->pid != getpid()) {
+ // ... unless it's a "master" pmem buffer, that is a buffer
+ // mapped in the process it's been allocated.
+ // (see gralloc_alloc_buffer())
+ gralloc_unmap(module, hnd);
+ }
+ } else {
+ ALOGE("terminateBuffer: unmapping a non pmem/ashmem buffer flags = 0x%x", hnd->flags);
+ gralloc_unmap(module, hnd);
+ }
+ }
+
+ return 0;
+}
+
+int gralloc_lock(gralloc_module_t const* module,
+ buffer_handle_t handle, int usage,
+ int l, int t, int w, int h,
+ void** vaddr)
+{
+ if (private_handle_t::validate(handle) < 0)
+ return -EINVAL;
+
+ int err = 0;
+ private_handle_t* hnd = (private_handle_t*)handle;
+ if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
+ if (hnd->base == 0) {
+ // we need to map for real
+ pthread_mutex_t* const lock = &sMapLock;
+ pthread_mutex_lock(lock);
+ err = gralloc_map(module, handle, vaddr);
+ pthread_mutex_unlock(lock);
+ }
+ *vaddr = (void*)hnd->base;
+
+ // Lock the buffer for read/write operation as specified. Write lock
+ // has a higher priority over read lock.
+ int lockType = 0;
+ if (usage & GRALLOC_USAGE_SW_WRITE_MASK) {
+ lockType = GENLOCK_WRITE_LOCK;
+ } else if (usage & GRALLOC_USAGE_SW_READ_MASK) {
+ lockType = GENLOCK_READ_LOCK;
+ }
+
+ int timeout = GENLOCK_MAX_TIMEOUT;
+ if (GENLOCK_NO_ERROR != genlock_lock_buffer((native_handle_t *)handle,
+ (genlock_lock_type)lockType,
+ timeout)) {
+ ALOGE("%s: genlock_lock_buffer (lockType=0x%x) failed", __FUNCTION__,
+ lockType);
+ return -EINVAL;
+ } else {
+ // Mark this buffer as locked for SW read/write operation.
+ hnd->flags |= private_handle_t::PRIV_FLAGS_SW_LOCK;
+ }
+
+ if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) &&
+ !(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
+ // Mark the buffer to be flushed after cpu read/write
+ hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
+ }
+ }
+ return err;
+}
+
+int gralloc_unlock(gralloc_module_t const* module,
+ buffer_handle_t handle)
+{
+ if (private_handle_t::validate(handle) < 0)
+ return -EINVAL;
+
+ private_handle_t* hnd = (private_handle_t*)handle;
+
+ if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
+ int err;
+ sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
+ err = memalloc->clean_buffer((void*)hnd->base,
+ hnd->size, hnd->offset, hnd->fd);
+ ALOGE_IF(err < 0, "cannot flush handle %p (offs=%x len=%x, flags = 0x%x) err=%s\n",
+ hnd, hnd->offset, hnd->size, hnd->flags, strerror(errno));
+ hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
+ }
+
+ if ((hnd->flags & private_handle_t::PRIV_FLAGS_SW_LOCK)) {
+ // Unlock the buffer.
+ if (GENLOCK_NO_ERROR != genlock_unlock_buffer((native_handle_t *)handle)) {
+ ALOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+ return -EINVAL;
+ } else
+ hnd->flags &= ~private_handle_t::PRIV_FLAGS_SW_LOCK;
+ }
+ return 0;
+}
+
+/*****************************************************************************/
+
+int gralloc_perform(struct gralloc_module_t const* module,
+ int operation, ... )
+{
+ int res = -EINVAL;
+ va_list args;
+ va_start(args, operation);
+ switch (operation) {
+ case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
+ {
+ int fd = va_arg(args, int);
+ size_t size = va_arg(args, size_t);
+ size_t offset = va_arg(args, size_t);
+ void* base = va_arg(args, void*);
+ int width = va_arg(args, int);
+ int height = va_arg(args, int);
+ int format = va_arg(args, int);
+
+ native_handle_t** handle = va_arg(args, native_handle_t**);
+ int memoryFlags = va_arg(args, int);
+ private_handle_t* hnd = (private_handle_t*)native_handle_create(
+ private_handle_t::sNumFds, private_handle_t::sNumInts);
+ hnd->magic = private_handle_t::sMagic;
+ hnd->fd = fd;
+ unsigned int contigFlags = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+ GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
+ GRALLOC_USAGE_PRIVATE_SMI_HEAP;
+
+ if (memoryFlags & contigFlags) {
+ // check if the buffer is a pmem buffer
+ pmem_region region;
+ if (ioctl(fd, PMEM_GET_SIZE, &region) < 0)
+ hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
+ else
+ hnd->flags = private_handle_t::PRIV_FLAGS_USES_PMEM |
+ private_handle_t::PRIV_FLAGS_DO_NOT_FLUSH;
+ } else {
+ if (memoryFlags & GRALLOC_USAGE_PRIVATE_ION)
+ hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
+ else
+ hnd->flags = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
+ }
+
+ hnd->size = size;
+ hnd->offset = offset;
+ hnd->base = intptr_t(base) + offset;
+ hnd->gpuaddr = 0;
+ hnd->width = width;
+ hnd->height = height;
+ hnd->format = format;
+ *handle = (native_handle_t *)hnd;
+ res = 0;
+ break;
+
+ }
+ default:
+ break;
+ }
+ va_end(args);
+ return res;
+}
diff --git a/libgralloc/memalloc.h b/libgralloc/memalloc.h
new file mode 100644
index 0000000..13a54e7
--- /dev/null
+++ b/libgralloc/memalloc.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_MEMALLOC_H
+#define GRALLOC_MEMALLOC_H
+
+#include <stdlib.h>
+#include <utils/RefBase.h>
+
+namespace gralloc {
+
+ struct alloc_data {
+ void *base;
+ int fd;
+ int offset;
+ size_t size;
+ size_t align;
+ unsigned int pHandle;
+ bool uncached;
+ unsigned int flags;
+ int allocType;
+ };
+
+ class IMemAlloc : public android::RefBase {
+
+ public:
+ // Allocate buffer - fill in the alloc_data
+ // structure and pass it in. Mapped address
+ // and fd are returned in the alloc_data struct
+ virtual int alloc_buffer(alloc_data& data) = 0;
+
+ // Free buffer
+ virtual int free_buffer(void *base, size_t size,
+ int offset, int fd) = 0;
+
+ // Map buffer
+ virtual int map_buffer(void **pBase, size_t size,
+ int offset, int fd) = 0;
+
+ // Unmap buffer
+ virtual int unmap_buffer(void *base, size_t size,
+ int offset) = 0;
+
+ // Clean and invalidate
+ virtual int clean_buffer(void *base, size_t size,
+ int offset, int fd) = 0;
+
+ // Destructor
+ virtual ~IMemAlloc() {};
+
+ enum {
+ FD_INIT = -1,
+ };
+
+ };
+
+} // end gralloc namespace
+#endif // GRALLOC_MEMALLOC_H
diff --git a/libgralloc/pmem_bestfit_alloc.cpp b/libgralloc/pmem_bestfit_alloc.cpp
new file mode 100644
index 0000000..e3875e9
--- /dev/null
+++ b/libgralloc/pmem_bestfit_alloc.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/log.h>
+
+#include "pmem_bestfit_alloc.h"
+
+
+// align all the memory blocks on a cache-line boundary
+const int SimpleBestFitAllocator::kMemoryAlign = 32;
+
+// Default constructor: heap is unsized until setSize() is called.
+SimpleBestFitAllocator::SimpleBestFitAllocator()
+ : mHeapSize(0)
+{
+}
+
+// Convenience constructor: immediately sizes the heap via setSize().
+SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
+ : mHeapSize(0)
+{
+ setSize(size);
+}
+
+// Destructor: frees every chunk node still tracked by the free/used list.
+SimpleBestFitAllocator::~SimpleBestFitAllocator()
+{
+ while(!mList.isEmpty()) {
+ delete mList.remove(mList.head());
+ }
+}
+
+// Set the heap size (one-shot: returns -EINVAL if already sized).
+// The size is rounded up to a whole page and the entire heap becomes a
+// single free chunk.  Returns the caller's (unrounded) size on success.
+ssize_t SimpleBestFitAllocator::setSize(size_t size)
+{
+ Locker::Autolock _l(mLock);
+ if (mHeapSize != 0) return -EINVAL;
+ size_t pagesize = getpagesize();
+ // Round the heap size up to a page boundary.
+ mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
+ // Chunk starts/sizes are tracked in kMemoryAlign units, not bytes.
+ chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
+ mList.insertHead(node);
+ return size;
+}
+
+// Returns the page-rounded heap size in bytes (0 if not yet sized).
+size_t SimpleBestFitAllocator::size() const
+{
+ return mHeapSize;
+}
+
+// Thread-safe entry point for allocation: takes the lock and delegates
+// to alloc().  Returns a byte offset, or a negative errno-style value.
+ssize_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
+{
+ Locker::Autolock _l(mLock);
+ if (mHeapSize == 0) return -EINVAL;
+ ssize_t offset = alloc(size, flags);
+ return offset;
+}
+
+// Thread-safe entry point for freeing: takes the lock and delegates to
+// dealloc().  Returns 0 on success, -ENOENT if 'offset' is not a live
+// allocation, -EINVAL if the heap was never sized.
+ssize_t SimpleBestFitAllocator::deallocate(size_t offset)
+{
+ Locker::Autolock _l(mLock);
+ if (mHeapSize == 0) return -EINVAL;
+ chunk_t const * const freed = dealloc(offset);
+ if (freed) {
+ return 0;
+ }
+ return -ENOENT;
+}
+
+// Best-fit allocation of 'size' bytes from the managed heap.  Returns
+// the byte offset of the new allocation, or -ENOMEM when no free chunk
+// is large enough ('flags' is currently unused).  Must be called with
+// mLock held (see allocate()).
+ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
+{
+    if (size == 0) {
+        return 0;
+    }
+    // Internally sizes and offsets are tracked in kMemoryAlign units.
+    size = (size + kMemoryAlign-1) / kMemoryAlign;
+    chunk_t* free_chunk = 0;
+    chunk_t* cur = mList.head();
+
+    size_t pagesize = getpagesize();
+    while (cur) {
+        // Units wasted at the front of the chunk to page-align the result.
+        int extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
+
+        // best fit
+        if (cur->free && (cur->size >= (size+extra))) {
+            if ((!free_chunk) || (cur->size < free_chunk->size)) {
+                free_chunk = cur;
+            }
+            if (cur->size == size) {
+                break;      // exact fit - stop searching
+            }
+        }
+        cur = cur->next;
+    }
+
+    if (free_chunk) {
+        const size_t free_size = free_chunk->size;
+        free_chunk->free = 0;
+        free_chunk->size = size;
+        if (free_size > size) {
+            int extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
+            if (extra) {
+                // Carve the alignment padding off as its own free chunk.
+                chunk_t* split = new chunk_t(free_chunk->start, extra);
+                free_chunk->start += extra;
+                mList.insertBefore(free_chunk, split);
+            }
+
+            LOGE_IF(((free_chunk->start*kMemoryAlign)&(pagesize-1)),
+                    "page is not aligned!!!");
+
+            // Return the unused tail of the chunk to the free list.
+            const ssize_t tail_free = free_size - (size+extra);
+            if (tail_free > 0) {
+                chunk_t* split = new chunk_t(
+                        free_chunk->start + free_chunk->size, tail_free);
+                mList.insertAfter(free_chunk, split);
+            }
+        }
+        return (free_chunk->start)*kMemoryAlign;
+    }
+    // we are out of PMEM. Print pmem stats
+    // check if there is any leak or fragmentation
+
+    LOGD (" Out of PMEM. Dumping PMEM stats for debugging");
+    LOGD (" ------------- PRINT PMEM STATS --------------");
+
+    cur = mList.head();
+    static uint32_t node_count;
+    static uint64_t allocated, free_space;
+
+    while (cur) {
+        // Cast size_t/bitfield values explicitly for the varargs call.
+        LOGD (" Node %d -> Start Address : %u Size %u Free info %d",
+              node_count++, (unsigned int)cur->start,
+              (unsigned int)cur->size, cur->free);
+
+        // if cur->free is 1 , the node is free
+        // calculate the total allocated and total free stats also
+
+        if (cur->free)
+            free_space += cur->size;
+        else
+            allocated += cur->size;
+        // read next node
+        cur = cur->next;
+    }
+    // Fixed format string: "%l" is not a valid conversion specifier;
+    // print the 64-bit totals with %llu and explicit casts.
+    LOGD (" Total Allocated: %llu Total Free: %llu",
+          (unsigned long long)allocated, (unsigned long long)free_space);
+
+    node_count = 0;
+    allocated = 0;
+    free_space = 0;
+    LOGD ("----------------------------------------------");
+    return -ENOMEM;
+}
+
+// Free the allocation whose byte offset is 'start' and coalesce it with
+// adjacent free chunks.  Returns the (possibly merged) free chunk, or 0
+// if no allocation begins at 'start'.  Must be called with mLock held.
+SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
+{
+ start = start / kMemoryAlign;
+ chunk_t* cur = mList.head();
+ while (cur) {
+ if (cur->start == start) {
+ LOG_FATAL_IF(cur->free,
+ "block at offset 0x%08lX of size 0x%08lX already freed",
+ cur->start*kMemoryAlign, cur->size*kMemoryAlign);
+
+ // merge freed blocks together
+ chunk_t* freed = cur;
+ cur->free = 1;
+ do {
+ chunk_t* const p = cur->prev;
+ chunk_t* const n = cur->next;
+ // Fold this chunk into its predecessor when the predecessor
+ // is free (or this chunk is empty), then advance.
+ if (p && (p->free || !cur->size)) {
+ freed = p;
+ p->size += cur->size;
+ mList.remove(cur);
+ delete cur;
+ }
+ cur = n;
+ } while (cur && cur->free);
+
+ LOG_FATAL_IF(!freed->free,
+ "freed block at offset 0x%08lX of size 0x%08lX is not free!",
+ freed->start * kMemoryAlign, freed->size * kMemoryAlign);
+
+ return freed;
+ }
+ cur = cur->next;
+ }
+ // No allocation starts at this offset.
+ return 0;
+}
diff --git a/libgralloc/pmem_bestfit_alloc.h b/libgralloc/pmem_bestfit_alloc.h
new file mode 100644
index 0000000..2ea8452
--- /dev/null
+++ b/libgralloc/pmem_bestfit_alloc.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef GRALLOC_ALLOCATOR_H_
+#define GRALLOC_ALLOCATOR_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "gr.h"
+#include "pmemalloc.h"
+
+// ----------------------------------------------------------------------------
+
+/*
+ * A simple templatized doubly linked-list implementation
+ */
+// NODE must expose public 'prev' and 'next' pointers (intrusive list).
+// The list never owns its nodes: remove() unlinks but does not delete.
+template <typename NODE>
+class LinkedList
+{
+ NODE* mFirst;
+ NODE* mLast;
+
+public:
+ LinkedList() : mFirst(0), mLast(0) { }
+ bool isEmpty() const { return mFirst == 0; }
+ NODE const* head() const { return mFirst; }
+ NODE* head() { return mFirst; }
+ NODE const* tail() const { return mLast; }
+ NODE* tail() { return mLast; }
+
+ // Link newNode immediately after node (node must be in the list).
+ void insertAfter(NODE* node, NODE* newNode) {
+ newNode->prev = node;
+ newNode->next = node->next;
+ if (node->next == 0) mLast = newNode;
+ else node->next->prev = newNode;
+ node->next = newNode;
+ }
+
+ // Link newNode immediately before node (node must be in the list).
+ void insertBefore(NODE* node, NODE* newNode) {
+ newNode->prev = node->prev;
+ newNode->next = node;
+ if (node->prev == 0) mFirst = newNode;
+ else node->prev->next = newNode;
+ node->prev = newNode;
+ }
+
+ void insertHead(NODE* newNode) {
+ if (mFirst == 0) {
+ mFirst = mLast = newNode;
+ newNode->prev = newNode->next = 0;
+ } else {
+ newNode->prev = 0;
+ newNode->next = mFirst;
+ mFirst->prev = newNode;
+ mFirst = newNode;
+ }
+ }
+
+ void insertTail(NODE* newNode) {
+ if (mLast == 0) {
+ insertHead(newNode);
+ } else {
+ newNode->prev = mLast;
+ newNode->next = 0;
+ mLast->next = newNode;
+ mLast = newNode;
+ }
+ }
+
+ // Unlink node and return it; caller is responsible for deleting it.
+ NODE* remove(NODE* node) {
+ if (node->prev == 0) mFirst = node->next;
+ else node->prev->next = node->next;
+ if (node->next == 0) mLast = node->prev;
+ else node->next->prev = node->prev;
+ return node;
+ }
+};
+
+// Best-fit sub-allocator for the userspace pmem heap.  Tracks the heap
+// as an intrusive doubly linked list of chunk_t nodes measured in
+// kMemoryAlign units; all public entry points are serialized by mLock.
+class SimpleBestFitAllocator : public gralloc::PmemUserspaceAlloc::Allocator
+{
+public:
+
+ SimpleBestFitAllocator();
+ SimpleBestFitAllocator(size_t size);
+ virtual ~SimpleBestFitAllocator();
+
+ virtual ssize_t setSize(size_t size);
+
+ virtual ssize_t allocate(size_t size, uint32_t flags = 0);
+ virtual ssize_t deallocate(size_t offset);
+ virtual size_t size() const;
+
+private:
+ // One region of the heap; start/size are in kMemoryAlign units.
+ struct chunk_t {
+ chunk_t(size_t start, size_t size)
+ : start(start), size(size), free(1), prev(0), next(0) {
+ }
+ size_t start;
+ size_t size : 28;
+ int free : 4; // 1 when this chunk is unallocated
+ mutable chunk_t* prev;
+ mutable chunk_t* next;
+ };
+
+ // Lock-free internals; callers hold mLock.
+ ssize_t alloc(size_t size, uint32_t flags);
+ chunk_t* dealloc(size_t start);
+
+ static const int kMemoryAlign;
+ mutable Locker mLock;
+ LinkedList<chunk_t> mList;
+ size_t mHeapSize;
+};
+#endif /* GRALLOC_ALLOCATOR_H_ */
diff --git a/libgralloc/pmemalloc.cpp b/libgralloc/pmemalloc.cpp
new file mode 100644
index 0000000..ccbf127
--- /dev/null
+++ b/libgralloc/pmemalloc.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <cutils/log.h>
+#include <errno.h>
+#include <linux/android_pmem.h>
+#include "gralloc_priv.h"
+#include "pmemalloc.h"
+#include "pmem_bestfit_alloc.h"
+
+using namespace gralloc;
+using android::sp;
+
+// Common functions between userspace
+// and kernel allocators
+// Query the total size of the pmem region backing 'fd' via
+// PMEM_GET_TOTAL_SIZE.  Returns 0 and stores the size in *size, or
+// -errno on ioctl failure.
+static int getPmemTotalSize(int fd, size_t* size)
+{
+    //XXX: 7x27
+    int err = 0;
+    pmem_region region;
+    // Fixed: the address-of expression had been corrupted into the
+    // HTML entity "&reg;ion", which does not compile.
+    if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region)) {
+        err = -errno;
+    } else {
+        *size = region.len;
+    }
+    return err;
+}
+
+// Translate the 'uncached' request into open(2) flags: uncached
+// mappings are opened O_SYNC, cached ones plain O_RDWR.
+static int getOpenFlags(bool uncached)
+{
+    return uncached ? (O_RDWR | O_SYNC) : O_RDWR;
+}
+
+// Connect a sub-heap fd to the master pmem fd (PMEM_CONNECT).
+// Returns 0 or -errno.
+static int connectPmem(int fd, int master_fd) {
+ if (ioctl(fd, PMEM_CONNECT, master_fd))
+ return -errno;
+ return 0;
+}
+
+// Map [offset, offset+size) of the master heap into the sub-heap fd
+// (PMEM_MAP).  Returns 0 or -errno.
+static int mapSubRegion(int fd, int offset, size_t size) {
+ struct pmem_region sub = { offset, size };
+ if (ioctl(fd, PMEM_MAP, &sub))
+ return -errno;
+ return 0;
+}
+
+// Undo mapSubRegion() for the same range (PMEM_UNMAP).
+// Returns 0 or -errno.
+static int unmapSubRegion(int fd, int offset, size_t size) {
+ struct pmem_region sub = { offset, size };
+ if (ioctl(fd, PMEM_UNMAP, &sub))
+ return -errno;
+ return 0;
+}
+
+// Ask the kernel pmem driver for an allocation of 'size' bytes with the
+// given physical alignment (PMEM_ALLOCATE_ALIGNED).  Returns 0 or -errno.
+static int alignPmem(int fd, size_t size, int align) {
+ struct pmem_allocation allocation;
+ allocation.size = size;
+ allocation.align = align;
+ if (ioctl(fd, PMEM_ALLOCATE_ALIGNED, &allocation))
+ return -errno;
+ return 0;
+}
+
+// Clean and invalidate the CPU cache for a pmem buffer range
+// (PMEM_CLEAN_INV_CACHES).  Returns 0 or -errno.
+static int cleanPmem(void *base, size_t size, int offset, int fd) {
+ struct pmem_addr pmem_addr;
+ pmem_addr.vaddr = (unsigned long) base;
+ pmem_addr.offset = offset;
+ pmem_addr.length = size;
+ if (ioctl(fd, PMEM_CLEAN_INV_CACHES, &pmem_addr))
+ return -errno;
+ return 0;
+}
+
+//-------------- PmemUserspaceAlloc-----------------------//
+// Constructor: heap is initialized lazily on first alloc_buffer()
+// (mMasterFd stays FD_INIT until init_pmem_area() runs).
+PmemUserspaceAlloc::PmemUserspaceAlloc()
+{
+ mPmemDev = DEVICE_PMEM;
+ mMasterFd = FD_INIT;
+ mAllocator = new SimpleBestFitAllocator();
+ pthread_mutex_init(&mLock, NULL);
+}
+
+// NOTE(review): the master mapping/fd acquired in
+// init_pmem_area_locked() is intentionally(?) never released here -
+// presumably this object lives for the process lifetime; confirm.
+PmemUserspaceAlloc::~PmemUserspaceAlloc()
+{
+}
+
+// One-time setup of the master pmem heap: open the device, size the
+// userspace allocator and mmap the whole region.  Returns 0 on success
+// or -errno.  Must be called with mLock held.
+int PmemUserspaceAlloc::init_pmem_area_locked()
+{
+    ALOGD("%s: Opening master pmem FD", __FUNCTION__);
+    int err = 0;
+    int fd = open(mPmemDev, O_RDWR, 0);
+    if (fd >= 0) {
+        size_t size = 0;
+        err = getPmemTotalSize(fd, &size);
+        ALOGD("%s: Total pmem size: %d", __FUNCTION__, (int)size);
+        if (err < 0) {
+            ALOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", mPmemDev,
+                  err);
+            size = 8<<20; // 8 MiB
+            // Limp mode is a deliberate fallback, not a failure: clear the
+            // error so the heap we are about to map is actually used.
+            // (Previously the stale negative err was returned, making the
+            // caller mark pmem permanently unusable.)
+            err = 0;
+        }
+        mAllocator->setSize(size);
+
+        void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
+                          0);
+        if (base == MAP_FAILED) {
+            err = -errno;
+            ALOGE("%s: Failed to map pmem master fd: %s", mPmemDev,
+                  strerror(errno));
+            base = 0;
+            close(fd);
+            fd = -1;
+        } else {
+            mMasterFd = fd;
+            mMasterBase = base;
+        }
+    } else {
+        err = -errno;
+        ALOGE("%s: Failed to open pmem device: %s", mPmemDev,
+              strerror(errno));
+    }
+    return err;
+}
+
+// Thread-safe lazy initialization of the master heap.  mMasterFd acts
+// as a tri-state: FD_INIT = never tried, <0 = tried and failed
+// (sticky), >=0 = ready.  Returns 0 when pmem is usable.
+int PmemUserspaceAlloc::init_pmem_area()
+{
+ pthread_mutex_lock(&mLock);
+ int err = mMasterFd;
+ if (err == FD_INIT) {
+ // first time, try to initialize pmem
+ ALOGD("%s: Initializing pmem area", __FUNCTION__);
+ err = init_pmem_area_locked();
+ if (err) {
+ ALOGE("%s: failed to initialize pmem area", mPmemDev);
+ // Record the failure so subsequent calls fail fast.
+ mMasterFd = err;
+ }
+ } else if (err < 0) {
+ // pmem couldn't be initialized, never use it
+ } else {
+ // pmem OK
+ err = 0;
+ }
+ pthread_mutex_unlock(&mLock);
+ return err;
+
+}
+
+// Carve a buffer out of the master pmem heap: reserve a range with the
+// best-fit allocator, open a per-buffer "sub-heap" fd, connect it to
+// the master fd, and map the range into it.  On success fills in
+// data.base/offset/fd and returns 0; otherwise a negative errno.
+// NOTE(review): data.base is the master heap base, not base+offset -
+// callers presumably add data.offset themselves; confirm.
+int PmemUserspaceAlloc::alloc_buffer(alloc_data& data)
+{
+ int err = init_pmem_area();
+ if (err == 0) {
+ void* base = mMasterBase;
+ size_t size = data.size;
+ int offset = mAllocator->allocate(size);
+ if (offset < 0) {
+ // no more pmem memory
+ ALOGE("%s: No more pmem available", mPmemDev);
+ err = -ENOMEM;
+ } else {
+ int openFlags = getOpenFlags(data.uncached);
+
+ // now create the "sub-heap"
+ int fd = open(mPmemDev, openFlags, 0);
+ err = fd < 0 ? fd : 0;
+
+ // and connect to it
+ if (err == 0)
+ err = connectPmem(fd, mMasterFd);
+
+ // and make it available to the client process
+ if (err == 0)
+ err = mapSubRegion(fd, offset, size);
+
+ if (err < 0) {
+ // Roll back: release the fd and return the range to the heap.
+ ALOGE("%s: Failed to initialize pmem sub-heap: %d", mPmemDev,
+ err);
+ close(fd);
+ mAllocator->deallocate(offset);
+ fd = -1;
+ } else {
+ ALOGD("%s: Allocated buffer base:%p size:%d offset:%d fd:%d",
+ mPmemDev, base, size, offset, fd);
+ // Zero the buffer, then flush so the zeros reach pmem.
+ memset((char*)base + offset, 0, size);
+ //Clean cache before flushing to ensure pmem is properly flushed
+ err = clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
+ if (err < 0) {
+ ALOGE("cleanPmem failed: (%s)", strerror(errno));
+ }
+ cacheflush(intptr_t(base) + offset, intptr_t(base) + offset + size, 0);
+ data.base = base;
+ data.offset = offset;
+ data.fd = fd;
+ }
+ }
+ }
+ return err;
+
+}
+
+// Unmap a sub-heap buffer, return its range to the best-fit allocator
+// and close its fd.  Returns 0 on success or -errno from PMEM_UNMAP.
+int PmemUserspaceAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+    ALOGD("%s: Freeing buffer base:%p size:%d offset:%d fd:%d",
+          mPmemDev, base, size, offset, fd);
+    int err = 0;
+    if (fd >= 0) {
+        // Fixed: this previously declared a second local 'err', shadowing
+        // the outer one, so the function always returned 0 even when
+        // PMEM_UNMAP failed.
+        err = unmapSubRegion(fd, offset, size);
+        ALOGE_IF(err<0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%u, "
+                 "sub.size=%u", strerror(errno), fd, offset, size);
+        if (err == 0) {
+            // we can't deallocate the memory in case of UNMAP failure
+            // because it would give that process access to someone else's
+            // surfaces, which would be a security breach.
+            mAllocator->deallocate(offset);
+        }
+        close(fd);
+    }
+    return err;
+}
+
+// Map a previously allocated sub-heap buffer into this process.  The
+// mapping always starts at file offset 0 and covers offset+size bytes,
+// so *pBase is the start of the region, not of the buffer (pairs with
+// the offset arithmetic in unmap_buffer()).  Returns 0 or -errno.
+int PmemUserspaceAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+ int err = 0;
+ size += offset;
+ void *base = mmap(0, size, PROT_READ| PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ *pBase = base;
+ if(base == MAP_FAILED) {
+ err = -errno;
+ ALOGE("%s: Failed to map buffer size:%d offset:%d fd:%d Error: %s",
+ mPmemDev, size, offset, fd, strerror(errno));
+ } else {
+ ALOGD("%s: Mapped buffer base:%p size:%d offset:%d fd:%d",
+ mPmemDev, base, size, offset, fd);
+ }
+ return err;
+
+}
+
+// Unmap a buffer mapped by map_buffer().  'base' points at the buffer,
+// so rewind by 'offset' to recover the start of the original mapping
+// before calling munmap.  Returns 0 or -errno.
+int PmemUserspaceAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+ int err = 0;
+ //pmem hack
+ base = (void*)(intptr_t(base) - offset);
+ size += offset;
+ ALOGD("%s: Unmapping buffer base:%p size:%d offset:%d",
+ mPmemDev , base, size, offset);
+ if (munmap(base, size) < 0) {
+
+ err = -errno;
+ ALOGE("%s: Failed to unmap memory at %p :%s",
+ mPmemDev, base, strerror(errno));
+
+ }
+
+ return err;
+}
+
+// Clean/invalidate the CPU cache for the buffer (delegates to cleanPmem).
+int PmemUserspaceAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+ return cleanPmem(base, size, offset, fd);
+}
+
+
+//-------------- PmemKernelAlloc-----------------------//
+
+// Constructor: remembers the pmem device node to allocate from.
+// NOTE(review): only the pointer is stored; the caller must keep the
+// device-path string alive for the lifetime of this object.
+PmemKernelAlloc::PmemKernelAlloc(const char* pmemdev) :
+ mPmemDev(pmemdev)
+{
+}
+
+// No per-object state to release; buffers own their own fd/mapping.
+PmemKernelAlloc::~PmemKernelAlloc()
+{
+}
+
+// Allocate a buffer directly from the kernel pmem driver: each buffer
+// gets its own fd and whole-fd mapping, so data.offset is always 0.
+// Fills in data.base/offset/fd on success; returns 0 or -errno.
+int PmemKernelAlloc::alloc_buffer(alloc_data& data)
+{
+ int err, offset = 0;
+ int openFlags = getOpenFlags(data.uncached);
+ int size = data.size;
+
+ int fd = open(mPmemDev, openFlags, 0);
+ if (fd < 0) {
+ err = -errno;
+ ALOGE("%s: Error opening %s", __FUNCTION__, mPmemDev);
+ return err;
+ }
+
+ if (data.align == 8192) {
+ // Tile format buffers need physical alignment to 8K
+ // Default page size does not need this ioctl
+ err = alignPmem(fd, size, 8192);
+ if (err < 0) {
+ // Non-fatal: fall through and try the plain mapping.
+ ALOGE("alignPmem failed");
+ }
+ }
+ void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (base == MAP_FAILED) {
+ err = -errno;
+ ALOGE("%s: failed to map pmem fd: %s", mPmemDev,
+ strerror(errno));
+ close(fd);
+ return err;
+ }
+ // Zero the buffer and push the zeros out of the CPU cache.
+ memset(base, 0, size);
+ clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
+ data.base = base;
+ data.offset = 0;
+ data.fd = fd;
+ ALOGD("%s: Allocated buffer base:%p size:%d fd:%d",
+ mPmemDev, base, size, fd);
+ return 0;
+
+}
+
+// Release a kernel-pmem buffer: unmap it and close its fd (closing the
+// fd returns the memory to the driver).  Returns the unmap result.
+int PmemKernelAlloc::free_buffer(void* base, size_t size, int offset, int fd)
+{
+ ALOGD("%s: Freeing buffer base:%p size:%d fd:%d",
+ mPmemDev, base, size, fd);
+
+ int err = unmap_buffer(base, size, offset);
+ close(fd);
+ return err;
+}
+
+// Map a kernel-pmem buffer into this process (whole fd, offset unused
+// since kernel-pmem buffers always start at 0).  Returns 0 or -errno.
+int PmemKernelAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
+{
+ int err = 0;
+ void *base = mmap(0, size, PROT_READ| PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ *pBase = base;
+ if(base == MAP_FAILED) {
+ err = -errno;
+ ALOGE("%s: Failed to map memory in the client: %s",
+ mPmemDev, strerror(errno));
+ } else {
+ ALOGD("%s: Mapped buffer base:%p size:%d, fd:%d",
+ mPmemDev, base, size, fd);
+ }
+ return err;
+
+}
+
+// Unmap a kernel-pmem buffer mapping ('offset' unused; kernel-pmem
+// buffers map from 0).  Returns 0 or -errno.
+int PmemKernelAlloc::unmap_buffer(void *base, size_t size, int offset)
+{
+    int err = 0;
+    if (munmap(base, size)) {
+        err = -errno;
+        // Fixed: strerror() expects a positive errno value; passing the
+        // negated 'err' printed "Unknown error" for every failure.
+        ALOGW("%s: Error unmapping memory at %p: %s",
+              mPmemDev, base, strerror(errno));
+    }
+    return err;
+
+}
+// Clean/invalidate the CPU cache for the buffer (delegates to cleanPmem).
+int PmemKernelAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
+{
+ return cleanPmem(base, size, offset, fd);
+}
+
diff --git a/libgralloc/pmemalloc.h b/libgralloc/pmemalloc.h
new file mode 100644
index 0000000..4aed0b1
--- /dev/null
+++ b/libgralloc/pmemalloc.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRALLOC_PMEMALLOC_H
+#define GRALLOC_PMEMALLOC_H
+
+#include <linux/ion.h>
+#include <utils/RefBase.h>
+#include "memalloc.h"
+
+namespace gralloc {
+ // Userspace-managed pmem back-end: one master fd/mapping for the whole
+ // heap, with individual buffers carved out by a pluggable best-fit
+ // Allocator and exposed through per-buffer "sub-heap" fds.
+ class PmemUserspaceAlloc : public IMemAlloc {
+
+ public:
+ // Sub-allocator interface for partitioning the master heap.
+ class Allocator: public android::RefBase {
+ public:
+ virtual ~Allocator() {};
+ virtual ssize_t setSize(size_t size) = 0;
+ virtual size_t size() const = 0;
+ virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
+ virtual ssize_t deallocate(size_t offset) = 0;
+ };
+
+ virtual int alloc_buffer(alloc_data& data);
+
+ virtual int free_buffer(void *base, size_t size,
+ int offset, int fd);
+
+ virtual int map_buffer(void **pBase, size_t size,
+ int offset, int fd);
+
+ virtual int unmap_buffer(void *base, size_t size,
+ int offset);
+
+ virtual int clean_buffer(void*base, size_t size,
+ int offset, int fd);
+
+ PmemUserspaceAlloc();
+
+ ~PmemUserspaceAlloc();
+
+ private:
+ int mMasterFd; // FD_INIT until lazily initialized; <0 on sticky failure
+ void* mMasterBase; // mapping of the entire master heap
+ const char* mPmemDev;
+ android::sp<Allocator> mAllocator;
+ pthread_mutex_t mLock; // guards lazy init of the master heap
+ int init_pmem_area();
+ int init_pmem_area_locked();
+
+ };
+
+ // Kernel-managed pmem back-end: each buffer is its own fd/mapping
+ // allocated directly by the driver (offset is always 0).
+ class PmemKernelAlloc : public IMemAlloc {
+
+ public:
+ virtual int alloc_buffer(alloc_data& data);
+
+ virtual int free_buffer(void *base, size_t size,
+ int offset, int fd);
+
+ virtual int map_buffer(void **pBase, size_t size,
+ int offset, int fd);
+
+ virtual int unmap_buffer(void *base, size_t size,
+ int offset);
+
+ virtual int clean_buffer(void*base, size_t size,
+ int offset, int fd);
+
+ PmemKernelAlloc(const char* device);
+
+ ~PmemKernelAlloc();
+ private:
+ const char* mPmemDev;
+
+
+ };
+
+}
+#endif /* GRALLOC_PMEMALLOC_H */
diff --git a/libhwcomposer/Android.mk b/libhwcomposer/Android.mk
new file mode 100644
index 0000000..a600f97
--- /dev/null
+++ b/libhwcomposer/Android.mk
@@ -0,0 +1,39 @@
+LOCAL_PATH := $(call my-dir)
+
+# HAL module implemenation, not prelinked and stored in
+# hw/<OVERLAY_HARDWARE_MODULE_ID>.<ro.product.board>.so
+include $(CLEAR_VARS)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
+LOCAL_SHARED_LIBRARIES := liblog libcutils libEGL libhardware libutils liboverlay
+LOCAL_SHARED_LIBRARIES += libgenlock libQcomUI libmemalloc
+
+LOCAL_SRC_FILES := \
+ hwcomposer.cpp \
+ external_display_only.h
+
+LOCAL_MODULE := hwcomposer.$(TARGET_BOARD_PLATFORM)
+LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).hwcomposer\" -DDEBUG_CALC_FPS
+
+# Sibling display HAL headers plus kernel UAPI headers.
+LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
+LOCAL_C_INCLUDES += hardware/qcom/display/liboverlay
+LOCAL_C_INCLUDES += hardware/qcom/display/libcopybit
+LOCAL_C_INCLUDES += hardware/qcom/display/libgenlock
+LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
+LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
+
+LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
+# Board feature flags map onto the compile-time switches used by
+# hwcomposer.cpp / external_display_only.h.
+ifeq ($(TARGET_HAVE_HDMI_OUT),true)
+LOCAL_CFLAGS += -DHDMI_DUAL_DISPLAY
+endif
+ifeq ($(TARGET_USES_OVERLAY),true)
+LOCAL_CFLAGS += -DUSE_OVERLAY
+endif
+ifeq ($(TARGET_HAVE_BYPASS),true)
+LOCAL_CFLAGS += -DCOMPOSITION_BYPASS
+endif
+ifeq ($(TARGET_USE_HDMI_AS_PRIMARY),true)
+LOCAL_CFLAGS += -DHDMI_AS_PRIMARY
+endif
+LOCAL_MODULE_TAGS := optional eng
+include $(BUILD_SHARED_LIBRARY)
diff --git a/libhwcomposer/external_display_only.h b/libhwcomposer/external_display_only.h
new file mode 100644
index 0000000..fa24642
--- /dev/null
+++ b/libhwcomposer/external_display_only.h
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define EXTDEBUG 0
+// Static-only helper driving "external-display-only" layers in the
+// hwcomposer: layers shown on HDMI/WFD but not on the panel.  All state
+// and methods are static; the class is only a namespace with privacy.
+// Everything compiles to a no-op unless both HDMI_DUAL_DISPLAY and
+// USE_OVERLAY are defined.
+class ExtDispOnly {
+
+ enum ExternalOnlyMode {
+ EXT_ONLY_MODE_OFF = 0,
+ EXT_ONLY_MODE_ON = 1,
+ };
+
+ enum {
+ MAX_EXT_ONLY_LAYERS = 2,
+ };
+
+public:
+ /* Initialize, allocate data members */
+ static void init();
+
+ /* Deallocate data members */
+ static void destroy();
+
+ /* Closes all the overlay channels */
+ static void close();
+
+ /* Prepare overlay and configures mdp pipes */
+ static int prepare(hwc_context_t *ctx, hwc_layer_t *layer, int index,
+ bool waitForVsync);
+
+ /* Returns status of external-only mode */
+ static bool isModeOn();
+
+ /* Updates stats and pipe config related to external_only and external_block layers
+ * If we are staring or stopping this mode, update default mirroring.
+ */
+ static int update(hwc_context_t* ctx, hwc_layer_list_t* list);
+
+ /* Stores the locked handle for the buffer that was successfully queued */
+ static void storeLockedHandles(hwc_layer_list_t* list);
+
+ /* Queue buffers to mdp for display */
+ static int draw(hwc_context_t *ctx, hwc_layer_list_t *list);
+
+private:
+ /* Locks a buffer and marks it as locked */
+ static void lockBuffer(native_handle_t *hnd);
+
+ /* Unlocks a buffer and clears the locked flag */
+ static void unlockBuffer(native_handle_t *hnd);
+
+ /* Unlocks buffers queued in previous round (and displayed by now)
+ * Clears the handle cache.
+ */
+ static void unlockPreviousBuffers();
+
+ /* Closes the a range of overlay channels */
+ static void closeRange(int start);
+
+ /* Start default external mirroring */
+ static void startDefaultMirror(hwc_context_t* ctx);
+
+ /* Stop default external mirroring */
+ static void stopDefaultMirror(hwc_context_t* ctx);
+
+ /* Checks if external-only mode is starting */
+ static bool isExtModeStarting(hwc_context_t* ctx, const int&
+ numExtLayers);
+
+ /* Checks if external-only mode is stopping */
+ static bool isExtModeStopping(hwc_context_t* ctx, const int&
+ numExtLayers);
+
+ //Data members
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ static overlay::OverlayUI* sOvExtUI[MAX_EXT_ONLY_LAYERS]; // one overlay pipe per ext-only layer
+ static native_handle_t* sPreviousExtHandle[MAX_EXT_ONLY_LAYERS]; // handles locked last round
+ static ExternalOnlyMode sExtOnlyMode;
+ static int sNumExtOnlyLayers;
+ static bool sSkipLayerPresent;
+ static bool sBlockLayerPresent;
+ static int sBlockLayerIndex;
+#endif
+}; //class ExtDispOnly
+
+// Acquire a genlock read lock on 'hnd' and tag it with
+// PRIV_FLAGS_HWC_LOCK so the same buffer is not locked twice.
+void ExtDispOnly::lockBuffer(native_handle_t *hnd) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ private_handle_t* phnd = (private_handle_t*)hnd;
+
+ //Genlock is reference counted and recursive.
+ //Do not accidently lock a locked buffer.
+ if(phnd && (phnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
+ LOGE_IF(EXTDEBUG, "%s: handle %p already locked", __func__, phnd);
+ return;
+ }
+ if (GENLOCK_FAILURE == genlock_lock_buffer(hnd, GENLOCK_READ_LOCK,
+ GENLOCK_MAX_TIMEOUT)) {
+ LOGE("%s: genlock_lock_buffer(READ) failed", __func__);
+ return;
+ }
+ phnd->flags |= private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ LOGE_IF(EXTDEBUG, "%s: locked handle = %p", __func__, hnd);
+#endif
+}
+
+// Release the genlock lock taken by lockBuffer() and clear the
+// PRIV_FLAGS_HWC_LOCK tag.  Safe to call on stale/unlocked handles.
+void ExtDispOnly::unlockBuffer(native_handle_t *hnd) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ //Check if buffer is still around
+ if(private_handle_t::validate(hnd) != 0) {
+ LOGE("%s Handle already deallocated", __func__);
+ return;
+ }
+
+ private_handle_t* phnd = (private_handle_t*)hnd;
+
+ //Check if buffer was locked in the first place
+ if((phnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK) == 0) {
+ LOGE("%s Handle not locked, cannot unlock", __func__);
+ return;
+ }
+
+ //Actually try to unlock
+ if (GENLOCK_FAILURE == genlock_unlock_buffer(hnd)) {
+ LOGE("%s: genlock_unlock_buffer failed", __func__);
+ return;
+ }
+
+ //Clear the locked flag
+ phnd->flags &= ~private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ LOGE_IF(EXTDEBUG, "%s: unlocked handle = %p", __func__, hnd);
+#endif
+}
+
+// Unlock the handles queued last round (displayed by now) and clear
+// the cache.  Stops at the first empty slot: the cache is packed.
+void ExtDispOnly::unlockPreviousBuffers() {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ for(int i = 0; (i < MAX_EXT_ONLY_LAYERS) && sPreviousExtHandle[i]; i++) {
+ LOGE_IF(EXTDEBUG, "%s", __func__);
+ ExtDispOnly::unlockBuffer(sPreviousExtHandle[i]);
+ sPreviousExtHandle[i] = NULL;
+ }
+#endif
+}
+
+// Allocate the per-layer overlay objects and reset all static state.
+// Must be called before any other ExtDispOnly entry point.
+void ExtDispOnly::init() {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ for(int i = 0; i < MAX_EXT_ONLY_LAYERS; i++) {
+ sOvExtUI[i] = new overlay::OverlayUI();
+ sPreviousExtHandle[i] = NULL;
+ }
+ sExtOnlyMode = EXT_ONLY_MODE_OFF;
+ sNumExtOnlyLayers = 0;
+ sSkipLayerPresent = false;
+ sBlockLayerPresent = false;
+ sBlockLayerIndex = -1;
+ LOGE_IF(EXTDEBUG, "%s", __func__);
+#endif
+}
+
+// Delete the overlay objects allocated by init().  Pointers are not
+// nulled, so do not call other entry points after destroy().
+void ExtDispOnly::destroy() {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ for(int i = 0; i < MAX_EXT_ONLY_LAYERS; i++) {
+ delete sOvExtUI[i];
+ }
+#endif
+}
+
+// Close overlay channels [start, MAX_EXT_ONLY_LAYERS), unlocking any
+// handle still cached for those channels first.
+void ExtDispOnly::closeRange(int start) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ for (int index = start; index < MAX_EXT_ONLY_LAYERS; index++) {
+ if(sPreviousExtHandle[index]) {
+ LOGE_IF(EXTDEBUG, "%s", __func__);
+ ExtDispOnly::unlockBuffer(sPreviousExtHandle[index]);
+ sPreviousExtHandle[index] = NULL;
+ }
+ sOvExtUI[index]->closeChannel();
+ }
+#endif
+}
+
+// Close every overlay channel (closeRange starting at 0).
+void inline ExtDispOnly::close() {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ closeRange(0);
+#endif
+}
+
+// Configure the overlay pipe for one external-only layer: set source
+// geometry from the layer's crop, target the current external fb
+// (HDMI/WFD), scale to full external-screen size, and commit.  Returns
+// overlay::NO_ERROR on success, -1 when external display is off/pending
+// or on commit failure.
+int ExtDispOnly::prepare(hwc_context_t *ctx, hwc_layer_t *layer, int index,
+ bool waitForVsync) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
+ ctx->pendingHDMI == true)
+ return -1;
+
+ if (ctx && sOvExtUI[index]) {
+ private_hwc_module_t* hwcModule = reinterpret_cast<
+ private_hwc_module_t*>(ctx->device.common.module);
+ if (!hwcModule) {
+ LOGE("%s null module", __func__);
+ return -1;
+ }
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(!hnd) {
+ LOGE("%s handle null", __func__);
+ return -1;
+ }
+ overlay::OverlayUI *ovUI = sOvExtUI[index];
+ int ret = 0; // NOTE(review): unused - candidate for removal
+ //int orientation = layer->transform;
+ //Assuming layers will always be source landscape
+ const int orientation = 0;
+ overlay_buffer_info info;
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ info.width = sourceCrop.right - sourceCrop.left;
+ info.height = sourceCrop.bottom - sourceCrop.top;
+ info.format = hnd->format;
+ info.size = hnd->size;
+
+
+ const int fbnum = ctx->mHDMIEnabled; //HDMI or WFD
+ const bool isFg = false;
+ //Just to differentiate zorders for different layers
+ const int zorder = index;
+ const bool isVGPipe = true;
+ ovUI->setSource(info, orientation);
+ ovUI->setDisplayParams(fbnum, waitForVsync, isFg, zorder, isVGPipe);
+ // Stretch the layer to cover the whole external framebuffer.
+ const int fbWidth = ovUI->getFBWidth();
+ const int fbHeight = ovUI->getFBHeight();
+ ovUI->setPosition(0, 0, fbWidth, fbHeight);
+ if(ovUI->commit() != overlay::NO_ERROR) {
+ LOGE("%s: Overlay Commit failed", __func__);
+ return -1;
+ }
+ }
+ LOGE_IF(EXTDEBUG, "%s", __func__);
+#endif
+ return overlay::NO_ERROR;
+}
+
+// Resume default UI mirroring by telling the framebuffer HAL which
+// external interface (HDMI/WFD/none) is currently active.
+inline void ExtDispOnly::startDefaultMirror(hwc_context_t* ctx) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ hwc_composer_device_t* dev = (hwc_composer_device_t*) ctx;
+ private_hwc_module_t* hwcModule =
+ reinterpret_cast<private_hwc_module_t*>(dev->common.module);
+ framebuffer_device_t *fbDev = hwcModule->fbDevice;
+ if (fbDev) {
+ //mHDMIEnabled could be HDMI/WFD/NO EXTERNAL
+ fbDev->enableHDMIOutput(fbDev, ctx->mHDMIEnabled);
+ }
+#endif
+}
+
+// Stop default UI mirroring while external-only layers own the external
+// display (fb HAL is told the external output is off).
+inline void ExtDispOnly::stopDefaultMirror(hwc_context_t* ctx) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ hwc_composer_device_t* dev = (hwc_composer_device_t*) ctx;
+ private_hwc_module_t* hwcModule =
+ reinterpret_cast<private_hwc_module_t*>(dev->common.module);
+ framebuffer_device_t *fbDev = hwcModule->fbDevice;
+ if (fbDev) {
+ fbDev->enableHDMIOutput(fbDev, EXT_DISPLAY_OFF);
+ }
+#endif
+}
+
+// True when ext-only mode is off but this frame carries external-only
+// layers. (ctx is unused here; kept for signature symmetry.)
+inline bool ExtDispOnly::isExtModeStarting(hwc_context_t* ctx, const int&
+ numExtLayers) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ return ((sExtOnlyMode == EXT_ONLY_MODE_OFF) && numExtLayers);
+#endif
+ return false;
+}
+
+// True when ext-only mode is on but this frame has no external-only layers.
+inline bool ExtDispOnly::isExtModeStopping(hwc_context_t* ctx, const int&
+ numExtLayers) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ return ((sExtOnlyMode == EXT_ONLY_MODE_ON) && (numExtLayers == 0));
+#endif
+ return false;
+}
+
+// Whether external-only composition is active; always false when the
+// feature is compiled out.
+inline bool ExtDispOnly::isModeOn() {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ return (sExtOnlyMode == EXT_ONLY_MODE_ON);
+#endif
+ return false;
+}
+
+// Per-frame bookkeeping for external-only layers:
+// - tags EXTERNAL_ONLY (and EXTERNAL_BLOCK) layers so the GPU never
+//   composes them to fb0, caches their stats in the static state,
+// - toggles default mirroring on mode transitions,
+// - (re)configures one overlay pipe per external-only layer.
+// Returns overlay::NO_ERROR on success, -1 when the external display is
+// off or a connect is still pending.
+int ExtDispOnly::update(hwc_context_t* ctx, hwc_layer_list_t* list) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ int aNumExtLayers = 0;
+ bool aSkipLayerPresent = false;
+ bool aBlockLayerPresent = false;
+ int aBlockLayerIndex = -1;
+
+ //Book-keeping done each cycle
+ for (size_t i = 0; i < list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ // Dont draw in this round
+ if(list->hwLayers[i].flags & HWC_SKIP_LAYER) {
+ aSkipLayerPresent = true;
+ }
+ if(hnd && (hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY)) {
+ aNumExtLayers++;
+ // No way we can let this be drawn by GPU to fb0
+ if(list->hwLayers[i].flags & HWC_SKIP_LAYER) {
+ list->hwLayers[i].flags &= ~ HWC_SKIP_LAYER;
+ }
+ list->hwLayers[i].flags |= HWC_USE_EXT_ONLY;
+ list->hwLayers[i].compositionType = HWC_USE_OVERLAY;
+ list->hwLayers[i].hints &= ~HWC_HINT_CLEAR_FB;
+ //EXTERNAL_BLOCK is always an add-on
+ if(hnd && (hnd->flags &
+ private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK)) {
+ aBlockLayerPresent = true;
+ aBlockLayerIndex = i;
+ list->hwLayers[i].flags |= HWC_USE_EXT_BLOCK;
+ }
+ }
+ }
+
+ //Update Default mirroring state
+ if (isExtModeStarting(ctx, aNumExtLayers)) {
+ stopDefaultMirror(ctx);
+ } else if (isExtModeStopping(ctx, aNumExtLayers)) {
+ startDefaultMirror(ctx);
+ }
+
+ //Cache our stats
+ sExtOnlyMode = aNumExtLayers ? EXT_ONLY_MODE_ON : EXT_ONLY_MODE_OFF;
+ sNumExtOnlyLayers = aNumExtLayers;
+ sSkipLayerPresent = aSkipLayerPresent;
+ sBlockLayerPresent = aBlockLayerPresent;
+ sBlockLayerIndex = aBlockLayerIndex;
+
+ LOGE_IF(EXTDEBUG, "%s: numExtLayers = %d skipLayerPresent = %d", __func__,
+ aNumExtLayers, aSkipLayerPresent);
+ //If skip layer present return. Buffers to be unlocked in draw phase.
+ if(aSkipLayerPresent) {
+ return overlay::NO_ERROR;
+ }
+
+ //If External is not connected, dont setup pipes, just return
+ if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
+ ctx->pendingHDMI == true) {
+ ExtDispOnly::close();
+ return -1;
+ }
+
+ //Update pipes
+ bool waitForVsync = true;
+ // BUGFIX: was "bool index = 0;" — as a bool the pipe index saturated at
+ // 1, so a third (or later) ext-only layer reused pipe 1 and the
+ // last-layer vsync comparison below could never match for >2 layers.
+ int index = 0;
+
+ if (aBlockLayerPresent) {
+ ExtDispOnly::closeRange(1);
+ ExtDispOnly::prepare(ctx, &(list->hwLayers[aBlockLayerIndex]),
+ index, waitForVsync);
+ } else if (aNumExtLayers) {
+ ExtDispOnly::closeRange(aNumExtLayers);
+ for (size_t i = 0; i < list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if(hnd && hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY) {
+ //Only the last ext-only layer waits for vsync
+ waitForVsync = (index == (aNumExtLayers - 1));
+ ExtDispOnly::prepare(ctx, &(list->hwLayers[i]),
+ index, waitForVsync);
+ index++;
+ }
+ }
+ } else {
+ ExtDispOnly::close();
+ }
+#endif
+ return overlay::NO_ERROR;
+}
+
+// Cache the handles genlocked this cycle so they can be unlocked on a
+// later cycle. Locks any HWC_USE_EXT_ONLY handle not already HWC-locked.
+// In block-layer mode only cache slot 0 is used.
+void ExtDispOnly::storeLockedHandles(hwc_layer_list_t* list) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ int index = 0;
+ if(sBlockLayerPresent) {
+ private_handle_t *hnd = (private_handle_t *)
+ list->hwLayers[sBlockLayerIndex].handle;
+ // Guard against a NULL handle before touching hnd->flags.
+ if(hnd && (list->hwLayers[sBlockLayerIndex].flags & HWC_USE_EXT_ONLY)) {
+ if(!(hnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
+ ExtDispOnly::lockBuffer(hnd);
+ }
+ sPreviousExtHandle[index] = hnd;
+ LOGE_IF(EXTDEBUG, "%s BLOCK: handle = %p", __func__, hnd);
+ return;
+ }
+ }
+ // size_t matches numHwLayers and avoids signed/unsigned comparison.
+ for(size_t i = 0; i < list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if(hnd && (list->hwLayers[i].flags & HWC_USE_EXT_ONLY)) {
+ if(!(hnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
+ ExtDispOnly::lockBuffer(hnd);
+ }
+ sPreviousExtHandle[index] = hnd;
+ index++;
+ LOGE_IF(EXTDEBUG, "%s: handle = %p", __func__, hnd);
+ }
+ }
+#endif
+}
+
+// Queue external-only buffers to their overlay pipes. On any queueBuffer
+// failure, every buffer locked this cycle is unlocked and all channels are
+// closed. On success, last cycle's buffers are unlocked and this cycle's
+// handles are cached via storeLockedHandles().
+int ExtDispOnly::draw(hwc_context_t *ctx, hwc_layer_list_t *list) {
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+ LOGE_IF(EXTDEBUG, "%s", __func__);
+ if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
+ ctx->pendingHDMI == true) {
+ ExtDispOnly::close();
+ return -1;
+ }
+
+ int ret = overlay::NO_ERROR;
+ int index = 0;
+
+ //If skip layer present or list invalid unlock and return.
+ if(sSkipLayerPresent || list == NULL) {
+ ExtDispOnly::unlockPreviousBuffers();
+ return overlay::NO_ERROR;
+ }
+
+ if(sBlockLayerPresent) {
+ private_handle_t *hnd = (private_handle_t*)
+ list->hwLayers[sBlockLayerIndex].handle;
+ ExtDispOnly::lockBuffer(hnd);
+ ret = sOvExtUI[index]->queueBuffer(hnd);
+ if (ret) {
+ LOGE("%s queueBuffer failed", __func__);
+ // Unlock the locked buffer
+ ExtDispOnly::unlockBuffer(hnd);
+ ExtDispOnly::close();
+ return -1;
+ }
+ ExtDispOnly::unlockPreviousBuffers();
+ ExtDispOnly::storeLockedHandles(list);
+ return overlay::NO_ERROR;
+ }
+
+ for(size_t i = 0; i < list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if(hnd && list->hwLayers[i].flags & HWC_USE_EXT_ONLY) {
+ overlay::OverlayUI *ovUI = sOvExtUI[index];
+ ExtDispOnly::lockBuffer(hnd);
+ ret = ovUI->queueBuffer(hnd);
+ if (ret) {
+ LOGE("%s queueBuffer failed", __func__);
+ // Unlock all the buffers locked this cycle.
+ // BUGFIX: this rollback loop previously tested the outer "hnd"
+ // instead of "tmphnd", so a NULL handle on an earlier layer
+ // could be passed to unlockBuffer.
+ for (size_t j = 0; j <= i; j++) {
+ private_handle_t *tmphnd =
+ (private_handle_t *)list->hwLayers[j].handle;
+ if(tmphnd && list->hwLayers[j].flags & HWC_USE_EXT_ONLY)
+ ExtDispOnly::unlockBuffer(tmphnd);
+ }
+ ExtDispOnly::close();
+ return -1;
+ }
+ index++;
+ }
+ }
+ ExtDispOnly::unlockPreviousBuffers();
+ ExtDispOnly::storeLockedHandles(list);
+#endif
+ return overlay::NO_ERROR;
+}
+
+// Definitions of ExtDispOnly's static per-process state: one OverlayUI and
+// one cached (genlocked) handle per ext-only pipe, plus last-frame stats.
+#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
+overlay::OverlayUI* ExtDispOnly::sOvExtUI[MAX_EXT_ONLY_LAYERS];
+native_handle_t* ExtDispOnly::sPreviousExtHandle[MAX_EXT_ONLY_LAYERS];
+ExtDispOnly::ExternalOnlyMode ExtDispOnly::sExtOnlyMode;
+int ExtDispOnly::sNumExtOnlyLayers;
+bool ExtDispOnly::sSkipLayerPresent;
+bool ExtDispOnly::sBlockLayerPresent;
+int ExtDispOnly::sBlockLayerIndex;
+#endif
diff --git a/libhwcomposer/hwcomposer.cpp b/libhwcomposer/hwcomposer.cpp
new file mode 100755
index 0000000..c43fa04
--- /dev/null
+++ b/libhwcomposer/hwcomposer.cpp
@@ -0,0 +1,1734 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <hardware/hardware.h>
+
+#include <fcntl.h>
+#include <errno.h>
+
+#include <cutils/log.h>
+#include <cutils/atomic.h>
+#include <cutils/properties.h>
+
+#include <hardware/hwcomposer.h>
+#include <overlayLib.h>
+#include <overlayLibUI.h>
+#include <copybit.h>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <ui/android_native_buffer.h>
+#include <gralloc_priv.h>
+#include <genlock.h>
+#include <qcom_ui.h>
+#include <gr.h>
+
+/*****************************************************************************/
+// Round x up to the next multiple of align (align must be a power of 2).
+#define ALIGN(x, align) (((x) + ((align)-1)) & ~((align)-1))
+// Branch-prediction hints.
+#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true ))
+#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false ))
+
+#ifdef COMPOSITION_BYPASS
+#define MAX_BYPASS_LAYERS 3
+#define BYPASS_DEBUG 0
+// Layer flag bit offset where the assigned pipe index is stashed.
+#define BYPASS_INDEX_OFFSET 4
+
+// MDP composition-bypass state machine.
+enum BypassState {
+ BYPASS_ON,
+ BYPASS_OFF,
+ BYPASS_OFF_PENDING,
+};
+
+// Genlock state of each bypass buffer slot.
+enum BypassBufferLockState {
+ BYPASS_BUFFER_UNLOCKED,
+ BYPASS_BUFFER_LOCKED,
+};
+#endif
+
+// NOTE(review): bitmask-style layer classification; no use visible in this
+// chunk of the file.
+enum HWCLayerType{
+ HWC_SINGLE_VIDEO = 0x1,
+ HWC_ORIG_RESOLUTION = 0x2,
+ HWC_S3D_LAYER = 0x4,
+ HWC_STOP_UI_MIRRORING_MASK = 0xF
+};
+
+// Video-overlay channel lifecycle. PREPARE_TO_CLOSE defers the actual
+// close by one cycle (see setHWCOverlayStatus / hwc_closeOverlayChannels).
+enum eHWCOverlayStatus {
+ HWC_OVERLAY_OPEN,
+ HWC_OVERLAY_PREPARE_TO_CLOSE,
+ HWC_OVERLAY_CLOSED
+};
+
+// Per-device HWC state. "device" must stay the first member so
+// hwc_context_t* can be cast to/from hwc_composer_device_t*.
+struct hwc_context_t {
+ hwc_composer_device_t device;
+ /* our private state goes below here */
+ overlay::Overlay* mOverlayLibObject;
+ native_handle_t *previousOverlayHandle; // genlocked buffer from last cycle
+#ifdef COMPOSITION_BYPASS
+ overlay::OverlayUI* mOvUI[MAX_BYPASS_LAYERS];
+ native_handle_t* previousBypassHandle[MAX_BYPASS_LAYERS];
+ BypassBufferLockState bypassBufferLockState[MAX_BYPASS_LAYERS];
+ int layerindex[MAX_BYPASS_LAYERS]; // layer index assigned to each pipe
+ int nPipesUsed;
+ BypassState bypassState;
+#endif
+#if defined HDMI_DUAL_DISPLAY
+ external_display mHDMIEnabled; // Type of external display
+ bool pendingHDMI; // connect noted; handled after next swap
+#endif
+ int previousLayerCount;
+ eHWCOverlayStatus hwcOverlayStatus;
+};
+
+static int hwc_device_open(const struct hw_module_t* module, const char* name,
+ struct hw_device_t** device);
+
+// HAL module entry point table.
+static struct hw_module_methods_t hwc_module_methods = {
+ open: hwc_device_open
+};
+
+
+// HWC module descriptor extended with engine handles filled in at device
+// open time (copybit + framebuffer devices, composition config).
+struct private_hwc_module_t {
+ hwc_module_t base;
+ copybit_device_t *copybitEngine;
+ framebuffer_device_t *fbDevice;
+ int compositionType;
+ bool isBypassEnabled; //from build.prop ro.sf.compbypass.enable
+};
+
+// Module export; uses GCC designated-initializer ("label:") syntax.
+struct private_hwc_module_t HAL_MODULE_INFO_SYM = {
+ base: {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ version_major: 1,
+ version_minor: 0,
+ id: HWC_HARDWARE_MODULE_ID,
+ name: "Hardware Composer Module",
+ author: "The Android Open Source Project",
+ methods: &hwc_module_methods,
+ }
+ },
+ copybitEngine: NULL,
+ fbDevice: NULL,
+ compositionType: 0,
+ isBypassEnabled: false,
+};
+
+//Only at this point would the compiler know all storage class sizes.
+//The header has hooks which need to know those beforehand.
+#include "external_display_only.h"
+
+/*****************************************************************************/
+
+// Debug helper: log one layer's type, flags, handle, transform, blending,
+// source crop and display frame.
+static void dump_layer(hwc_layer_t const* l) {
+ LOGD("\ttype=%d, flags=%08x, handle=%p, tr=%02x, blend=%04x, {%d,%d,%d,%d}, {%d,%d,%d,%d}",
+ l->compositionType, l->flags, l->handle, l->transform, l->blending,
+ l->sourceCrop.left,
+ l->sourceCrop.top,
+ l->sourceCrop.right,
+ l->sourceCrop.bottom,
+ l->displayFrame.left,
+ l->displayFrame.top,
+ l->displayFrame.right,
+ l->displayFrame.bottom);
+}
+
+// Local int min/max helpers.
+static inline int min(const int& a, const int& b) {
+ return (a < b) ? a : b;
+}
+
+static inline int max(const int& a, const int& b) {
+ return (a > b) ? a : b;
+}
+#ifdef COMPOSITION_BYPASS
+// Stash the pipe index assigned to a bypassed layer in its flag bits
+// (the field selected by HWC_BYPASS_INDEX_MASK, shifted by
+// BYPASS_INDEX_OFFSET).
+void setLayerbypassIndex(hwc_layer_t* layer, const int bypass_index)
+{
+ layer->flags &= ~HWC_BYPASS_INDEX_MASK;
+ layer->flags |= bypass_index << BYPASS_INDEX_OFFSET;
+}
+
+// Recover the pipe index stored by setLayerbypassIndex(). Returns -1 when
+// the layer is not bypassed or the stored index is out of range.
+int getLayerbypassIndex(hwc_layer_t* layer)
+{
+ int byp_index = -1;
+
+ if(layer->flags & HWC_COMP_BYPASS) {
+ byp_index = ((layer->flags & HWC_BYPASS_INDEX_MASK) >> BYPASS_INDEX_OFFSET);
+ byp_index = (byp_index < MAX_BYPASS_LAYERS ? byp_index : -1 );
+ }
+ return byp_index;
+}
+
+// Genlock-unlock the bypass buffers cached on the previous cycle.
+// Note: iteration stops at the first NULL slot (the cache is treated as
+// densely packed from index 0). On unlock failure the handle stays cached
+// so the unlock can be retried on a later cycle.
+void unlockPreviousBypassBuffers(hwc_context_t* ctx) {
+ // Unlock the previous bypass buffers. We can blindly unlock the buffers here,
+ // because buffers will be in this list only if the lock was successfully acquired.
+ for(int i = 0; i < MAX_BYPASS_LAYERS && ctx->previousBypassHandle[i]; i++) {
+ private_handle_t *hnd = (private_handle_t*) ctx->previousBypassHandle[i];
+
+ // Validate the handle to make sure it hasn't been deallocated.
+ if (private_handle_t::validate(ctx->previousBypassHandle[i])) {
+ continue;
+ }
+ // Check if the handle was locked previously
+ if (private_handle_t::PRIV_FLAGS_HWC_LOCK & hnd->flags) {
+ if (GENLOCK_FAILURE == genlock_unlock_buffer(ctx->previousBypassHandle[i])) {
+ LOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+ } else {
+ ctx->previousBypassHandle[i] = NULL;
+ // Reset the lock flag
+ hnd->flags &= ~private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ }
+ }
+ }
+}
+
+// Debug helper: log a layer's source crop and destination rects with sizes.
+void print_info(hwc_layer_t* layer)
+{
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ hwc_rect_t displayFrame = layer->displayFrame;
+
+ int s_l = sourceCrop.left;
+ int s_t = sourceCrop.top;
+ int s_r = sourceCrop.right;
+ int s_b = sourceCrop.bottom;
+
+ int d_l = displayFrame.left;
+ int d_t = displayFrame.top;
+ int d_r = displayFrame.right;
+ int d_b = displayFrame.bottom;
+
+ LOGE_IF(BYPASS_DEBUG, "src:[%d,%d,%d,%d] (%d x %d) dst:[%d,%d,%d,%d] (%d x %d)",
+ s_l, s_t, s_r, s_b, (s_r - s_l), (s_b - s_t),
+ d_l, d_t, d_r, d_b, (d_r - d_l), (d_b - d_t));
+}
+
+//Crops source buffer against destination and FB boundaries
+// The crop adjustment is proportional to how far the destination spills
+// past the framebuffer edge. Fractional scale amounts are truncated to int
+// on assignment (crop rect members are ints).
+void calculate_crop_rects(hwc_rect_t& crop, hwc_rect_t& dst, int hw_w, int hw_h) {
+
+ int& crop_x = crop.left;
+ int& crop_y = crop.top;
+ int& crop_r = crop.right;
+ int& crop_b = crop.bottom;
+ int crop_w = crop.right - crop.left;
+ int crop_h = crop.bottom - crop.top;
+
+ int& dst_x = dst.left;
+ int& dst_y = dst.top;
+ int& dst_r = dst.right;
+ int& dst_b = dst.bottom;
+ int dst_w = dst.right - dst.left;
+ int dst_h = dst.bottom - dst.top;
+
+ // Destination spills past the left edge.
+ if(dst_x < 0) {
+ float scale_x = crop_w * 1.0f / dst_w;
+ float diff_factor = (scale_x * abs(dst_x));
+ crop_x = crop_x + (int)diff_factor;
+ crop_w = crop_r - crop_x;
+
+ dst_x = 0;
+ dst_w = dst_r - dst_x;;
+ }
+ // Destination spills past the right edge.
+ if(dst_r > hw_w) {
+ float scale_x = crop_w * 1.0f / dst_w;
+ float diff_factor = scale_x * (dst_r - hw_w);
+ crop_r = crop_r - diff_factor;
+ crop_w = crop_r - crop_x;
+
+ dst_r = hw_w;
+ dst_w = dst_r - dst_x;
+ }
+ // Destination spills past the top edge.
+ if(dst_y < 0) {
+ float scale_y = crop_h * 1.0f / dst_h;
+ float diff_factor = scale_y * abs(dst_y);
+ crop_y = crop_y + diff_factor;
+ crop_h = crop_b - crop_y;
+
+ dst_y = 0;
+ dst_h = dst_b - dst_y;
+ }
+ // Destination spills past the bottom edge.
+ if(dst_b > hw_h) {
+ float scale_y = crop_h * 1.0f / dst_h;
+ float diff_factor = scale_y * (dst_b - hw_h);
+ crop_b = crop_b - diff_factor;
+ crop_h = crop_b - crop_y;
+
+ dst_b = hw_h;
+ dst_h = dst_b - dst_y;
+ }
+
+ LOGE_IF(BYPASS_DEBUG,"crop: [%d,%d,%d,%d] dst:[%d,%d,%d,%d]",
+ crop_x, crop_y, crop_w, crop_h,dst_x, dst_y, dst_w, dst_h);
+}
+
+/*
+ * Configures pipe(s) for composition bypass
+ */
+// Configure ctx->mOvUI[nPipeIndex] to scan out "layer" directly on fb0.
+// Crops the source against FB bounds when the destination is partly
+// off-screen. Returns 0 on success (or when ctx/pipe are absent), -1 on
+// non-pmem memory or commit failure.
+static int prepareBypass(hwc_context_t *ctx, hwc_layer_t *layer,
+ int nPipeIndex, int vsync_wait, int isFG) {
+
+ if (ctx && ctx->mOvUI[nPipeIndex]) {
+ overlay::OverlayUI *ovUI = ctx->mOvUI[nPipeIndex];
+
+ private_hwc_module_t* hwcModule = reinterpret_cast<
+ private_hwc_module_t*>(ctx->device.common.module);
+ if (!hwcModule) {
+ LOGE("%s: NULL Module", __FUNCTION__);
+ return -1;
+ }
+
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(!hnd) {
+ LOGE("%s: layer handle is NULL", __FUNCTION__);
+ return -1;
+ }
+
+ int hw_w = hwcModule->fbDevice->width;
+ int hw_h = hwcModule->fbDevice->height;
+
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ hwc_rect_t displayFrame = layer->displayFrame;
+
+ const int src_w = sourceCrop.right - sourceCrop.left;
+ const int src_h = sourceCrop.bottom - sourceCrop.top;
+
+ hwc_rect_t crop = sourceCrop;
+ int crop_w = crop.right - crop.left;
+ int crop_h = crop.bottom - crop.top;
+
+ hwc_rect_t dst = displayFrame;
+ int dst_w = dst.right - dst.left;
+ int dst_h = dst.bottom - dst.top;
+
+ // NOTE(review): hnd was already NULL-checked above; the hnd != NULL
+ // here is redundant.
+ if(hnd != NULL && (hnd->flags & private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM )) {
+ LOGE("%s: Unable to setup bypass due to non-pmem memory",__FUNCTION__);
+ return -1;
+ }
+
+ if(dst.left < 0 || dst.top < 0 || dst.right > hw_w || dst.bottom > hw_h) {
+ LOGE_IF(BYPASS_DEBUG,"%s: Destination has negative coordinates", __FUNCTION__);
+
+ calculate_crop_rects(crop, dst, hw_w, hw_h);
+
+ //Update calulated width and height
+ crop_w = crop.right - crop.left;
+ crop_h = crop.bottom - crop.top;
+
+ dst_w = dst.right - dst.left;
+ dst_h = dst.bottom - dst.top;
+ }
+
+ // Clamp destination that still exceeds the FB after cropping.
+ if( (dst_w > hw_w)|| (dst_h > hw_h)) {
+ LOGE_IF(BYPASS_DEBUG,"%s: Destination rectangle exceeds FB resolution", __FUNCTION__);
+ print_info(layer);
+ dst_w = hw_w;
+ dst_h = hw_h;
+ }
+
+ overlay_buffer_info info;
+ info.width = src_w;
+ info.height = src_h;
+ info.format = hnd->format;
+ info.size = hnd->size;
+
+ int fbnum = 0;
+ int orientation = layer->transform;
+ // All pipes except the last use a VG pipe.
+ const bool useVGPipe = (nPipeIndex != (MAX_BYPASS_LAYERS-1));
+ //only last layer should wait for vsync
+ const bool waitForVsync = vsync_wait;
+ const bool isFg = isFG;
+ //Just to differentiate zorders for different layers
+ const int zorder = nPipeIndex;
+
+ ovUI->setSource(info, orientation);
+ ovUI->setCrop(crop.left, crop.top, crop_w, crop_h);
+ ovUI->setDisplayParams(fbnum, waitForVsync, isFg, zorder, useVGPipe);
+ ovUI->setPosition(dst.left, dst.top, dst_w, dst_h);
+
+ LOGE_IF(BYPASS_DEBUG,"%s: Bypass set: crop[%d,%d,%d,%d] dst[%d,%d,%d,%d] waitforVsync: %d \
+ isFg: %d zorder: %d VG = %d nPipe: %d",__FUNCTION__,
+ crop.left, crop.top, crop_w, crop_h,
+ dst.left, dst.top, dst_w, dst_h,
+ waitForVsync, isFg, zorder, useVGPipe, nPipeIndex );
+
+ if(ovUI->commit() != overlay::NO_ERROR) {
+ LOGE("%s: Overlay Commit failed", __FUNCTION__);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Checks if doing comp. bypass is possible.
+ * It is possible if
+ * 1. No MDP pipe is used
+ * 2. Rotation is not needed
+ * 3. We have atmost MAX_BYPASS_LAYERS
+ */
+// See comment block above: bypass requires the feature enabled, no
+// external display, no ext-only mode, no rotation/async layers, no open
+// video overlay, and at most MAX_BYPASS_LAYERS layers.
+inline static bool isBypassDoable(hwc_composer_device_t *dev, const int yuvCount,
+ const hwc_layer_list_t* list) {
+ hwc_context_t* ctx = (hwc_context_t*)(dev);
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ dev->common.module);
+ //Check if enabled in build.prop
+ if(hwcModule->isBypassEnabled == false) {
+ return false;
+ }
+
+ if(list->numHwLayers < 1) {
+ return false;
+ }
+
+#if defined HDMI_DUAL_DISPLAY
+ //Disable bypass when HDMI is enabled
+ if(ctx->mHDMIEnabled || ctx->pendingHDMI) {
+ return false;
+ }
+#endif
+
+ if(ExtDispOnly::isModeOn()) {
+ return false;
+ }
+
+ //Bypass is not efficient if rotation or asynchronous mode is needed.
+ // NOTE(review): int i vs. size_t numHwLayers — signed/unsigned compare.
+ for(int i = 0; i < list->numHwLayers; ++i) {
+ if(list->hwLayers[i].transform) {
+ return false;
+ }
+ if(list->hwLayers[i].flags & HWC_LAYER_ASYNCHRONOUS) {
+ return false;
+ }
+ }
+
+ return (yuvCount == 0) && (ctx->hwcOverlayStatus == HWC_OVERLAY_CLOSED)
+ && (list->numHwLayers <= MAX_BYPASS_LAYERS);
+}
+
+// Mark every layer assigned to a bypass pipe (ctx->layerindex) as
+// overlay-composed, and skip GPU composition entirely when all layers got
+// a pipe.
+void setBypassLayerFlags(hwc_context_t* ctx, hwc_layer_list_t* list)
+{
+ for(int index = 0 ; index < MAX_BYPASS_LAYERS; index++ )
+ {
+ int layer_index = ctx->layerindex[index];
+ if(layer_index >= 0) {
+ hwc_layer_t* layer = &(list->hwLayers[layer_index]);
+
+ layer->flags |= HWC_COMP_BYPASS;
+ layer->compositionType = HWC_USE_OVERLAY;
+ layer->hints |= HWC_HINT_CLEAR_FB;
+ }
+ }
+
+ if( list->numHwLayers > ctx->nPipesUsed ) {
+ list->flags &= ~HWC_SKIP_COMPOSITION; //Compose to FB
+ } else {
+ list->flags |= HWC_SKIP_COMPOSITION; // Dont
+ }
+}
+
+// Assign bypass pipes to layers in z-order and configure each via
+// prepareBypass(). Returns false as soon as any pipe fails to configure.
+// Records the layer->pipe mapping in ctx->layerindex and the pipe count in
+// ctx->nPipesUsed.
+bool setupBypass(hwc_context_t* ctx, hwc_layer_list_t* list) {
+ int nPipeIndex, vsync_wait, isFG;
+ int numHwLayers = list->numHwLayers;
+ int nPipeAvailable = MAX_BYPASS_LAYERS;
+
+ for (int index = 0 ; (index < numHwLayers) && nPipeAvailable; index++) {
+
+ hwc_layer_t* layer = &(list->hwLayers[index]);
+
+ nPipeIndex = MAX_BYPASS_LAYERS - nPipeAvailable;
+ //Set VSYNC wait is needed only for the last pipe queued
+ vsync_wait = (nPipeIndex == (numHwLayers-1));
+ //Set isFG to true for layer with z-order zero
+ isFG = !index;
+
+ //Clear Bypass flags for the layer
+ layer->flags &= ~HWC_COMP_BYPASS;
+ // NOTE(review): this sets ALL index bits; setLayerbypassIndex() below
+ // clears the field before storing the real index on success, so an
+ // all-ones field marks "no pipe assigned".
+ layer->flags |= HWC_BYPASS_INDEX_MASK;
+
+ if( prepareBypass(ctx, &(list->hwLayers[index]), nPipeIndex, vsync_wait, isFG) != 0 ) {
+ LOGE_IF(BYPASS_DEBUG, "%s: layer %d failed to configure bypass for pipe index: %d",
+ __FUNCTION__, index, nPipeIndex);
+ return false;
+ } else {
+ ctx->layerindex[nPipeIndex] = index;
+ setLayerbypassIndex(layer, nPipeIndex);
+ nPipeAvailable--;
+ }
+ }
+ ctx->nPipesUsed = MAX_BYPASS_LAYERS - nPipeAvailable;
+ return true;
+}
+
+// Clear the HWC_COMP_BYPASS flag from every layer in the list.
+void unsetBypassLayerFlags(hwc_layer_list_t* list) {
+ if (!list)
+ return;
+
+ for (int index = 0 ; index < list->numHwLayers; index++) {
+ if(list->hwLayers[index].flags & HWC_COMP_BYPASS) {
+ list->hwLayers[index].flags &= ~HWC_COMP_BYPASS;
+ }
+ }
+}
+
+// Reset every per-pipe genlock bookkeeping slot to UNLOCKED.
+void unsetBypassBufferLockState(hwc_context_t* ctx) {
+ for (int i= 0; i< MAX_BYPASS_LAYERS; i++) {
+ ctx->bypassBufferLockState[i] = BYPASS_BUFFER_UNLOCKED;
+ }
+}
+
+// For each bypass pipe, remember the handle queued this cycle (if its
+// genlock was acquired) so unlockPreviousBypassBuffers() can release it
+// next cycle; the handle is tagged with PRIV_FLAGS_HWC_LOCK.
+void storeLockedBypassHandle(hwc_layer_list_t* list, hwc_context_t* ctx) {
+ if (!list)
+ return;
+
+ for(int index = 0; index < MAX_BYPASS_LAYERS; index++ ) {
+ const int layer_index = ctx->layerindex[index];
+ // BUGFIX: unused pipe slots hold -1; indexing hwLayers with -1 read
+ // out of bounds. Skip them and leave previous bookkeeping untouched.
+ if (layer_index < 0)
+ continue;
+
+ // Reference, not a by-value copy of the whole hwc_layer_t.
+ const hwc_layer_t& layer = list->hwLayers[layer_index];
+
+ if (layer.flags & HWC_COMP_BYPASS) {
+ private_handle_t *hnd = (private_handle_t*)layer.handle;
+
+ if (ctx->bypassBufferLockState[index] == BYPASS_BUFFER_LOCKED) {
+ ctx->previousBypassHandle[index] = (native_handle_t*)layer.handle;
+ hnd->flags |= private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ } else {
+ ctx->previousBypassHandle[index] = NULL;
+ }
+ }
+ }
+}
+
+// Close bypass pipes beyond ctx->nPipesUsed (unused pipes are the
+// higher-z ones), unlocking and forgetting any buffer still cached on
+// them first.
+void closeExtraPipes(hwc_context_t* ctx) {
+
+ int pipes_used = ctx->nPipesUsed;
+
+ //Unused pipes must be of higher z-order
+ for (int i = pipes_used ; i < MAX_BYPASS_LAYERS; i++) {
+ if (ctx->previousBypassHandle[i]) {
+ private_handle_t *hnd = (private_handle_t*) ctx->previousBypassHandle[i];
+
+ // Only unlock if the handle is still valid (not deallocated).
+ if (!private_handle_t::validate(ctx->previousBypassHandle[i])) {
+ if (GENLOCK_FAILURE == genlock_unlock_buffer(ctx->previousBypassHandle[i])) {
+ LOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+ } else {
+ ctx->previousBypassHandle[i] = NULL;
+ ctx->bypassBufferLockState[i] = BYPASS_BUFFER_UNLOCKED;
+ hnd->flags &= ~private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ }
+ }
+ }
+ ctx->mOvUI[i]->closeChannel();
+ ctx->layerindex[i] = -1;
+ }
+}
+#endif //COMPOSITION_BYPASS
+
+// Tell the framebuffer HAL whether a video overlay is active so gralloc
+// can stop/start UI mirroring accordingly. Returns 0 on success or when
+// HDMI_DUAL_DISPLAY is compiled out, -1 on missing module/fb device.
+static int setVideoOverlayStatusInGralloc(hwc_context_t* ctx, const bool enable) {
+#if defined HDMI_DUAL_DISPLAY
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ ctx->device.common.module);
+ if(!hwcModule) {
+ LOGE("%s: invalid params", __FUNCTION__);
+ return -1;
+ }
+
+ framebuffer_device_t *fbDev = hwcModule->fbDevice;
+ if (!fbDev) {
+ LOGE("%s: fbDev is NULL", __FUNCTION__);
+ return -1;
+ }
+
+ // Inform the gralloc to stop or start UI mirroring
+ fbDev->videoOverlayStarted(fbDev, enable);
+#endif
+ return 0;
+}
+
+// Advance the overlay-status state machine for this frame. When video
+// disappears, OPEN goes to PREPARE_TO_CLOSE first, so the actual close is
+// deferred by one cycle (performed in hwc_closeOverlayChannels).
+static void setHWCOverlayStatus(hwc_context_t *ctx, bool isVideoPresent) {
+
+ switch (ctx->hwcOverlayStatus) {
+ case HWC_OVERLAY_OPEN:
+ ctx->hwcOverlayStatus =
+ isVideoPresent ? HWC_OVERLAY_OPEN : HWC_OVERLAY_PREPARE_TO_CLOSE;
+ break;
+ case HWC_OVERLAY_PREPARE_TO_CLOSE:
+ ctx->hwcOverlayStatus =
+ isVideoPresent ? HWC_OVERLAY_OPEN : HWC_OVERLAY_CLOSED;
+ break;
+ case HWC_OVERLAY_CLOSED:
+ ctx->hwcOverlayStatus =
+ isVideoPresent ? HWC_OVERLAY_OPEN : HWC_OVERLAY_CLOSED;
+ break;
+ default:
+ LOGE("%s: Invalid hwcOverlayStatus (status =%d)", __FUNCTION__,
+ ctx->hwcOverlayStatus);
+ break;
+ }
+}
+
+// If the overlay state machine says the video channel is no longer needed
+// (PREPARE_TO_CLOSE), close it and tell gralloc to resume UI mirroring.
+// Returns 0, or -1 when the overlay object is missing.
+static int hwc_closeOverlayChannels(hwc_context_t* ctx) {
+#ifdef USE_OVERLAY
+ overlay::Overlay *ovLibObject = ctx->mOverlayLibObject;
+ if(!ovLibObject) {
+ LOGE("%s: invalid params", __FUNCTION__);
+ return -1;
+ }
+
+ if (HWC_OVERLAY_PREPARE_TO_CLOSE == ctx->hwcOverlayStatus) {
+ // Video mirroring is going on, and we do not have any layers to
+ // mirror directly. Close the current video channel and inform the
+ // gralloc to start UI mirroring
+ ovLibObject->closeChannel();
+ // Inform the gralloc that video overlay has stopped.
+ setVideoOverlayStatusInGralloc(ctx, false);
+ }
+#endif
+ return 0;
+}
+
+/*
+ * Configures mdp pipes
+ */
+// Configure the video overlay for "layer": source buffer info, transform,
+// crop and position (full-FB when HWC_USE_ORIGINAL_RESOLUTION is set).
+// Tears down any active composition bypass first. Returns 0 on success,
+// -1 on any overlayLib failure.
+static int prepareOverlay(hwc_context_t *ctx, hwc_layer_t *layer, const int flags) {
+ int ret = 0;
+
+#ifdef COMPOSITION_BYPASS
+ // Overlay video and bypass are mutually exclusive; shut bypass down.
+ if(ctx && (ctx->bypassState != BYPASS_OFF)) {
+ ctx->nPipesUsed = 0;
+ closeExtraPipes(ctx);
+ ctx->bypassState = BYPASS_OFF;
+ }
+#endif
+
+ if (LIKELY(ctx && ctx->mOverlayLibObject)) {
+ private_hwc_module_t* hwcModule =
+ reinterpret_cast<private_hwc_module_t*>(ctx->device.common.module);
+ if (UNLIKELY(!hwcModule)) {
+ LOGE("prepareOverlay null module ");
+ return -1;
+ }
+
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ overlay::Overlay *ovLibObject = ctx->mOverlayLibObject;
+ overlay_buffer_info info;
+ info.width = hnd->width;
+ info.height = hnd->height;
+ info.format = hnd->format;
+ info.size = hnd->size;
+
+ int hdmiConnected = 0;
+
+#if defined HDMI_DUAL_DISPLAY
+ if(!ctx->pendingHDMI) //makes sure the UI channel is opened first
+ hdmiConnected = (int)ctx->mHDMIEnabled;
+#endif
+ ret = ovLibObject->setSource(info, layer->transform,
+ hdmiConnected, flags);
+ if (!ret) {
+ LOGE("prepareOverlay setSource failed");
+ return -1;
+ }
+
+ ret = ovLibObject->setTransform(layer->transform);
+ if (!ret) {
+ LOGE("prepareOverlay setTransform failed transform %x",
+ layer->transform);
+ return -1;
+ }
+
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ ret = ovLibObject->setCrop(sourceCrop.left, sourceCrop.top,
+ (sourceCrop.right - sourceCrop.left),
+ (sourceCrop.bottom - sourceCrop.top));
+ if (!ret) {
+ LOGE("prepareOverlay setCrop failed");
+ return -1;
+ }
+#if defined HDMI_DUAL_DISPLAY
+ // Send the device orientation to overlayLib
+ // NOTE(review): hwcModule was already NULL-checked above; this check
+ // is redundant.
+ if(hwcModule) {
+ framebuffer_device_t *fbDev = reinterpret_cast<framebuffer_device_t*>
+ (hwcModule->fbDevice);
+ if(fbDev) {
+ private_module_t* m = reinterpret_cast<private_module_t*>(
+ fbDev->common.module);
+ if(m)
+ ovLibObject->setDeviceOrientation(m->orientation);
+ }
+ }
+#endif
+ if (layer->flags & HWC_USE_ORIGINAL_RESOLUTION) {
+ framebuffer_device_t* fbDev = hwcModule->fbDevice;
+ ret = ovLibObject->setPosition(0, 0,
+ fbDev->width, fbDev->height);
+ } else {
+ hwc_rect_t displayFrame = layer->displayFrame;
+ ret = ovLibObject->setPosition(displayFrame.left, displayFrame.top,
+ (displayFrame.right - displayFrame.left),
+ (displayFrame.bottom - displayFrame.top));
+ }
+ if (!ret) {
+ LOGE("prepareOverlay setPosition failed");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+// Genlock-unlock the video overlay buffer held from the previous cycle.
+// On unlock failure the handle is kept so the unlock can be retried.
+void unlockPreviousOverlayBuffer(hwc_context_t* ctx)
+{
+ if (ctx->previousOverlayHandle) {
+ // Validate the handle before attempting to use it.
+ if (!private_handle_t::validate(ctx->previousOverlayHandle)) {
+ private_handle_t *hnd = (private_handle_t*)ctx->previousOverlayHandle;
+ // Unlock any previously locked buffers
+ if (private_handle_t::PRIV_FLAGS_HWC_LOCK & hnd->flags) {
+ if (GENLOCK_NO_ERROR == genlock_unlock_buffer(ctx->previousOverlayHandle)) {
+ ctx->previousOverlayHandle = NULL;
+ hnd->flags &= ~private_handle_t::PRIV_FLAGS_HWC_LOCK;
+ } else {
+ LOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+ }
+ }
+ }
+ }
+}
+
+// Decide whether GPU composition to the FB can be skipped this frame:
+// only in the single-video case, when the layer count is unchanged from
+// the last frame and every non-video layer is non-updating (or the video
+// is the sole layer). CPU-composition targets never skip.
+bool canSkipComposition(hwc_context_t* ctx, int yuvBufferCount, int currentLayerCount,
+ int numLayersNotUpdating)
+{
+ if (!ctx) {
+ LOGE("%s: invalid context",__FUNCTION__);
+ return false;
+ }
+
+ hwc_composer_device_t* dev = (hwc_composer_device_t *)(ctx);
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ dev->common.module);
+ if (hwcModule->compositionType == COMPOSITION_TYPE_CPU)
+ return false;
+
+ //Video / Camera case
+ if (yuvBufferCount == 1) {
+ //If the previousLayerCount is anything other than the current count, it
+ //means something changed and we need to compose atleast once to FB.
+ if (currentLayerCount != ctx->previousLayerCount) {
+ ctx->previousLayerCount = currentLayerCount;
+ return false;
+ }
+ // We either have only one overlay layer or we have
+ // all non-updating UI layers.
+ // We can skip the composition of the UI layers.
+ if ((currentLayerCount == 1) ||
+ ((currentLayerCount - 1) == numLayersNotUpdating)) {
+ return true;
+ }
+ } else {
+ // Not single-video: invalidate the cached count.
+ ctx->previousLayerCount = -1;
+ }
+ return false;
+}
+
+// Width/height of a layer's destination rect on screen.
+inline void getLayerResolution(const hwc_layer_t* layer, int& width, int& height)
+{
+ hwc_rect_t displayFrame = layer->displayFrame;
+
+ width = displayFrame.right - displayFrame.left;
+ height = displayFrame.bottom - displayFrame.top;
+}
+
+// Copybit is worthwhile only when the total blit area is at most two full
+// screens; beyond that GPU composition wins.
+static bool canUseCopybit(const framebuffer_device_t* fbDev, const hwc_layer_list_t* list) {
+
+ if(!fbDev) {
+ LOGE("ERROR: %s : fb device is invalid",__func__);
+ return false;
+ }
+
+ if (!list)
+ return false;
+
+ int fb_w = fbDev->width;
+ int fb_h = fbDev->height;
+
+ /*
+ * Use copybit only when we need to blit
+ * max 2 full screen sized regions
+ */
+
+ unsigned int renderArea = 0;
+
+ for(int i = 0; i < list->numHwLayers; i++ ) {
+ int w, h;
+ getLayerResolution(&list->hwLayers[i], w, h);
+ renderArea += w*h;
+ }
+
+ return (renderArea <= (2 * fb_w * fb_h));
+}
+
+// Apply an external-display state change: forward it to the framebuffer
+// HAL (only while in default mirror mode — ext-only mode owns the display
+// itself) and close external overlay channels on disconnect.
+static void handleHDMIStateChange(hwc_composer_device_t *dev, int externaltype) {
+#if defined HDMI_DUAL_DISPLAY
+ hwc_context_t* ctx = (hwc_context_t*)(dev);
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ dev->common.module);
+ //Route the event to fbdev only if we are in default mirror mode
+ if(ExtDispOnly::isModeOn() == false) {
+ framebuffer_device_t *fbDev = hwcModule->fbDevice;
+ if (fbDev) {
+ fbDev->enableHDMIOutput(fbDev, externaltype);
+ }
+
+ if(ctx && ctx->mOverlayLibObject) {
+ overlay::Overlay *ovLibObject = ctx->mOverlayLibObject;
+ if (!externaltype) {
+ // Close the external overlay channels if HDMI is disconnected
+ ovLibObject->closeExternalChannel();
+ }
+ }
+ }
+#endif
+}
+
+/*
+ * function to set the status of external display in hwc
+ * Just mark flags and do stuff after eglSwapBuffers
+ * externaltype - can be HDMI, WIFI or OFF
+ */
+// Record the new external display type. A switch between two active
+// externals (HDMI<->WFD) first tears the old one down; a connect is only
+// flagged (pendingHDMI) and completed after the next swap, while a
+// disconnect is handled immediately.
+static void hwc_enableHDMIOutput(hwc_composer_device_t *dev, int externaltype) {
+#if defined HDMI_DUAL_DISPLAY
+ hwc_context_t* ctx = (hwc_context_t*)(dev);
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ dev->common.module);
+ framebuffer_device_t *fbDev = hwcModule->fbDevice;
+ overlay::Overlay *ovLibObject = ctx->mOverlayLibObject;
+ if(externaltype && ctx->mHDMIEnabled &&
+ (externaltype != ctx->mHDMIEnabled)) {
+ // Close the current external display - as the SF will
+ // prioritize and send the correct external display HDMI/WFD
+ handleHDMIStateChange(dev, 0);
+ }
+ // Store the external display
+ ctx->mHDMIEnabled = (external_display)externaltype;
+ if(ctx->mHDMIEnabled) { //On connect, allow bypass to draw once to FB
+ ctx->pendingHDMI = true;
+ } else { //On disconnect, close immediately (there will be no bypass)
+ handleHDMIStateChange(dev, ctx->mHDMIEnabled);
+ }
+#endif
+}
+
+// Validate that a destination rect is non-degenerate, non-negative and
+// entirely within the framebuffer. Returns false (with a log) otherwise.
+static bool isValidDestination(const framebuffer_device_t* fbDev, const hwc_rect_t& rect)
+{
+ if (!fbDev) {
+ LOGE("%s: fbDev is null", __FUNCTION__);
+ return false;
+ }
+
+ int dest_width = (rect.right - rect.left);
+ int dest_height = (rect.bottom - rect.top);
+
+ if (rect.left < 0 || rect.right < 0 || rect.top < 0 || rect.bottom < 0
+ || dest_width <= 0 || dest_height <= 0) {
+ // BUGFIX: the concatenated literal lacked a separator, logging
+ // "width=5height=3"; a space is added before "height".
+ LOGE("%s: destination: left=%d right=%d top=%d bottom=%d width=%d"
+ " height=%d", __FUNCTION__, rect.left, rect.right, rect.top,
+ rect.bottom, dest_width, dest_height);
+ return false;
+ }
+
+ if ((rect.left+dest_width) > fbDev->width || (rect.top+dest_height) > fbDev->height) {
+ LOGE("%s: destination out of bound params", __FUNCTION__);
+ return false;
+ }
+
+ return true;
+}
+
+static int getYUVBufferCount (const hwc_layer_list_t* list) {
+ int yuvBufferCount = 0;
+ if (list) {
+ for (size_t i=0 ; i<list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if (hnd && (hnd->bufferType == BUFFER_TYPE_VIDEO) &&
+ !(list->hwLayers[i].flags & HWC_DO_NOT_USE_OVERLAY)) {
+ yuvBufferCount++;
+ if (yuvBufferCount > 1) {
+ break;
+ }
+ }
+ }
+ }
+ return yuvBufferCount;
+}
+
+static int getS3DVideoFormat (const hwc_layer_list_t* list) {
+ int s3dFormat = 0;
+ if (list) {
+ for (size_t i=0; i<list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if (hnd && (hnd->bufferType == BUFFER_TYPE_VIDEO))
+ s3dFormat = FORMAT_3D_INPUT(hnd->format);
+ if (s3dFormat)
+ break;
+ }
+ }
+ return s3dFormat;
+}
+
+static int getS3DFormat (const hwc_layer_list_t* list) {
+ int s3dFormat = 0;
+ if (list) {
+ for (size_t i=0; i<list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if (hnd)
+ s3dFormat = FORMAT_3D_INPUT(hnd->format);
+ if (s3dFormat)
+ break;
+ }
+ }
+ return s3dFormat;
+}
+
+
+static int getLayerS3DFormat (hwc_layer_t &layer) {
+ int s3dFormat = 0;
+ private_handle_t *hnd = (private_handle_t *)layer.handle;
+ if (hnd)
+ s3dFormat = FORMAT_3D_INPUT(hnd->format);
+ return s3dFormat;
+}
/*
 * True when UI layers must be re-composed into an S3D layout, i.e. when the
 * panel itself is a 3D TV — only possible when HDMI is the primary display.
 * Fix: use #else so the `return false;` is no longer unreachable dead code
 * when HDMI_AS_PRIMARY is defined.
 */
static bool isS3DCompositionRequired() {
#ifdef HDMI_AS_PRIMARY
    return overlay::is3DTV();
#else
    return false;
#endif
}
+
/*
 * Tags a UI (non-video) layer so that GLES composes it in the S3D layout
 * matching the currently playing S3D video: side-by-side or top-bottom.
 * Forces GPU composition for the layer and sets the corresponding draw hint.
 * Compiled to a no-op unless HDMI_AS_PRIMARY is defined.
 */
static void markUILayerForS3DComposition (hwc_layer_t &layer, int s3dVideoFormat) {
#ifdef HDMI_AS_PRIMARY
    layer.compositionType = HWC_FRAMEBUFFER;
    switch(s3dVideoFormat) {
        case HAL_3D_IN_SIDE_BY_SIDE_L_R:
        case HAL_3D_IN_SIDE_BY_SIDE_R_L:
            layer.hints |= HWC_HINT_DRAW_S3D_SIDE_BY_SIDE;
            break;
        case HAL_3D_IN_TOP_BOTTOM:
            layer.hints |= HWC_HINT_DRAW_S3D_TOP_BOTTOM;
            break;
        default:
            LOGE("%s: Unknown S3D input format 0x%x", __FUNCTION__, s3dVideoFormat);
            break;
    }
#endif
    return;
}
+
+static int getLayersNotUpdatingCount(const hwc_layer_list_t* list) {
+ int numLayersNotUpdating = 0;
+ if (list) {
+ for (size_t i=0 ; i<list->numHwLayers; i++) {
+ private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
+ if (hnd && (hnd->bufferType != BUFFER_TYPE_VIDEO) &&
+ list->hwLayers[i].flags & HWC_LAYER_NOT_UPDATING)
+ numLayersNotUpdating++;
+ }
+ }
+ return numLayersNotUpdating;
+}
+
/*
 * HWC prepare entry point.  Walks the layer list and decides, per layer,
 * whether it will be drawn via the overlay (MDP), via copybit (C2D/MDP 2D
 * blit), or by the GPU (GLES), recording the decision in each layer's
 * compositionType/hints.  Also decides whether eglSwapBuffers can be skipped
 * entirely (HWC_SKIP_COMPOSITION) and, under COMPOSITION_BYPASS, whether the
 * whole frame can bypass GLES onto dedicated MDP pipes.
 * A NULL list means the display is suspended: release held buffers.
 * Returns 0 on success, -1 on an invalid context/module.
 */
static int hwc_prepare(hwc_composer_device_t *dev, hwc_layer_list_t* list) {

    hwc_context_t* ctx = (hwc_context_t*)(dev);

    if(!ctx) {
        LOGE("hwc_prepare invalid context");
        return -1;
    }

    private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
                                                        dev->common.module);
    if (!hwcModule) {
        LOGE("hwc_prepare invalid module");
#ifdef COMPOSITION_BYPASS
        unlockPreviousBypassBuffers(ctx);
        unsetBypassBufferLockState(ctx);
#endif
        unlockPreviousOverlayBuffer(ctx);
        ExtDispOnly::close();
        return -1;
    }

    int yuvBufferCount = 0;
    int layerType = 0;
    bool isS3DCompositionNeeded = false;
    int s3dVideoFormat = 0;
    int numLayersNotUpdating = 0;
    bool useCopybit = false;
    bool isSkipLayerPresent = false;
    bool skipComposition = false;

    if (list) {
        useCopybit = canUseCopybit(hwcModule->fbDevice, list);
        yuvBufferCount = getYUVBufferCount(list);
        numLayersNotUpdating = getLayersNotUpdatingCount(list);
        skipComposition = canSkipComposition(ctx, yuvBufferCount,
                                             list->numHwLayers, numLayersNotUpdating);

        // S3D composition is needed either for a single S3D video layer or
        // when any layer at all carries an S3D format; with no S3D and no
        // single video layer, the previously held overlay buffer is released.
        if (yuvBufferCount == 1) {
            s3dVideoFormat = getS3DVideoFormat(list);
            if (s3dVideoFormat)
                isS3DCompositionNeeded = isS3DCompositionRequired();
        } else if((s3dVideoFormat = getS3DFormat(list))){
            if (s3dVideoFormat)
                isS3DCompositionNeeded = isS3DCompositionRequired();
        } else {
            unlockPreviousOverlayBuffer(ctx);
        }

        if (list->flags & HWC_GEOMETRY_CHANGED) {
            if (yuvBufferCount == 1) {
                // Inform the gralloc of the current video overlay status
                setVideoOverlayStatusInGralloc(ctx, true);
            }
        }

        for (size_t i=0 ; i<list->numHwLayers ; i++) {
            private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;

            // If there is a single Fullscreen layer, we can bypass it - TBD
            // If there is only one video/camera buffer, we can bypass it
            if (list->hwLayers[i].flags & HWC_SKIP_LAYER) {
                // During the animation UI layers are marked as SKIP
                // need to still mark the layer for S3D composition
                isSkipLayerPresent = true;
                skipComposition = false;
                //Reset count, so that we end up composing once after animation
                //is over, in case of overlay.
                ctx->previousLayerCount = -1;

                if (isS3DCompositionNeeded)
                    markUILayerForS3DComposition(list->hwLayers[i], s3dVideoFormat);

// LGE_CHANGE_E, [G1_Player][bokyung.kim@lge.com], 20120201, Apply SR 00744210 to fix screen flicker {
                ssize_t layer_countdown = ((ssize_t)i) - 1;
                // Mark every layer below the SKIP layer to be composed by the GPU
                while (layer_countdown >= 0)
                {
                    private_handle_t *countdown_handle =
                        (private_handle_t *)list->hwLayers[layer_countdown].handle;
                    // A video layer below the SKIP layer loses its overlay;
                    // release the buffer the overlay was holding.
                    if (countdown_handle && (countdown_handle->bufferType == BUFFER_TYPE_VIDEO)
                        && (yuvBufferCount == 1)) {
                        unlockPreviousOverlayBuffer(ctx);
                    }
                    list->hwLayers[layer_countdown].compositionType = HWC_FRAMEBUFFER;
                    list->hwLayers[layer_countdown].hints &= ~HWC_HINT_CLEAR_FB;
                    layer_countdown--;
                }
// LGE_CHANGE_E, [G1_Player][bokyung.kim@lge.com], 20120201, Apply SR 00744210 to fix screen flicker }
                continue;
            }
            if (hnd && (hnd->bufferType == BUFFER_TYPE_VIDEO) && (yuvBufferCount == 1)) {
                // The single video layer: try the overlay first.
                int flags = WAIT_FOR_VSYNC;
                flags |= (hnd->flags &
                          private_handle_t::PRIV_FLAGS_SECURE_BUFFER)?
                         SECURE_OVERLAY_SESSION : 0;
                flags |= (1 == list->numHwLayers) ? DISABLE_FRAMEBUFFER_FETCH : 0;
                if (!isValidDestination(hwcModule->fbDevice, list->hwLayers[i].displayFrame)) {
                    list->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
                    //Even though there are no skip layers, animation is still
                    //ON and in its final stages.
                    //Reset count, so that we end up composing once after animation
                    //is done, if overlay is used.
                    ctx->previousLayerCount = -1;
                    skipComposition = false;
#ifdef USE_OVERLAY
                } else if(prepareOverlay(ctx, &(list->hwLayers[i]), flags) == 0) {
                    list->hwLayers[i].compositionType = HWC_USE_OVERLAY;
                    list->hwLayers[i].hints |= HWC_HINT_CLEAR_FB;
                    // We've opened the channel. Set the state to open.
                    ctx->hwcOverlayStatus = HWC_OVERLAY_OPEN;
#endif
                } else if (hwcModule->compositionType & (COMPOSITION_TYPE_C2D|
                            COMPOSITION_TYPE_MDP)) {
                    //Fail safe path: If drawing with overlay fails,

                    //Use C2D if available.
                    list->hwLayers[i].compositionType = HWC_USE_COPYBIT;
                } else {
                    //If C2D is not enabled fall back to GPU.
                    list->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
                }
                if (HWC_USE_OVERLAY != list->hwLayers[i].compositionType) {
                    unlockPreviousOverlayBuffer(ctx);
                    skipComposition = false;
                }
            } else if (getLayerS3DFormat(list->hwLayers[i])) {
                // Any S3D layer goes to the overlay when possible.
                int flags = WAIT_FOR_VSYNC;
                flags |= (1 == list->numHwLayers) ? DISABLE_FRAMEBUFFER_FETCH : 0;
                flags |= (hnd->flags &
                          private_handle_t::PRIV_FLAGS_SECURE_BUFFER)?
                         SECURE_OVERLAY_SESSION : 0;
#ifdef USE_OVERLAY
                if(prepareOverlay(ctx, &(list->hwLayers[i]), flags) == 0) {
                    list->hwLayers[i].compositionType = HWC_USE_OVERLAY;
                    list->hwLayers[i].hints |= HWC_HINT_CLEAR_FB;
                    // We've opened the channel. Set the state to open.
                    ctx->hwcOverlayStatus = HWC_OVERLAY_OPEN;
                }
#endif
            } else if (isS3DCompositionNeeded) {
                markUILayerForS3DComposition(list->hwLayers[i], s3dVideoFormat);
            } else if (list->hwLayers[i].flags & HWC_USE_ORIGINAL_RESOLUTION) {
                list->hwLayers[i].compositionType = HWC_USE_OVERLAY;
                list->hwLayers[i].hints |= HWC_HINT_CLEAR_FB;
                layerType |= HWC_ORIG_RESOLUTION;
            } else if (hnd && hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY) {
                //handle later after other layers are handled
            } else if (hnd && (hwcModule->compositionType &
                       (COMPOSITION_TYPE_C2D|COMPOSITION_TYPE_MDP))) {
                list->hwLayers[i].compositionType = HWC_USE_COPYBIT;
            } else if ((hwcModule->compositionType == COMPOSITION_TYPE_DYN)
                       && useCopybit) {
                list->hwLayers[i].compositionType = HWC_USE_COPYBIT;
            }
            else {
                list->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
            }
        }

        //Update the stats and pipe config for external-only layers
        ExtDispOnly::update(ctx, list);

        if (skipComposition) {
            list->flags |= HWC_SKIP_COMPOSITION;
        } else {
            list->flags &= ~HWC_SKIP_COMPOSITION;
        }

#ifdef COMPOSITION_BYPASS
        bool isBypassUsed = true;
        bool isDoable = isBypassDoable(dev, yuvBufferCount, list);
        //Check if bypass is feasible
        if(isDoable && !isSkipLayerPresent) {
            if(setupBypass(ctx, list)) {
                setBypassLayerFlags(ctx, list);
                ctx->bypassState = BYPASS_ON;
            } else {
                LOGE_IF(BYPASS_DEBUG,"%s: Bypass setup Failed",__FUNCTION__);
                isBypassUsed = false;
            }
        } else {
            LOGE_IF(BYPASS_DEBUG,"%s: Bypass not possible[%d,%d]",__FUNCTION__,
                    isDoable, !isSkipLayerPresent );
            isBypassUsed = false;
        }

        //Reset bypass states
        if(!isBypassUsed) {
            ctx->nPipesUsed = 0;
            unsetBypassLayerFlags(list);
            if(ctx->bypassState == BYPASS_ON) {
                ctx->bypassState = BYPASS_OFF_PENDING;
            }
        }
#endif
    } else {
        // Suspended (NULL list): release every buffer HWC is holding.
#ifdef COMPOSITION_BYPASS
        unlockPreviousBypassBuffers(ctx);
        unsetBypassBufferLockState(ctx);
#endif
        unlockPreviousOverlayBuffer(ctx);
    }
    return 0;
}
+// ---------------------------------------------------------------------------
// Iteration cursor over a region's rect array: indices [current, end).
struct range {
    int current;
    int end;
};
/*
 * Adapter that exposes an hwc_region_t through the copybit_region_t callback
 * interface, so a layer's visible region can be handed straight to
 * copybit->stretch().  'r' is mutable because copybit advances the iterator
 * via a pointer-to-const callback.
 */
struct region_iterator : public copybit_region_t {

    region_iterator(hwc_region_t region) {
        mRegion = region;
        r.end = region.numRects;
        r.current = 0;
        this->next = iterate;
    }

private:
    // copybit callback: copies the next rect into *rect and returns 1, or
    // returns 0 once the region is exhausted (or on bad arguments).
    static int iterate(copybit_region_t const * self, copybit_rect_t* rect) {
        if (!self || !rect) {
            LOGE("iterate invalid parameters");
            return 0;
        }

        region_iterator const* me = static_cast<region_iterator const*>(self);
        if (me->r.current != me->r.end) {
            rect->l = me->mRegion.rects[me->r.current].left;
            rect->t = me->mRegion.rects[me->r.current].top;
            rect->r = me->mRegion.rects[me->r.current].right;
            rect->b = me->mRegion.rects[me->r.current].bottom;
            me->r.current++;
            return 1;
        }
        return 0;
    }

    hwc_region_t mRegion;
    mutable range r;
};
+
+static int drawLayerUsingCopybit(hwc_composer_device_t *dev, hwc_layer_t *layer, EGLDisplay dpy,
+ EGLSurface surface)
+{
+ hwc_context_t* ctx = (hwc_context_t*)(dev);
+ if(!ctx) {
+ LOGE("drawLayerUsingCopybit null context ");
+ return -1;
+ }
+
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(dev->common.module);
+ if(!hwcModule) {
+ LOGE("drawLayerUsingCopybit null module ");
+ return -1;
+ }
+
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(!hnd) {
+ LOGE("drawLayerUsingCopybit invalid handle");
+ return -1;
+ }
+
+ // Lock this buffer for read.
+ genlock_lock_type lockType = GENLOCK_READ_LOCK;
+ int err = genlock_lock_buffer(hnd, lockType, GENLOCK_MAX_TIMEOUT);
+ if (GENLOCK_FAILURE == err) {
+ LOGE("%s: genlock_lock_buffer(READ) failed", __FUNCTION__);
+ return -1;
+ }
+ //render buffer
+ android_native_buffer_t *renderBuffer = (android_native_buffer_t *)eglGetRenderBufferANDROID(dpy, surface);
+ if (!renderBuffer) {
+ LOGE("eglGetRenderBufferANDROID returned NULL buffer");
+ genlock_unlock_buffer(hnd);
+ return -1;
+ }
+ private_handle_t *fbHandle = (private_handle_t *)renderBuffer->handle;
+ if(!fbHandle) {
+ LOGE("Framebuffer handle is NULL");
+ genlock_unlock_buffer(hnd);
+ return -1;
+ }
+ int alignment = 32;
+ if( HAL_PIXEL_FORMAT_RGB_565 == fbHandle->format )
+ alignment = 16;
+ // Set the copybit source:
+ copybit_image_t src;
+ src.w = ALIGN(hnd->width, alignment);
+ src.h = hnd->height;
+ src.format = hnd->format;
+ src.base = (void *)hnd->base;
+ src.handle = (native_handle_t *)layer->handle;
+ src.horiz_padding = src.w - hnd->width;
+ // Initialize vertical padding to zero for now,
+ // this needs to change to accomodate vertical stride
+ // if needed in the future
+ src.vert_padding = 0;
+
+ // Copybit source rect
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ copybit_rect_t srcRect = {sourceCrop.left, sourceCrop.top,
+ sourceCrop.right,
+ sourceCrop.bottom};
+
+ // Copybit destination rect
+ hwc_rect_t displayFrame = layer->displayFrame;
+ copybit_rect_t dstRect = {displayFrame.left, displayFrame.top,
+ displayFrame.right,
+ displayFrame.bottom};
+
+ // Copybit dst
+ copybit_image_t dst;
+ dst.w = ALIGN(fbHandle->width,alignment);
+ dst.h = fbHandle->height;
+ dst.format = fbHandle->format;
+ dst.base = (void *)fbHandle->base;
+ dst.handle = (native_handle_t *)renderBuffer->handle;
+
+ copybit_device_t *copybit = hwcModule->copybitEngine;
+
+ int32_t screen_w = displayFrame.right - displayFrame.left;
+ int32_t screen_h = displayFrame.bottom - displayFrame.top;
+ int32_t src_crop_width = sourceCrop.right - sourceCrop.left;
+ int32_t src_crop_height = sourceCrop.bottom -sourceCrop.top;
+
+ float copybitsMaxScale = (float)copybit->get(copybit,COPYBIT_MAGNIFICATION_LIMIT);
+ float copybitsMinScale = (float)copybit->get(copybit,COPYBIT_MINIFICATION_LIMIT);
+
+ if((layer->transform == HWC_TRANSFORM_ROT_90) ||
+ (layer->transform == HWC_TRANSFORM_ROT_270)) {
+ //swap screen width and height
+ int tmp = screen_w;
+ screen_w = screen_h;
+ screen_h = tmp;
+ }
+ private_handle_t *tmpHnd = NULL;
+
+ if(screen_w <=0 || screen_h<=0 ||src_crop_width<=0 || src_crop_height<=0 ) {
+ LOGE("%s: wrong params for display screen_w=%d src_crop_width=%d screen_w=%d \
+ src_crop_width=%d", __FUNCTION__, screen_w,
+ src_crop_width,screen_w,src_crop_width);
+ genlock_unlock_buffer(hnd);
+ return -1;
+ }
+
+ float dsdx = (float)screen_w/src_crop_width;
+ float dtdy = (float)screen_h/src_crop_height;
+
+ float scaleLimitMax = copybitsMaxScale * copybitsMaxScale;
+ float scaleLimitMin = copybitsMinScale * copybitsMinScale;
+ if(dsdx > scaleLimitMax || dtdy > scaleLimitMax || dsdx < 1/scaleLimitMin || dtdy < 1/scaleLimitMin) {
+ LOGE("%s: greater than max supported size dsdx=%f dtdy=%f scaleLimitMax=%f scaleLimitMin=%f", __FUNCTION__,dsdx,dtdy,scaleLimitMax,1/scaleLimitMin);
+ genlock_unlock_buffer(hnd);
+ return -1;
+ }
+ if(dsdx > copybitsMaxScale || dtdy > copybitsMaxScale || dsdx < 1/copybitsMinScale || dtdy < 1/copybitsMinScale){
+ // The requested scale is out of the range the hardware
+ // can support.
+ LOGD("%s:%d::Need to scale twice dsdx=%f, dtdy=%f,copybitsMaxScale=%f,copybitsMinScale=%f,screen_w=%d,screen_h=%d \
+ src_crop_width=%d src_crop_height=%d",__FUNCTION__,__LINE__,
+ dsdx,dtdy,copybitsMaxScale,1/copybitsMinScale,screen_w,screen_h,src_crop_width,src_crop_height);
+
+ //Driver makes width and height as even
+ //that may cause wrong calculation of the ratio
+ //in display and crop.Hence we make
+ //crop width and height as even.
+ src_crop_width = (src_crop_width/2)*2;
+ src_crop_height = (src_crop_height/2)*2;
+
+ int tmp_w = src_crop_width;
+ int tmp_h = src_crop_height;
+
+ if (dsdx > copybitsMaxScale || dtdy > copybitsMaxScale ){
+ tmp_w = src_crop_width*copybitsMaxScale;
+ tmp_h = src_crop_height*copybitsMaxScale;
+ }else if (dsdx < 1/copybitsMinScale ||dtdy < 1/copybitsMinScale ){
+ tmp_w = src_crop_width/copybitsMinScale;
+ tmp_h = src_crop_height/copybitsMinScale;
+ tmp_w = (tmp_w/2)*2;
+ tmp_h = (tmp_h/2)*2;
+ }
+ LOGD("%s:%d::tmp_w = %d,tmp_h = %d",__FUNCTION__,__LINE__,tmp_w,tmp_h);
+
+ int usage = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+ GRALLOC_USAGE_PRIVATE_MM_HEAP;
+
+ if (0 == alloc_buffer(&tmpHnd, tmp_w, tmp_h, fbHandle->format, usage)){
+ copybit_image_t tmp_dst;
+ copybit_rect_t tmp_rect;
+ tmp_dst.w = tmp_w;
+ tmp_dst.h = tmp_h;
+ tmp_dst.format = tmpHnd->format;
+ tmp_dst.handle = tmpHnd;
+ tmp_dst.horiz_padding = src.horiz_padding;
+ tmp_dst.vert_padding = src.vert_padding;
+ tmp_rect.l = 0;
+ tmp_rect.t = 0;
+ tmp_rect.r = tmp_dst.w;
+ tmp_rect.b = tmp_dst.h;
+ //create one clip region
+ hwc_rect tmp_hwc_rect = {0,0,tmp_rect.r,tmp_rect.b};
+ hwc_region_t tmp_hwc_reg = {1,(hwc_rect_t const*)&tmp_hwc_rect};
+ region_iterator tmp_it(tmp_hwc_reg);
+ copybit->set_parameter(copybit,COPYBIT_TRANSFORM,0);
+ copybit->set_parameter(copybit, COPYBIT_PLANE_ALPHA,
+ (layer->blending == HWC_BLENDING_NONE) ? -1 : layer->alpha);
+ err = copybit->stretch(copybit,&tmp_dst, &src, &tmp_rect, &srcRect, &tmp_it);
+ if(err < 0){
+ LOGE("%s:%d::tmp copybit stretch failed",__FUNCTION__,__LINE__);
+ if(tmpHnd)
+ free_buffer(tmpHnd);
+ genlock_unlock_buffer(hnd);
+ return err;
+ }
+ // copy new src and src rect crop
+ src = tmp_dst;
+ srcRect = tmp_rect;
+ }
+ }
+ // Copybit region
+ hwc_region_t region = layer->visibleRegionScreen;
+ region_iterator copybitRegion(region);
+
+ copybit->set_parameter(copybit, COPYBIT_FRAMEBUFFER_WIDTH, renderBuffer->width);
+ copybit->set_parameter(copybit, COPYBIT_FRAMEBUFFER_HEIGHT, renderBuffer->height);
+ copybit->set_parameter(copybit, COPYBIT_TRANSFORM, layer->transform);
+ copybit->set_parameter(copybit, COPYBIT_PLANE_ALPHA,
+ (layer->blending == HWC_BLENDING_NONE) ? -1 : layer->alpha);
+ copybit->set_parameter(copybit, COPYBIT_PREMULTIPLIED_ALPHA,
+ (layer->blending == HWC_BLENDING_PREMULT)? COPYBIT_ENABLE : COPYBIT_DISABLE);
+ copybit->set_parameter(copybit, COPYBIT_DITHER,
+ (dst.format == HAL_PIXEL_FORMAT_RGB_565)? COPYBIT_ENABLE : COPYBIT_DISABLE);
+ err = copybit->stretch(copybit, &dst, &src, &dstRect, &srcRect, ©bitRegion);
+
+ if(tmpHnd)
+ free_buffer(tmpHnd);
+
+ if(err < 0)
+ LOGE("%s: copybit stretch failed",__FUNCTION__);
+
+ // Unlock this buffer since copybit is done with it.
+ err = genlock_unlock_buffer(hnd);
+ if (GENLOCK_FAILURE == err) {
+ LOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
+ }
+
+ return err;
+}
+
/*
 * Queues one video layer's buffer to the overlay (MDP) for scan-out.
 * The buffer is genlock-locked for read before queueing; the lock is only
 * released on the *next* cycle via unlockPreviousOverlayBuffer(), once the
 * overlay is done reading it.
 * NOTE(review): queueBuffer() is treated here as returning true on success
 * (the !ret branch logs a failure), so this function returns non-zero on
 * success — opposite to the 0-on-success convention used elsewhere in this
 * file.  Callers in hwc_set() ignore the return value; confirm against
 * overlayLib before relying on it.
 */
static int drawLayerUsingOverlay(hwc_context_t *ctx, hwc_layer_t *layer)
{
    if (ctx && ctx->mOverlayLibObject) {
        private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(ctx->device.common.module);
        if (!hwcModule) {
            LOGE("drawLayerUsingLayer null module ");
            return -1;
        }
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        overlay::Overlay *ovLibObject = ctx->mOverlayLibObject;
        int ret = 0;

        // Lock this buffer for read.
        if (GENLOCK_NO_ERROR != genlock_lock_buffer(hnd, GENLOCK_READ_LOCK,
                                                    GENLOCK_MAX_TIMEOUT)) {
            LOGE("%s: genlock_lock_buffer(READ) failed", __FUNCTION__);
            return -1;
        }

        ret = ovLibObject->queueBuffer(hnd);

        // Unlock the previously locked buffer, since the overlay has completed reading the buffer
        unlockPreviousOverlayBuffer(ctx);

        if (!ret) {
            LOGE("drawLayerUsingOverlay queueBuffer failed");
            // Unlock the buffer handle
            genlock_unlock_buffer(hnd);
            ctx->previousOverlayHandle = NULL;
        } else {
            // Store the current buffer handle as the one that is to be unlocked after
            // the next overlay play call.
            ctx->previousOverlayHandle = hnd;
            hnd->flags |= private_handle_t::PRIV_FLAGS_HWC_LOCK;
        }

        return ret;
    }
    return -1;
}
+
+#ifdef COMPOSITION_BYPASS
/*
 * Queues one layer directly to its assigned MDP pipe (composition bypass),
 * skipping GPU composition.  The pipe index comes from the layer's flags via
 * getLayerbypassIndex(); layer_index (the list position) is currently unused.
 * The per-pipe bypassBufferLockState tracks whether the genlock READ lock is
 * held, so the caller-side unlock bookkeeping stays consistent on failure.
 * Returns 0 on success, -1 on failure.
 */
static int drawLayerUsingBypass(hwc_context_t *ctx, hwc_layer_t *layer, int layer_index) {

    int index = getLayerbypassIndex(layer);

    if(index < 0) {
        LOGE("%s: Invalid bypass index (%d)", __FUNCTION__, index);
        return -1;
    }

    if (ctx && ctx->mOvUI[index]) {
        overlay::OverlayUI *ovUI = ctx->mOvUI[index];
        int ret = 0;

        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            LOGE("%s handle null", __FUNCTION__);
            return -1;
        }

        ctx->bypassBufferLockState[index] = BYPASS_BUFFER_UNLOCKED;

        // Lock the buffer for read before the MDP scans out of it.
        if (GENLOCK_FAILURE == genlock_lock_buffer(hnd, GENLOCK_READ_LOCK,
                                                   GENLOCK_MAX_TIMEOUT)) {
            LOGE("%s: genlock_lock_buffer(READ) failed", __FUNCTION__);
            return -1;
        }

        ctx->bypassBufferLockState[index] = BYPASS_BUFFER_LOCKED;

        LOGE_IF(BYPASS_DEBUG,"%s: Bypassing layer: %p using pipe: %d",__FUNCTION__, layer, index );

        ret = ovUI->queueBuffer(hnd);

        if (ret) {
            // Queueing failed: release the lock taken above.
            if (GENLOCK_FAILURE == genlock_unlock_buffer(hnd)) {
                LOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
            }
            ctx->bypassBufferLockState[index] = BYPASS_BUFFER_UNLOCKED;
            return -1;
        }
    }
    return 0;
}
+#endif
+
/*
 * HWC set entry point.  Draws each layer according to the decision recorded
 * by hwc_prepare() (bypass pipe / overlay / copybit), then swaps the EGL
 * surface for the GPU-composed layers unless composition was skipped.
 * A NULL list means the device is suspended: MDP pipes are flagged for close.
 * Returns 0 on success or HWC_EGL_ERROR if eglSwapBuffers fails.
 */
static int hwc_set(hwc_composer_device_t *dev,
                   hwc_display_t dpy,
                   hwc_surface_t sur,
                   hwc_layer_list_t* list)
{
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    if(!ctx) {
        LOGE("hwc_set invalid context");
        ExtDispOnly::close();
        return -1;
    }

    private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
                                                        dev->common.module);
    if (!hwcModule) {
        LOGE("hwc_set invalid module");
#ifdef COMPOSITION_BYPASS
        unlockPreviousBypassBuffers(ctx);
        unsetBypassBufferLockState(ctx);
#endif
        ExtDispOnly::close();
        unlockPreviousOverlayBuffer(ctx);
        return -1;
    }

    int ret = 0;
    if (list) {
        bool bDumpLayers = needToDumpLayers(); // Check need for debugging dumps
        for (size_t i=0; i<list->numHwLayers; i++) {
            if (bDumpLayers)
                dumpLayer(hwcModule->compositionType, list->flags, i, list->hwLayers);
            if (list->hwLayers[i].flags & HWC_SKIP_LAYER) {
                continue;
            } else if(list->hwLayers[i].flags & HWC_USE_EXT_ONLY) {
                continue;
                //Draw after layers for primary are drawn
#ifdef COMPOSITION_BYPASS
            } else if (list->hwLayers[i].flags & HWC_COMP_BYPASS) {
                drawLayerUsingBypass(ctx, &(list->hwLayers[i]), i);
#endif
            } else if (list->hwLayers[i].compositionType == HWC_USE_OVERLAY) {
                drawLayerUsingOverlay(ctx, &(list->hwLayers[i]));
            } else if (list->flags & HWC_SKIP_COMPOSITION) {
// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
                //break;
                continue;
// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
            } else if (list->hwLayers[i].compositionType == HWC_USE_COPYBIT) {
                drawLayerUsingCopybit(dev, &(list->hwLayers[i]), (EGLDisplay)dpy, (EGLSurface)sur);
            }
        }
    } else {
        //Device in suspended state. Close all the MDP pipes
#ifdef COMPOSITION_BYPASS
        ctx->nPipesUsed = 0;
#endif
        ctx->hwcOverlayStatus = HWC_OVERLAY_PREPARE_TO_CLOSE;
    }

    bool canSkipComposition = list && list->flags & HWC_SKIP_COMPOSITION;
    //Draw External-only layers
    if(ExtDispOnly::draw(ctx, list) != overlay::NO_ERROR) {
        ExtDispOnly::close();
    }

#ifdef COMPOSITION_BYPASS
    unlockPreviousBypassBuffers(ctx);
    storeLockedBypassHandle(list, ctx);
    // We have stored the handles, unset the current lock states in the context.
    unsetBypassBufferLockState(ctx);
    closeExtraPipes(ctx);
#if BYPASS_DEBUG
    if(canSkipComposition)
        LOGE("%s: skipping eglSwapBuffer call", __FUNCTION__);
#endif
#endif
    // Do not call eglSwapBuffers if the skip composition flag is set on the list.
    if (dpy && sur && !canSkipComposition) {
        EGLBoolean sucess = eglSwapBuffers((EGLDisplay)dpy, (EGLSurface)sur);
        if (!sucess) {
            ret = HWC_EGL_ERROR;
        } else {
            CALC_FPS();
        }
    }
#if defined HDMI_DUAL_DISPLAY
    if(ctx->pendingHDMI) {
        // Apply the display connect that was deferred until after the swap.
        handleHDMIStateChange(dev, ctx->mHDMIEnabled);
        ctx->pendingHDMI = false;
    }
#endif

    hwc_closeOverlayChannels(ctx);
    int yuvBufferCount = getYUVBufferCount(list);
    setHWCOverlayStatus(ctx, yuvBufferCount);

    return ret;
}
+
+static int hwc_device_close(struct hw_device_t *dev)
+{
+ if(!dev) {
+ LOGE("hwc_device_close null device pointer");
+ return -1;
+ }
+
+ struct hwc_context_t* ctx = (struct hwc_context_t*)dev;
+
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>(
+ ctx->device.common.module);
+ // Close the overlay and copybit modules
+ if(hwcModule->copybitEngine) {
+ copybit_close(hwcModule->copybitEngine);
+ hwcModule->copybitEngine = NULL;
+ }
+ if(hwcModule->fbDevice) {
+ framebuffer_close(hwcModule->fbDevice);
+ hwcModule->fbDevice = NULL;
+ }
+
+ unlockPreviousOverlayBuffer(ctx);
+
+ if (ctx) {
+ delete ctx->mOverlayLibObject;
+ ctx->mOverlayLibObject = NULL;
+#ifdef COMPOSITION_BYPASS
+ for(int i = 0; i < MAX_BYPASS_LAYERS; i++) {
+ delete ctx->mOvUI[i];
+ }
+ unlockPreviousBypassBuffers(ctx);
+ unsetBypassBufferLockState(ctx);
+#endif
+ ExtDispOnly::close();
+ ExtDispOnly::destroy();
+
+ free(ctx);
+ }
+ return 0;
+}
+
+/*****************************************************************************/
+static int hwc_module_initialize(struct private_hwc_module_t* hwcModule)
+{
+
+ // Open the overlay and copybit modules
+ hw_module_t const *module;
+ if (hw_get_module(COPYBIT_HARDWARE_MODULE_ID, &module) == 0) {
+ copybit_open(module, &(hwcModule->copybitEngine));
+ }
+ if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module) == 0) {
+ framebuffer_open(module, &(hwcModule->fbDevice));
+ }
+
+ // get the current composition type
+ char property[PROPERTY_VALUE_MAX];
+ if (property_get("debug.sf.hw", property, NULL) > 0) {
+ if(atoi(property) == 0) {
+ //debug.sf.hw = 0
+ hwcModule->compositionType = COMPOSITION_TYPE_CPU;
+ } else { //debug.sf.hw = 1
+ // Get the composition type
+ property_get("debug.composition.type", property, NULL);
+ if (property == NULL) {
+ hwcModule->compositionType = COMPOSITION_TYPE_GPU;
+ } else if ((strncmp(property, "mdp", 3)) == 0) {
+ hwcModule->compositionType = COMPOSITION_TYPE_MDP;
+ } else if ((strncmp(property, "c2d", 3)) == 0) {
+ hwcModule->compositionType = COMPOSITION_TYPE_C2D;
+ } else if ((strncmp(property, "dyn", 3)) == 0) {
+ hwcModule->compositionType = COMPOSITION_TYPE_DYN;
+ } else {
+ hwcModule->compositionType = COMPOSITION_TYPE_GPU;
+ }
+
+ if(!hwcModule->copybitEngine)
+ hwcModule->compositionType = COMPOSITION_TYPE_GPU;
+ }
+ } else { //debug.sf.hw is not set. Use cpu composition
+ hwcModule->compositionType = COMPOSITION_TYPE_CPU;
+ }
+
+ //Check if composition bypass is enabled
+ if(property_get("ro.sf.compbypass.enable", property, NULL) > 0) {
+ if(atoi(property) == 1) {
+ hwcModule->isBypassEnabled = true;
+ }
+ }
+
+ CALC_INIT();
+
+ return 0;
+}
+
+
+static int hwc_device_open(const struct hw_module_t* module, const char* name,
+ struct hw_device_t** device)
+{
+ int status = -EINVAL;
+
+ if (!strcmp(name, HWC_HARDWARE_COMPOSER)) {
+ private_hwc_module_t* hwcModule = reinterpret_cast<private_hwc_module_t*>
+ (const_cast<hw_module_t*>(module));
+ hwc_module_initialize(hwcModule);
+ struct hwc_context_t *dev;
+ dev = (hwc_context_t*)malloc(sizeof(*dev));
+
+ /* initialize our state here */
+ memset(dev, 0, sizeof(*dev));
+#ifdef USE_OVERLAY
+ dev->mOverlayLibObject = new overlay::Overlay();
+ if(overlay::initOverlay() == -1)
+ LOGE("overlay::initOverlay() ERROR!!");
+#else
+ dev->mOverlayLibObject = NULL;
+#endif
+#ifdef COMPOSITION_BYPASS
+ for(int i = 0; i < MAX_BYPASS_LAYERS; i++) {
+ dev->mOvUI[i] = new overlay::OverlayUI();
+ dev->previousBypassHandle[i] = NULL;
+ }
+ unsetBypassBufferLockState(dev);
+ dev->bypassState = BYPASS_OFF;
+#endif
+ ExtDispOnly::init();
+#if defined HDMI_DUAL_DISPLAY
+ dev->mHDMIEnabled = EXT_DISPLAY_OFF;
+ dev->pendingHDMI = false;
+#endif
+ dev->previousOverlayHandle = NULL;
+ dev->hwcOverlayStatus = HWC_OVERLAY_CLOSED;
+ dev->previousLayerCount = -1;
+ /* initialize the procs */
+ dev->device.common.tag = HARDWARE_DEVICE_TAG;
+ dev->device.common.version = 0;
+ dev->device.common.module = const_cast<hw_module_t*>(module);
+ dev->device.common.close = hwc_device_close;
+
+ dev->device.prepare = hwc_prepare;
+ dev->device.set = hwc_set;
+ dev->device.enableHDMIOutput = hwc_enableHDMIOutput;
+ *device = &dev->device.common;
+
+ status = 0;
+ }
+ return status;
+}
diff --git a/liboverlay/Android.mk b/liboverlay/Android.mk
new file mode 100755
index 0000000..79e4256
--- /dev/null
+++ b/liboverlay/Android.mk
@@ -0,0 +1,43 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+#
+
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils libmemalloc
LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
# Fix: no backslash after the last source file.  The stray line continuation
# previously spliced the LOCAL_CFLAGS line into this assignment, so
# LOCAL_CFLAGS (and -DLOG_TAG) was never actually set.
LOCAL_SRC_FILES := \
        overlayLib.cpp \
        overlayLibUI.cpp
LOCAL_CFLAGS:= -DLOG_TAG=\"OverlayLib\"

ifeq ($(TARGET_USE_HDMI_AS_PRIMARY),true)
LOCAL_CFLAGS += -DHDMI_AS_PRIMARY
endif
ifeq ($(TARGET_USES_POST_PROCESSING),true)
LOCAL_CFLAGS += -DUSES_POST_PROCESSING
LOCAL_SHARED_LIBRARIES += libmm-abl
LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/pp/inc
LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/lib/
endif
LOCAL_MODULE := liboverlay

#LGE_CHANGE, for userdebug mode
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
diff --git a/liboverlay/overlayLib.cpp b/liboverlay/overlayLib.cpp
new file mode 100755
index 0000000..fe5c3d3
--- /dev/null
+++ b/liboverlay/overlayLib.cpp
@@ -0,0 +1,2352 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "overlayLib.h"
+#include "gralloc_priv.h"
+
+#define INTERLACE_MASK 0x80
+#define DEBUG_OVERLAY true
+/* Round x up to the next multiple of align (align must be a power of two). */
+static inline size_t ALIGN(size_t x, size_t align) {
+    const size_t mask = align - 1;
+    return (x + mask) & ~mask;
+}
+
+using namespace overlay;
+using android::sp;
+using gralloc::IMemAlloc;
+using gralloc::IonController;
+using gralloc::alloc_data;
+
+// Build-time selection of the primary display: with HDMI_AS_PRIMARY
+// defined, HDMI (not the LCD panel) is treated as the primary display.
+#ifdef HDMI_AS_PRIMARY
+bool Overlay::sHDMIAsPrimary = true;
+#else
+bool Overlay::sHDMIAsPrimary = false;
+#endif
+
+// Exchange width and height in place (used when a 90/270 degree rotation
+// transposes the buffer's dimensions).
+template <class Type>
+void swapWidthHeight(Type& width, Type& height) {
+    Type savedHeight = height;
+    height = width;
+    width = savedHeight;
+}
+
+// Translate a HAL_PIXEL_FORMAT_* value into the matching kernel MDP_*
+// format constant; returns -1 (with a log) for unsupported formats.
+// NOTE(review): YCbCr_420_SP -> MDP_Y_CRCB_H2V2 and YCrCb_420_SP ->
+// MDP_Y_CBCR_H2V2 look chroma-swapped; presumably this matches the
+// vendor's plane-order convention -- confirm against msm_mdp.h.
+int overlay::get_mdp_format(int format) {
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888 :
+ return MDP_RGBA_8888;
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ return MDP_BGRA_8888;
+ case HAL_PIXEL_FORMAT_RGB_565:
+ return MDP_RGB_565;
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ return MDP_RGBX_8888;
+ case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+ return MDP_Y_CBCR_H2V1;
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+ return MDP_Y_CRCB_H2V2;
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ return MDP_Y_CBCR_H2V2;
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
+ return MDP_Y_CRCB_H2V2_TILE;
+ case HAL_PIXEL_FORMAT_YV12:
+ return MDP_Y_CR_CB_GH2V2;
+ default:
+ LOGE("%s: unknown color format [0x%x]", __FUNCTION__, format);
+ return -1;
+ }
+ return -1;
+}
+
+// Translate a HAL_TRANSFORM_* rotation/flip value into the matching
+// MDP rotator flag combination; returns -1 (with a log) for values
+// that have no MDP equivalent.
+int overlay::get_mdp_orientation(int value) {
+    switch(value) {
+        case 0: return 0;
+        case HAL_TRANSFORM_FLIP_V: return MDP_FLIP_UD;
+        case HAL_TRANSFORM_FLIP_H: return MDP_FLIP_LR;
+        case HAL_TRANSFORM_ROT_90: return MDP_ROT_90;
+        case HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_V:
+            return MDP_ROT_90|MDP_FLIP_LR;
+        case HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_H:
+            return MDP_ROT_90|MDP_FLIP_UD;
+        case HAL_TRANSFORM_ROT_180: return MDP_ROT_180;
+        case HAL_TRANSFORM_ROT_270: return MDP_ROT_270;
+        default:
+            // BUGFIX: the original log string was missing its closing ')'.
+            LOGE("%s: invalid rotation value (value = 0x%x)",
+                 __FUNCTION__, value);
+            return -1;
+    }
+    return -1;
+}
+
+// Rotator input -> output format mapping: the rotator linearizes tiled
+// input and normalizes the planar YUV variants; everything else passes
+// through unchanged.
+int overlay::get_rot_output_format(int format) {
+    if (format == MDP_Y_CRCB_H2V2_TILE)
+        return MDP_Y_CRCB_H2V2;
+    if (format == MDP_Y_CB_CR_H2V2)
+        return MDP_Y_CBCR_H2V2;
+    if (format == MDP_Y_CR_CB_GH2V2)
+        return MDP_Y_CRCB_H2V2;
+    return format;
+}
+
+// Force a crop origin (x or y) and its extent (w or h) to even values.
+void overlay::normalize_crop(uint32_t& xy, uint32_t& wh) {
+    if ((xy & 0x0001) == 0) {
+        // Origin already even: just even-out the extent.
+        EVEN_OUT(wh);
+        return;
+    }
+    // Origin is odd: bump it up to the next even value, then shrink the
+    // extent to compensate for the shifted start.
+    xy += 1;
+    if (wh & 0x0001) {
+        // Odd extent: dropping 1 both compensates and makes it even.
+        EVEN_OUT(wh);
+    } else {
+        // Even extent: drop 2 so it stays even after compensation.
+        wh -= 2;
+    }
+}
+
+#define LOG_TAG "OverlayLIB"
+// Single choke point for error logging so every failure path is tagged
+// consistently.
+static void reportError(const char* message) {
+ LOGE( "%s", message);
+}
+
+// Log the full contents of an mdp_overlay request (gated on the
+// compile-time DEBUG_OVERLAY switch).
+void overlay::dump(mdp_overlay& mOVInfo) {
+ if (!DEBUG_OVERLAY)
+ return;
+ LOGE("mOVInfo:");
+ LOGE("src: width %d height %d format %s user_data[0] %d", mOVInfo.src.width,
+ mOVInfo.src.height, getFormatString(mOVInfo.src.format),
+ mOVInfo.user_data[0]);
+ LOGE("src_rect: x %d y %d w %d h %d", mOVInfo.src_rect.x,
+ mOVInfo.src_rect.y, mOVInfo.src_rect.w, mOVInfo.src_rect.h);
+ LOGE("dst_rect: x %d y %d w %d h %d", mOVInfo.dst_rect.x,
+ mOVInfo.dst_rect.y, mOVInfo.dst_rect.w, mOVInfo.dst_rect.h);
+ LOGE("z_order %d is_fg %d alpha %d transp_mask %d flags %x id %d",
+ mOVInfo.z_order, mOVInfo.is_fg, mOVInfo.alpha, mOVInfo.transp_mask,
+ mOVInfo.flags, mOVInfo.id);
+}
+
+// Log the full contents of a rotator configuration (gated on the
+// compile-time DEBUG_OVERLAY switch).
+void overlay::dump(msm_rotator_img_info& mRotInfo) {
+    if (!DEBUG_OVERLAY)
+        return;
+    LOGE("mRotInfo:");
+    LOGE("session_id %d dst_x %d dst_y %d rotations %d enable %d",
+         mRotInfo.session_id, mRotInfo.dst_x, mRotInfo.dst_y,
+         mRotInfo.rotations, mRotInfo.enable);
+    LOGE("src: width %d height %d format %s", mRotInfo.src.width,
+         mRotInfo.src.height, getFormatString(mRotInfo.src.format));
+    // BUGFIX: the dst line previously printed src.format (copy-paste),
+    // so the logged destination format was wrong whenever they differed.
+    LOGE("dst: width %d height %d format %s", mRotInfo.dst.width,
+         mRotInfo.dst.height, getFormatString(mRotInfo.dst.format));
+    LOGE("src_rect: x %d y %d w %d h %d", mRotInfo.src_rect.x,
+         mRotInfo.src_rect.y, mRotInfo.src_rect.w, mRotInfo.src_rect.h);
+}
+
+// Map an MDP_* format enum value to a printable name for debug logs.
+// Returns a placeholder string for out-of-range values.
+const char* overlay::getFormatString(int format){
+    static const char* formats[] = {
+        "MDP_RGB_565",
+        "MDP_XRGB_8888",
+        "MDP_Y_CBCR_H2V2",
+        "MDP_ARGB_8888",
+        "MDP_RGB_888",
+        "MDP_Y_CRCB_H2V2",
+        "MDP_YCRYCB_H2V1",
+        "MDP_Y_CRCB_H2V1",
+        "MDP_Y_CBCR_H2V1",
+        "MDP_RGBA_8888",
+        "MDP_BGRA_8888",
+        "MDP_RGBX_8888",
+        "MDP_Y_CRCB_H2V2_TILE",
+        "MDP_Y_CBCR_H2V2_TILE",
+        "MDP_Y_CR_CB_H2V2",
+        "MDP_Y_CR_CB_GH2V2",
+        "MDP_Y_CB_CR_H2V2",
+        "MDP_Y_CRCB_H1V1",
+        "MDP_Y_CBCR_H1V1",
+        "MDP_IMGTYPE_LIMIT",
+        "MDP_BGR_565",
+        "MDP_FB_FORMAT",
+        "MDP_IMGTYPE_LIMIT2"
+    };
+    // BUGFIX: the original indexed the table unchecked, reading out of
+    // bounds for any format value outside [0, table size).
+    static const int numFormats = sizeof(formats) / sizeof(formats[0]);
+    if (format < 0 || format >= numFormats)
+        return "Unknown format";
+    return formats[format];
+}
+// Singleton instance storage (presumably created on first getInstance()
+// call -- definitions live in the header; verify against overlayLib.h).
+ZOrderManager* ZOrderManager::sInstance = 0;
+FrameBufferInfo* FrameBufferInfo::sFBInfoInstance = 0;
+
+// Allocate the lowest free z-order (pipe) for the given framebuffer.
+// Returns NO_PIPE when no pipe is available. Thread-safe via mObjMutex.
+int ZOrderManager::getZ(int fbnum){
+    int zorder = NO_PIPE;
+    Mutex::Autolock objLock(mObjMutex);
+    if(mPipesInuse == mMaxPipes) {
+        LOGE("No free pipes available.. inUse = %d ", mPipesInuse);
+        return NO_PIPE;
+    }
+    switch(fbnum) {
+        case FRAMEBUFFER_0:
+            // Primary: pipes tracked in mFB0Pipes, NUM_CHANNELS entries.
+            for (int i = 0;i < NUM_CHANNELS; i++) {
+                if(mFB0Pipes[i] == false) {
+                    mFB0Pipes[i]= true;
+                    zorder = i;
+                    break;
+                }
+            }
+            break;
+        case FRAMEBUFFER_1:
+        case FRAMEBUFFER_2:
+            // External displays share the mFB1Pipes pool.
+            for (int i = 0;i < mMaxPipes; i++) {
+                if(mFB1Pipes[i] == false) {
+                    mFB1Pipes[i]= true;
+                    zorder = i;
+                    break;
+                }
+            }
+            break;
+        default:
+            LOGE("getZ: Invalid framebuffer..");
+            break;
+    }
+    // BUGFIX: only count a pipe as in-use when one was actually
+    // allocated; the original incremented unconditionally, leaking a
+    // slot whenever the per-framebuffer pool was exhausted or fbnum
+    // was invalid.
+    if (zorder != NO_PIPE)
+        mPipesInuse++;
+    LOGE("getZ: return zorder = %d for fbdev = %d, pipesinUse = %d",
+            zorder, fbnum, mPipesInuse);
+    return zorder;
+}
+
+// Release the pipe previously handed out by getZ() for this framebuffer.
+// NOTE(review): LOG_ASSERT(!mFBxPipes[zorder], ...) fires when the pipe
+// IS marked in use, which looks inverted for a "does not exist" check --
+// confirm the intended LOG_ASSERT polarity before changing.
+void ZOrderManager::decZ(int fbnum, int zorder){
+ Mutex::Autolock objLock(mObjMutex);
+ switch(fbnum) {
+ case FRAMEBUFFER_0:
+ LOG_ASSERT(!mFB0Pipes[zorder],"channel with ZOrder does not exist");
+ LOGE("decZ: freeing the pipe with zorder = %d for fbdev = %d", zorder, fbnum);
+ mFB0Pipes[zorder] = false;
+ break;
+ case FRAMEBUFFER_1:
+ case FRAMEBUFFER_2:
+ LOG_ASSERT(!mFB1Pipes[zorder],"channel with ZOrder does not exist");
+ LOGE("decZ: freeing the pipe with zorder = %d for fbdev = %d", zorder, fbnum);
+ mFB1Pipes[zorder] = false;
+ break;
+ default:
+ LOGE("decZ: Invalid framebuffer ");
+ break;
+ }
+ if(mPipesInuse > 0)
+ mPipesInuse--;
+ LOGE("decZ: Pipes in use = %d", mPipesInuse);
+}
+
+// Query the hw.hdmiON system property to see whether an HDMI sink is
+// currently attached (non-zero property value means connected).
+bool overlay::isHDMIConnected () {
+    char value[PROPERTY_VALUE_MAX];
+    property_get("hw.hdmiON", value, "0");
+    return atoi(value) != 0;
+}
+
+// Read the first byte of the EDID 3D-info sysfs node; any character
+// other than '0' means the attached TV advertises 3D support. If the
+// node cannot be opened the TV is treated as 2D.
+bool overlay::is3DTV() {
+    char flag = '0';
+    FILE *edid = fopen(EDID_3D_INFO_FILE, "r");
+    if (edid != NULL) {
+        fread(&flag, 1, 1, edid);
+        fclose(edid);
+    }
+    LOGI("3DTV EDID flag: %c", flag);
+    return flag != '0';
+}
+
+// Check whether the primary panel (fb0) reports itself as a 3D panel
+// via its fixed screen info type. Returns false on any open/ioctl error.
+bool overlay::isPanel3D() {
+ int fd = open("/dev/graphics/fb0", O_RDWR, 0);
+ if (fd < 0) {
+ reportError("Can't open framebuffer 0");
+ return false;
+ }
+ fb_fix_screeninfo finfo;
+ if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1) {
+ reportError("FBIOGET_FSCREENINFO on fb0 failed");
+ close(fd);
+ fd = -1;
+ return false;
+ }
+ close(fd);
+ return (FB_TYPE_3D_PANEL == finfo.type) ? true : false;
+}
+
+// Decide whether to drive 3D output on the panel: requires a 3D-capable
+// panel plus the persist.user.panel3D property opt-in. When HDMI is the
+// primary display, the TV's EDID 3D capability decides instead.
+bool overlay::usePanel3D() {
+    if (Overlay::sHDMIAsPrimary)
+        return is3DTV();
+    if (!isPanel3D())
+        return false;
+    char value[PROPERTY_VALUE_MAX];
+    property_get("persist.user.panel3D", value, "0");
+    return atoi(value) != 0;
+}
+
+// Write the 3D output format word to the HDMI driver's sysfs node so the
+// sink is told how the frame is packed. Returns false when the node is
+// unavailable.
+bool overlay::send3DInfoPacket (unsigned int format3D) {
+    FILE *fp = fopen(FORMAT_3D_FILE, "wb");
+    if (fp) {
+        // BUGFIX: format3D is unsigned; the original printed it with %d.
+        fprintf(fp, "%u", format3D);
+        fclose(fp);
+        fp = NULL;
+        return true;
+    }
+    LOGE("%s:no sysfs entry for setting 3d mode!", __FUNCTION__);
+    return false;
+}
+
+// Program the 3D panel's parallax-barrier orientation (0 disables it).
+// Returns false when the sysfs node is unavailable.
+bool overlay::enableBarrier (unsigned int orientation) {
+    FILE *fp = fopen(BARRIER_FILE, "wb");
+    if (fp) {
+        // BUGFIX: orientation is unsigned; the original printed it with %d.
+        fprintf(fp, "%u", orientation);
+        fclose(fp);
+        fp = NULL;
+        return true;
+    }
+    LOGE("%s:no sysfs entry for enabling barriers on 3D panel!", __FUNCTION__);
+    return false;
+}
+
+// Strip the interlace flag off a HAL pixel format, leaving the plain
+// color format. YV12 carries no extra bits and is returned unchanged.
+int overlay::getColorFormat(int format)
+{
+    if (format == HAL_PIXEL_FORMAT_YV12)
+        return format;
+    if (format & INTERLACE_MASK)
+        return format ^ HAL_PIXEL_FORMAT_INTERLACE;
+    return COLOR_FORMAT(format);
+}
+
+// Interlaced content is flagged via INTERLACE_MASK in the HAL format;
+// YV12 overlaps that bit range and is never interlaced.
+bool overlay::isInterlacedContent(int format)
+{
+    return (format != HAL_PIXEL_FORMAT_YV12) &&
+           ((format & INTERLACE_MASK) != 0);
+}
+
+// Map (3D content?, HDMI attached?) onto one of the OV_* display states.
+// When poll is set, the HDMI connection state is re-read from the
+// system property instead of trusting the caller's isHDMI flag.
+unsigned int overlay::getOverlayConfig (unsigned int format3D, bool poll,
+                                        bool isHDMI) {
+    if (poll)
+        isHDMI = isHDMIConnected();
+    unsigned int state = 0;
+    if (isHDMI) {
+        LOGD("%s: HDMI connected... checking the TV type", __FUNCTION__);
+        if (!format3D)
+            state = OV_2D_VIDEO_ON_TV;
+        else
+            state = is3DTV() ? OV_3D_VIDEO_3D_TV : OV_3D_VIDEO_2D_TV;
+    } else {
+        LOGD("%s: HDMI not connected...", __FUNCTION__);
+        if (!format3D)
+            state = OV_2D_VIDEO_ON_PANEL;
+        else
+            state = usePanel3D() ? OV_3D_VIDEO_3D_PANEL
+                                 : OV_3D_VIDEO_2D_PANEL;
+    }
+    return state;
+}
+
+/* clears any VG pipes allocated to the fb devices */
+// Walks every framebuffer, queries its mixer configuration, and unsets
+// every overlay pipe except the RGB base layer (z_order == -1), leaving
+// the MDP in a clean state at startup. Returns 0 on success, -1 on any
+// open/ioctl failure.
+int overlay::initOverlay() {
+ msmfb_mixer_info_req req;
+ mdp_mixer_info *minfo = NULL;
+ char name[64];
+ int fd = -1;
+ for(int i = 0; i < NUM_FB_DEVICES; i++) {
+ snprintf(name, 64, FB_DEVICE_TEMPLATE, i);
+ LOGD("initoverlay:: opening the device:: %s", name);
+ fd = open(name, O_RDWR, 0);
+ if(fd < 0) {
+ LOGE("cannot open framebuffer(%d)", i);
+ return -1;
+ }
+ /* Get the mixer configuration */
+ req.mixer_num = i;
+ if (ioctl(fd, MSMFB_MIXER_INFO, &req) == -1) {
+ LOGE("ERROR: MSMFB_MIXER_INFO ioctl failed");
+ close(fd);
+ return -1;
+ }
+ minfo = req.info;
+ for (int j = 0; j < req.cnt; j++) {
+ LOGD("ndx=%d num=%d z_order=%d", minfo->pndx, minfo->pnum,
+ minfo->z_order);
+ // except the RGB base layer with z_order of -1, clear any
+ // other pipes connected to mixer.
+ if((minfo->z_order) != -1) {
+ int index = minfo->pndx;
+ LOGD("Unset overlay with index: %d at mixer %d", index, i);
+ if(ioctl(fd, MSMFB_OVERLAY_UNSET, &index) == -1) {
+ LOGE("ERROR: MSMFB_OVERLAY_UNSET failed");
+ close(fd);
+ return -1;
+ }
+ }
+ minfo++;
+ }
+ close(fd);
+ fd = -1;
+ }
+ return 0;
+}
+
+// Start with no channel open and an invalid (-1) display state; the real
+// configuration happens in setSource()/startChannel().
+Overlay::Overlay() : mChannelUP(false), mExternalDisplay(false),
+ mS3DFormat(0), mCroppedSrcWidth(0),
+ mCroppedSrcHeight(0), mState(-1) {
+ mOVBufferInfo.width = mOVBufferInfo.height = 0;
+ mOVBufferInfo.format = mOVBufferInfo.size = 0;
+}
+
+// Tear down all control/data channels on destruction (RAII cleanup).
+Overlay::~Overlay() {
+ closeChannel();
+}
+
+// Framebuffer width of the display backing the given channel.
+int Overlay::getFBWidth(int channel) const {
+ return objOvCtrlChannel[channel].getFBWidth();
+}
+
+// Framebuffer height of the display backing the given channel.
+int Overlay::getFBHeight(int channel) const {
+ return objOvCtrlChannel[channel].getFBHeight();
+}
+
+// Bring up one control+data channel pair on framebuffer fbnum for the
+// given source buffer description. For 3D content the channel index is
+// reused as the z-order so the two views stack correctly. Returns false
+// if either the control or data channel fails to start.
+bool Overlay::startChannel(const overlay_buffer_info& info, int fbnum,
+ bool norot, bool uichannel,
+ unsigned int format3D, int channel,
+ int flags, int num_buffers) {
+ int zorder = 0;
+ mCroppedSrcWidth = info.width;
+ mCroppedSrcHeight = info.height;
+ if (format3D)
+ zorder = channel;
+ if (mState == -1)
+ mState = OV_UI_MIRROR_TV;
+
+ mChannelUP = objOvCtrlChannel[channel].startControlChannel(info, fbnum,
+ norot, uichannel,
+ format3D, zorder, flags);
+ if (!mChannelUP) {
+ LOGE("startChannel for fb%d failed", fbnum);
+ return mChannelUP;
+ }
+ bool secure = flags & SECURE_OVERLAY_SESSION;
+ objOvCtrlChannel[channel].setSize(info.size);
+ return objOvDataChannel[channel].startDataChannel(objOvCtrlChannel[channel], fbnum,
+ norot, secure, uichannel, num_buffers);
+}
+
+// Close every channel and reset all cached state. For 3D sessions the
+// 3D info packet / panel barrier is cleared first so the display leaves
+// 3D mode cleanly. Safe to call when no channel is up (no-op).
+bool Overlay::closeChannel() {
+
+ if (!mChannelUP)
+ return true;
+
+ if(mS3DFormat) {
+ if (mExternalDisplay)
+ overlay::send3DInfoPacket(0);
+ else if (mState == OV_3D_VIDEO_3D_PANEL) {
+ if (sHDMIAsPrimary)
+ overlay::send3DInfoPacket(0);
+ else
+ enableBarrier(0);
+ }
+ }
+ for (int i = 0; i < NUM_CHANNELS; i++) {
+ objOvCtrlChannel[i].closeControlChannel();
+ objOvDataChannel[i].closeDataChannel();
+ }
+ mChannelUP = false;
+ mS3DFormat = 0;
+ mOVBufferInfo.width = 0;
+ mOVBufferInfo.height = 0;
+ mOVBufferInfo.format = 0;
+ mOVBufferInfo.size = 0;
+ mState = -1;
+ return true;
+}
+
+// Close only the external-display (VG1) channel pair, leaving the
+// primary channel untouched -- used when HDMI disconnects mid-session.
+void Overlay::closeExternalChannel() {
+ if (objOvCtrlChannel[VG1_PIPE].isChannelUP()) {
+ objOvCtrlChannel[VG1_PIPE].closeControlChannel();
+ objOvDataChannel[VG1_PIPE].closeDataChannel();
+ }
+}
+
+// Fetch the current destination rectangle of the given channel.
+bool Overlay::getPosition(int& x, int& y, uint32_t& w, uint32_t& h, int channel) {
+ return objOvCtrlChannel[channel].getPosition(x, y, w, h);
+}
+
+// Fetch the current rotation/flip setting of the given channel.
+bool Overlay::getOrientation(int& orientation, int channel) const {
+ return objOvCtrlChannel[channel].getOrientation(orientation);
+}
+
+// Remember the device (panel) orientation; used later to compute the
+// mirrored position on HDMI. Always succeeds.
+bool Overlay::setDeviceOrientation(int orientation) {
+ // Use this to calculate the position on HDMI
+ mDevOrientation = orientation;
+ return true;
+}
+
+// Set the destination rectangle for the current display state. For
+// TV-mirroring states the HDMI rectangle is derived from the primary's
+// position (true mirroring when supported, otherwise plain aspect-ratio
+// fit); for 3D states each channel gets its half of the display. The
+// work is skipped entirely when the requested rect equals the current one.
+bool Overlay::setPosition(int x, int y, uint32_t w, uint32_t h) {
+ bool ret = false;
+ overlay_rect secDest;
+ overlay_rect priDest;
+ int currX, currY;
+ uint32_t currW, currH;
+ // Set even destination co-ordinates
+ EVEN_OUT(x); EVEN_OUT(y);
+ EVEN_OUT(w); EVEN_OUT(h);
+ objOvCtrlChannel[VG0_PIPE].getPosition(currX, currY, currW, currH);
+ priDest.x = x, priDest.y = y;
+ priDest.w = w, priDest.h = h;
+ if(x != currX || y != currY || w != currW || h != currH) {
+ switch (mState) {
+ case OV_UI_MIRROR_TV:
+ case OV_2D_VIDEO_ON_PANEL:
+ case OV_3D_VIDEO_2D_PANEL:
+ return setChannelPosition(x, y, w, h, VG0_PIPE);
+ break;
+ case OV_2D_VIDEO_ON_TV:
+ // Derive the HDMI destination from the primary's rect.
+ if (FrameBufferInfo::getInstance()->canSupportTrueMirroring()) {
+ objOvCtrlChannel[VG1_PIPE].getAspectRatioPosition(
+ mCroppedSrcWidth, mCroppedSrcHeight, mDevOrientation,
+ &priDest, &secDest);
+ } else {
+ objOvCtrlChannel[VG1_PIPE].getAspectRatioPosition(
+ mCroppedSrcWidth, mCroppedSrcHeight, &secDest);
+ }
+ setChannelPosition(secDest.x, secDest.y, secDest.w, secDest.h,
+ VG1_PIPE);
+ return setChannelPosition(x, y, w, h, VG0_PIPE);
+ break;
+ case OV_3D_VIDEO_3D_PANEL:
+ for (int i = 0; i < NUM_CHANNELS; i++) {
+ if (sHDMIAsPrimary)
+ objOvCtrlChannel[i].getPositionS3D(i, mS3DFormat, &secDest);
+ else {
+ if (!objOvCtrlChannel[i].useVirtualFB()) {
+ LOGE("%s: failed virtual fb for channel %d", __FUNCTION__, i);
+ return false;
+ }
+ objOvCtrlChannel[i].getPositionS3D(i, 0x1, &secDest);
+ }
+ if(!setChannelPosition(secDest.x, secDest.y, secDest.w,
+ secDest.h, i)) {
+ LOGE("%s: failed for channel %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ break;
+ case OV_3D_VIDEO_2D_TV:
+ case OV_3D_VIDEO_3D_TV:
+ for (int i = 0; i < NUM_CHANNELS; i++) {
+ ret = objOvCtrlChannel[i].getPositionS3D(i, mS3DFormat,
+ &secDest);
+ // Fall back to the caller's rect when no S3D position applies.
+ if (!ret)
+ ret = setChannelPosition(x, y, w, h, i);
+ else
+ ret = setChannelPosition(secDest.x, secDest.y, secDest.w,
+ secDest.h, i);
+ if (!ret) {
+ LOGE("%s: failed for channel %d", __FUNCTION__, i);
+ return ret;
+ }
+ }
+ break;
+ default:
+ LOGE("%s:Unknown state %d", __FUNCTION__, mState);
+ break;
+ }
+ }
+ return true;
+}
+
+// Forward a destination rectangle to one channel's control path.
+bool Overlay::setChannelPosition(int x, int y, uint32_t w, uint32_t h, int channel) {
+ return objOvCtrlChannel[channel].setPosition(x, y, w, h);
+}
+
+// Re-program the overlay source (geometry/format) and flags on every
+// open channel. Cheap paths: returns immediately when neither geometry
+// nor flags changed; updates only the primary's flags when geometry is
+// unchanged.
+// NOTE(review): `currentFlags` is never used, and `ovFlagsPrimary` is
+// computed but unused -- its only effect is the `flags |= WAIT_FOR_VSYNC`
+// side effect when sHDMIAsPrimary, which the ovFlags array then picks up.
+bool Overlay::updateOverlaySource(const overlay_buffer_info& info, int orientation,
+ int flags) {
+ bool ret = false;
+ int currentFlags = 0;
+
+ bool needUpdateFlags = false;
+ if (objOvCtrlChannel[0].isChannelUP()) {
+ needUpdateFlags = objOvCtrlChannel[0].doFlagsNeedUpdate(flags);
+ }
+
+ bool geometryChanged = true;
+ if (info.width == mOVBufferInfo.width &&
+ info.height == mOVBufferInfo.height &&
+ info.format == mOVBufferInfo.format) {
+ geometryChanged = false;
+ }
+
+ if (sHDMIAsPrimary)
+ needUpdateFlags = false;
+
+ if ((false == needUpdateFlags) && (false == geometryChanged)) {
+ return true;
+ }
+
+ // Disable rotation for the HDMI channels
+ int orientHdmi = 0;
+ int orientPrimary = sHDMIAsPrimary ? 0 : orientation;
+ int orient[2] = {orientPrimary, orientHdmi};
+ // disable waitForVsync on HDMI, since we call the wait ioctl
+ int ovFlagsExternal = 0;
+ int ovFlagsPrimary = sHDMIAsPrimary ? (flags |= WAIT_FOR_VSYNC): flags;
+ int ovFlags[2] = {flags, ovFlagsExternal};
+ switch(mState) {
+ case OV_3D_VIDEO_3D_PANEL:
+ orient[1] = sHDMIAsPrimary ? 0 : orientation;
+ break;
+ case OV_3D_VIDEO_3D_TV:
+ orient[0] = 0;
+ break;
+ default:
+ break;
+ }
+
+ int numChannelsToUpdate = NUM_CHANNELS;
+ if (!geometryChanged) {
+ // Only update the primary channel - we only need to update the
+ // wait/no-wait flags
+ if (objOvCtrlChannel[0].isChannelUP()) {
+ return objOvCtrlChannel[0].updateOverlayFlags(flags);
+ }
+ }
+
+ // Set the overlay source info
+ for (int i = 0; i < NUM_CHANNELS; i++) {
+ if (objOvCtrlChannel[i].isChannelUP()) {
+ ret = objOvCtrlChannel[i].updateOverlaySource(info, orient[i], ovFlags[i]);
+ if (!ret) {
+ LOGE("objOvCtrlChannel[%d].updateOverlaySource failed", i);
+ return false;
+ }
+ objOvCtrlChannel[i].setSize(info.size);
+ ret = objOvDataChannel[i].updateDataChannel(info.size);
+ }
+ }
+ if (ret) {
+ mOVBufferInfo = info;
+ } else
+ LOGE("update failed");
+ return ret;
+}
+
+// Compute an aspect-ratio-preserving destination rect for a w x h source
+// on the given channel's display.
+bool Overlay::getAspectRatioPosition(int w, int h, overlay_rect *rect, int channel) {
+ return objOvCtrlChannel[channel].getAspectRatioPosition(w, h, rect);
+}
+
+// Extract and normalise the 3D flags carried in a HAL pixel format:
+// when only one of input/output is present, mirror it into the other
+// half so both sides are always populated.
+int Overlay::getS3DFormat(int format) {
+    // YV12 overlaps the 3D bit range; it is never 3D content.
+    if (format == HAL_PIXEL_FORMAT_YV12) {
+        return 0;
+    }
+    int s3d = FORMAT_3D(format);
+    int in3D = FORMAT_3D_INPUT(s3d);    // MSB 2 bytes: input format
+    int out3D = FORMAT_3D_OUTPUT(s3d);  // LSB 2 bytes: output format
+    s3d = in3D | out3D;
+    if (!in3D) {
+        s3d |= out3D << SHIFT_3D;  // derive the input from the output
+    }
+    if (!out3D) {
+        s3d |= in3D >> SHIFT_3D;   // derive the output from the input
+    }
+    return s3d;
+}
+
+// Configure the overlay for a new source buffer. Recomputes the OV_*
+// display state from (3D format, HDMI connected); on a state change the
+// required channels are torn down / restarted, otherwise only the source
+// info is refreshed via updateOverlaySource().
+bool Overlay::setSource(const overlay_buffer_info& info, int orientation,
+ int hdmiConnected, int flags, int num_buffers) {
+ // Separate the color format from the 3D format.
+ // If there is 3D content; the effective format passed by the client is:
+ // effectiveFormat = 3D_IN | 3D_OUT | ColorFormat
+ int newState = mState;
+ bool stateChange = false, ret = true;
+ bool isHDMIStateChange = (mExternalDisplay != hdmiConnected) && (mState != -1);
+ unsigned int format3D = getS3DFormat(info.format);
+ int colorFormat = getColorFormat(info.format);
+ if (isHDMIStateChange || -1 == mState) {
+ // we were mirroring UI. Also HDMI state stored was stale
+ newState = getOverlayConfig (format3D, false, hdmiConnected);
+ stateChange = (mState == newState) ? false : true;
+ }
+
+ if (stateChange) {
+ mExternalDisplay = hdmiConnected;
+ mState = newState;
+ mS3DFormat = format3D;
+ if (mState == OV_3D_VIDEO_2D_PANEL || mState == OV_3D_VIDEO_2D_TV) {
+ LOGI("3D content on 2D display: set the output format as monoscopic");
+ mS3DFormat = FORMAT_3D_INPUT(format3D) | HAL_3D_OUT_MONOSCOPIC_MASK;
+ }
+ // We always enable the rotator for the primary.
+ bool noRot = false;
+ bool uiChannel = false;
+ int fbnum = 0;
+ switch(mState) {
+ case OV_2D_VIDEO_ON_PANEL:
+ if(isHDMIStateChange) {
+ //close HDMI Only
+ closeExternalChannel();
+ break;
+ }
+ // (no break: presumably an intentional fall-through into the
+ // 2D-panel restart below -- confirm before adding a break)
+ case OV_3D_VIDEO_2D_PANEL:
+ closeChannel();
+ return startChannel(info, FRAMEBUFFER_0, noRot, false,
+ mS3DFormat, VG0_PIPE, flags, num_buffers);
+ break;
+ case OV_3D_VIDEO_3D_PANEL:
+ closeChannel();
+ if (sHDMIAsPrimary) {
+ noRot = true;
+ flags |= WAIT_FOR_VSYNC;
+ send3DInfoPacket(mS3DFormat & OUTPUT_MASK_3D);
+ }
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ if(!startChannel(info, FRAMEBUFFER_0, noRot, uiChannel,
+ mS3DFormat, i, flags, num_buffers)) {
+ LOGE("%s:failed to open channel %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ break;
+ case OV_2D_VIDEO_ON_TV:
+ if(isHDMIStateChange) {
+ //start only HDMI channel
+ noRot = true;
+ bool waitForVsync = true;
+ // External display connected, start corresponding channel
+ // mExternalDisplay will hold the fbnum
+ if(!startChannel(info, mExternalDisplay, noRot, false, mS3DFormat,
+ VG1_PIPE, waitForVsync, num_buffers)) {
+ LOGE("%s:failed to open channel %d", __func__, VG1_PIPE);
+ return false;
+ }
+ int currX, currY;
+ uint32_t currW, currH;
+ overlay_rect priDest;
+ overlay_rect secDest;
+ objOvCtrlChannel[VG0_PIPE].getPosition(currX, currY, currW, currH);
+ priDest.x = currX, priDest.y = currY;
+ priDest.w = currW, priDest.h = currH;
+ if (FrameBufferInfo::getInstance()->canSupportTrueMirroring()) {
+ objOvCtrlChannel[VG1_PIPE].getAspectRatioPosition(
+ mCroppedSrcWidth, mCroppedSrcHeight, mDevOrientation,
+ &priDest, &secDest);
+ } else {
+ objOvCtrlChannel[VG1_PIPE].getAspectRatioPosition(
+ mCroppedSrcWidth, mCroppedSrcHeight, &secDest);
+ }
+ return setChannelPosition(secDest.x, secDest.y, secDest.w, secDest.h, VG1_PIPE);
+ }
+ // (no break: presumably intentional fall-through so that a fresh
+ // 2D-on-TV session starts both channels like 3D-on-2D-TV -- verify)
+ case OV_3D_VIDEO_2D_TV:
+ closeChannel();
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ fbnum = i;
+ //start two channels for one for primary and external.
+ if (fbnum) {
+ // Disable rotation for external
+ noRot = true;
+ //set fbnum to hdmiConnected, which holds the ext display
+ fbnum = hdmiConnected;
+ flags &= ~WAIT_FOR_VSYNC;
+ }
+ if(!startChannel(info, fbnum, noRot, false, mS3DFormat,
+ i, flags, num_buffers)) {
+ LOGE("%s:failed to open channel %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ return true;
+ break;
+ case OV_3D_VIDEO_3D_TV:
+ closeChannel();
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ if(!startChannel(info, FRAMEBUFFER_1, true, false,
+ mS3DFormat, i, flags, num_buffers)) {
+ LOGE("%s:failed to open channel %d", __FUNCTION__, i);
+ return false;
+ }
+ send3DInfoPacket(mS3DFormat & OUTPUT_MASK_3D);
+ }
+ break;
+ default:
+ LOGE("%s:Unknown state %d", __FUNCTION__, mState);
+ break;
+ }
+ } else {
+ ret = updateOverlaySource(info, orientation, flags);
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ return ret;
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+ }
+ return true;
+}
+
+// Apply a source crop for the current display state. 3D states first
+// split the crop per channel via getCropS3D() so each pipe reads its own
+// view; 2D-on-TV applies the same crop to both pipes. Fails when no
+// channel is up.
+bool Overlay::setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
+ if (!mChannelUP) {
+ LOGE("%s: channel not set", __FUNCTION__);
+ return false;
+ }
+ overlay_rect rect, inRect;
+ inRect.x = x; inRect.y = y; inRect.w = w; inRect.h = h;
+ mCroppedSrcWidth = w;
+ mCroppedSrcHeight = h;
+ switch (mState) {
+ case OV_UI_MIRROR_TV:
+ case OV_2D_VIDEO_ON_PANEL:
+ return setChannelCrop(x, y, w, h, VG0_PIPE);
+ break;
+ case OV_3D_VIDEO_2D_PANEL:
+ objOvDataChannel[VG0_PIPE].getCropS3D(&inRect, VG0_PIPE, mS3DFormat, &rect);
+ return setChannelCrop(rect.x, rect.y, rect.w, rect.h, VG0_PIPE);
+ break;
+ case OV_2D_VIDEO_ON_TV:
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ if(!setChannelCrop(x, y, w, h, i)) {
+ LOGE("%s: failed for pipe %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ break;
+ case OV_3D_VIDEO_3D_PANEL:
+ case OV_3D_VIDEO_2D_TV:
+ case OV_3D_VIDEO_3D_TV:
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ objOvDataChannel[i].getCropS3D(&inRect, i, mS3DFormat, &rect);
+ if(!setChannelCrop(rect.x, rect.y, rect.w, rect.h, i)) {
+ LOGE("%s: failed for pipe %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ break;
+ default:
+ LOGE("%s:Unknown state %d", __FUNCTION__, mState);
+ break;
+ }
+ return true;
+}
+
+// Forward a crop rectangle to one channel's data path.
+bool Overlay::setChannelCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h, int channel) {
+ return objOvDataChannel[channel].setCrop(x, y, w, h);
+}
+
+// Update only the primary (VG0) channel's overlay flags.
+bool Overlay::updateOverlayFlags(int flags) {
+ return objOvCtrlChannel[VG0_PIPE].updateOverlayFlags(flags);
+}
+
+// Apply a rotation/flip transform to the channel(s) of the current
+// state. For the 3D panel the parallax-barrier orientation is switched
+// to match portrait vs landscape rotation before the channels are
+// reprogrammed.
+bool Overlay::setTransform(int value) {
+    int barrier = 0;
+    switch (mState) {
+        case OV_UI_MIRROR_TV:
+        case OV_2D_VIDEO_ON_PANEL:
+        case OV_3D_VIDEO_2D_PANEL:
+            return objOvCtrlChannel[VG0_PIPE].setTransform(value);
+        case OV_2D_VIDEO_ON_TV:
+        case OV_3D_VIDEO_2D_TV:
+        case OV_3D_VIDEO_3D_TV:
+            for (int i=0; i<NUM_CHANNELS; i++) {
+                if(!objOvCtrlChannel[i].setTransform(value)) {
+                    LOGE("%s:failed for channel %d", __FUNCTION__, i);
+                    return false;
+                }
+            }
+            break;
+        case OV_3D_VIDEO_3D_PANEL:
+            switch (value) {
+                case HAL_TRANSFORM_ROT_90:
+                case HAL_TRANSFORM_ROT_270:
+                    barrier = BARRIER_LANDSCAPE;
+                    break;
+                default:
+                    barrier = BARRIER_PORTRAIT;
+                    break;
+            }
+            // BUGFIX: in the original this call sat inside the inner
+            // switch, after both cases had already executed 'break', so
+            // it was unreachable and the barrier was never reprogrammed.
+            if(!enableBarrier(barrier))
+                LOGE("%s:failed to enable barriers for 3D video", __FUNCTION__);
+            for (int i=0; i<NUM_CHANNELS; i++) {
+                if(!objOvCtrlChannel[i].setTransform(value)) {
+                    LOGE("%s:failed for channel %d", __FUNCTION__, i);
+                    return false;
+                }
+            }
+            break;
+        default:
+            LOGE("%s:Unknown state %d", __FUNCTION__, mState);
+            break;
+    }
+    return true;
+}
+
+// Bind a buffer file descriptor to one channel's data path.
+bool Overlay::setFd(int fd, int channel) {
+ return objOvDataChannel[channel].setFd(fd);
+}
+
+// Queue the buffer at the given offset (within the bound fd) on a channel.
+bool Overlay::queueBuffer(uint32_t offset, int channel) {
+ return objOvDataChannel[channel].queueBuffer(offset);
+}
+
+// Block until the HDMI channel's vsync/display-done is signalled.
+bool Overlay::waitForHdmiVsync(int channel) {
+ return objOvDataChannel[channel].waitForHdmiVsync();
+}
+
+// Queue a gralloc buffer on every channel required by the current state.
+// Multi-channel states queue the external (higher-index) channel first,
+// then wait for the HDMI vsync so primary and external stay in step.
+bool Overlay::queueBuffer(buffer_handle_t buffer) {
+ private_handle_t const* hnd = reinterpret_cast
+ <private_handle_t const*>(buffer);
+ if (!hnd) {
+ LOGE("Overlay::queueBuffer invalid handle");
+ return false;
+ }
+ const size_t offset = hnd->offset;
+ const int fd = hnd->fd;
+ switch (mState) {
+ case OV_UI_MIRROR_TV:
+ case OV_2D_VIDEO_ON_PANEL:
+ case OV_3D_VIDEO_2D_PANEL:
+ if(!queueBuffer(fd, offset, VG0_PIPE)) {
+ LOGE("%s:failed for channel 0", __FUNCTION__);
+ return false;
+ }
+ break;
+ case OV_2D_VIDEO_ON_TV:
+ case OV_3D_VIDEO_3D_PANEL:
+ case OV_3D_VIDEO_2D_TV:
+ case OV_3D_VIDEO_3D_TV:
+ for (int i=NUM_CHANNELS-1; i>=0; i--) {
+ if(!queueBuffer(fd, offset, i)) {
+ LOGE("%s:failed for channel %d", __FUNCTION__, i);
+ return false;
+ }
+ }
+ //Wait for HDMI done..
+ if(!waitForHdmiVsync(VG1_PIPE)) {
+ LOGE("%s: waitforHdmiVsync failed", __FUNCTION__);
+ return false;
+ }
+ break;
+ default:
+ LOGE("%s:Unknown state %d", __FUNCTION__, mState);
+ break;
+ }
+ return true;
+}
+
+// Convenience wrapper: bind fd to the channel, then queue the buffer at
+// the given offset. Logs and returns false on the first failure.
+bool Overlay::queueBuffer(int fd, uint32_t offset, int channel) {
+    if (!setFd(fd, channel)) {
+        LOGE("Overlay::queueBuffer channel %d setFd failed", channel);
+        return false;
+    }
+    if (!queueBuffer(offset, channel)) {
+        LOGE("Overlay::queueBuffer channel %d queueBuffer failed", channel);
+        return false;
+    }
+    return true;
+}
+
+// Start with no fds open and all kernel request structs zeroed; the
+// channel is brought up later via startControlChannel().
+OverlayControlChannel::OverlayControlChannel() : mNoRot(false), mFD(-1), mRotFD(-1),
+ mFormat3D(0), mIsChannelUpdated(true) {
+ memset(&mOVInfo, 0, sizeof(mOVInfo));
+ memset(&m3DOVInfo, 0, sizeof(m3DOVInfo));
+ memset(&mRotInfo, 0, sizeof(mRotInfo));
+}
+
+
+// Release the framebuffer/rotator fds and kernel overlay on destruction.
+OverlayControlChannel::~OverlayControlChannel() {
+ closeControlChannel();
+}
+
+// Compute a centred, aspect-ratio-preserving destination rect for a
+// w x h source on this channel's framebuffer. Honours the MDP's 8x
+// upscale limit (tile padding excluded for YUV-tile sources) and the
+// hw.actionsafe.{width,height} overscan properties.
+bool OverlayControlChannel::getAspectRatioPosition(int w, int h, overlay_rect *rect)
+{
+ int width = w, height = h, x, y;
+ int fbWidth = getFBWidth();
+ int fbHeight = getFBHeight();
+ // width and height for YUV TILE format
+ int tempWidth = w, tempHeight = h;
+ /* Calculate the width and height if it is YUV TILE format*/
+ if(getFormat() == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) {
+ tempWidth = w - ( (((w-1)/64 +1)*64) - w);
+ tempHeight = h - ((((h-1)/32 +1)*32) - h);
+ }
+ // Letterbox or pillarbox to preserve the source aspect ratio.
+ if (width * fbHeight > fbWidth * height) {
+ height = fbWidth * height / width;
+ EVEN_OUT(height);
+ width = fbWidth;
+ } else if (width * fbHeight < fbWidth * height) {
+ width = fbHeight * width / height;
+ EVEN_OUT(width);
+ height = fbHeight;
+ } else {
+ width = fbWidth;
+ height = fbHeight;
+ }
+ /* Scaling of upto a max of 8 times supported */
+ if(width >(tempWidth * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ width = HW_OVERLAY_MAGNIFICATION_LIMIT * tempWidth;
+ }
+ if(height >(tempHeight*HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ height = HW_OVERLAY_MAGNIFICATION_LIMIT * tempHeight;
+ }
+ if (width > fbWidth) width = fbWidth;
+ if (height > fbHeight) height = fbHeight;
+
+ // Shrink by the action-safe percentages to stay inside TV overscan.
+ char value[PROPERTY_VALUE_MAX];
+ property_get("hw.actionsafe.width", value, "0");
+ float asWidth = atof(value);
+ property_get("hw.actionsafe.height", value, "0");
+ float asHeight = atof(value);
+ width = width * (1.0f - asWidth / 100.0f);
+ height = height * (1.0f - asHeight / 100.0f);
+
+ x = (fbWidth - width) / 2;
+ y = (fbHeight - height) / 2;
+ rect->x = x;
+ rect->y = y;
+ rect->w = width;
+ rect->h = height;
+ return true;
+}
+
+
+// This function gets the destination position for Seconday display
+// based on the position and aspect ratio of the primary
+// True-mirroring position: map the primary display's video rect
+// (inRect) onto this (secondary) display, compensating for the device
+// orientation, and return it in outRect. inRect is first rotated into
+// the panel's natural frame, then scaled by the ratio between the
+// primary's dimensions and the secondary's aspect-corrected area.
+// NOTE: inRect is modified in place as part of the rotation step.
+bool OverlayControlChannel::getAspectRatioPosition(int w, int h, int orientation,
+ overlay_rect *inRect, overlay_rect *outRect) {
+ float priWidth = FrameBufferInfo::getInstance()->getWidth();
+ float priHeight = FrameBufferInfo::getInstance()->getHeight();
+ float fbWidth = getFBWidth();
+ float fbHeight = getFBHeight();
+ float wRatio = 1.0;
+ float hRatio = 1.0;
+ float xRatio = 1.0;
+ float yRatio = 1.0;
+
+ int xPos = 0;
+ int yPos = 0;
+ int tmp = 0;
+ overlay_rect rect;
+ switch(orientation) {
+ case MDP_ROT_NOP:
+ case MDP_ROT_180:
+ getAspectRatioPosition((int)priWidth, (int)priHeight, &rect);
+ xPos = rect.x;
+ yPos = rect.y;
+ fbWidth = rect.w;
+ fbHeight = rect.h;
+
+ if(orientation == MDP_ROT_180) {
+ inRect->x = priWidth - (inRect->x + inRect->w);
+ inRect->y = priHeight - (inRect->y + inRect->h);
+ }
+ break;
+ case MDP_ROT_90:
+ case MDP_ROT_270:
+ if(orientation == MDP_ROT_90) {
+ tmp = inRect->y;
+ inRect->y = priWidth - (inRect->x + inRect->w);
+ inRect->x = tmp;
+ }
+ else if(orientation == MDP_ROT_270) {
+ tmp = inRect->x;
+ inRect->x = priHeight - (inRect->y + inRect->h);
+ inRect->y = tmp;
+ }
+ //Swap the destination width/height
+ swapWidthHeight(inRect->w, inRect->h);
+ // Swap width/height for primary
+ swapWidthHeight(priWidth, priHeight);
+ getAspectRatioPosition((int)priWidth, (int)priHeight, &rect);
+ xPos = rect.x;
+ yPos = rect.y;
+ fbWidth = rect.w;
+ fbHeight = rect.h;
+ break;
+ default:
+ LOGE("In %s: Unknown Orientation", __FUNCTION__);
+ break;
+ }
+ //Calculate the position...
+ xRatio = inRect->x/priWidth;
+ yRatio = inRect->y/priHeight;
+
+ wRatio = inRect->w/priWidth;
+ hRatio = inRect->h/priHeight;
+ outRect->x = (xRatio * fbWidth) + xPos;
+ outRect->y = (yRatio * fbHeight) + yPos;
+
+ outRect->w = (wRatio * fbWidth);
+ outRect->h = hRatio * fbHeight;
+ LOGD("Calculated AS Position for HDMI: X= %d, y = %d w = %d h = %d",
+ outRect->x, outRect->y,outRect->w, outRect->h);
+ return true;
+}
+
+
+// Destination rect for one eye's channel under a given 3D output
+// packing: side-by-side splits the display horizontally, top-bottom
+// vertically; monoscopic sends only VG1 full-screen (VG0 returns false).
+// NOTE(review): the interleave case leaves *rect untouched yet returns
+// true -- callers then consume an uninitialised rect; confirm intended.
+bool OverlayControlChannel::getPositionS3D(int channel, int format, overlay_rect *rect) {
+ int wDisp = getFBWidth();
+ int hDisp = getFBHeight();
+ switch (format & OUTPUT_MASK_3D) {
+ case HAL_3D_OUT_SIDE_BY_SIDE_MASK:
+ if (channel == VG0_PIPE) {
+ rect->x = 0;
+ rect->y = 0;
+ rect->w = wDisp/2;
+ rect->h = hDisp;
+ } else {
+ rect->x = wDisp/2;
+ rect->y = 0;
+ rect->w = wDisp/2;
+ rect->h = hDisp;
+ }
+ break;
+ case HAL_3D_OUT_TOP_BOTTOM_MASK:
+ if (channel == VG0_PIPE) {
+ rect->x = 0;
+ rect->y = 0;
+ rect->w = wDisp;
+ rect->h = hDisp/2;
+ } else {
+ rect->x = 0;
+ rect->y = hDisp/2;
+ rect->w = wDisp;
+ rect->h = hDisp/2;
+ }
+ break;
+ case HAL_3D_OUT_MONOSCOPIC_MASK:
+ if (channel == VG1_PIPE) {
+ rect->x = 0;
+ rect->y = 0;
+ rect->w = wDisp;
+ rect->h = hDisp;
+ }
+ else
+ return false;
+ break;
+ case HAL_3D_OUT_INTERLEAVE_MASK:
+ break;
+ default:
+ reportError("Unsupported 3D output format");
+ break;
+ }
+ return true;
+}
+
+// Open the framebuffer for fbnum, cache its geometry (xres/yres, bpp,
+// stride), and -- unless rotation is disabled -- open the MSM rotator
+// device as well. Returns false (with all fds closed) on any failure.
+// NOTE(review): the error strings hardcode "fb1" even though fbnum
+// varies; the messages are misleading for other framebuffers.
+bool OverlayControlChannel::openDevices(int fbnum) {
+ if (fbnum < 0)
+ return false;
+
+ char dev_name[64];
+ snprintf(dev_name, 64, FB_DEVICE_TEMPLATE, fbnum);
+ mFD = open(dev_name, O_RDWR, 0);
+ if (mFD < 0) {
+ reportError("Cant open framebuffer ");
+ return false;
+ }
+
+ fb_fix_screeninfo finfo;
+ if (ioctl(mFD, FBIOGET_FSCREENINFO, &finfo) == -1) {
+ reportError("FBIOGET_FSCREENINFO on fb1 failed");
+ close(mFD);
+ mFD = -1;
+ return false;
+ }
+
+ fb_var_screeninfo vinfo;
+ if (ioctl(mFD, FBIOGET_VSCREENINFO, &vinfo) == -1) {
+ reportError("FBIOGET_VSCREENINFO on fb1 failed");
+ close(mFD);
+ mFD = -1;
+ return false;
+ }
+ mFBWidth = vinfo.xres;
+ mFBHeight = vinfo.yres;
+ mFBbpp = vinfo.bits_per_pixel;
+ mFBystride = finfo.line_length;
+
+ if (!mNoRot) {
+ mRotFD = open("/dev/msm_rotator", O_RDWR, 0);
+ if (mRotFD < 0) {
+ reportError("Cant open rotator device");
+ close(mFD);
+ mFD = -1;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Populates mOVInfo from the buffer description: source geometry (with
+// 64x32 alignment handling for MDP_Y_CRCB_H2V2_TILE), a destination rect
+// scaled down to fit inside the framebuffer, and — for NEW_REQUEST only —
+// z-order/alpha/transparency defaults. Flag translation is delegated to
+// setInformationFromFlags(). Always returns true.
+bool OverlayControlChannel::setOverlayInformation(const overlay_buffer_info& info,
+ int zorder, int flags, int requestType) {
+ int w = info.width;
+ int h = info.height;
+ int format = info.format;
+
+ mOVInfo.src.width = w;
+ mOVInfo.src.height = h;
+ mOVInfo.src_rect.x = 0;
+ mOVInfo.src_rect.y = 0;
+ mOVInfo.dst_rect.x = 0;
+ mOVInfo.dst_rect.y = 0;
+ mOVInfo.dst_rect.w = w;
+ mOVInfo.dst_rect.h = h;
+ if(format == MDP_Y_CRCB_H2V2_TILE) {
+ if (mNoRot) {
+ // No rotator: shrink the visible rect by the tile padding.
+ // NOTE(review): this evaluates to 2*w - alignedWidth, i.e. the
+ // rect shrinks by the pad amount — confirm intended for w,h not
+ // already 64/32-aligned.
+ mOVInfo.src_rect.w = w - ((((w-1)/64 +1)*64) - w);
+ mOVInfo.src_rect.h = h - ((((h-1)/32 +1)*32) - h);
+ } else {
+ // With a rotator: source is the 64x32-aligned tiled buffer and
+ // the visible rect is offset past the padding.
+ mOVInfo.src_rect.w = w;
+ mOVInfo.src_rect.h = h;
+ mOVInfo.src.width = (((w-1)/64 +1)*64);
+ mOVInfo.src.height = (((h-1)/32 +1)*32);
+ mOVInfo.src_rect.x = mOVInfo.src.width - w;
+ mOVInfo.src_rect.y = mOVInfo.src.height - h;
+ }
+ } else {
+ mOVInfo.src_rect.w = w;
+ mOVInfo.src_rect.h = h;
+ }
+
+ mOVInfo.src.format = format;
+ // Scale destination down proportionally so it fits the framebuffer.
+ int dst_w = w;
+ int dst_h = h;
+
+ if (dst_w > mFBWidth) {
+ dst_w = mFBWidth;
+ dst_h = dst_h * mFBWidth / w;
+ }
+ if (dst_h > mFBHeight) {
+ dst_h = mFBHeight;
+ dst_w = dst_w * mFBHeight / h;
+ }
+ mOVInfo.dst_rect.w = dst_w;
+ mOVInfo.dst_rect.h = dst_h;
+ mOVInfo.user_data[0] = 0;
+ if (requestType == NEW_REQUEST) {
+ mOVInfo.id = MSMFB_NEW_REQUEST;
+ mOVInfo.z_order = zorder;
+ mOVInfo.alpha = 0xff;
+ mOVInfo.transp_mask = 0xffffffff;
+ }
+ mOVInfo.flags = 0;
+ setInformationFromFlags(flags, mOVInfo);
+ mOVInfo.dpp.sharp_strength = 0;
+ return true;
+}
+
+// Translates overlay 'flags' bits into MDP flag bits and the is_fg field.
+// NOTE(review): the 'ov' parameter is never used — the method writes
+// mOVInfo directly. The only visible caller passes mOVInfo, so behavior
+// matches, but confirm intent before adding other callers.
+void OverlayControlChannel::setInformationFromFlags(int flags, mdp_overlay& ov)
+{
+ if (flags & INTERLACED_CONTENT) {
+ mOVInfo.flags |= MDP_DEINTERLACE;
+ } else {
+ mOVInfo.flags &= ~MDP_DEINTERLACE;
+ }
+
+ // Absence of WAIT_FOR_VSYNC means "play without waiting for vsync".
+ if ((flags & WAIT_FOR_VSYNC) == 0)
+ mOVInfo.flags |= MDP_OV_PLAY_NOWAIT;
+ else
+ mOVInfo.flags &= ~MDP_OV_PLAY_NOWAIT;
+
+ if(flags & SECURE_OVERLAY_SESSION)
+ mOVInfo.flags |= MDP_SECURE_OVERLAY_SESSION;
+ else
+ mOVInfo.flags &= ~MDP_SECURE_OVERLAY_SESSION;
+
+ //set the default sharpening settings
+ mOVInfo.flags |= MDP_SHARPENING;
+
+ if (flags & DISABLE_FRAMEBUFFER_FETCH)
+ mOVInfo.is_fg = 1;
+ else
+ mOVInfo.is_fg = 0;
+
+ if (flags & OVERLAY_PIPE_SHARE) {
+ mOVInfo.flags |= MDP_OV_PIPE_SHARE;
+ } else {
+ mOVInfo.flags &= ~MDP_OV_PIPE_SHARE;
+ }
+ mOVInfo.dpp.sharp_strength = 0;
+
+}
+
+// Reports whether 'flags' disagree with the currently-programmed overlay
+// state — the vsync-wait bit (MDP_OV_PLAY_NOWAIT) or the framebuffer-fetch
+// bit (is_fg) — i.e. whether an overlay update is required to apply them.
+bool OverlayControlChannel::doFlagsNeedUpdate(int flags) {
+    const bool wantNoWait = ((flags & WAIT_FOR_VSYNC) == 0);
+    const bool haveNoWait = ((mOVInfo.flags & MDP_OV_PLAY_NOWAIT) != 0);
+
+    const bool wantFg = ((flags & DISABLE_FRAMEBUFFER_FETCH) != 0);
+    const bool haveFg = (mOVInfo.is_fg == 1);
+
+    // An update is needed when either desired bit differs from the
+    // programmed one.
+    return (wantNoWait != haveNoWait) || (wantFg != haveFg);
+}
+
+// Programs the rotator session (when rotation is enabled) and the MDP
+// overlay for the given buffer. On any ioctl failure the whole control
+// channel is torn down and false is returned; on success the channel is
+// marked updated.
+// Fix: the tile branch assigned mRotInfo.dst.format = MDP_Y_CRCB_H2V2 and
+// that value was unconditionally overwritten by get_rot_output_format()
+// right after the branch — the dead store has been removed and the
+// repeated 64x32 alignment expressions hoisted.
+bool OverlayControlChannel::startOVRotatorSessions(
+                           const overlay_buffer_info& info,
+                           int requestType) {
+    bool ret = true;
+    int w = info.width;
+    int h = info.height;
+    int format = info.format;
+
+    if (!mNoRot) {
+        mRotInfo.src.format = format;
+        mRotInfo.src.width = w;
+        mRotInfo.src.height = h;
+        mRotInfo.src_rect.w = w;
+        mRotInfo.src_rect.h = h;
+        mRotInfo.dst.width = w;
+        mRotInfo.dst.height = h;
+        if (format == MDP_Y_CRCB_H2V2_TILE) {
+            // Tiled sources are padded up to 64x32 alignment.
+            const int alignedW = (((w-1)/64 + 1) * 64);
+            const int alignedH = (((h-1)/32 + 1) * 32);
+            mRotInfo.src.width = alignedW;
+            mRotInfo.src.height = alignedH;
+            mRotInfo.src_rect.w = alignedW;
+            mRotInfo.src_rect.h = alignedH;
+            mRotInfo.dst.width = alignedW;
+            mRotInfo.dst.height = alignedH;
+        }
+        // Rotator output is always the non-tiled equivalent of the input.
+        mRotInfo.dst.format = get_rot_output_format(format);
+        mRotInfo.dst_x = 0;
+        mRotInfo.dst_y = 0;
+        mRotInfo.src_rect.x = 0;
+        mRotInfo.src_rect.y = 0;
+        mRotInfo.rotations = 0;
+
+        if (requestType == NEW_REQUEST) {
+            // UI mirroring always rotates; video starts with rotation off.
+            mRotInfo.enable = mUIChannel ? 1 : 0;
+            mRotInfo.session_id = 0;
+        } else {
+            mRotInfo.enable = 1;
+        }
+
+        if (ioctl(mRotFD, MSM_ROTATOR_IOCTL_START, &mRotInfo)) {
+            reportError("Rotator session failed");
+            dump(mRotInfo);
+            ret = false;
+        }
+    }
+
+    if (ret && ioctl(mFD, MSMFB_OVERLAY_SET, &mOVInfo)) {
+        reportError("startOVRotatorSessions, Overlay set failed");
+        dump(mOVInfo);
+        ret = false;
+    }
+
+    if (!ret)
+        closeControlChannel();
+    else
+        mIsChannelUpdated = true;
+    return ret;
+}
+
+// Re-programs the existing overlay/rotator sessions for a new buffer
+// geometry/format without tearing the channel down (UPDATE_REQUEST path).
+// NOTE(review): the 'orientation' parameter is unused here — confirm
+// whether callers expect it to be applied.
+bool OverlayControlChannel::updateOverlaySource(const overlay_buffer_info& info,
+ int orientation, int flags)
+{
+ int colorFormat = getColorFormat(info.format);
+ int hw_format = get_mdp_format(colorFormat);
+ overlay_buffer_info ovBufInfo;
+ ovBufInfo.width = info.width;
+ ovBufInfo.height = info.height;
+ ovBufInfo.format = hw_format;
+
+ if (isInterlacedContent(info.format)) {
+ flags |= INTERLACED_CONTENT;
+ }
+ if (!setOverlayInformation(ovBufInfo, 0, flags,
+ UPDATE_REQUEST))
+ return false;
+
+ return startOVRotatorSessions(ovBufInfo, UPDATE_REQUEST);
+}
+
+// Brings up a control channel on framebuffer 'fbnum': resolves the MDP
+// pixel format (stripping the interlace bit from HAL_PIXEL_FORMAT values),
+// decides VG pipe sharing, opens the fb/rotator devices, obtains a z-order
+// slot and programs overlay + rotator. Returns false on any failure.
+// NOTE(review): the incoming 'zorder' argument is overwritten by
+// ZOrderManager below — confirm callers expect that.
+bool OverlayControlChannel::startControlChannel(const overlay_buffer_info& info,
+ int fbnum, bool norot,
+ bool uichannel,
+ unsigned int format3D, int zorder,
+ int flags) {
+ int colorFormat = getColorFormat(info.format);
+ mNoRot = norot;
+ mFormat = colorFormat;
+ mUIChannel = uichannel;
+ mFBNum = fbnum;
+ fb_fix_screeninfo finfo;
+ fb_var_screeninfo vinfo;
+ int hw_format;
+
+ // The interlace mask is part of the HAL_PIXEL_FORMAT_YV12 value. Add
+ // an explicit check for the format
+ if (isInterlacedContent(colorFormat)) {
+ flags |= MDP_DEINTERLACE;
+
+ // Get the actual format
+ colorFormat = colorFormat ^ HAL_PIXEL_FORMAT_INTERLACE;
+ }
+ hw_format = get_mdp_format(colorFormat);
+ if (hw_format < 0) {
+ reportError("Unsupported format");
+ return false;
+ }
+
+ mFormat3D = format3D;
+ if ( !mFormat3D || (mFormat3D & HAL_3D_OUT_MONOSCOPIC_MASK) ) {
+ // Set the share bit for sharing the VG pipe
+ flags |= OVERLAY_PIPE_SHARE;
+ }
+ //do not set the PIPE SHARE bit for true mirroring
+ if(uichannel && FrameBufferInfo::getInstance()->canSupportTrueMirroring())
+ flags &= ~OVERLAY_PIPE_SHARE;
+ if (!openDevices(fbnum))
+ return false;
+
+ //get Z order
+ zorder = ZOrderManager::getInstance()->getZ(fbnum);
+ if (zorder == NO_PIPE)
+ return false;
+
+ overlay_buffer_info ovBufInfo;
+ ovBufInfo.width = info.width;
+ ovBufInfo.height = info.height;
+ ovBufInfo.format = hw_format;
+ if (!setOverlayInformation(ovBufInfo, zorder, flags, NEW_REQUEST))
+ return false;
+
+ return startOVRotatorSessions(ovBufInfo, NEW_REQUEST);
+}
+
+// Tears down the overlay and rotator sessions, disables any active 3D
+// mode, releases the z-order slot and clears all cached state. Safe (and
+// a no-op) when the channel is already down. Always returns true.
+bool OverlayControlChannel::closeControlChannel() {
+ if (!isChannelUP())
+ return true;
+
+ if (!mNoRot && mRotFD > 0) {
+ ioctl(mRotFD, MSM_ROTATOR_IOCTL_FINISH, &(mRotInfo.session_id));
+ close(mRotFD);
+ mRotFD = -1;
+ }
+
+ int ovid = mOVInfo.id;
+ ioctl(mFD, MSMFB_OVERLAY_UNSET, &ovid);
+ // Leaving 3D mode must be signalled to the driver explicitly.
+ if (m3DOVInfo.is_3d) {
+ m3DOVInfo.is_3d = 0;
+ ioctl(mFD, MSMFB_OVERLAY_3D, &m3DOVInfo);
+ }
+
+ close(mFD);
+
+ // Return the z-order slot so another channel can claim it.
+ if(NO_PIPE != mOVInfo.z_order){
+ ZOrderManager::getInstance()->decZ(mFBNum, mOVInfo.z_order);
+ }
+ memset(&mOVInfo, 0, sizeof(mOVInfo));
+ memset(&mRotInfo, 0, sizeof(mRotInfo));
+ memset(&m3DOVInfo, 0, sizeof(m3DOVInfo));
+
+ mOVInfo.z_order = NO_PIPE;
+ mFD = -1;
+
+ return true;
+}
+
+// Pushes vsync-wait and framebuffer-fetch flag changes to the driver via
+// MSMFB_OVERLAY_SET. Returns false if the ioctl fails.
+bool OverlayControlChannel::updateOverlayFlags(int flags) {
+    if (flags & WAIT_FOR_VSYNC)
+        mOVInfo.flags &= ~MDP_OV_PLAY_NOWAIT;
+    else
+        mOVInfo.flags |= MDP_OV_PLAY_NOWAIT;
+
+    mOVInfo.is_fg = (flags & DISABLE_FRAMEBUFFER_FETCH) ? 1 : 0;
+
+    if (ioctl(mFD, MSMFB_OVERLAY_SET, &mOVInfo)) {
+        LOGE("%s: OVERLAY_SET failed", __FUNCTION__);
+        dump(mOVInfo);
+        return false;
+    }
+    return true;
+}
+
+// Moves/resizes the overlay destination rect. Fails if the rect is out of
+// framebuffer bounds or (LGE change, #if 1 branch) the request would need
+// more than HW_OVERLAY_MAGNIFICATION_LIMIT upscaling, so the caller can
+// fall back to GPU composition. The driver is only touched when the rect
+// actually changes.
+bool OverlayControlChannel::setPosition(int x, int y, uint32_t w, uint32_t h) {
+
+ if (!isChannelUP() ||
+ (x < 0) || (y < 0) || ((x + w) > mFBWidth) ||
+ ((y + h) > mFBHeight)) {
+ reportError("setPosition failed");
+ LOGW("x %d y %d (x+w) %d (y+h) %d FBWidth %d FBHeight %d", x, y, x+w, y+h,
+ mFBWidth,mFBHeight);
+ return false;
+ }
+ if( x != mOVInfo.dst_rect.x || y != mOVInfo.dst_rect.y ||
+ w != mOVInfo.dst_rect.w || h != mOVInfo.dst_rect.h ) {
+ mdp_overlay ov;
+ ov.id = mOVInfo.id;
+ // Fetch current driver-side state; GET fills 'ov' from the id above.
+ if (ioctl(mFD, MSMFB_OVERLAY_GET, &ov)) {
+ reportError("setPosition, overlay GET failed");
+ return false;
+ }
+
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ // can not scale-up 8 times over original source
+ // return false to compose with GPU
+#if 1
+ if(w > (ov.src_rect.w * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ LOGE("[TJ] setPosition : too big width, back to GPU comp %d => %d", ov.src_rect.w, w);
+ return false;
+ }
+ if(h > (ov.src_rect.h * HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ LOGE("[TJ] setPosition : too big height, back to GPU comp %d => %d", ov.src_rect.h, h);
+ return false;
+ }
+#else
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ /* Scaling of upto a max of 8 times supported */
+ if(w >(ov.src_rect.w * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ w = HW_OVERLAY_MAGNIFICATION_LIMIT * ov.src_rect.w;
+ x = (mFBWidth - w) / 2;
+ }
+ if(h >(ov.src_rect.h * HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ h = HW_OVERLAY_MAGNIFICATION_LIMIT * ov.src_rect.h;
+ y = (mFBHeight - h) / 2;
+ }
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+#endif
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ ov.dst_rect.x = x;
+ ov.dst_rect.y = y;
+ ov.dst_rect.w = w;
+ ov.dst_rect.h = h;
+ if (ioctl(mFD, MSMFB_OVERLAY_SET, &ov)) {
+ reportError("setPosition, Overlay SET failed");
+ dump(ov);
+ return false;
+ }
+ mOVInfo = ov;
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ LOGE("setPosition");
+ dump(ov);
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+ }
+ return true;
+}
+
+// Swaps width/height on the overlay source, its source rect, and the
+// rotator destination — used when a transform toggles between portrait
+// and landscape orientations.
+void OverlayControlChannel::swapOVRotWidthHeight() {
+    int t;
+
+    t = mOVInfo.src.width;
+    mOVInfo.src.width = mOVInfo.src.height;
+    mOVInfo.src.height = t;
+
+    t = mOVInfo.src_rect.w;
+    mOVInfo.src_rect.w = mOVInfo.src_rect.h;
+    mOVInfo.src_rect.h = t;
+
+    t = mRotInfo.dst.width;
+    mRotInfo.dst.width = mRotInfo.dst.height;
+    mRotInfo.dst.height = t;
+}
+
+// Switches the framebuffer into the 3D "virtual" geometry on first use:
+// width doubles and height halves, and the driver is informed via
+// MSMFB_OVERLAY_3D. Idempotent — returns true immediately if 3D is
+// already active; returns false only if the ioctl fails.
+bool OverlayControlChannel::useVirtualFB() {
+ if(!m3DOVInfo.is_3d) {
+ m3DOVInfo.is_3d = 1;
+ mFBWidth *= 2;
+ mFBHeight /= 2;
+ m3DOVInfo.width = mFBWidth;
+ m3DOVInfo.height = mFBHeight;
+ return ioctl(mFD, MSMFB_OVERLAY_3D, &m3DOVInfo) ? false : true;
+ }
+ return true;
+}
+
+// Applies a HAL_TRANSFORM_* rotation to the overlay/rotator pair.
+// mOVInfo.src_rect is stored in the coordinate space of the currently
+// programmed rotation (mOVInfo.user_data[0]); each switch case below first
+// maps the rect out of the old orientation before the new one is applied.
+// Fails (falling back to GPU composition, LGE change) if the programmed
+// scale exceeds HW_OVERLAY_MAGNIFICATION_LIMIT, or if any ioctl fails.
+bool OverlayControlChannel::setTransform(int value, bool fetch) {
+ if (!isChannelUP()) {
+ LOGE("%s: channel is not up", __FUNCTION__);
+ return false;
+ }
+
+ mdp_overlay ov = mOVInfo;
+ if (fetch && ioctl(mFD, MSMFB_OVERLAY_GET, &ov)) {
+ reportError("setParameter, overlay GET failed");
+ return false;
+ }
+ mOVInfo = ov;
+ // Skip reprogramming when the orientation is unchanged and the channel
+ // was not updated since the last transform.
+ if (!mIsChannelUpdated) {
+ int orientation = get_mdp_orientation(value);
+ if (orientation == mOVInfo.user_data[0]) {
+ return true;
+ }
+ }
+ mIsChannelUpdated = false;
+
+ // val = previously programmed MDP orientation.
+ int val = mOVInfo.user_data[0];
+ if (mNoRot)
+ return true;
+
+ int rot = value;
+
+ switch(rot) {
+ case 0:
+ case HAL_TRANSFORM_FLIP_H:
+ case HAL_TRANSFORM_FLIP_V:
+ {
+ // Undo a previously applied 90/270 rotation of the src rect.
+ if (val == MDP_ROT_90) {
+ int tmp = mOVInfo.src_rect.y;
+ mOVInfo.src_rect.y = mOVInfo.src.width -
+ (mOVInfo.src_rect.x + mOVInfo.src_rect.w);
+ mOVInfo.src_rect.x = tmp;
+ swapOVRotWidthHeight();
+ }
+ else if (val == MDP_ROT_270) {
+ int tmp = mOVInfo.src_rect.x;
+ mOVInfo.src_rect.x = mOVInfo.src.height - (
+ mOVInfo.src_rect.y + mOVInfo.src_rect.h);
+ mOVInfo.src_rect.y = tmp;
+ swapOVRotWidthHeight();
+ }
+ break;
+ }
+ case HAL_TRANSFORM_ROT_90:
+ case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_H):
+ case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_V):
+ {
+ if (val == MDP_ROT_270) {
+ // 270 -> 90 is a net 180: mirror the rect in both axes.
+ mOVInfo.src_rect.x = mOVInfo.src.width - (
+ mOVInfo.src_rect.x + mOVInfo.src_rect.w);
+ mOVInfo.src_rect.y = mOVInfo.src.height - (
+ mOVInfo.src_rect.y + mOVInfo.src_rect.h);
+ }
+ else if (val == MDP_ROT_NOP || val == MDP_ROT_180) {
+ int tmp = mOVInfo.src_rect.x;
+ mOVInfo.src_rect.x = mOVInfo.src.height -
+ (mOVInfo.src_rect.y + mOVInfo.src_rect.h);
+ mOVInfo.src_rect.y = tmp;
+ swapOVRotWidthHeight();
+ }
+ break;
+ }
+ case HAL_TRANSFORM_ROT_180:
+ {
+ if (val == MDP_ROT_270) {
+ int tmp = mOVInfo.src_rect.y;
+ mOVInfo.src_rect.y = mOVInfo.src.width -
+ (mOVInfo.src_rect.x + mOVInfo.src_rect.w);
+ mOVInfo.src_rect.x = tmp;
+ swapOVRotWidthHeight();
+ }
+ else if (val == MDP_ROT_90) {
+ int tmp = mOVInfo.src_rect.x;
+ mOVInfo.src_rect.x = mOVInfo.src.height - (
+ mOVInfo.src_rect.y + mOVInfo.src_rect.h);
+ mOVInfo.src_rect.y = tmp;
+ swapOVRotWidthHeight();
+ }
+ break;
+ }
+ case HAL_TRANSFORM_ROT_270:
+ {
+ if (val == MDP_ROT_90) {
+ mOVInfo.src_rect.y = mOVInfo.src.height -
+ (mOVInfo.src_rect.y + mOVInfo.src_rect.h);
+ mOVInfo.src_rect.x = mOVInfo.src.width -
+ (mOVInfo.src_rect.x + mOVInfo.src_rect.w);
+ }
+ else if (val == MDP_ROT_NOP || val == MDP_ROT_180) {
+ int tmp = mOVInfo.src_rect.y;
+ mOVInfo.src_rect.y = mOVInfo.src.width - (
+ mOVInfo.src_rect.x + mOVInfo.src_rect.w);
+ mOVInfo.src_rect.x = tmp;
+ swapOVRotWidthHeight();
+ }
+ break;
+ }
+ default: return false;
+ }
+
+ int mdp_rotation = get_mdp_orientation(rot);
+ if (mdp_rotation == -1)
+ return false;
+
+ mOVInfo.user_data[0] = mdp_rotation;
+ mRotInfo.rotations = mOVInfo.user_data[0];
+
+ /* Rotator always outputs non-tiled formats.
+ If rotator is used, set Overlay input to non-tiled
+ Else, overlay input remains tiled */
+ if (mOVInfo.user_data[0]) {
+ mOVInfo.src.format = get_rot_output_format(mRotInfo.src.format);
+ mRotInfo.enable = 1;
+ }
+ else {
+ //We can switch between rotator ON and OFF. Reset overlay
+ //i/p format whenever this happens
+ if(mRotInfo.dst.format == mOVInfo.src.format)
+ mOVInfo.src.format = mRotInfo.src.format;
+ mRotInfo.enable = 0;
+ //Always enable rotation for UI mirror usecase
+ if(mUIChannel)
+ mRotInfo.enable = 1;
+ }
+
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ // can not scale-up 8 times over original source
+ // return false to compose with GPU
+#if 1
+ if(mOVInfo.dst_rect.w > (mOVInfo.src_rect.w * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ LOGE("[TJ] setTransform : too big width, back to GPU comp %d => %d", mOVInfo.src_rect.w, mOVInfo.dst_rect.w);
+ return false;
+ }
+ if(mOVInfo.dst_rect.h > (mOVInfo.src_rect.h * HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ LOGE("[TJ] setTransform : too big height, back to GPU comp %d => %d", mOVInfo.src_rect.h, mOVInfo.dst_rect.h);
+ return false;
+ }
+#endif
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ //dump(mRotInfo); // TJ
+ if (ioctl(mRotFD, MSM_ROTATOR_IOCTL_START, &mRotInfo)) {
+ reportError("setTransform, rotator start failed");
+ dump(mRotInfo);
+ return false;
+ }
+
+ if ((mOVInfo.user_data[0] == MDP_ROT_90) ||
+ (mOVInfo.user_data[0] == MDP_ROT_270))
+ mOVInfo.flags |= MDP_SOURCE_ROTATED_90;
+ else
+ mOVInfo.flags &= ~MDP_SOURCE_ROTATED_90;
+
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ LOGE("setTransform"); // TJ
+ dump(mOVInfo); // TJ
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ if (ioctl(mFD, MSMFB_OVERLAY_SET, &mOVInfo)) {
+ reportError("setTransform, overlay set failed");
+ dump(mOVInfo);
+ return false;
+ }
+
+ return true;
+}
+
+// Returns the current overlay destination rect (cached in mOVInfo).
+// Fails only when the channel is down.
+bool OverlayControlChannel::getPosition(int& x, int& y,
+ uint32_t& w, uint32_t& h) {
+ if (!isChannelUP())
+ return false;
+ //mOVInfo has the current Overlay Position
+ x = mOVInfo.dst_rect.x;
+ y = mOVInfo.dst_rect.y;
+ w = mOVInfo.dst_rect.w;
+ h = mOVInfo.dst_rect.h;
+
+ return true;
+}
+
+// Returns the currently programmed MDP orientation (stored in
+// mOVInfo.user_data[0]); fails only when the channel is down.
+bool OverlayControlChannel::getOrientation(int& orientation) const {
+ if (!isChannelUP())
+ return false;
+ // mOVInfo has the current orientation
+ orientation = mOVInfo.user_data[0];
+ return true;
+}
+// Returns the driver-assigned overlay session id; fails when the channel
+// is down.
+bool OverlayControlChannel::getOvSessionID(int& sessionID) const {
+ if (!isChannelUP())
+ return false;
+ sessionID = mOVInfo.id;
+ return true;
+}
+
+// Returns the rotator session id; fails when the channel is down.
+bool OverlayControlChannel::getRotSessionID(int& sessionID) const {
+ if (!isChannelUP())
+ return false;
+ sessionID = mRotInfo.session_id;
+ return true;
+}
+
+// Returns the cached buffer size (mSize); fails when the channel is down.
+bool OverlayControlChannel::getSize(int& size) const {
+ if (!isChannelUP())
+ return false;
+ size = mSize;
+ return true;
+}
+
+// Initializes all members to "channel down" defaults and obtains the
+// gralloc allocator used for rotator scratch memory.
+OverlayDataChannel::OverlayDataChannel() : mNoRot(false), mFD(-1), mRotFD(-1),
+ mPmemFD(-1), mPmemAddr(0), mUpdateDataChannel(false)
+{
+ //XXX: getInstance(false) implies that it should only
+ // use the kernel allocator. Change it to something
+ // more descriptive later.
+ mAlloc = gralloc::IAllocController::getInstance(false);
+}
+
+// Releases fds and rotator memory via closeDataChannel().
+OverlayDataChannel::~OverlayDataChannel() {
+ closeDataChannel();
+}
+
+// Convenience overload: pulls the overlay/rotator session ids and buffer
+// size from an established control channel, then defers to the id-based
+// startDataChannel(). Returns false if the control channel cannot supply
+// the ids/size.
+bool OverlayDataChannel::startDataChannel(
+        const OverlayControlChannel& objOvCtrlChannel,
+        int fbnum, bool norot, bool secure, bool uichannel,
+        int num_buffers) {
+    mNoRot = norot;
+    mSecure = secure;
+    memset(&mOvData, 0, sizeof(mOvData));
+    memset(&mOvDataRot, 0, sizeof(mOvDataRot));
+    memset(&mRotData, 0, sizeof(mRotData));
+
+    int ovid, rotid, size;
+    if (!objOvCtrlChannel.getOvSessionID(ovid) ||
+        !objOvCtrlChannel.getRotSessionID(rotid) ||
+        !objOvCtrlChannel.getSize(size))
+        return false;
+
+    return startDataChannel(ovid, rotid, size, fbnum,
+                            norot, uichannel, num_buffers);
+}
+
+// Opens the framebuffer device for the data channel and, when rotation is
+// enabled, the rotator device plus its scratch memory. On failure all fds
+// opened here are closed and false is returned.
+bool OverlayDataChannel::openDevices(int fbnum, bool uichannel, int num_buffers) {
+ if (fbnum < 0)
+ return false;
+ char dev_name[64];
+ snprintf(dev_name, 64, FB_DEVICE_TEMPLATE, fbnum);
+
+ mFD = open(dev_name, O_RDWR, 0);
+ if (mFD < 0) {
+ reportError("Cant open framebuffer ");
+ return false;
+ }
+ if (!mNoRot) {
+ mRotFD = open("/dev/msm_rotator", O_RDWR, 0);
+ if (mRotFD < 0) {
+ reportError("Cant open rotator device");
+ close(mFD);
+ mFD = -1;
+ return false;
+ }
+
+ return mapRotatorMemory(num_buffers, uichannel, NEW_REQUEST);
+ }
+ return true;
+}
+
+// Allocates (num_buffers * mPmemOffset) bytes of uncached memory for the
+// rotator output and records per-buffer offsets for round-robin use in
+// queue(). Heap selection depends on secure/UI usage. On failure both the
+// fb and rotator fds are closed and false is returned.
+bool OverlayDataChannel::mapRotatorMemory(int num_buffers, bool uiChannel, int requestType)
+{
+ mPmemAddr = MAP_FAILED;
+
+ alloc_data data;
+ data.base = 0;
+ data.fd = -1;
+ data.offset = 0;
+ data.size = mPmemOffset * num_buffers;
+ data.align = getpagesize();
+ data.uncached = true;
+
+ int allocFlags = GRALLOC_USAGE_PRIVATE_MM_HEAP |
+ GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP|
+ GRALLOC_USAGE_PRIVATE_DO_NOT_MAP;
+
+ if(mSecure) {
+ allocFlags |= GRALLOC_USAGE_PROTECTED;
+ } else {
+ allocFlags |= GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+ GRALLOC_USAGE_PRIVATE_IOMMU_HEAP;
+ // SMI heap only for fresh, non-UI sessions.
+ if((requestType == NEW_REQUEST) && !uiChannel)
+ allocFlags |= GRALLOC_USAGE_PRIVATE_SMI_HEAP;
+ }
+
+ int err = mAlloc->allocate(data, allocFlags, 0);
+ if(err) {
+ reportError("Cant allocate rotatory memory");
+ close(mFD);
+ mFD = -1;
+ close(mRotFD);
+ mRotFD = -1;
+ return false;
+ }
+ mPmemFD = data.fd;
+ mPmemAddr = data.base;
+ mBufferType = data.allocType;
+
+ // Set this flag if source memory is fb
+ if(uiChannel)
+ mRotData.src.flags |= MDP_MEMORY_ID_TYPE_FB;
+
+ mOvDataRot.data.memory_id = mPmemFD;
+ mRotData.dst.memory_id = mPmemFD;
+ mRotData.dst.offset = 0;
+ mNumBuffers = num_buffers;
+ mCurrentItem = 0;
+ // Precompute each buffer's offset into the shared allocation.
+ for (int i = 0; i < num_buffers; i++)
+ mRotOffset[i] = i * mPmemOffset;
+
+ return true;
+}
+
+// Records a pending rotator-buffer resize; the actual remap is performed
+// lazily by the next queueBuffer() call.
+bool OverlayDataChannel::updateDataChannel(int size) {
+ mUpdateDataChannel = true;
+ mNewPmemOffset = size;
+ return true;
+}
+
+// Initializes data-channel state from explicit overlay/rotator session ids
+// and per-buffer size, then opens the devices (and rotator memory).
+bool OverlayDataChannel::startDataChannel(int ovid, int rotid, int size,
+ int fbnum, bool norot,
+ bool uichannel, int num_buffers) {
+ memset(&mOvData, 0, sizeof(mOvData));
+ memset(&mOvDataRot, 0, sizeof(mOvDataRot));
+ memset(&mRotData, 0, sizeof(mRotData));
+ mNoRot = norot;
+ mOvData.data.memory_id = -1;
+ mOvData.id = ovid;
+ mOvDataRot = mOvData;
+ mPmemOffset = size;
+ mRotData.session_id = rotid;
+ mNumBuffers = 0;
+ mCurrentItem = 0;
+
+ return openDevices(fbnum, uichannel, num_buffers);
+}
+
+// Frees rotator scratch memory, closes all fds and clears cached state.
+// Safe (and a no-op) when the channel is already down. Always returns true.
+bool OverlayDataChannel::closeDataChannel() {
+ if (!isChannelUP())
+ return true;
+
+ if (!mNoRot && mRotFD > 0) {
+ // Release the rotator output allocation before closing the fd.
+ sp<IMemAlloc> memalloc = mAlloc->getAllocator(mBufferType);
+ memalloc->free_buffer(mPmemAddr, mPmemOffset * mNumBuffers, 0, mPmemFD);
+ close(mPmemFD);
+ mPmemFD = -1;
+ close(mRotFD);
+ mRotFD = -1;
+ }
+ close(mFD);
+ mFD = -1;
+ memset(&mOvData, 0, sizeof(mOvData));
+ memset(&mOvDataRot, 0, sizeof(mOvDataRot));
+ memset(&mRotData, 0, sizeof(mRotData));
+
+ mNumBuffers = 0;
+ mCurrentItem = 0;
+
+ return true;
+}
+
+// Sets the source buffer fd that subsequent queueBuffer() calls will play.
+bool OverlayDataChannel::setFd(int fd) {
+ mOvData.data.memory_id = fd;
+ return true;
+}
+
+// Queues one buffer for display. If a rotator-buffer resize was requested
+// via updateDataChannel(), the rotator memory is remapped first and the
+// old allocation is freed only after the frame has been queued, so the
+// in-flight frame is never pulled out from under the hardware.
+bool OverlayDataChannel::queueBuffer(uint32_t offset) {
+ if ((!isChannelUP()) || mOvData.data.memory_id < 0) {
+ reportError("QueueBuffer failed, either channel is not set or no file descriptor to read from");
+ return false;
+ }
+
+ int oldPmemFD = -1;
+ void* oldPmemAddr = MAP_FAILED;
+ // NOTE(review): -1 wraps to UINT32_MAX here; it is only a sentinel and
+ // is never used unless oldPmemFD was also set.
+ uint32_t oldPmemOffset = -1;
+ bool result;
+ if (!mNoRot) {
+ if (mUpdateDataChannel) {
+ oldPmemFD = mPmemFD;
+ oldPmemAddr = mPmemAddr;
+ oldPmemOffset = mPmemOffset;
+ mPmemOffset = mNewPmemOffset;
+ mNewPmemOffset = -1;
+ // Map the new PMEM memory
+ result = mapRotatorMemory(mNumBuffers, 0, UPDATE_REQUEST);
+ if (!result) {
+ LOGE("queueBuffer: mapRotatorMemory failed");
+ return false;
+ }
+ mUpdateDataChannel = false;
+ }
+ }
+
+ result = queue(offset);
+
+ // Unmap the old PMEM memory after the queueBuffer has returned
+ if (oldPmemFD != -1 && oldPmemAddr != MAP_FAILED) {
+ sp<IMemAlloc> memalloc = mAlloc->getAllocator(mBufferType);
+ memalloc->free_buffer(oldPmemAddr, oldPmemOffset * mNumBuffers, 0, oldPmemFD);
+ oldPmemFD = -1;
+ }
+ return result;
+}
+
+// Plays one buffer: when rotation is enabled the frame is first rotated
+// into the next pmem slot and the rotator output is played; if rotation
+// fails the unrotated source is played instead. Returns false only when
+// MSMFB_OVERLAY_PLAY fails.
+// Fix: removed the dead ping-pong assignment of mRotData.dst.offset — it
+// was unconditionally overwritten by the mRotOffset[] slot lookup on the
+// very next line.
+bool OverlayDataChannel::queue(uint32_t offset) {
+    mOvData.data.offset = offset;
+    msmfb_overlay_data *odPtr = &mOvData;
+
+    if (!mNoRot) {
+        mRotData.src.memory_id = mOvData.data.memory_id;
+        mRotData.src.offset = offset;
+        // Round-robin through the pre-mapped rotator output slots.
+        mRotData.dst.offset = mRotOffset[mCurrentItem];
+        mCurrentItem = (mCurrentItem + 1) % mNumBuffers;
+
+        int result = ioctl(mRotFD,
+                           MSM_ROTATOR_IOCTL_ROTATE, &mRotData);
+
+        // On success play the rotated copy; on failure fall back to the
+        // unrotated source buffer.
+        if (!result) {
+            mOvDataRot.data.offset = (uint32_t) mRotData.dst.offset;
+            odPtr = &mOvDataRot;
+        }
+    }
+
+    if (ioctl(mFD, MSMFB_OVERLAY_PLAY, odPtr)) {
+        reportError("overlay play failed.");
+        return false;
+    }
+
+    return true;
+}
+
+// Blocks until the driver reports the queued frame has been displayed
+// (MSMFB_OVERLAY_PLAY_WAIT). Fails when the channel is down or the ioctl
+// errors.
+bool OverlayDataChannel::waitForHdmiVsync() {
+ if (!isChannelUP()) {
+ LOGE("%s: channel not up", __FUNCTION__);
+ return false;
+ }
+ if (ioctl(mFD, MSMFB_OVERLAY_PLAY_WAIT, &mOvData)) {
+ LOGE("%s: MSMFB_OVERLAY_PLAY_WAIT failed", __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
+// Derives the per-channel crop rectangle from a packed 3D input frame:
+// side-by-side sources split left/right (with eyes swapped for R_L),
+// top-bottom sources split top/bottom. Interleaved input needs no crop;
+// unknown formats are reported. Always returns true.
+bool OverlayDataChannel::getCropS3D(overlay_rect *inRect, int channel, int format,
+                                    overlay_rect *rect) {
+    const int halfW = inRect->w / 2;
+    const int halfH = inRect->h / 2;
+
+    switch (format & INPUT_MASK_3D) {
+    case HAL_3D_IN_SIDE_BY_SIDE_L_R:
+        // Channel 0 takes the left half, channel 1 the right half.
+        rect->x = (channel == 0) ? 0 : halfW;
+        rect->y = 0;
+        rect->w = halfW;
+        rect->h = inRect->h;
+        break;
+    case HAL_3D_IN_SIDE_BY_SIDE_R_L:
+        // Eyes are swapped: channel 1 takes the left half.
+        rect->x = (channel == 1) ? 0 : halfW;
+        rect->y = 0;
+        rect->w = halfW;
+        rect->h = inRect->h;
+        break;
+    case HAL_3D_IN_TOP_BOTTOM:
+        // Channel 0 takes the top half, channel 1 the bottom half.
+        rect->x = 0;
+        rect->y = (channel == 0) ? 0 : halfH;
+        rect->w = inRect->w;
+        rect->h = halfH;
+        break;
+    case HAL_3D_IN_INTERLEAVE:
+        break;
+    default:
+        reportError("Unsupported 3D format...");
+        break;
+    }
+    return true;
+}
+
+// Sets the source crop rect, first converting x/y/w/h from display
+// orientation back into the unrotated source space according to the
+// programmed rotation (ov.user_data[0]). Fails if the crop would exceed
+// the 8x upscale limit (LGE change, #if 1 branch) so the caller can fall
+// back to GPU composition. The driver is only touched when the crop
+// actually changes.
+bool OverlayDataChannel::setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
+ if (!isChannelUP()) {
+ reportError("Channel not set");
+ return false;
+ }
+
+ mdp_overlay ov;
+ ov.id = mOvData.id;
+ if (ioctl(mFD, MSMFB_OVERLAY_GET, &ov)) {
+ reportError("setCrop, overlay GET failed");
+ return false;
+ }
+
+ // 90-degree family: rotate the crop rect into source space.
+ if ((ov.user_data[0] == MDP_ROT_90) ||
+ (ov.user_data[0] == (MDP_ROT_90 | MDP_FLIP_UD)) ||
+ (ov.user_data[0] == (MDP_ROT_90 | MDP_FLIP_LR))){
+ if (ov.src.width < (y + h))
+ return false;
+
+ uint32_t tmp = x;
+ x = ov.src.width - (y + h);
+ y = tmp;
+
+ tmp = w;
+ w = h;
+ h = tmp;
+ }
+ else if (ov.user_data[0] == MDP_ROT_270) {
+ if (ov.src.height < (x + w))
+ return false;
+
+ uint32_t tmp = y;
+ y = ov.src.height - (x + w);
+ x = tmp;
+
+ tmp = w;
+ w = h;
+ h = tmp;
+ }
+ else if(ov.user_data[0] == MDP_ROT_180) {
+ if ((ov.src.height < (y + h)) || (ov.src.width < ( x + w)))
+ return false;
+
+ x = ov.src.width - (x + w);
+ y = ov.src.height - (y + h);
+ }
+
+
+ normalize_crop(x, w);
+ normalize_crop(y, h);
+
+ // Unchanged crop: nothing to program.
+ if ((ov.src_rect.x == x) &&
+ (ov.src_rect.y == y) &&
+ (ov.src_rect.w == w) &&
+ (ov.src_rect.h == h))
+ return true;
+
+ ov.src_rect.x = x;
+ ov.src_rect.y = y;
+ ov.src_rect.w = w;
+ ov.src_rect.h = h;
+
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+ // can not scale-up 8 times over original source
+ // return false to compose with GPU
+#if 1
+ if(ov.dst_rect.w > (ov.src_rect.w * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ LOGE("[TJ] setCrop : too big width, back to GPU comp %d => %d", ov.src_rect.w, ov.dst_rect.w);
+ return false;
+ }
+ if(ov.dst_rect.h > (ov.src_rect.h * HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ LOGE("[TJ] setCrop : too big height, back to GPU comp %d => %d", ov.src_rect.h, ov.dst_rect.h);
+ return false;
+ }
+#else
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ /* Scaling of upto a max of 8 times supported */
+ if(ov.dst_rect.w >(ov.src_rect.w * HW_OVERLAY_MAGNIFICATION_LIMIT)){
+ ov.dst_rect.w = HW_OVERLAY_MAGNIFICATION_LIMIT * ov.src_rect.w;
+ }
+ if(ov.dst_rect.h >(ov.src_rect.h * HW_OVERLAY_MAGNIFICATION_LIMIT)) {
+ ov.dst_rect.h = HW_OVERLAY_MAGNIFICATION_LIMIT * ov.src_rect.h;
+ }
+// LGE_CHANGE_S, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents {
+#endif
+
+ LOGE("setCrop");
+ dump(ov);
+// LGE_CHANGE_E, [G1_Player][mukyung.jung@lge.com], 20120206, Apply SR 00718706 to fix noise of QCIF contents }
+
+ if (ioctl(mFD, MSMFB_OVERLAY_SET, &ov)) {
+ reportError("setCrop, overlay set error");
+ dump(ov);
+ return false;
+ }
+
+ return true;
+}
+
+/* setVisualParam can be called to set the configuration value of a post
+ * processing feature (HUE,SATURATION,BRIGHTNESS,CONTRAST,SMOOTHING/SHARPENING)
+ * for the first 4, the setting will stay set until the parameter is changed
+ * by another call to setVisualParam with that same paramType */
+/* setVisualParam can be called to set the configuration value of a post
+ * processing feature (HUE,SATURATION,BRIGHTNESS,CONTRAST,SMOOTHING/SHARPENING)
+ * for the first 4, the setting will stay set until the parameter is changed
+ * by another call to setVisualParam with that same paramType.
+ * Single-pipe states apply to VG0 only; dual-pipe (TV / 3D panel) states
+ * apply the same value to both VG pipes. */
+void Overlay::setVisualParam(int8_t paramType, float paramValue) {
+ switch (mState) {
+ case OV_UI_MIRROR_TV:
+ case OV_2D_VIDEO_ON_PANEL:
+ case OV_3D_VIDEO_2D_PANEL:
+ // set the parameter value for the given parameter type.
+ if(!objOvCtrlChannel[VG0_PIPE].setVisualParam(paramType, paramValue)) {
+ LOGE("Failed to set param %d for value %f", paramType, paramValue);
+ }
+ break;
+ case OV_2D_VIDEO_ON_TV:
+ case OV_3D_VIDEO_3D_PANEL:
+ case OV_3D_VIDEO_2D_TV:
+ case OV_3D_VIDEO_3D_TV:
+ for (int i=0; i<NUM_CHANNELS; i++) {
+ //setting the value for the given parameter on each pipe (i.e. for
+ //both video pipes)
+ if(!objOvCtrlChannel[i].setVisualParam(paramType, paramValue)) {
+ LOGE("Failed to set param %d for value %f", paramType, paramValue);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/* Finalizes the parameter value in the hsic_cfg structure*/
+/* Finalizes the parameter value in the hsic_cfg structure. Returns 0 on
+ * success; -1 for an unknown paramType or when USES_POST_PROCESSING is not
+ * compiled in (the whole body is then skipped and the params are unused). */
+int OverlayControlChannel::commitVisualParam(int8_t paramType, float paramValue) {
+#ifdef USES_POST_PROCESSING
+ switch(paramType) {
+ case SET_HUE:
+ //API expects param within range -180 - 180
+ CAP_RANGE(paramValue, HUE_RANGE, -HUE_RANGE);
+ hsic_cfg.hue = (int32_t) paramValue;
+ break;
+ case SET_BRIGHTNESS:
+ //API expects param within range -255 - 255
+ CAP_RANGE(paramValue, BRIGHTNESS_RANGE, -BRIGHTNESS_RANGE);
+ hsic_cfg.intensity = (int32_t) paramValue;
+ break;
+ case SET_SATURATION:
+ //API expects param within range -1 - 1
+ CAP_RANGE(paramValue, CON_SAT_RANGE, -CON_SAT_RANGE);
+ hsic_cfg.sat = paramValue;
+ break;
+ case SET_CONTRAST:
+ //API expects param within range -1 - 1
+ CAP_RANGE(paramValue, CON_SAT_RANGE, -CON_SAT_RANGE);
+ hsic_cfg.contrast = paramValue;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+#endif
+ return -1;
+}
+
+/* Converts paramValue to the expected range for each paramType, */
+/* Converts paramValue to the expected range for each paramType and applies
+ * it: HSIC values go through commitVisualParam/pp_conv, sharpness is
+ * programmed directly on the overlay.
+ * Fixes: (1) 'value' was read uninitialized in the RESET_ALL path — it was
+ * only computed for SET_SHARPNESS; it now defaults to 0. (2) the
+ * mdp_overlay handed to MSMFB_OVERLAY_GET had an uninitialized 'id'
+ * field; it is now seeded from mOVInfo.id. */
+bool OverlayControlChannel::setVisualParam(int8_t paramType, float paramValue)
+{
+    if (!isChannelUP()) {
+        LOGE("%s: Channel not set", __FUNCTION__);
+        return false;
+    }
+
+    bool setFlag = false;
+
+    // Sharpness values range from -128 to 127;
+    // the float input is converted accordingly.
+    int8_t value = 0;   // default strength when paramType != SET_SHARPNESS
+    if (paramType == SET_SHARPNESS) {
+        // binding paramValue to the limits of its range.
+        CAP_RANGE(paramValue, SHARPNESS_RANGE, -SHARPNESS_RANGE);
+        value = paramValue * NUM_SHARPNESS_VALS - (NUM_SHARPNESS_VALS / 2);
+    }
+
+    uint32_t block = MDP_BLOCK_MAX;
+
+    // tranlate mOVInfo.id into block type for pp_conv
+    switch (mOVInfo.id) {
+    case 3:
+        // 3 is the pipe_ndx given when OVERLAY_PIPE_VG1 is used
+        block = MDP_BLOCK_VG_1;
+        break;
+    case 4:
+        // 4 is the pipe_ndx given when OVERLAY_PIPE_VG2 is used
+        block = MDP_BLOCK_VG_2;
+        break;
+    default:
+        LOGE("%s: Invalid HSIC overlay id",__FUNCTION__);
+    }
+
+    // save the paramValue to hsic_cfg
+    commitVisualParam(paramType, paramValue);
+#ifdef USES_POST_PROCESSING
+    // calling our user space library to configure the post processing color
+    // conversion (does Hue, Saturation, Brightness, and Contrast adjustment)
+    display_pp_conv_set_cfg(block, &hsic_cfg);
+#endif
+
+    switch (paramType) {
+    case SET_NONE:
+        return true;
+    case SET_SHARPNESS: {
+        mdp_overlay overlay;
+        overlay.id = mOVInfo.id;   // GET requires the session id filled in
+        if (ioctl(mFD, MSMFB_OVERLAY_GET, &overlay)) {
+            reportError("setVisualParam, overlay GET failed");
+            return false;
+        }
+        // Only reprogram if the strength actually changed.
+        if (overlay.dpp.sharp_strength != value) {
+            mOVInfo.flags |= MDP_SHARPENING;
+            mOVInfo.dpp.sharp_strength = value;
+            setFlag = true;
+        }
+        break;
+    }
+    case RESET_ALL:
+        // set all visual params to a default value
+        mOVInfo.flags |= MDP_SHARPENING;
+        mOVInfo.dpp.sharp_strength = value;
+        setFlag = true;
+        break;
+    default:
+        return false;
+    }
+    if (setFlag) {
+        if (ioctl(mFD, MSMFB_OVERLAY_SET, &mOVInfo)) {
+            reportError("setVisualParam, overlay set failed");
+            dump(mOVInfo);
+            return false;
+        }
+    }
+    return true;
+}
diff --git a/liboverlay/overlayLib.h b/liboverlay/overlayLib.h
new file mode 100755
index 0000000..65b134c
--- /dev/null
+++ b/liboverlay/overlayLib.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_OVERLAY_LIB
+#define INCLUDE_OVERLAY_LIB
+
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <cutils/atomic.h>
+
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <linux/msm_rotator.h>
+#include <linux/android_pmem.h>
+
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <utils/threads.h>
+#include <utils/RefBase.h>
+#include <alloc_controller.h>
+#include <memalloc.h>
+
+#ifdef USES_POST_PROCESSING
+#include "lib-postproc.h"
+#endif
+
+#define HW_OVERLAY_MAGNIFICATION_LIMIT 8
+#define HW_OVERLAY_MINIFICATION_LIMIT HW_OVERLAY_MAGNIFICATION_LIMIT
+
+#define EVEN_OUT(x) if (x & 0x0001) {x--;}
+#define NO_PIPE -1
+#define VG0_PIPE 0
+#define VG1_PIPE 1
+#define NUM_CHANNELS 2
+#define NUM_FB_DEVICES 3
+#define FRAMEBUFFER_0 0
+#define FRAMEBUFFER_1 1
+#define FRAMEBUFFER_2 2
+#define NUM_SHARPNESS_VALS 256
+#define SHARPNESS_RANGE 1.0f
+#define HUE_RANGE 180
+#define BRIGHTNESS_RANGE 255
+#define CON_SAT_RANGE 1.0f
+#define CAP_RANGE(value,max,min) do { if (value - min < -0.0001)\
+ {value = min;}\
+ else if(value - max > 0.0001)\
+ {value = max;}\
+ } while(0);
+
+enum {
+ HDMI_OFF,
+ HDMI_ON
+};
+
+enum {
+ OVERLAY_CHANNEL_DOWN,
+ OVERLAY_CHANNEL_UP
+};
+
+enum {
+ NEW_REQUEST,
+ UPDATE_REQUEST
+};
+
+enum {
+ WAIT_FOR_VSYNC = 1<<0,
+ DISABLE_FRAMEBUFFER_FETCH = 1<<1,
+ INTERLACED_CONTENT = 1<<2,
+ OVERLAY_PIPE_SHARE = 1<<3,
+ SECURE_OVERLAY_SESSION = 1<<4,
+};
+
+/* ------------------------------- 3D defines ---------------------------------------*/
+// The compound format passed to the overlay is
+// ABCCC where A is the input 3D format,
+// B is the output 3D format
+// CCC is the color format e.g YCbCr420SP YCrCb420SP etc.
+#define FORMAT_3D(x) (x & 0xFF000)
+#define COLOR_FORMAT(x) (x & 0xFFF)
+// in the final 3D format, the MSB 2Bytes are the input format and the
+// LSB 2bytes are the output format. Shift the output byte 12 bits.
+#define SHIFT_OUTPUT_3D 12
+#define FORMAT_3D_OUTPUT(x) ((x & 0xF000) >> SHIFT_OUTPUT_3D)
+#define FORMAT_3D_INPUT(x) (x & 0xF0000)
+#define INPUT_MASK_3D 0xFFFF0000
+#define OUTPUT_MASK_3D 0x0000FFFF
+#define SHIFT_3D 16
+// The output format is the 2MSB bytes. Shift the format by 12 to reflect this
+#define HAL_3D_OUT_SIDE_BY_SIDE_MASK (HAL_3D_OUT_SIDE_BY_SIDE >> SHIFT_OUTPUT_3D)
+#define HAL_3D_OUT_TOP_BOTTOM_MASK (HAL_3D_OUT_TOP_BOTTOM >> SHIFT_OUTPUT_3D)
+#define HAL_3D_OUT_INTERLEAVE_MASK (HAL_3D_OUT_INTERLEAVE >> SHIFT_OUTPUT_3D)
+#define HAL_3D_OUT_MONOSCOPIC_MASK (HAL_3D_OUT_MONOSCOPIC >> SHIFT_OUTPUT_3D)
+
+// 3D panel barrier orientation
+#define BARRIER_LANDSCAPE 1
+#define BARRIER_PORTRAIT 2
+
+#ifdef HDMI_AS_PRIMARY
+#define FORMAT_3D_FILE "/sys/class/graphics/fb0/format_3d"
+#define EDID_3D_INFO_FILE "/sys/class/graphics/fb0/3d_present"
+#else
+#define FORMAT_3D_FILE "/sys/class/graphics/fb1/format_3d"
+#define EDID_3D_INFO_FILE "/sys/class/graphics/fb1/3d_present"
+#endif
+#define BARRIER_FILE "/sys/devices/platform/mipi_novatek.0/enable_3d_barrier"
+/* -------------------------- end 3D defines ----------------------------------------*/
+
+// Struct to hold the buffer info: geometry and size
+struct overlay_buffer_info {
+ int width; // source buffer width in pixels
+ int height; // source buffer height in pixels
+ int format; // pixel format code (converted to an MDP format by callers)
+ int size; // total buffer size
+};
+
+using android::Mutex;
+namespace overlay {
+
+#define FB_DEVICE_TEMPLATE "/dev/graphics/fb%u"
+
+ //Utility Class to query the framebuffer info
+ // Singleton: reads fb0's resolution via FBIOGET_VSCREENINFO and the
+ // MDP_BORDERFILL_SUPPORTED capability via MSMFB_OVERLAY_GET, then
+ // closes the fd. On any failure the cached values stay 0/false.
+ class FrameBufferInfo {
+ int mFBWidth;
+ int mFBHeight;
+ bool mBorderFillSupported;
+ static FrameBufferInfo *sFBInfoInstance;
+
+ FrameBufferInfo():mFBWidth(0),mFBHeight(0), mBorderFillSupported(false) {
+ char const * const device_name =
+ "/dev/graphics/fb0";
+ int fd = open(device_name, O_RDWR, 0);
+ mdp_overlay ov;
+ memset(&ov, 0, sizeof(ov));
+ if (fd < 0) {
+ LOGE("FrameBufferInfo: Cant open framebuffer ");
+ return;
+ }
+ fb_var_screeninfo vinfo;
+ if (ioctl(fd, FBIOGET_VSCREENINFO, &vinfo) == -1) {
+ LOGE("FrameBufferInfo: FBIOGET_VSCREENINFO on fb0 failed");
+ close(fd);
+ fd = -1;
+ return;
+ }
+ // NOTE(review): id = 1 appears to query pipe 1 solely to read
+ // back the driver's capability flags -- confirm against driver.
+ ov.id = 1;
+ if(ioctl(fd, MSMFB_OVERLAY_GET, &ov)) {
+ LOGE("FrameBufferInfo: MSMFB_OVERLAY_GET on fb0 failed");
+ close(fd);
+ fd = -1;
+ return;
+ }
+ close(fd);
+ fd = -1;
+ mFBWidth = vinfo.xres;
+ mFBHeight = vinfo.yres;
+ mBorderFillSupported = (ov.flags & MDP_BORDERFILL_SUPPORTED) ?
+ true : false;
+ }
+ public:
+ // Lazily creates the singleton; intentionally never freed
+ // (lives for the life of the process).
+ static FrameBufferInfo* getInstance(){
+ if (!sFBInfoInstance){
+ sFBInfoInstance = new FrameBufferInfo;
+ }
+ return sFBInfoInstance;
+ }
+ int getWidth() const { return mFBWidth; }
+ int getHeight() const { return mFBHeight; }
+ // True mirroring requires MDP border-fill support.
+ bool canSupportTrueMirroring() const {
+ return mBorderFillSupported; }
+ };
+
+enum {
+ OV_UI_MIRROR_TV = 0,
+ OV_2D_VIDEO_ON_PANEL,
+ OV_2D_VIDEO_ON_TV,
+ OV_3D_VIDEO_2D_PANEL,
+ OV_3D_VIDEO_2D_TV,
+ OV_3D_VIDEO_3D_PANEL,
+ OV_3D_VIDEO_3D_TV
+};
+bool isHDMIConnected();
+bool is3DTV();
+bool isPanel3D();
+bool usePanel3D();
+bool send3DInfoPacket(unsigned int format3D);
+bool enableBarrier(unsigned int orientation);
+unsigned int getOverlayConfig (unsigned int format3D, bool poll = true,
+ bool isHDMI = false);
+int getColorFormat(int format);
+bool isInterlacedContent(int format);
+int get_mdp_format(int format);
+int get_size(int format, int w, int h);
+int get_rot_output_format(int format);
+int get_mdp_orientation(int value);
+void normalize_crop(uint32_t& xy, uint32_t& wh);
+//Initializes the overlay - cleans up any existing overlay pipes
+int initOverlay();
+
+/* Print values being sent to driver in case of ioctl failures
+ These logs are enabled only if DEBUG_OVERLAY is true */
+void dump(msm_rotator_img_info& mRotInfo);
+void dump(mdp_overlay& mOvInfo);
+const char* getFormatString(int format);
+
+ //singleton class to decide the z order of new overlay surfaces
+ // Tracks which overlay pipes (per framebuffer) are in use so new
+ // surfaces get a free z-order slot.
+ class ZOrderManager {
+ bool mFB0Pipes[NUM_CHANNELS];
+ bool mFB1Pipes[NUM_CHANNELS+1]; //FB1 can have 3 pipes
+ int mPipesInuse; // Holds the number of pipes in use
+ int mMaxPipes; // Max number of pipes
+ static ZOrderManager *sInstance;
+ Mutex *mObjMutex;
+ ZOrderManager(){
+ mPipesInuse = 0;
+ // for true mirroring support there can be 3 pipes on secondary
+ mMaxPipes = FrameBufferInfo::getInstance()->canSupportTrueMirroring()?
+ NUM_CHANNELS+1 : NUM_CHANNELS;
+ for (int i = 0; i < NUM_CHANNELS; i++)
+ mFB0Pipes[i] = false;
+ for (int j = 0; j < mMaxPipes; j++)
+ mFB1Pipes[j] = false;
+ mObjMutex = new Mutex();
+ }
+ ~ZOrderManager() {
+ // Fix: do NOT 'delete sInstance' here. Destroying the singleton
+ // from its own destructor re-enters this destructor (unbounded
+ // recursion / double delete). Only release what this instance
+ // owns: the mutex.
+ delete mObjMutex;
+ mObjMutex = NULL;
+ }
+ public:
+ // Lazily creates the singleton; never destroyed during process life.
+ static ZOrderManager* getInstance(){
+ if (!sInstance){
+ sInstance = new ZOrderManager;
+ }
+ return sInstance;
+ }
+ int getZ(int fbnum); // reserve and return a free z-order on fbnum
+ void decZ(int fbnum, int zorder); // release a previously taken z-order
+ };
+const int max_num_buffers = 3;
+typedef struct mdp_rect overlay_rect;
+
+// Control side of an overlay: owns the framebuffer/rotator fds, configures
+// the mdp_overlay and rotator sessions, position, transform and visual
+// (post-processing) parameters. Data queuing lives in OverlayDataChannel.
+class OverlayControlChannel {
+
+// Visual-parameter selectors used by setVisualParam()/commitVisualParam().
+enum {
+ SET_NONE = 0,
+ SET_SHARPNESS,
+#ifdef USES_POST_PROCESSING
+ SET_HUE,
+ SET_BRIGHTNESS,
+ SET_SATURATION,
+ SET_CONTRAST,
+#endif
+ RESET_ALL,
+};
+ bool mNoRot; // true when no rotator session is needed
+ int mFBNum; // framebuffer index this channel drives
+ int mFBWidth;
+ int mFBHeight;
+ int mFBbpp;
+ int mFBystride;
+ int mFormat;
+ int mFD; // framebuffer device fd
+ int mRotFD; // rotator device fd
+ int mSize; // source buffer size
+ int mOrientation;
+ unsigned int mFormat3D;
+ bool mUIChannel;
+#ifdef USES_POST_PROCESSING
+ struct display_pp_conv_cfg hsic_cfg;
+#endif
+ mdp_overlay mOVInfo; // overlay config handed to MSMFB_OVERLAY_SET
+ msm_rotator_img_info mRotInfo; // rotator session config
+ msmfb_overlay_3d m3DOVInfo;
+ bool mIsChannelUpdated;
+ bool openDevices(int fbnum = -1);
+ bool setOverlayInformation(const overlay_buffer_info& info,
+ int zorder = 0, int flags = 0,
+ int requestType = NEW_REQUEST);
+ bool startOVRotatorSessions(const overlay_buffer_info& info, int requestType);
+ void swapOVRotWidthHeight();
+ int commitVisualParam(int8_t paramType, float paramValue);
+ void setInformationFromFlags(int flags, mdp_overlay& ov);
+
+public:
+ OverlayControlChannel();
+ ~OverlayControlChannel();
+ bool startControlChannel(const overlay_buffer_info& info,
+ int fbnum, bool norot = false,
+ bool uichannel = false,
+ unsigned int format3D = 0, int zorder = 0,
+ int flags = 0);
+ bool closeControlChannel();
+ bool setPosition(int x, int y, uint32_t w, uint32_t h);
+ bool setTransform(int value, bool fetch = true);
+ void setSize (int size) { mSize = size; }
+ bool getPosition(int& x, int& y, uint32_t& w, uint32_t& h);
+ bool getOvSessionID(int& sessionID) const;
+ bool getRotSessionID(int& sessionID) const;
+ bool getSize(int& size) const;
+ bool isChannelUP() const { return (mFD > 0); }
+ int getFBWidth() const { return mFBWidth; }
+ int getFBHeight() const { return mFBHeight; }
+ int getFormat3D() const { return mFormat3D; }
+ bool getOrientation(int& orientation) const;
+ bool updateOverlayFlags(int flags);
+ bool getAspectRatioPosition(int w, int h, overlay_rect *rect);
+ // Calculates the aspect ratio for video on HDMI based on primary
+ // aspect ratio used in case of true mirroring
+ bool getAspectRatioPosition(int w, int h, int orientation,
+ overlay_rect *inRect, overlay_rect *outRect);
+ bool getPositionS3D(int channel, int format, overlay_rect *rect);
+ bool updateOverlaySource(const overlay_buffer_info& info, int orientation, int flags);
+ bool getFormat() const { return mFormat; }
+ bool setVisualParam(int8_t paramType, float paramValue);
+ bool useVirtualFB ();
+ bool doFlagsNeedUpdate(int flags);
+};
+
+// Data side of an overlay: queues buffers (optionally through the rotator)
+// to a session set up by an OverlayControlChannel.
+class OverlayDataChannel {
+
+ bool mNoRot; // true when buffers bypass the rotator
+ bool mSecure; // secure (protected content) session
+ int mFD; // framebuffer device fd
+ int mRotFD; // rotator device fd
+ int mPmemFD; // fd of the rotator output memory
+ void* mPmemAddr; // mapped base of the rotator output memory
+ uint32_t mPmemOffset;
+ uint32_t mNewPmemOffset;
+ msmfb_overlay_data mOvData;
+ msmfb_overlay_data mOvDataRot;
+ msm_rotator_data_info mRotData;
+ int mRotOffset[max_num_buffers]; // per-buffer offsets into rotator memory
+ int mCurrentItem; // index of the next rotator buffer to use
+ int mNumBuffers;
+ bool mUpdateDataChannel;
+ android::sp<gralloc::IAllocController> mAlloc;
+ int mBufferType; // allocator type of the rotator memory
+
+ bool openDevices(int fbnum = -1, bool uichannel = false, int num_buffers = 2);
+ bool mapRotatorMemory(int num_buffers, bool uiChannel, int requestType);
+ bool queue(uint32_t offset);
+
+public:
+ OverlayDataChannel();
+ ~OverlayDataChannel();
+ bool startDataChannel(const OverlayControlChannel& objOvCtrlChannel,
+ int fbnum, bool norot = false, bool secure = false,
+ bool uichannel = false, int num_buffers = 2);
+ bool startDataChannel(int ovid, int rotid, int size,
+ int fbnum, bool norot = false, bool uichannel = false,
+ int num_buffers = 2);
+ bool closeDataChannel();
+ bool setFd(int fd);
+ bool queueBuffer(uint32_t offset);
+ bool waitForHdmiVsync();
+ bool setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h);
+ bool getCropS3D(overlay_rect *inRect, int channel, int format, overlay_rect *rect);
+ bool isChannelUP() const { return (mFD > 0); }
+ bool updateDataChannel(int size);
+};
+
+/*
+ * Overlay class for single thread application
+ * A multiple thread/process application need to use Overlay HAL
+ */
+// Facade pairing a control and a data channel per display (index 0 =
+// primary, 1 = external); single-threaded use only (see note above).
+class Overlay {
+
+ bool mChannelUP;
+ //stores the connected external display Ex: HDMI(1) WFD(2)
+ int mExternalDisplay;
+ unsigned int mS3DFormat;
+ //Actual cropped source width and height of overlay
+ int mCroppedSrcWidth;
+ int mCroppedSrcHeight;
+ overlay_buffer_info mOVBufferInfo;
+ int mState; // NEW_REQUEST/UPDATE_REQUEST state of the channel
+ // Stores the current device orientation
+ int mDevOrientation;
+ OverlayControlChannel objOvCtrlChannel[2];
+ OverlayDataChannel objOvDataChannel[2];
+
+public:
+ Overlay();
+ ~Overlay();
+
+ static bool sHDMIAsPrimary;
+ bool startChannel(const overlay_buffer_info& info, int fbnum, bool norot = false,
+ bool uichannel = false, unsigned int format3D = 0,
+ int channel = 0, int flags = 0,
+ int num_buffers = 2);
+ bool closeChannel();
+ bool setDeviceOrientation(int orientation);
+ bool setPosition(int x, int y, uint32_t w, uint32_t h);
+ bool setTransform(int value);
+ bool setOrientation(int value, int channel = 0);
+ bool setFd(int fd, int channel = 0);
+ bool queueBuffer(uint32_t offset, int channel = 0);
+ bool getPosition(int& x, int& y, uint32_t& w, uint32_t& h, int channel = 0);
+ bool isChannelUP() const { return mChannelUP; }
+ int getFBWidth(int channel = 0) const;
+ int getFBHeight(int channel = 0) const;
+ bool getOrientation(int& orientation, int channel = 0) const;
+ bool queueBuffer(buffer_handle_t buffer);
+ bool setSource(const overlay_buffer_info& info, int orientation, int hdmiConnected,
+ int flags, int numBuffers = 2);
+ bool getAspectRatioPosition(int w, int h, overlay_rect *rect, int channel = 0);
+ bool setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h);
+ bool updateOverlayFlags(int flags);
+ void setVisualParam(int8_t paramType, float paramValue);
+ bool waitForHdmiVsync(int channel);
+ int getChannelStatus() const { return (mChannelUP ? OVERLAY_CHANNEL_UP: OVERLAY_CHANNEL_DOWN); }
+ void closeExternalChannel();
+private:
+ bool setChannelPosition(int x, int y, uint32_t w, uint32_t h, int channel = 0);
+ bool setChannelCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h, int channel);
+ bool queueBuffer(int fd, uint32_t offset, int channel);
+ bool updateOverlaySource(const overlay_buffer_info& info, int orientation, int flags);
+ int getS3DFormat(int format);
+};
+
+// State shared between overlay users (e.g. across control/data owners).
+struct overlay_shared_data {
+ volatile bool isControlSetup; // true once the control channel is configured
+ unsigned int state; // current overlay state
+ int rotid[2]; // rotator session ids, one per channel
+ int ovid[2]; // overlay session ids, one per channel
+};
+};
+#endif
diff --git a/liboverlay/overlayLibUI.cpp b/liboverlay/overlayLibUI.cpp
new file mode 100755
index 0000000..d3fd80a
--- /dev/null
+++ b/liboverlay/overlayLibUI.cpp
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "overlayLibUI.h"
+#include "gralloc_priv.h"
+#define LOG_TAG "OverlayUI"
+
+using android::sp;
+using gralloc::IMemAlloc;
+using gralloc::alloc_data;
+
+namespace {
+/* helper functions */
+// Exchange width/height on the overlay source, its crop rectangle and the
+// rotator destination; used when the content is rotated by 90/270 degrees.
+void swapOVRotWidthHeight(msm_rotator_img_info& rotInfo,
+ mdp_overlay& ovInfo) {
+ int tmp;
+
+ tmp = ovInfo.src.width;
+ ovInfo.src.width = ovInfo.src.height;
+ ovInfo.src.height = tmp;
+
+ tmp = ovInfo.src_rect.w;
+ ovInfo.src_rect.w = ovInfo.src_rect.h;
+ ovInfo.src_rect.h = tmp;
+
+ tmp = rotInfo.dst.width;
+ rotInfo.dst.width = rotInfo.dst.height;
+ rotInfo.dst.height = tmp;
+}
+
+// True only for the RGB formats the MDP UI path supports directly.
+bool isRGBType(int format) {
+ switch(format) {
+ case MDP_RGBA_8888:
+ case MDP_BGRA_8888:
+ case MDP_RGBX_8888:
+ case MDP_RGB_565:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Bytes per pixel for the supported RGB formats; -1 for anything else.
+int getRGBBpp(int format) {
+ if (format == MDP_RGBA_8888 ||
+ format == MDP_BGRA_8888 ||
+ format == MDP_RGBX_8888)
+ return 4;
+ if (format == MDP_RGB_565)
+ return 2;
+ return -1;
+}
+
+// Reads debug.gr.swapinterval once and caches it; a value of 0 means the
+// user disabled vsync, so overlay plays should not wait for it.
+bool turnOFFVSync() {
+ static int sSwapInterval = -1;
+ if (-1 == sSwapInterval) {
+ char pval[PROPERTY_VALUE_MAX];
+ property_get("debug.gr.swapinterval", pval, "1");
+ sSwapInterval = atoi(pval);
+ }
+ return 0 == sSwapInterval;
+}
+
+};
+
+namespace overlay {
+
+// Opens /dev/graphics/fb<fbnum> and caches its width/height/bpp.
+// Idempotent: returns NO_ERROR immediately if already open.
+// Returns NO_INIT when the open or the FBIOGET_VSCREENINFO ioctl fails.
+status_t Display::openDisplay(int fbnum) {
+ if (mFD != NO_INIT)
+ return NO_ERROR;
+
+ status_t ret = NO_INIT;
+ char dev_name[64];
+ snprintf(dev_name, 64, FB_DEVICE_TEMPLATE, fbnum);
+
+ mFD = open(dev_name, O_RDWR, 0);
+ if (mFD < 0) {
+ LOGE("Failed to open FB %d", fbnum);
+ return ret;
+ }
+
+ fb_var_screeninfo vinfo;
+ if (ioctl(mFD, FBIOGET_VSCREENINFO, &vinfo)) {
+ LOGE("FBIOGET_VSCREENINFO on failed on FB %d", fbnum);
+ close(mFD);
+ mFD = NO_INIT;
+ return ret;
+ }
+
+ mFBWidth = vinfo.xres;
+ mFBHeight = vinfo.yres;
+ mFBBpp = vinfo.bits_per_pixel;
+ ret = NO_ERROR;
+
+ return ret;
+}
+
+// Closes the framebuffer fd if one is open and resets it to NO_INIT.
+// Fix: previously called close() unconditionally, passing NO_INIT (-1)
+// when the display was never opened or already closed (EBADF).
+void Display::closeDisplay() {
+ if (mFD != NO_INIT)
+ close(mFD);
+ mFD = NO_INIT;
+}
+
+// Starts with no rotator fd, session or output memory; grabs the gralloc
+// allocator up front for later rotator-buffer allocations.
+Rotator::Rotator() : mFD(NO_INIT), mSessionID(NO_INIT), mPmemFD(NO_INIT)
+{
+ // NOTE(review): the 'false' flag's meaning is defined by
+ // IAllocController::getInstance -- confirm in alloc_controller.h.
+ mAlloc = gralloc::IAllocController::getInstance(false);
+}
+
+// Tears down any active rotator session and frees its buffers.
+Rotator::~Rotator()
+{
+ closeRotSession();
+}
+
+// Opens /dev/msm_rotator, starts a rotation session for rotInfo and
+// allocates 'numBuffers' output buffers of 'size' bytes each.
+// No-op (NO_ERROR) if a session is already active; NO_INIT on failure.
+status_t Rotator::startRotSession(msm_rotator_img_info& rotInfo,
+ int size, int numBuffers) {
+ status_t ret = NO_ERROR;
+ if (mSessionID == NO_INIT && mFD == NO_INIT) {
+ mNumBuffers = numBuffers;
+ // Fix: store the per-buffer size. It was previously never
+ // assigned, so the allocation size and the buffer offsets below
+ // used an uninitialized mSize.
+ mSize = size;
+ mFD = open("/dev/msm_rotator", O_RDWR, 0);
+ if (mFD < 0) {
+ LOGE("Couldnt open rotator device");
+ return NO_INIT;
+ }
+
+ if (ioctl(mFD, MSM_ROTATOR_IOCTL_START, &rotInfo)) {
+ close(mFD);
+ mFD = NO_INIT;
+ return NO_INIT;
+ }
+
+ mSessionID = rotInfo.session_id;
+ alloc_data data;
+ data.base = 0;
+ data.fd = -1;
+ data.offset = 0;
+ data.size = mSize * mNumBuffers;
+ data.align = getpagesize();
+ data.uncached = true;
+
+ int allocFlags = GRALLOC_USAGE_PRIVATE_MM_HEAP |
+ GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP |
+ GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
+ GRALLOC_USAGE_PRIVATE_IOMMU_HEAP |
+ GRALLOC_USAGE_PRIVATE_SMI_HEAP |
+ GRALLOC_USAGE_PRIVATE_DO_NOT_MAP;
+
+ int err = mAlloc->allocate(data, allocFlags, 0);
+
+ if(err) {
+ LOGE("%s: Can't allocate rotator memory", __func__);
+ closeRotSession();
+ return NO_INIT;
+ }
+ mPmemFD = data.fd;
+ mPmemAddr = data.base;
+ mBufferType = data.allocType;
+
+ // Round-robin through the output buffers starting at offset 0.
+ mCurrentItem = 0;
+ for (int i = 0; i < mNumBuffers; i++)
+ mRotOffset[i] = i * mSize;
+ ret = NO_ERROR;
+ }
+ return ret;
+}
+
+// Finishes the rotator session, frees the output memory and resets all
+// session state. Safe to call when no session is active.
+status_t Rotator::closeRotSession() {
+ if (mSessionID != NO_INIT && mFD != NO_INIT) {
+ ioctl(mFD, MSM_ROTATOR_IOCTL_FINISH, &mSessionID);
+ close(mFD);
+ if (NO_INIT != mPmemFD) {
+ // Return the rotator output buffers to the allocator they
+ // came from.
+ sp<IMemAlloc> memalloc = mAlloc->getAllocator(mBufferType);
+ memalloc->free_buffer(mPmemAddr, mSize * mNumBuffers, 0, mPmemFD);
+ close(mPmemFD);
+ }
+ }
+
+ mFD = NO_INIT;
+ mSessionID = NO_INIT;
+ mPmemFD = NO_INIT;
+ mPmemAddr = MAP_FAILED;
+
+ return NO_ERROR;
+}
+
+// Rotates one buffer into the next free output slot. On return rotData.dst
+// holds the memory id/offset of the rotated result. Returns NO_INIT when
+// no session is active, BAD_VALUE when the rotate ioctl fails.
+status_t Rotator::rotateBuffer(msm_rotator_data_info& rotData) {
+ status_t ret = NO_INIT;
+ if (mSessionID != NO_INIT) {
+ rotData.dst.memory_id = mPmemFD;
+ rotData.dst.offset = mRotOffset[mCurrentItem];
+ rotData.session_id = mSessionID;
+ // Advance to the next output buffer (round-robin).
+ mCurrentItem = (mCurrentItem + 1) % mNumBuffers;
+ if (ioctl(mFD, MSM_ROTATOR_IOCTL_ROTATE, &rotData)) {
+ LOGE("Rotator failed to rotate");
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+ }
+
+ return ret;
+}
+
+//===================== OverlayUI =================//
+
+// Starts with a closed channel and zeroed overlay/rotator configs; all
+// parameters come in later via setSource/setDisplayParams/setPosition.
+OverlayUI::OverlayUI() : mChannelState(CLOSED), mOrientation(NO_INIT),
+ mFBNum(NO_INIT), mZorder(NO_INIT), mWaitForVsync(false), mIsFg(false),
+ mSessionID(NO_INIT), mParamsChanged(false) {
+ memset(&mOvInfo, 0, sizeof(mOvInfo));
+ memset(&mRotInfo, 0, sizeof(mRotInfo));
+}
+
+// Closes the overlay and rotator sessions if still up.
+OverlayUI::~OverlayUI() {
+ closeChannel();
+}
+
+// Records the source geometry/format and orientation, marking the channel
+// params changed if anything differs from the cached values; rejects 3D
+// and non-RGB sources. (Fix: removed an unused 'status_t ret' local.)
+void OverlayUI::setSource(const overlay_buffer_info& info, int orientation) {
+ int format3D = FORMAT_3D(info.format);
+ int colorFormat = COLOR_FORMAT(info.format);
+ int format = get_mdp_format(colorFormat);
+
+ // The UI overlay path only handles 2D RGB content.
+ if (format3D || !isRGBType(format)) {
+ LOGE("%s: Unsupported format", __func__);
+ return;
+ }
+
+ // XOR of equal ints is 0, so this sets mParamsChanged only when some
+ // field actually differs from the cached source.
+ mParamsChanged |= (mSource.width ^ info.width) ||
+ (mSource.height ^ info.height) ||
+ (mSource.format ^ format) ||
+ (mSource.size ^ info.size) ||
+ (mOrientation ^ orientation);
+
+ mSource.width = info.width;
+ mSource.height = info.height;
+ mSource.format = format;
+ mSource.size = info.size;
+ mOrientation = orientation;
+ setupOvRotInfo();
+}
+
+// Chooses the target framebuffer and the overlay flags (vsync wait,
+// foreground, VG-pipe sharing, z-order) and opens the display.
+// Fix: removed dead 'flags &= ~...' else-branches -- flags starts at 0,
+// so clearing never-set bits was a no-op.
+void OverlayUI::setDisplayParams(int fbNum, bool waitForVsync, bool isFg, int
+ zorder, bool isVGPipe) {
+ int flags = 0;
+
+ if(false == waitForVsync)
+ flags |= MDP_OV_PLAY_NOWAIT;
+
+ if(isVGPipe)
+ flags |= MDP_OV_PIPE_SHARE;
+
+ // A swapinterval of 0 force-disables vsync waits globally.
+ if (turnOFFVSync())
+ flags |= MDP_OV_PLAY_NOWAIT;
+
+ // Mark params changed when any display-side setting differs.
+ mParamsChanged |= (mFBNum ^ fbNum) ||
+ (mOvInfo.is_fg ^ isFg) ||
+ (mOvInfo.flags ^ flags) ||
+ (mOvInfo.z_order ^ zorder);
+
+ mFBNum = fbNum;
+ mOvInfo.is_fg = isFg;
+ mOvInfo.flags = flags;
+ mOvInfo.z_order = zorder;
+
+ mobjDisplay.openDisplay(mFBNum);
+}
+
+// Sets the destination (on-screen) rectangle; flags a param change when
+// any coordinate differs from the cached one.
+void OverlayUI::setPosition(int x, int y, int w, int h) {
+ mParamsChanged |= (mOvInfo.dst_rect.x != x) ||
+ (mOvInfo.dst_rect.y != y) ||
+ (mOvInfo.dst_rect.w != w) ||
+ (mOvInfo.dst_rect.h != h);
+
+ mOvInfo.dst_rect.x = x;
+ mOvInfo.dst_rect.y = y;
+ mOvInfo.dst_rect.w = w;
+ mOvInfo.dst_rect.h = h;
+}
+
+// Sets the source crop rectangle; flags a param change when any
+// coordinate differs from the cached one.
+void OverlayUI::setCrop(int x, int y, int w, int h) {
+ mParamsChanged |= (mOvInfo.src_rect.x != x) ||
+ (mOvInfo.src_rect.y != y) ||
+ (mOvInfo.src_rect.w != w) ||
+ (mOvInfo.src_rect.h != h);
+
+ mOvInfo.src_rect.x = x;
+ mOvInfo.src_rect.y = y;
+ mOvInfo.src_rect.w = w;
+ mOvInfo.src_rect.h = h;
+}
+
+// Derives the mdp_overlay and rotator configs from the cached source and
+// orientation: aligns dimensions, adjusts the crop origin for 90/270
+// rotations and programs the MDP rotation value.
+void OverlayUI::setupOvRotInfo() {
+ int w = mSource.width;
+ int h = mSource.height;
+ int format = mSource.format;
+ // Align source dimensions up to a multiple of 32.
+ int srcw = (w + 31) & ~31;
+ int srch = (h + 31) & ~31;
+ mOvInfo.src.width = srcw;
+ mOvInfo.src.height = srch;
+ mOvInfo.src.format = format;
+ mOvInfo.src_rect.w = w;
+ mOvInfo.src_rect.h = h;
+ mOvInfo.alpha = 0xff;
+ mOvInfo.transp_mask = 0xffffffff;
+ mRotInfo.src.format = format;
+ mRotInfo.dst.format = format;
+ mRotInfo.src.width = srcw;
+ mRotInfo.src.height = srch;
+ mRotInfo.src_rect.w = srcw;
+ mRotInfo.src_rect.h = srch;
+ mRotInfo.dst.width = srcw;
+ mRotInfo.dst.height = srch;
+
+ int rot = mOrientation;
+ switch(rot) {
+ case 0:
+ case HAL_TRANSFORM_FLIP_H:
+ case HAL_TRANSFORM_FLIP_V:
+ // NOTE(review): pure flips are collapsed to no rotation here --
+ // confirm flips are handled elsewhere or intentionally dropped.
+ rot = 0;
+ break;
+ case HAL_TRANSFORM_ROT_90:
+ case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_H):
+ case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_V): {
+ // Remap the crop origin into the rotated coordinate space and
+ // swap widths/heights for the 90-degree family.
+ int tmp = mOvInfo.src_rect.x;
+ mOvInfo.src_rect.x = mOvInfo.src.height -
+ (mOvInfo.src_rect.y + mOvInfo.src_rect.h);
+ mOvInfo.src_rect.y = tmp;
+ swapOVRotWidthHeight(mRotInfo, mOvInfo);
+ rot = HAL_TRANSFORM_ROT_90;
+ break;
+ }
+ case HAL_TRANSFORM_ROT_180:
+ break;
+ case HAL_TRANSFORM_ROT_270: {
+ // Same remap as the 90-degree case, mirrored for 270.
+ int tmp = mOvInfo.src_rect.y;
+ mOvInfo.src_rect.y = mOvInfo.src.width -
+ (mOvInfo.src_rect.x + mOvInfo.src_rect.w);
+ mOvInfo.src_rect.x = tmp;
+ swapOVRotWidthHeight(mRotInfo, mOvInfo);
+ break;
+ }
+ default:
+ break;
+ }
+ // Translate the HAL rotation into the MDP encoding; fall back to 0
+ // (no rotation) on an unrecognized value.
+ int mdp_rotation = overlay::get_mdp_orientation(rot);
+ if (mdp_rotation < 0)
+ mdp_rotation = 0;
+ mOvInfo.user_data[0] = mdp_rotation;
+ mRotInfo.rotations = mOvInfo.user_data[0];
+ if (mdp_rotation)
+ mRotInfo.enable = 1;
+}
+
+// Pushes the accumulated params to the driver: (re)starts the overlay
+// session and, when rotation is needed, the rotator session. Marks the
+// channel UP on success.
+status_t OverlayUI::commit() {
+ status_t ret = BAD_VALUE;
+ if(mChannelState != UP)
+ // First commit on this channel: ask the driver for a new pipe.
+ mOvInfo.id = MSMFB_NEW_REQUEST;
+ ret = startOVSession();
+ if (ret == NO_ERROR && mOrientation) {
+ ret = mobjRotator.startRotSession(mRotInfo, mSource.size);
+ }
+ if (ret == NO_ERROR) {
+ mChannelState = UP;
+ } else {
+ LOGE("start channel failed.");
+ }
+ return ret;
+}
+
+// Shuts down the overlay and rotator sessions and resets all cached
+// params. No-op (NO_ERROR) when the channel is not up.
+status_t OverlayUI::closeChannel() {
+ if( mChannelState != UP ) {
+ return NO_ERROR;
+ }
+ if(NO_ERROR != closeOVSession()) {
+ LOGE("%s: closeOVSession() failed.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if(NO_ERROR != mobjRotator.closeRotSession()) {
+ LOGE("%s: closeRotSession() failed.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ mChannelState = CLOSED;
+ mParamsChanged = false;
+ memset(&mOvInfo, 0, sizeof(mOvInfo));
+ memset(&mRotInfo, 0, sizeof(mRotInfo));
+ return NO_ERROR;
+}
+
+// Opens the display (idempotent) and, if any params changed since the
+// last commit, issues MSMFB_OVERLAY_SET and records the session id the
+// driver assigned. Returns NO_ERROR when nothing changed.
+status_t OverlayUI::startOVSession() {
+ status_t ret = NO_INIT;
+ ret = mobjDisplay.openDisplay(mFBNum);
+
+ if (ret != NO_ERROR)
+ return ret;
+
+ if(mParamsChanged) {
+ mParamsChanged = false;
+ // Work on a copy so mOvInfo is only updated if the SET succeeds.
+ mdp_overlay ovInfo = mOvInfo;
+ if (ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_SET, &ovInfo)) {
+ LOGE("Overlay set failed..");
+ ret = BAD_VALUE;
+ } else {
+ mSessionID = ovInfo.id;
+ mOvInfo = ovInfo;
+ ret = NO_ERROR;
+ }
+ }
+ return ret;
+}
+
+// Unsets the overlay session; on success also closes the display and
+// clears the session id. On ioctl failure the session id is deliberately
+// kept so a retry is possible.
+status_t OverlayUI::closeOVSession() {
+ status_t ret = NO_ERROR;
+ int err = 0;
+ // Extra parentheses: the assignment inside the condition is
+ // intentional (silences -Wparentheses / reads unambiguously).
+ if((err = ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_UNSET, &mSessionID))) {
+ LOGE("%s: MSMFB_OVERLAY_UNSET failed. (%d)", __FUNCTION__, err);
+ ret = BAD_VALUE;
+ } else {
+ mobjDisplay.closeDisplay();
+ mSessionID = NO_INIT;
+ }
+ return ret;
+}
+
+// Displays one gralloc buffer: runs it through the rotator first when an
+// orientation is set, then plays it via MSMFB_OVERLAY_PLAY. Returns
+// NO_INIT when the channel is down, BAD_VALUE on rotate/play failure.
+status_t OverlayUI::queueBuffer(buffer_handle_t buffer) {
+ status_t ret = NO_INIT;
+
+ if (mChannelState != UP)
+ return ret;
+
+ msmfb_overlay_data ovData;
+ memset(&ovData, 0, sizeof(ovData));
+
+ private_handle_t const* hnd = reinterpret_cast
+ <private_handle_t const*>(buffer);
+ ovData.data.memory_id = hnd->fd;
+ ovData.data.offset = hnd->offset;
+ if (mOrientation) {
+ msm_rotator_data_info rotData;
+ memset(&rotData, 0, sizeof(rotData));
+ rotData.src.memory_id = hnd->fd;
+ rotData.src.offset = hnd->offset;
+ if (mobjRotator.rotateBuffer(rotData) != NO_ERROR) {
+ LOGE("Rotator failed.. ");
+ return BAD_VALUE;
+ }
+ // Play the rotated copy instead of the original buffer.
+ ovData.data.memory_id = rotData.dst.memory_id;
+ ovData.data.offset = rotData.dst.offset;
+ }
+ ovData.id = mSessionID;
+ if (ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_PLAY, &ovData)) {
+ LOGE("Queuebuffer failed ");
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+};
diff --git a/liboverlay/overlayLibUI.h b/liboverlay/overlayLibUI.h
new file mode 100644
index 0000000..d16a968
--- /dev/null
+++ b/liboverlay/overlayLibUI.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_OVERLAY_LIB_UI
+#define INCLUDE_OVERLAY_LIB_UI
+
+#include <errno.h>
+
+#include "overlayLib.h"
+
+namespace overlay {
+
+enum channel_state_t { UP, CLOSED, PENDING_CLOSE };
+enum status_t {
+ NO_ERROR,
+ INVALID_OPERATION = -ENOSYS,
+ BAD_VALUE = -EINVAL,
+ NO_INIT = -ENODEV,
+ ALREADY_EXISTS = -EEXIST
+ };
+
+/*
+ * Display class provides following services
+ * Open FB
+ * FB information (Width, Height and Bpp)
+ */
+
+// Wraps a framebuffer device: opens it, caches width/height/bpp.
+// Non-copyable (fd ownership). Fixes: members are now zero-initialized
+// so the getters are defined before openDisplay(), and the destructor no
+// longer calls close(NO_INIT) when the display was never opened.
+class Display {
+ int mFD;
+ int mFBWidth;
+ int mFBHeight;
+ int mFBBpp;
+ Display(const Display& objDisplay);
+ Display& operator=(const Display& objDisplay);
+
+public:
+ explicit Display() : mFD(NO_INIT), mFBWidth(0), mFBHeight(0),
+ mFBBpp(0) { };
+ ~Display() { if (mFD != NO_INIT) close(mFD); };
+ int getFD() const { return mFD; };
+ int getFBWidth() const { return mFBWidth; };
+ int getFBHeight() const { return mFBHeight; };
+ int getFBBpp() const { return mFBBpp; };
+ status_t openDisplay(int fbnum);
+ void closeDisplay();
+};
+
+/*
+ * Rotator class, manages rotation of the buffers
+ * It communicates with Rotator driver, provides following services
+ * Start rotator session
+ * Rotate buffer
+ */
+
+class Rotator {
+ int mFD; // /dev/msm_rotator fd
+ int mSessionID; // id returned by MSM_ROTATOR_IOCTL_START
+ int mPmemFD; // fd of the rotator output memory
+ void* mPmemAddr; // mapped base of the rotator output memory
+ int mRotOffset[max_num_buffers]; // per-buffer offsets into that memory
+ int mCurrentItem; // next output buffer (round-robin)
+ int mNumBuffers;
+ int mSize; // per-buffer size in bytes
+ android::sp<gralloc::IAllocController> mAlloc;
+ int mBufferType; // allocator type of the output memory
+ Rotator(const Rotator& objROtator); // non-copyable: owns fds/memory
+ Rotator& operator=(const Rotator& objRotator);
+
+public:
+ explicit Rotator();
+ ~Rotator();
+ status_t startRotSession(msm_rotator_img_info& rotInfo, int size,
+ int numBuffers = max_num_buffers);
+ status_t closeRotSession();
+ status_t rotateBuffer(msm_rotator_data_info& rotData);
+};
+
+/*
+ * Overlay class for Comp. Bypass
+ * We merge control and data channel classes.
+ */
+
+class OverlayUI {
+ channel_state_t mChannelState; // UP once commit() succeeds
+ overlay_buffer_info mSource; // cached source geometry/format
+ int mZorder;
+ int mOrientation; // HAL transform of the source
+ int mFBNum; // target framebuffer index
+ bool mWaitForVsync;
+ bool mIsFg;
+ int mSessionID; // overlay session id from the driver
+ Display mobjDisplay;
+ Rotator mobjRotator;
+
+ mdp_overlay mOvInfo; // pending overlay config
+ msm_rotator_img_info mRotInfo; // pending rotator config
+
+ // True when any setter changed something since the last commit;
+ // commit() only issues MSMFB_OVERLAY_SET when this is set.
+ bool mParamsChanged;
+
+ OverlayUI(const OverlayUI& objOverlay); // non-copyable: owns sessions
+ OverlayUI& operator=(const OverlayUI& objOverlay);
+
+ status_t startOVSession();
+ status_t closeOVSession();
+ void setupOvRotInfo();
+
+public:
+
+ enum fbnum_t { FB0, FB1 };
+
+ OverlayUI();
+ ~OverlayUI();
+ void setSource(const overlay_buffer_info& info, int orientation);
+ void setPosition(int x, int y, int w, int h);
+ void setCrop(int x, int y, int w, int h);
+ void setDisplayParams(int fbNum, bool waitForVsync, bool isFg, int zorder,
+ bool isVGPipe);
+ status_t commit();
+ status_t closeChannel();
+ channel_state_t isChannelUP() const { return mChannelState; };
+ int getFBWidth() const { return mobjDisplay.getFBWidth(); };
+ int getFBHeight() const { return mobjDisplay.getFBHeight(); };
+ status_t queueBuffer(buffer_handle_t buffer);
+};
+
+};
+#endif
diff --git a/libqcomui/Android.mk b/libqcomui/Android.mk
new file mode 100644
index 0000000..5897161
--- /dev/null
+++ b/libqcomui/Android.mk
@@ -0,0 +1,31 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Sources for the QCOM UI helper library.
+LOCAL_SRC_FILES := \
+ qcom_ui.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libcutils \
+ libui \
+ libEGL \
+ libskia
+
+# gralloc private headers, surfaceflinger's Transform.h and skia headers.
+LOCAL_C_INCLUDES := $(TOP)/hardware/qcom/display/libgralloc \
+ $(TOP)/frameworks/native/services/surfaceflinger \
+ $(TOP)/external/skia/include/core \
+ $(TOP)/external/skia/include/images
+
+LOCAL_CFLAGS := -DLOG_TAG=\"libQcomUI\"
+
+# Non-QCOM targets build a stubbed variant; QCOM targets pull in the
+# gralloc memory allocator.
+ifneq ($(call is-vendor-board-platform,QCOM),true)
+ LOCAL_CFLAGS += -DNON_QCOM_TARGET
+else
+ LOCAL_SHARED_LIBRARIES += libmemalloc
+endif
+
+LOCAL_CFLAGS += -DDEBUG_CALC_FPS
+
+LOCAL_MODULE := libQcomUI
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
diff --git a/libqcomui/qcom_ui.cpp b/libqcomui/qcom_ui.cpp
new file mode 100644
index 0000000..80ce25d
--- /dev/null
+++ b/libqcomui/qcom_ui.cpp
@@ -0,0 +1,937 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cutils/log.h>
+#include <cutils/memory.h>
+#include <qcom_ui.h>
+#include <gralloc_priv.h>
+#include <alloc_controller.h>
+#include <memalloc.h>
+#include <errno.h>
+#include <EGL/eglext.h>
+#include <sys/stat.h>
+#include <SkBitmap.h>
+#include <SkImageEncoder.h>
+#include <Transform.h>
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+using gralloc::IMemAlloc;
+using gralloc::IonController;
+using gralloc::alloc_data;
+using android::sp;
+
+static int sCompositionType = -1;
+
+namespace {
+
+ static android::sp<gralloc::IAllocController> sAlloc = 0;
+
+ int reallocate_memory(native_handle_t *buffer_handle, int mReqSize, int usage)
+ {
+ int ret = 0;
+
+#ifndef NON_QCOM_TARGET
+ if (sAlloc == 0) {
+ sAlloc = gralloc::IAllocController::getInstance(true);
+ }
+ if (sAlloc == 0) {
+ ALOGE("sAlloc is still NULL");
+ return -EINVAL;
+ }
+
+ // Dealloc the old memory
+ private_handle_t *hnd = (private_handle_t *)buffer_handle;
+ sp<IMemAlloc> memalloc = sAlloc->getAllocator(hnd->flags);
+ ret = memalloc->free_buffer((void*)hnd->base, hnd->size, hnd->offset, hnd->fd);
+
+ if (ret) {
+ ALOGE("%s: free_buffer failed", __FUNCTION__);
+ return -1;
+ }
+
+ // Realloc new memory
+ alloc_data data;
+ data.base = 0;
+ data.fd = -1;
+ data.offset = 0;
+ data.size = mReqSize;
+ data.align = getpagesize();
+ data.uncached = true;
+ int allocFlags = usage;
+
+ switch (hnd->format) {
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
+ case (HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED^HAL_PIXEL_FORMAT_INTERLACE): {
+ data.align = 8192;
+ } break;
+ default: break;
+ }
+ ret = sAlloc->allocate(data, allocFlags, 0);
+ if (ret == 0) {
+ hnd->fd = data.fd;
+ hnd->base = (int)data.base;
+ hnd->offset = data.offset;
+ hnd->size = data.size;
+ } else {
+ ALOGE("%s: allocate failed", __FUNCTION__);
+ return -EINVAL;
+ }
+#endif
+ return ret;
+ }
+}; // ANONYMOUS NAMESPACE
+
+/*
+ * Gets the number of arguments required for this operation.
+ *
+ * @param: operation whose argument count is required.
+ *
+ * @return -EINVAL if the operation is invalid.
+ */
+int getNumberOfArgsForOperation(int operation) {
+ int num_args = -EINVAL;
+ switch(operation) {
+ case NATIVE_WINDOW_SET_BUFFERS_SIZE:
+ num_args = 1;
+ break;
+ case NATIVE_WINDOW_UPDATE_BUFFERS_GEOMETRY:
+ num_args = 3;
+ break;
+ default: ALOGE("%s: invalid operation(0x%x)", __FUNCTION__, operation);
+ break;
+ };
+ return num_args;
+}
+
+/*
+ * Checks if the format is supported by the GPU.
+ *
+ * @param: format to check
+ *
+ * @return true if the format is supported by the GPU.
+ */
+bool isGPUSupportedFormat(int format) {
+ if (format == HAL_PIXEL_FORMAT_YV12) {
+ // We check the YV12 formats, since some Qcom specific formats
+ // could have the bits set.
+ return true;
+ } else if (format & INTERLACE_MASK) {
+ // Interlaced content
+ return false;
+ } else if (format & S3D_FORMAT_MASK) {
+ // S3D Formats are not supported by the GPU
+ return false;
+ }
+ return true;
+}
+
+/* decide the texture target dynamically, based on the pixel format*/
+
+int decideTextureTarget(int pixel_format)
+{
+
+ // Default the return value to GL_TEXTURE_EXTERNAL_OES
+ int retVal = GL_TEXTURE_EXTERNAL_OES;
+
+ // Change texture target to TEXTURE_2D for RGB formats
+ switch (pixel_format) {
+
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ case HAL_PIXEL_FORMAT_RGB_888:
+ case HAL_PIXEL_FORMAT_RGB_565:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ case HAL_PIXEL_FORMAT_RGBA_5551:
+ case HAL_PIXEL_FORMAT_RGBA_4444:
+ retVal = GL_TEXTURE_2D;
+ break;
+ default:
+ retVal = GL_TEXTURE_EXTERNAL_OES;
+ break;
+ }
+ return retVal;
+}
+
+/*
+ * Function to check if the allocated buffer is of the correct size.
+ * Reallocate the buffer with the correct size, if the size doesn't
+ * match
+ *
+ * @param: handle of the allocated buffer
+ * @param: requested size for the buffer
+ * @param: usage flags
+ *
+ * return 0 on success
+ */
+int checkBuffer(native_handle_t *buffer_handle, int size, int usage)
+{
+ // If the client hasn't set a size, return
+ if (0 >= size) {
+ return 0;
+ }
+
+ // Validate the handle
+ if (private_handle_t::validate(buffer_handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ // Obtain the private_handle from the native handle
+ private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
+ if (hnd->size != size) {
+ return reallocate_memory(hnd, size, usage);
+ }
+ return 0;
+}
+
+/*
+ * Checks if memory needs to be reallocated for this buffer.
+ *
+ * @param: Geometry of the current buffer.
+ * @param: Required Geometry.
+ * @param: Geometry of the updated buffer.
+ *
+ * @return True if a memory reallocation is required.
+ */
+bool needNewBuffer(const qBufGeometry currentGeometry,
+ const qBufGeometry requiredGeometry,
+ const qBufGeometry updatedGeometry)
+{
+ // If the current buffer info matches the updated info,
+ // we do not require any memory allocation.
+ if (updatedGeometry.width && updatedGeometry.height &&
+ updatedGeometry.format) {
+ return false;
+ }
+ if (currentGeometry.width != requiredGeometry.width ||
+ currentGeometry.height != requiredGeometry.height ||
+ currentGeometry.format != requiredGeometry.format) {
+ // Current and required geometry do not match. Allocation
+ // required.
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Update the geometry of this buffer without reallocation.
+ *
+ * @param: buffer whose geometry needs to be updated.
+ * @param: Updated width
+ * @param: Updated height
+ * @param: Updated format
+ */
+int updateBufferGeometry(sp<GraphicBuffer> buffer, const qBufGeometry updatedGeometry)
+{
+ if (buffer == 0) {
+ ALOGE("%s: graphic buffer is NULL", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (!updatedGeometry.width || !updatedGeometry.height ||
+ !updatedGeometry.format) {
+ // No update required. Return.
+ return 0;
+ }
+ if (buffer->width == updatedGeometry.width &&
+ buffer->height == updatedGeometry.height &&
+ buffer->format == updatedGeometry.format) {
+ // The buffer has already been updated. Return.
+ return 0;
+ }
+
+ // Validate the handle
+ if (private_handle_t::validate(buffer->handle)) {
+ ALOGE("%s: handle is invalid", __FUNCTION__);
+ return -EINVAL;
+ }
+ buffer->width = updatedGeometry.width;
+ buffer->height = updatedGeometry.height;
+ buffer->format = updatedGeometry.format;
+ private_handle_t *hnd = (private_handle_t*)(buffer->handle);
+ if (hnd) {
+ hnd->width = updatedGeometry.width;
+ hnd->height = updatedGeometry.height;
+ hnd->format = updatedGeometry.format;
+ } else {
+ ALOGE("%s: hnd is NULL", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Update the S3D format of this buffer.
+*
+* @param: buffer whose S3D format needs to be updated.
+* @param: Updated buffer S3D format
+*/
+int updateBufferS3DFormat(sp<GraphicBuffer> buffer, const int s3dFormat)
+{
+ if (buffer == 0) {
+ ALOGE("%s: graphic buffer is NULL", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ buffer->format |= s3dFormat;
+ return 0;
+}
+/*
+ * Updates the flags for the layer
+ *
+ * @param: Attribute
+ * @param: Identifies if the attribute was enabled or disabled.
+ *
+ * @return: -EINVAL if the attribute is invalid
+ */
+int updateLayerQcomFlags(eLayerAttrib attribute, bool enable, int& currentFlags)
+{
+ int ret = 0;
+ switch (attribute) {
+ case LAYER_UPDATE_STATUS: {
+ if (enable)
+ currentFlags |= LAYER_UPDATING;
+ else
+ currentFlags &= ~LAYER_UPDATING;
+ } break;
+ case LAYER_ASYNCHRONOUS_STATUS: {
+ if (enable)
+ currentFlags |= LAYER_ASYNCHRONOUS;
+ else
+ currentFlags &= ~LAYER_ASYNCHRONOUS;
+ } break;
+ default: ALOGE("%s: invalid attribute(0x%x)", __FUNCTION__, attribute);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Gets the per frame HWC flags for this layer.
+ *
+ * @param: current hwcl flags
+ * @param: current layerFlags
+ *
+ * @return: the per frame flags.
+ */
+int getPerFrameFlags(int hwclFlags, int layerFlags) {
+ int flags = hwclFlags;
+ if (layerFlags & LAYER_UPDATING)
+ flags &= ~HWC_LAYER_NOT_UPDATING;
+ else
+ flags |= HWC_LAYER_NOT_UPDATING;
+
+ if (layerFlags & LAYER_ASYNCHRONOUS)
+ flags |= HWC_LAYER_ASYNCHRONOUS;
+ else
+ flags &= ~HWC_LAYER_ASYNCHRONOUS;
+
+ return flags;
+}
+
+
+/*
+ * Checks if FB is updated by this composition type
+ *
+ * @param: composition type
+ * @return: true if FB is updated, false if not
+ */
+
+bool isUpdatingFB(HWCCompositionType compositionType)
+{
+ switch(compositionType)
+ {
+ case HWC_USE_COPYBIT:
+ return true;
+ default:
+ ALOGE("%s: invalid composition type(%d)", __FUNCTION__, compositionType);
+ return false;
+ };
+}
+
+/*
+ * Get the current composition Type
+ *
+ * @return the composition type
+ */
+int getCompositionType() {
+ char property[PROPERTY_VALUE_MAX];
+ int compositionType = 0;
+ if (property_get("debug.sf.hw", property, NULL) > 0) {
+ if(atoi(property) == 0) {
+ compositionType = COMPOSITION_TYPE_CPU;
+ } else { //debug.sf.hw = 1
+ property_get("debug.composition.type", property, NULL);
+ if (property == NULL) {
+ compositionType = COMPOSITION_TYPE_GPU;
+ } else if ((strncmp(property, "mdp", 3)) == 0) {
+ compositionType = COMPOSITION_TYPE_MDP;
+ } else if ((strncmp(property, "c2d", 3)) == 0) {
+ compositionType = COMPOSITION_TYPE_C2D;
+ } else if ((strncmp(property, "dyn", 3)) == 0) {
+ compositionType = COMPOSITION_TYPE_DYN;
+ } else {
+ compositionType = COMPOSITION_TYPE_GPU;
+ }
+ }
+ } else { //debug.sf.hw is not set. Use cpu composition
+ compositionType = COMPOSITION_TYPE_CPU;
+ }
+ return compositionType;
+}
+
+/*
+ * Clear Region implementation for C2D/MDP versions.
+ *
+ * @param: region to be cleared
+ * @param: EGL Display
+ * @param: EGL Surface
+ *
+ * @return 0 on success
+ */
+int qcomuiClearRegion(Region region, EGLDisplay dpy, EGLSurface sur)
+{
+#if 0 /* FIXME DIE */
+ int ret = 0;
+
+ if (-1 == sCompositionType) {
+ sCompositionType = getCompositionType();
+ }
+
+ if ((COMPOSITION_TYPE_MDP != sCompositionType) &&
+ (COMPOSITION_TYPE_C2D != sCompositionType) &&
+ (COMPOSITION_TYPE_CPU != sCompositionType)) {
+ // For non CPU/C2D/MDP composition, return an error, so that SF can use
+ // the GPU to draw the wormhole.
+ return -1;
+ }
+
+ android_native_buffer_t *renderBuffer = (android_native_buffer_t *)
+ eglGetRenderBufferANDROID(dpy, sur);
+ if (!renderBuffer) {
+ ALOGE("%s: eglGetRenderBufferANDROID returned NULL buffer",
+ __FUNCTION__);
+ return -1;
+ }
+ private_handle_t *fbHandle = (private_handle_t *)renderBuffer->handle;
+ if(!fbHandle) {
+ ALOGE("%s: Framebuffer handle is NULL", __FUNCTION__);
+ return -1;
+ }
+
+ int bytesPerPixel = 4;
+ if (HAL_PIXEL_FORMAT_RGB_565 == fbHandle->format) {
+ bytesPerPixel = 2;
+ }
+
+ Region::const_iterator it = region.begin();
+ Region::const_iterator const end = region.end();
+ const int32_t stride = renderBuffer->stride*bytesPerPixel;
+ while (it != end) {
+ const Rect& r = *it++;
+ uint8_t* dst = (uint8_t*) fbHandle->base +
+ (r.left + r.top*renderBuffer->stride)*bytesPerPixel;
+ int w = r.width()*bytesPerPixel;
+ int h = r.height();
+ do {
+ if(4 == bytesPerPixel)
+ android_memset32((uint32_t*)dst, 0, w);
+ else
+ android_memset16((uint16_t*)dst, 0, w);
+ dst += stride;
+ } while(--h);
+ }
+#endif
+ return 0;
+}
+
+/*
+ * Handles the externalDisplay event
+ * HDMI has highest priority compared to WifiDisplay
+ * Based on the current and the new display event, decides the
+ * external display to be enabled
+ *
+ * @param: newEvent - new external event
+ * @param: currEvent - currently enabled external event
+ * @return: external display to be enabled
+ *
+ */
+external_display handleEventHDMI(external_display newState, external_display
+ currState)
+{
+ external_display retState = currState;
+ switch(newState) {
+ case EXT_DISPLAY_HDMI:
+ retState = EXT_DISPLAY_HDMI;
+ break;
+ case EXT_DISPLAY_WIFI:
+ if(currState != EXT_DISPLAY_HDMI) {
+ retState = EXT_DISPLAY_WIFI;
+ }
+ break;
+ case EXT_DISPLAY_OFF:
+ retState = EXT_DISPLAY_OFF;
+ break;
+ default:
+ ALOGE("handleEventHDMI: unknown Event");
+ break;
+ }
+ return retState;
+}
+
+// Using global variables for layer dumping since "property_set("debug.sf.dump",
+// property)" does not work.
+int sfdump_countlimit_raw = 0;
+int sfdump_counter_raw = 1;
+char sfdump_propstr_persist_raw[PROPERTY_VALUE_MAX] = "";
+char sfdumpdir_raw[256] = "";
+int sfdump_countlimit_png = 0;
+int sfdump_counter_png = 1;
+char sfdump_propstr_persist_png[PROPERTY_VALUE_MAX] = "";
+char sfdumpdir_png[256] = "";
+
+bool needToDumpLayers()
+{
+ bool bDumpLayer = false;
+ char sfdump_propstr[PROPERTY_VALUE_MAX];
+ time_t timenow;
+ tm sfdump_time;
+
+ time(&timenow);
+ localtime_r(&timenow, &sfdump_time);
+
+ if ((property_get("debug.sf.dump.png", sfdump_propstr, NULL) > 0) &&
+ (strncmp(sfdump_propstr, sfdump_propstr_persist_png,
+ PROPERTY_VALUE_MAX - 1))) {
+ // Strings exist & not equal implies it has changed, so trigger a dump
+ strncpy(sfdump_propstr_persist_png, sfdump_propstr,
+ PROPERTY_VALUE_MAX - 1);
+ sfdump_countlimit_png = atoi(sfdump_propstr);
+ sfdump_countlimit_png = (sfdump_countlimit_png < 0) ? 0:
+ (sfdump_countlimit_png >= LONG_MAX) ? (LONG_MAX - 1):
+ sfdump_countlimit_png;
+ if (sfdump_countlimit_png) {
+ sprintf(sfdumpdir_png,"/data/sfdump.png%04d%02d%02d.%02d%02d%02d",
+ sfdump_time.tm_year + 1900, sfdump_time.tm_mon + 1,
+ sfdump_time.tm_mday, sfdump_time.tm_hour,
+ sfdump_time.tm_min, sfdump_time.tm_sec);
+ if (0 == mkdir(sfdumpdir_png, 0777))
+ sfdump_counter_png = 0;
+ else
+ ALOGE("sfdump: Error: %s. Failed to create sfdump directory"
+ ": %s", strerror(errno), sfdumpdir_png);
+ }
+ }
+
+ if (sfdump_counter_png <= sfdump_countlimit_png)
+ sfdump_counter_png++;
+
+ if ((property_get("debug.sf.dump", sfdump_propstr, NULL) > 0) &&
+ (strncmp(sfdump_propstr, sfdump_propstr_persist_raw,
+ PROPERTY_VALUE_MAX - 1))) {
+ // Strings exist & not equal implies it has changed, so trigger a dump
+ strncpy(sfdump_propstr_persist_raw, sfdump_propstr,
+ PROPERTY_VALUE_MAX - 1);
+ sfdump_countlimit_raw = atoi(sfdump_propstr);
+ sfdump_countlimit_raw = (sfdump_countlimit_raw < 0) ? 0:
+ (sfdump_countlimit_raw >= LONG_MAX) ? (LONG_MAX - 1):
+ sfdump_countlimit_raw;
+ if (sfdump_countlimit_raw) {
+ sprintf(sfdumpdir_raw,"/data/sfdump.raw%04d%02d%02d.%02d%02d%02d",
+ sfdump_time.tm_year + 1900, sfdump_time.tm_mon + 1,
+ sfdump_time.tm_mday, sfdump_time.tm_hour,
+ sfdump_time.tm_min, sfdump_time.tm_sec);
+ if (0 == mkdir(sfdumpdir_raw, 0777))
+ sfdump_counter_raw = 0;
+ else
+ ALOGE("sfdump: Error: %s. Failed to create sfdump directory"
+ ": %s", strerror(errno), sfdumpdir_raw);
+ }
+ }
+
+ if (sfdump_counter_raw <= sfdump_countlimit_raw)
+ sfdump_counter_raw++;
+
+ bDumpLayer = (sfdump_countlimit_png || sfdump_countlimit_raw)? true : false;
+ return bDumpLayer;
+}
+
+inline void getHalPixelFormatStr(int format, char pixelformatstr[])
+{
+ if (!pixelformatstr)
+ return;
+
+ switch(format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ strcpy(pixelformatstr, "RGBA_8888");
+ break;
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ strcpy(pixelformatstr, "RGBX_8888");
+ break;
+ case HAL_PIXEL_FORMAT_RGB_888:
+ strcpy(pixelformatstr, "RGB_888");
+ break;
+ case HAL_PIXEL_FORMAT_RGB_565:
+ strcpy(pixelformatstr, "RGB_565");
+ break;
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ strcpy(pixelformatstr, "BGRA_8888");
+ break;
+ case HAL_PIXEL_FORMAT_RGBA_5551:
+ strcpy(pixelformatstr, "RGBA_5551");
+ break;
+ case HAL_PIXEL_FORMAT_RGBA_4444:
+ strcpy(pixelformatstr, "RGBA_4444");
+ break;
+ case HAL_PIXEL_FORMAT_YV12:
+ strcpy(pixelformatstr, "YV12");
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+ strcpy(pixelformatstr, "YCbCr_422_SP_NV16");
+ break;
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ strcpy(pixelformatstr, "YCrCb_420_SP_NV21");
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ strcpy(pixelformatstr, "YCbCr_422_I_YUY2");
+ break;
+ case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
+ strcpy(pixelformatstr, "NV12_ENCODEABLE");
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
+ strcpy(pixelformatstr, "YCbCr_420_SP_TILED_TILE_4x2");
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+ strcpy(pixelformatstr, "YCbCr_420_SP");
+ break;
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:
+ strcpy(pixelformatstr, "YCrCb_420_SP_ADRENO");
+ break;
+ case HAL_PIXEL_FORMAT_YCrCb_422_SP:
+ strcpy(pixelformatstr, "YCrCb_422_SP");
+ break;
+ case HAL_PIXEL_FORMAT_R_8:
+ strcpy(pixelformatstr, "R_8");
+ break;
+ case HAL_PIXEL_FORMAT_RG_88:
+ strcpy(pixelformatstr, "RG_88");
+ break;
+ case HAL_PIXEL_FORMAT_INTERLACE:
+ strcpy(pixelformatstr, "INTERLACE");
+ break;
+ default:
+ sprintf(pixelformatstr, "Unknown0x%X", format);
+ break;
+ }
+}
+
+void dumpLayer(int moduleCompositionType, int listFlags, size_t layerIndex,
+ hwc_layer_t hwLayers[])
+{
+ char dumplogstr_png[128] = "";
+ char dumplogstr_raw[128] = "";
+ if (sfdump_counter_png <= sfdump_countlimit_png) {
+ sprintf(dumplogstr_png, "[png-dump-frame: %03d of %03d] ",
+ sfdump_counter_png, sfdump_countlimit_png);
+ }
+ if (sfdump_counter_raw <= sfdump_countlimit_raw) {
+ sprintf(dumplogstr_raw, "[raw-dump-frame: %03d of %03d]",
+ sfdump_counter_raw, sfdump_countlimit_raw);
+ }
+ if (NULL == hwLayers) {
+ ALOGE("sfdump: Error.%s%sLayer[%d] No hwLayers to dump.",
+ dumplogstr_raw, dumplogstr_png, layerIndex);
+ return;
+ }
+ hwc_layer *layer = &hwLayers[layerIndex];
+ hwc_rect_t sourceCrop = layer->sourceCrop;
+ hwc_rect_t displayFrame = layer->displayFrame;
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ char pixelformatstr[32] = "None";
+
+ if (hnd)
+ getHalPixelFormatStr(hnd->format, pixelformatstr);
+#if 0
+ ALOGE("sfdump: %s%s[%s]-Composition, Layer[%d] SrcBuff[%dx%d] "
+ "SrcCrop[%dl, %dt, %dr, %db] "
+ "DispFrame[%dl, %dt, %dr, %db] Composition-type = %s, Format = %s, "
+ "Orientation = %s, Flags = %s%s%s%s%s%s%s%s%s%s",
+ dumplogstr_raw, dumplogstr_png,
+ (moduleCompositionType == COMPOSITION_TYPE_GPU)? "GPU":
+ (moduleCompositionType == COMPOSITION_TYPE_MDP)? "MDP":
+ (moduleCompositionType == COMPOSITION_TYPE_C2D)? "C2D":
+ (moduleCompositionType == COMPOSITION_TYPE_CPU)? "CPU":
+ (moduleCompositionType == COMPOSITION_TYPE_DYN)? "DYN": "???",
+ layerIndex,
+ (hnd)? hnd->width : -1, (hnd)? hnd->height : -1,
+ sourceCrop.left, sourceCrop.top,
+ sourceCrop.right, sourceCrop.bottom,
+ displayFrame.left, displayFrame.top,
+ displayFrame.right, displayFrame.bottom,
+ (layer->compositionType == HWC_FRAMEBUFFER)? "Framebuffer (OpenGL ES)":
+ (layer->compositionType == HWC_OVERLAY)? "Overlay":
+ (layer->compositionType == HWC_USE_COPYBIT)? "Copybit": "???",
+ pixelformatstr,
+ (layer->transform == Transform::ROT_0)? "ROT_0":
+ (layer->transform == Transform::FLIP_H)? "FLIP_H":
+ (layer->transform == Transform::FLIP_V)? "FLIP_V":
+ (layer->transform == Transform::ROT_90)? "ROT_90":
+ (layer->transform == Transform::ROT_180)? "ROT_180":
+ (layer->transform == Transform::ROT_270)? "ROT_270":
+ (layer->transform == Transform::ROT_INVALID)? "ROT_INVALID":"???",
+ (layer->flags == 0)? "[None]":"",
+ (layer->flags & HWC_SKIP_LAYER)? "[Skip layer]":"",
+ (layer->flags & HWC_LAYER_NOT_UPDATING)? "[Layer not updating]":"",
+ (layer->flags & HWC_USE_ORIGINAL_RESOLUTION)? "[Original Resolution]":"",
+ (layer->flags & HWC_DO_NOT_USE_OVERLAY)? "[Do not use Overlay]":"",
+ (layer->flags & HWC_COMP_BYPASS)? "[Bypass]":"",
+ (layer->flags & HWC_BYPASS_RESERVE_0)? "[Bypass Reserve 0]":"",
+ (layer->flags & HWC_BYPASS_RESERVE_1)? "[Bypass Reserve 1]":"",
+ (listFlags & HWC_GEOMETRY_CHANGED)? "[List: Geometry Changed]":"",
+ (listFlags & HWC_SKIP_COMPOSITION)? "[List: Skip Composition]":"");
+#endif
+ if (NULL == hnd) {
+ ALOGE("sfdump: %s%sLayer[%d] private-handle is invalid.",
+ dumplogstr_raw, dumplogstr_png, layerIndex);
+ return;
+ }
+
+ if ((sfdump_counter_png <= sfdump_countlimit_png) && hnd->base) {
+ bool bResult = false;
+ char sfdumpfile_name[256];
+ SkBitmap *tempSkBmp = new SkBitmap();
+ SkBitmap::Config tempSkBmpConfig = SkBitmap::kNo_Config;
+ sprintf(sfdumpfile_name, "%s/sfdump%03d_layer%d.png", sfdumpdir_png,
+ sfdump_counter_png, layerIndex);
+
+ switch (hnd->format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ tempSkBmpConfig = SkBitmap::kARGB_8888_Config;
+ break;
+ case HAL_PIXEL_FORMAT_RGB_565:
+ case HAL_PIXEL_FORMAT_RGBA_5551:
+ case HAL_PIXEL_FORMAT_RGBA_4444:
+ tempSkBmpConfig = SkBitmap::kRGB_565_Config;
+ break;
+ case HAL_PIXEL_FORMAT_RGB_888:
+ default:
+ tempSkBmpConfig = SkBitmap::kNo_Config;
+ break;
+ }
+ if (SkBitmap::kNo_Config != tempSkBmpConfig) {
+ tempSkBmp->setConfig(tempSkBmpConfig, hnd->width, hnd->height);
+ tempSkBmp->setPixels((void*)hnd->base);
+ bResult = SkImageEncoder::EncodeFile(sfdumpfile_name,
+ *tempSkBmp, SkImageEncoder::kPNG_Type, 100);
+ ALOGE("sfdump: %sDumped Layer[%d] to %s: %s", dumplogstr_png,
+ layerIndex, sfdumpfile_name, bResult ? "Success" : "Fail");
+ }
+ else {
+ ALOGE("sfdump: %sSkipping Layer[%d] dump: Unsupported layer "
+ "format %s for png encoder.", dumplogstr_png, layerIndex,
+ pixelformatstr);
+ }
+ delete tempSkBmp; // Calls SkBitmap::freePixels() internally.
+ }
+
+ if ((sfdump_counter_raw <= sfdump_countlimit_raw) && hnd->base) {
+ char sfdumpfile_name[256];
+ bool bResult = false;
+ sprintf(sfdumpfile_name, "%s/sfdump%03d_layer%d_%dx%d_%s.raw",
+ sfdumpdir_raw,
+ sfdump_counter_raw, layerIndex, hnd->width, hnd->height,
+ pixelformatstr);
+ FILE* fp = fopen(sfdumpfile_name, "w+");
+ if (fp != NULL) {
+ bResult = (bool) fwrite((void*)hnd->base, hnd->size, 1, fp);
+ fclose(fp);
+ }
+ ALOGE("sfdump: %s Dumped Layer[%d] to %s: %s", dumplogstr_raw,
+ layerIndex, sfdumpfile_name, bResult ? "Success" : "Fail");
+ }
+}
+
+#ifdef DEBUG_CALC_FPS
+ANDROID_SINGLETON_STATIC_INSTANCE(CalcFps) ;
+
+CalcFps::CalcFps() {
+ debug_fps_level = 0;
+ Init();
+}
+
+CalcFps::~CalcFps() {
+}
+
+void CalcFps::Init() {
+ char prop[PROPERTY_VALUE_MAX];
+ property_get("debug.gr.calcfps", prop, "0");
+ debug_fps_level = atoi(prop);
+ if (debug_fps_level > MAX_DEBUG_FPS_LEVEL) {
+ ALOGW("out of range value for debug.gr.calcfps, using 0");
+ debug_fps_level = 0;
+ }
+
+ ALOGE("DEBUG_CALC_FPS: %d", debug_fps_level);
+ populate_debug_fps_metadata();
+}
+
+void CalcFps::Fps() {
+ if (debug_fps_level > 0)
+ calc_fps(ns2us(systemTime()));
+}
+
+void CalcFps::populate_debug_fps_metadata(void)
+{
+ char prop[PROPERTY_VALUE_MAX];
+
+ /*defaults calculation of fps to based on number of frames*/
+ property_get("debug.gr.calcfps.type", prop, "0");
+ debug_fps_metadata.type = (debug_fps_metadata_t::DfmType) atoi(prop);
+
+ /*defaults to 1000ms*/
+ property_get("debug.gr.calcfps.timeperiod", prop, "1000");
+ debug_fps_metadata.time_period = atoi(prop);
+
+ property_get("debug.gr.calcfps.period", prop, "10");
+ debug_fps_metadata.period = atoi(prop);
+
+ if (debug_fps_metadata.period > MAX_FPS_CALC_PERIOD_IN_FRAMES) {
+ debug_fps_metadata.period = MAX_FPS_CALC_PERIOD_IN_FRAMES;
+ }
+
+ /* default ignorethresh_us: 500 milli seconds */
+ property_get("debug.gr.calcfps.ignorethresh_us", prop, "500000");
+ debug_fps_metadata.ignorethresh_us = atoi(prop);
+
+ debug_fps_metadata.framearrival_steps =
+ (debug_fps_metadata.ignorethresh_us / 16666);
+
+ if (debug_fps_metadata.framearrival_steps > MAX_FRAMEARRIVAL_STEPS) {
+ debug_fps_metadata.framearrival_steps = MAX_FRAMEARRIVAL_STEPS;
+ debug_fps_metadata.ignorethresh_us =
+ debug_fps_metadata.framearrival_steps * 16666;
+ }
+
+ /* 2ms margin of error for the gettimeofday */
+ debug_fps_metadata.margin_us = 2000;
+
+ for (unsigned int i = 0; i < MAX_FRAMEARRIVAL_STEPS; i++)
+ debug_fps_metadata.accum_framearrivals[i] = 0;
+
+ ALOGE("period: %d", debug_fps_metadata.period);
+ ALOGE("ignorethresh_us: %lld", debug_fps_metadata.ignorethresh_us);
+}
+
+void CalcFps::print_fps(float fps)
+{
+ if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type)
+ ALOGE("FPS for last %d frames: %3.2f", debug_fps_metadata.period, fps);
+ else
+ ALOGE("FPS for last (%f ms, %d frames): %3.2f",
+ debug_fps_metadata.time_elapsed,
+ debug_fps_metadata.curr_frame, fps);
+
+ debug_fps_metadata.curr_frame = 0;
+ debug_fps_metadata.time_elapsed = 0.0;
+
+ if (debug_fps_level > 1) {
+ ALOGE("Frame Arrival Distribution:");
+ for (unsigned int i = 0;
+ i < ((debug_fps_metadata.framearrival_steps / 6) + 1);
+ i++) {
+ ALOGE("%lld %lld %lld %lld %lld %lld",
+ debug_fps_metadata.accum_framearrivals[i*6],
+ debug_fps_metadata.accum_framearrivals[i*6+1],
+ debug_fps_metadata.accum_framearrivals[i*6+2],
+ debug_fps_metadata.accum_framearrivals[i*6+3],
+ debug_fps_metadata.accum_framearrivals[i*6+4],
+ debug_fps_metadata.accum_framearrivals[i*6+5]);
+ }
+
+ /* We are done with displaying, now clear the stats */
+ for (unsigned int i = 0;
+ i < debug_fps_metadata.framearrival_steps;
+ i++)
+ debug_fps_metadata.accum_framearrivals[i] = 0;
+ }
+ return;
+}
+
+void CalcFps::calc_fps(nsecs_t currtime_us)
+{
+ static nsecs_t oldtime_us = 0;
+
+ nsecs_t diff = currtime_us - oldtime_us;
+
+ oldtime_us = currtime_us;
+
+ if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type &&
+ diff > debug_fps_metadata.ignorethresh_us) {
+ return;
+ }
+
+ if (debug_fps_metadata.curr_frame < MAX_FPS_CALC_PERIOD_IN_FRAMES) {
+ debug_fps_metadata.framearrivals[debug_fps_metadata.curr_frame] = diff;
+ }
+
+ debug_fps_metadata.curr_frame++;
+
+ if (debug_fps_level > 1) {
+ unsigned int currstep = (diff + debug_fps_metadata.margin_us) / 16666;
+
+ if (currstep < debug_fps_metadata.framearrival_steps) {
+ debug_fps_metadata.accum_framearrivals[currstep-1]++;
+ }
+ }
+
+ if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type) {
+ if (debug_fps_metadata.curr_frame == debug_fps_metadata.period) {
+ /* time to calculate and display FPS */
+ nsecs_t sum = 0;
+ for (unsigned int i = 0; i < debug_fps_metadata.period; i++)
+ sum += debug_fps_metadata.framearrivals[i];
+ print_fps((debug_fps_metadata.period * float(1000000))/float(sum));
+ }
+ }
+ else if (debug_fps_metadata_t::DFM_TIME == debug_fps_metadata.type) {
+ debug_fps_metadata.time_elapsed += ((float)diff/1000.0);
+ if (debug_fps_metadata.time_elapsed >= debug_fps_metadata.time_period) {
+ float fps = (1000.0 * debug_fps_metadata.curr_frame)/
+ (float)debug_fps_metadata.time_elapsed;
+ print_fps(fps);
+ }
+ }
+ return;
+}
+#endif
diff --git a/libqcomui/qcom_ui.h b/libqcomui/qcom_ui.h
new file mode 100644
index 0000000..88462cc
--- /dev/null
+++ b/libqcomui/qcom_ui.h
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INCLUDE_LIBQCOM_UI
+#define INCLUDE_LIBQCOM_UI
+
+#include <cutils/native_handle.h>
+#include <ui/GraphicBuffer.h>
+#include <hardware/hwcomposer.h>
+#include <hardware/hwcomposer_defs.h>
+#include <ui/Region.h>
+#include <EGL/egl.h>
+#include <utils/Singleton.h>
+#include <cutils/properties.h>
+#include "../libgralloc/gralloc_priv.h"
+
+using namespace android;
+using android::sp;
+using android::GraphicBuffer;
+
+#define HWC_BYPASS_INDEX_MASK 0x00000030
+
+/*
+ * Qcom specific Native Window perform operations
+ */
+enum {
+    // Values use high bits, presumably to avoid colliding with the
+    // standard NATIVE_WINDOW_* perform codes -- TODO confirm against
+    // system/window.h.
+    NATIVE_WINDOW_SET_BUFFERS_SIZE = 0x10000000,
+    NATIVE_WINDOW_UPDATE_BUFFERS_GEOMETRY = 0x20000000,
+    NATIVE_WINDOW_SET_S3D_FORMAT = 0x40000000,
+};
+
+// Enum containing the supported composition types
+// (distinct bits, so they can be combined as a mask).
+enum {
+    COMPOSITION_TYPE_GPU = 0,
+    COMPOSITION_TYPE_MDP = 0x1,
+    COMPOSITION_TYPE_C2D = 0x2,
+    COMPOSITION_TYPE_CPU = 0x4,
+    COMPOSITION_TYPE_DYN = 0x8
+};
+
+/*
+ * Layer Attributes
+ * Used as the 'attribute' argument of updateLayerQcomFlags().
+ */
+enum eLayerAttrib {
+    LAYER_UPDATE_STATUS,
+    LAYER_ASYNCHRONOUS_STATUS,
+};
+
+/*
+ * Layer Flags
+ */
+enum {
+    LAYER_UPDATING = 1<<0,
+    LAYER_ASYNCHRONOUS = 1<<1,
+};
+
+/*
+ * Flags set by the layer and sent to HWC
+ */
+enum {
+    HWC_LAYER_NOT_UPDATING = 0x00000002,
+    HWC_LAYER_ASYNCHRONOUS = 0x00000004,
+    HWC_USE_ORIGINAL_RESOLUTION = 0x10000000,
+    HWC_DO_NOT_USE_OVERLAY = 0x20000000,
+    HWC_COMP_BYPASS = 0x40000000,
+    HWC_USE_EXT_ONLY = 0x80000000, //Layer displayed on external only
+    HWC_USE_EXT_BLOCK = 0x01000000, //NOTE(review): comment was duplicated
+                                    //from HWC_USE_EXT_ONLY; exact
+                                    //semantics to be confirmed
+    HWC_BYPASS_RESERVE_0 = 0x00000010, //covered by HWC_BYPASS_INDEX_MASK
+    HWC_BYPASS_RESERVE_1 = 0x00000020,
+};
+
+// Per-layer composition decision, aliased onto hwcomposer's values.
+enum HWCCompositionType {
+    HWC_USE_GPU = HWC_FRAMEBUFFER, // This layer is to be handled by Surfaceflinger
+    HWC_USE_OVERLAY = HWC_OVERLAY, // This layer is to be handled by the overlay
+    HWC_USE_COPYBIT // This layer is to be handled by copybit
+};
+
+// Which external display (if any) is active; see handleEventHDMI().
+enum external_display {
+    EXT_DISPLAY_OFF,
+    EXT_DISPLAY_HDMI,
+    EXT_DISPLAY_WIFI
+};
+
+/*
+ * Structure to hold the buffer geometry
+ */
+struct qBufGeometry {
+    int width;   // buffer width
+    int height;  // buffer height
+    int format;  // pixel format
+    // Convenience setter: assigns all three fields at once.
+    void set(int w, int h, int f) {
+        width = w;
+        height = h;
+        format = f;
+    }
+};
+
+#ifndef DEBUG_CALC_FPS
+// FPS accounting compiled out: both hooks collapse to no-ops.
+#define CALC_FPS() ((void)0)
+#define CALC_INIT() ((void)0)
+#else
+// Hooks inserted at composition points to initialize / tick the counter.
+#define CALC_FPS() CalcFps::getInstance().Fps()
+#define CALC_INIT() CalcFps::getInstance().Init()
+
+/*
+ * Debug-only FPS calculator (process-wide Singleton).  Collects
+ * per-frame arrival times and periodically reports an FPS figure,
+ * either after a fixed number of frames (DFM_FRAMES) or a fixed
+ * time window (DFM_TIME).
+ */
+class CalcFps : public Singleton<CalcFps> {
+public:
+    CalcFps();
+    ~CalcFps();
+
+    // Initialize FPS bookkeeping (implementation in qcom_ui.cpp).
+    void Init();
+    // Per-frame tick; presumably feeds calc_fps() -- see qcom_ui.cpp.
+    void Fps();
+
+private:
+    // Bounds for the collection arrays and for debug_fps_level.
+    static const unsigned int MAX_FPS_CALC_PERIOD_IN_FRAMES = 128;
+    static const unsigned int MAX_FRAMEARRIVAL_STEPS = 50;
+    static const unsigned int MAX_DEBUG_FPS_LEVEL = 2;
+
+    struct debug_fps_metadata_t {
+        /*fps calculation based on time or number of frames*/
+        enum DfmType {
+            DFM_FRAMES = 0,
+            DFM_TIME = 1,
+        };
+
+        DfmType type;
+
+        /* indicates how much time do we wait till we calculate FPS */
+        unsigned long time_period;
+
+        /*indicates how much time elapsed since we report fps*/
+        float time_elapsed;
+
+        /* indicates how many frames do we wait till we calculate FPS */
+        unsigned int period;
+        /* current frame, will go upto period, and then reset */
+        unsigned int curr_frame;
+        /* frame will arrive at a multiple of 16666 us at the display.
+           This indicates how many steps to consider for our calculations.
+           For example, if framearrival_steps = 10, then the frame that arrived
+           after 166660 us or more will be ignored.
+        */
+        unsigned int framearrival_steps;
+        /* ignorethresh_us = framearrival_steps * 16666 */
+        nsecs_t ignorethresh_us;
+        /* used to calculate the actual frame arrival step, the times might not be
+           accurate
+        */
+        unsigned int margin_us;
+
+        /* actual data storage */
+        nsecs_t framearrivals[MAX_FPS_CALC_PERIOD_IN_FRAMES];
+        nsecs_t accum_framearrivals[MAX_FRAMEARRIVAL_STEPS];
+    };
+
+private:
+    void populate_debug_fps_metadata(void);
+    void print_fps(float fps);
+    void calc_fps(nsecs_t currtime_us);
+
+private:
+    debug_fps_metadata_t debug_fps_metadata;
+    // 0 disables; > 1 additionally enables per-step arrival histograms.
+    unsigned int debug_fps_level;
+};
+#endif
+
+#if 0  // Entire class compiled out; retained for reference only.
+class QCBaseLayer
+{
+//    int mS3DFormat;
+    int32_t mComposeS3DFormat;  // one of the eS3D_* values, or 0
+public:
+    QCBaseLayer()
+    {
+        mComposeS3DFormat = 0;
+    }
+    enum { // S3D formats
+        eS3D_SIDE_BY_SIDE = 0x10000,
+        eS3D_TOP_BOTTOM   = 0x20000
+    };
+/*
+    virtual status_t setStereoscopic3DFormat(int format) { mS3DFormat = format; return 0; }
+    virtual int getStereoscopic3DFormat() const { return mS3DFormat; }
+ */
+    // Derive the compose format from hwcomposer S3D hint bits.
+    void setS3DComposeFormat (int32_t hints)
+    {
+        if (hints & HWC_HINT_DRAW_S3D_SIDE_BY_SIDE)
+            mComposeS3DFormat = eS3D_SIDE_BY_SIDE;
+        else if (hints & HWC_HINT_DRAW_S3D_TOP_BOTTOM)
+            mComposeS3DFormat = eS3D_TOP_BOTTOM;
+        else
+            mComposeS3DFormat = 0;
+    }
+    // Non-zero (the format) when S3D composition is required.
+    int32_t needsS3DCompose () const { return mComposeS3DFormat; }
+};
+#endif
+
+/*
+ * Function to check if the allocated buffer is of the correct size.
+ * Reallocate the buffer with the correct size, if the size doesn't
+ * match
+ *
+ * @param: handle of the allocated buffer
+ * @param: requested size for the buffer
+ * @param: usage flags
+ *
+ * return 0 on success
+ */
+int checkBuffer(native_handle_t *buffer_handle, int size, int usage);
+
+/*
+ * Checks if the format is supported by the GPU.
+ *
+ * @param: format to check
+ *
+ * @return true if the format is supported by the GPU.
+ */
+bool isGPUSupportedFormat(int format);
+
+/*
+ * Adreno is not optimized for GL_TEXTURE_EXTERNAL_OES
+ * texture target. DO NOT choose TEXTURE_EXTERNAL_OES
+ * target for RGB formats.
+ *
+ * Based on the pixel format, decide the texture target.
+ *
+ * @param : pixel format to check
+ *
+ * @return : GL_TEXTURE_2D for RGB formats, and
+ * GL_TEXTURE_EXTERNAL_OES for YUV formats.
+ *
+*/
+
+int decideTextureTarget (const int pixel_format);
+
+/*
+ * Gets the number of arguments required for this operation.
+ *
+ * @param: operation whose argument count is required.
+ *
+ * @return -EINVAL if the operation is invalid.
+ */
+int getNumberOfArgsForOperation(int operation);
+
+/*
+ * Checks if memory needs to be reallocated for this buffer.
+ *
+ * @param: Geometry of the current buffer.
+ * @param: Required Geometry.
+ * @param: Geometry of the updated buffer.
+ *
+ * @return True if a memory reallocation is required.
+ */
+bool needNewBuffer(const qBufGeometry currentGeometry,
+ const qBufGeometry requiredGeometry,
+ const qBufGeometry updatedGeometry);
+
+/*
+ * Update the geometry of this buffer without reallocation.
+ *
+ * @param: buffer whose geometry needs to be updated.
+ * @param: Updated buffer geometry
+ */
+int updateBufferGeometry(sp<GraphicBuffer> buffer, const qBufGeometry bufGeometry);
+
+/*
+ * Update the S3D format of this buffer.
+ *
+ * @param: buffer whose S3D format needs to be updated.
+ * @param: Updated buffer S3D format
+ */
+int updateBufferS3DFormat(sp<GraphicBuffer> buffer, const int s3dFormat);
+
+/*
+ * Updates the flags for the layer
+ *
+ * @param: Attribute
+ * @param: Identifies if the attribute was enabled or disabled.
+ * @param: current Layer flags.
+ *
+ * @return: Flags for the layer
+ */
+int updateLayerQcomFlags(eLayerAttrib attribute, bool enable, int& currentFlags);
+
+/*
+ * Gets the per frame HWC flags for this layer.
+ *
+ * @param: current hwcl flags
+ * @param: current layerFlags
+ *
+ * @return: the per frame flags.
+ */
+int getPerFrameFlags(int hwclFlags, int layerFlags);
+
+/*
+ * Checks if FB is updated by this composition type
+ *
+ * @param: composition type
+ * @return: true if FB is updated, false if not
+ */
+
+bool isUpdatingFB(HWCCompositionType compositionType);
+
+/*
+ * Get the current composition Type
+ *
+ * @return the composition type
+ */
+int getCompositionType();
+
+/*
+ * Clear region implementation for C2D/MDP versions.
+ *
+ * @param: region to be cleared
+ * @param: EGL Display
+ * @param: EGL Surface
+ *
+ * @return 0 on success
+ */
+int qcomuiClearRegion(Region region, EGLDisplay dpy, EGLSurface sur);
+
+/*
+ * Handles the externalDisplay event
+ * HDMI has highest priority compared to WifiDisplay
+ * Based on the current and the new display event, decides the
+ * external display to be enabled
+ *
+ * @param: newEvent - new external event
+ * @param: currEvent - currently enabled external event
+ * @return: external display to be enabled
+ *
+ */
+external_display handleEventHDMI(external_display newEvent, external_display
+ currEvent);
+
+/*
+ * Checks if layers need to be dumped based on system property "debug.sf.dump"
+ * for raw dumps and "debug.sf.dump.png" for png dumps.
+ *
+ * For example, to dump 25 frames in raw format, do,
+ * adb shell setprop debug.sf.dump 25
+ * Layers are dumped in a time-stamped location: /data/sfdump*.
+ *
+ * To dump 10 frames in png format, do,
+ * adb shell setprop debug.sf.dump.png 10
+ * To dump another 25 or so frames in raw format, do,
+ * adb shell setprop debug.sf.dump 26
+ *
+ * To turn off logcat logging of layer-info, set both properties to 0,
+ * adb shell setprop debug.sf.dump.png 0
+ * adb shell setprop debug.sf.dump 0
+ *
+ * @return: true if layers need to be dumped (or logcat-ed).
+ */
+bool needToDumpLayers();
+
+/*
+ * Dumps a layer's info into logcat and its buffer into raw/png files.
+ *
+ * @param: moduleCompositionType - Composition type set in hwcomposer module.
+ * @param: listFlags - Flags used in hwcomposer's list.
+ * @param: layerIndex - Index of layer being dumped.
+ * @param: hwLayers - Address of hwc_layer_t to log and dump.
+ *
+ */
+void dumpLayer(int moduleCompositionType, int listFlags, size_t layerIndex,
+ hwc_layer_t hwLayers[]);
+
+#endif // INCLUDE_LIBQCOM_UI
diff --git a/libtilerenderer/Android.mk b/libtilerenderer/Android.mk
new file mode 100644
index 0000000..e0bf342
--- /dev/null
+++ b/libtilerenderer/Android.mk
@@ -0,0 +1,26 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Build libtilerenderer only when the hwui OpenGL renderer is enabled.
+ifeq ($(USE_OPENGL_RENDERER),true)
+LOCAL_PRELINK_MODULE := false
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
+LOCAL_SHARED_LIBRARIES := libutils libcutils libGLESv2 libhwui
+
+# Header paths for hwui internals, skia, and GLES2 used by tilerenderer.cpp.
+LOCAL_C_INCLUDES += \
+        frameworks/base/include/utils \
+        frameworks/base/libs/hwui \
+        external/skia/include/core \
+        external/skia/include/effects \
+        external/skia/include/images \
+        external/skia/src/ports \
+        external/skia/include/utils \
+        hardware/libhardware/include/hardware \
+        frameworks/base/opengl/include/GLES2
+
+LOCAL_SRC_FILES := \
+        tilerenderer.cpp
+
+LOCAL_MODULE := libtilerenderer
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
+endif
diff --git a/libtilerenderer/tilerenderer.cpp b/libtilerenderer/tilerenderer.cpp
new file mode 100644
index 0000000..6e8f847
--- /dev/null
+++ b/libtilerenderer/tilerenderer.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <GLES2/gl2.h>
+#include <EGL/egl.h>
+#include <gl2ext.h>
+#include <OpenGLRenderer.h>
+#include "tilerenderer.h"
+
+namespace android {
+ANDROID_SINGLETON_STATIC_INSTANCE(uirenderer::TileRenderer) ;
+namespace uirenderer {
+
+// Start with no tiling pass active; startTileRendering() flips the flag
+// once glStartTilingQCOM succeeds.
+TileRenderer::TileRenderer()
+    : mIsTiled(false) {
+}
+
+// Trivial destructor: the tiling flag needs no cleanup.
+TileRenderer::~TileRenderer() {
+}
+
+/*
+ * Begin a QCOM tiled-rendering pass covering the given rect (y-down
+ * surface coordinates; pass all zeroes to tile the renderer's full
+ * viewport).
+ *
+ * The rect is converted to GL's bottom-left origin and clamped to the
+ * viewport; if any clamping occurred, GL_COLOR_BUFFER_BIT0_QCOM asks
+ * the driver to preserve existing color-buffer content outside the
+ * tile.  mIsTiled is set only when glStartTilingQCOM reports no error,
+ * keeping endTileRendering() balanced.
+ */
+void TileRenderer::startTileRendering(OpenGLRenderer* renderer,
+                                      int left, int top,
+                                      int right, int bottom) {
+    int width = 0;
+    int height = 0;
+    GLenum status = GL_NO_ERROR;
+
+    if (renderer != NULL) {
+        renderer->getViewport(width, height);
+    }
+
+    // An all-zero rect means "whole viewport".
+    if (!left && !right && !top && !bottom) {
+        left = 0;
+        top = 0;
+        right = width;
+        bottom = height;
+    }
+
+    // Still all zero => the viewport itself is unknown; can't tile.
+    if (!left && !right && !top && !bottom) {
+        LOGE("can't tile render; dirty region, width, height not available");
+        return;
+    }
+
+    // Flip to bottom-left origin; clamp to the viewport, remembering
+    // whether clamping happened so we preserve untouched pixels.
+    int l = left, t = (height - bottom), w = (right - left), h = (bottom - top), preserve = 0;
+
+    if (l < 0 || t < 0) {
+        l = (l < 0) ? 0 : l;
+        t = (t < 0) ? 0 : t;
+        preserve = 1;
+    }
+
+    if (w > width || h > height) {
+        w = (w > width) ? width : w;
+        h = (h > height) ? height : h;
+        preserve = 1;
+    }
+
+    // Clear off all pre-existing GL errors so the check below only
+    // reflects the tiling call itself.
+    while ((status = glGetError()) != GL_NO_ERROR);
+
+    if (preserve)
+        glStartTilingQCOM(l, t, w, h, GL_COLOR_BUFFER_BIT0_QCOM);
+    else
+        glStartTilingQCOM(l, t, w, h, GL_NONE);
+
+    status = glGetError();
+    if (status == GL_NO_ERROR)
+        mIsTiled = true;
+}
+
+// Close the tiling pass opened by startTileRendering().  A no-op when
+// no pass is active, so unbalanced calls are harmless.
+void TileRenderer::endTileRendering(OpenGLRenderer*) {
+    if (!mIsTiled)
+        return;
+
+    glEndTilingQCOM(GL_COLOR_BUFFER_BIT0_QCOM);
+    mIsTiled = false;
+
+    // Drain any GL errors the end-tiling call may have raised.
+    GLenum err;
+    while ((err = glGetError()) != GL_NO_ERROR)
+        ;
+}
+
+}; // namespace uirenderer
+}; // namespace android
diff --git a/libtilerenderer/tilerenderer.h b/libtilerenderer/tilerenderer.h
new file mode 100644
index 0000000..bec225d
--- /dev/null
+++ b/libtilerenderer/tilerenderer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_TILE_RENDERER_H
+#define ANDROID_TILE_RENDERER_H
+
+#include <utils/Singleton.h>
+
+namespace android {
+namespace uirenderer {
+
+class OpenGLRenderer;
+
+// Singleton wrapper around the GL_QCOM_tiled_rendering extension:
+// bracket drawing between startTileRendering() and endTileRendering().
+class TileRenderer: public Singleton<TileRenderer> {
+public:
+    TileRenderer();
+    ~TileRenderer();
+
+    // Begin a tiling pass over the given rect; all-zero rect means the
+    // renderer's full viewport (see tilerenderer.cpp).
+    void startTileRendering(OpenGLRenderer* renderer, int left, int top, int right, int bottom);
+    // End the active tiling pass; no-op when none is active.
+    void endTileRendering(OpenGLRenderer*);
+
+private:
+    bool mIsTiled;  // true while a tiling pass is active
+};
+
+}; // namespace uirenderer
+}; // namespace android
+
+#endif