Merge "Enable TripleBuffering on SF by default"
diff --git a/cmds/atrace/Android.bp b/cmds/atrace/Android.bp
index 69ed416..225de20 100644
--- a/cmds/atrace/Android.bp
+++ b/cmds/atrace/Android.bp
@@ -16,6 +16,9 @@
"libz",
"libbase",
],
+ static_libs: [
+ "libpdx_default_transport",
+ ],
init_rc: ["atrace.rc"],
}
diff --git a/cmds/atrace/atrace.cpp b/cmds/atrace/atrace.cpp
index 4be0432..ce0caed 100644
--- a/cmds/atrace/atrace.cpp
+++ b/cmds/atrace/atrace.cpp
@@ -18,6 +18,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <ftw.h>
#include <getopt.h>
#include <inttypes.h>
#include <signal.h>
@@ -41,6 +42,7 @@
#include <hidl/ServiceManagement.h>
#include <cutils/properties.h>
+#include <pdx/default_transport/service_utility.h>
#include <utils/String8.h>
#include <utils/Timers.h>
#include <utils/Tokenizer.h>
@@ -48,6 +50,7 @@
#include <android-base/file.h>
using namespace android;
+using pdx::default_transport::ServiceUtility;
using std::string;
#define NELEM(x) ((int) (sizeof(x) / sizeof((x)[0])))
@@ -569,6 +572,46 @@
}
}
+// Sends the sysprop_change message to the service at fpath, so it re-reads its
+// system properties. Returns 0 on success or a negated errno code on failure.
+static int pokeOnePDXService(const char *fpath, const struct stat * /*sb*/,
+ int typeflag, struct FTW * /*ftwbuf*/)
+{
+ const bool kIgnoreErrors = true;
+
+ if (typeflag == FTW_F) {
+ int error;
+ auto utility = ServiceUtility::Create(fpath, &error);
+ if (!utility) {
+ if (error != -ECONNREFUSED) {
+ ALOGE("pokeOnePDXService: Failed to open %s, %s.", fpath,
+ strerror(-error));
+ }
+ return kIgnoreErrors ? 0 : error;
+ }
+
+ auto status = utility->ReloadSystemProperties();
+ if (!status) {
+ ALOGE("pokeOnePDXService: Failed to send sysprop change to %s, "
+ "error %d, %s.", fpath, status.error(),
+ status.GetErrorMessage().c_str());
+ return kIgnoreErrors ? 0 : -status.error();
+ }
+ }
+
+ return 0;
+}
+
+// Pokes all the PDX processes in the system to get them to re-read
+// their system properties. Returns true on success, false on failure.
+static bool pokePDXServices()
+{
+ const int kMaxDepth = 16;
+ const int result = nftw(ServiceUtility::GetRootEndpointPath().c_str(),
+ pokeOnePDXService, kMaxDepth, FTW_PHYS);
+ return result == 0 ? true : false;
+}
+
// Set the trace tags that userland tracing uses, and poke the running
// processes to pick up the new value.
static bool setTagsProperty(uint64_t tags)
@@ -812,6 +855,7 @@
ok &= setAppCmdlineProperty(&packageList[0]);
ok &= pokeBinderServices();
pokeHalServices();
+ ok &= pokePDXServices();
// Disable all the sysfs enables. This is done as a separate loop from
// the enables to allow the same enable to exist in multiple categories.
@@ -849,6 +893,7 @@
setTagsProperty(0);
clearAppProperties();
pokeBinderServices();
+ pokePDXServices();
// Set the options back to their defaults.
setTraceOverwriteEnable(true);
diff --git a/data/etc/android.hardware.vulkan.compute-0.xml b/data/etc/android.hardware.vulkan.compute-0.xml
new file mode 100644
index 0000000..bac2fde
--- /dev/null
+++ b/data/etc/android.hardware.vulkan.compute-0.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2017 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is the standard feature indicating that the device supports Vulkan
+ compute level 0. -->
+<permissions>
+ <feature name="android.hardware.vulkan.compute" version="0" />
+</permissions>
diff --git a/include/audiomanager/IPlayer.h b/include/audiomanager/IPlayer.h
index efcac74..94afae5 100644
--- a/include/audiomanager/IPlayer.h
+++ b/include/audiomanager/IPlayer.h
@@ -41,6 +41,10 @@
virtual void setVolume(float vol) = 0;
+ virtual void setPan(float pan) = 0;
+
+ virtual void setStartDelayMs(int delayMs) = 0;
+
};
// ----------------------------------------------------------------------------
diff --git a/include/batteryservice/IBatteryPropertiesListener.h b/include/batteryservice/IBatteryPropertiesListener.h
index 9154076..b226dd6 100644
--- a/include/batteryservice/IBatteryPropertiesListener.h
+++ b/include/batteryservice/IBatteryPropertiesListener.h
@@ -40,6 +40,12 @@
// ----------------------------------------------------------------------------
+class BnBatteryPropertiesListener: public BnInterface<IBatteryPropertiesListener> {
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags = 0);
+};
+
}; // namespace android
#endif // ANDROID_IBATTERYPROPERTIESLISTENER_H
diff --git a/include/vr/vr_manager/vr_manager.h b/include/vr/vr_manager/vr_manager.h
index 20e4f7c..0c5da19 100644
--- a/include/vr/vr_manager/vr_manager.h
+++ b/include/vr/vr_manager/vr_manager.h
@@ -58,23 +58,6 @@
GET_VR_MODE_STATE,
};
-enum class VrDisplayStateTransaction {
- ON_DISPLAY_STATE_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
-};
-
-class IVrDisplayStateService : public IInterface {
-public:
- DECLARE_META_INTERFACE(VrDisplayStateService)
-
- virtual void displayAvailable(bool available) = 0;
-};
-
-class BnVrDisplayStateService : public BnInterface<IVrDisplayStateService> {
-public:
- status_t onTransact(uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0) override;
-};
-
}; // namespace android
#endif // ANDROID_VR_MANAGER_H
diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp
index d42bb82..fe28533 100644
--- a/libs/binder/ProcessState.cpp
+++ b/libs/binder/ProcessState.cpp
@@ -319,7 +319,8 @@
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
- ALOGE("Binder driver protocol does not match user space protocol!");
+ ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
+ vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
close(fd);
fd = -1;
}
diff --git a/libs/gui/tests/SRGB_test.cpp b/libs/gui/tests/SRGB_test.cpp
index 3b11b97..c2640cd 100644
--- a/libs/gui/tests/SRGB_test.cpp
+++ b/libs/gui/tests/SRGB_test.cpp
@@ -435,9 +435,6 @@
ASSERT_EQ(NO_ERROR, mCpuConsumer->unlockBuffer(mLockedBuffer));
// Switch to SRGB window surface
-#define EGL_GL_COLORSPACE_KHR EGL_VG_COLORSPACE
-#define EGL_GL_COLORSPACE_SRGB_KHR EGL_VG_COLORSPACE_sRGB
-
static const int srgbAttribs[] = {
EGL_GL_COLORSPACE_KHR, EGL_GL_COLORSPACE_SRGB_KHR,
EGL_NONE,
diff --git a/libs/vr/libbufferhub/Android.mk b/libs/vr/libbufferhub/Android.mk
index 467f69f..0877b0b 100644
--- a/libs/vr/libbufferhub/Android.mk
+++ b/libs/vr/libbufferhub/Android.mk
@@ -23,7 +23,6 @@
$(LOCAL_PATH)/include
staticLibraries := \
- libchrome \
libdvrcommon \
libpdx_default_transport \
diff --git a/libs/vr/libbufferhub/buffer_hub_client.cpp b/libs/vr/libbufferhub/buffer_hub_client.cpp
index 146780e..e2413bd 100644
--- a/libs/vr/libbufferhub/buffer_hub_client.cpp
+++ b/libs/vr/libbufferhub/buffer_hub_client.cpp
@@ -1,6 +1,6 @@
#include <private/dvr/buffer_hub_client.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <poll.h>
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
diff --git a/libs/vr/libbufferhub/bufferhub_tests.cpp b/libs/vr/libbufferhub/bufferhub_tests.cpp
index cb45dbe..0b9e0cc 100644
--- a/libs/vr/libbufferhub/bufferhub_tests.cpp
+++ b/libs/vr/libbufferhub/bufferhub_tests.cpp
@@ -1,11 +1,19 @@
#include <android/native_window.h>
-#include <base/posix/eintr_wrapper.h>
#include <gtest/gtest.h>
#include <private/dvr/buffer_hub_client.h>
#include <mutex>
#include <thread>
+#define RETRY_EINTR(fnc_call) \
+ ([&]() -> decltype(fnc_call) { \
+ decltype(fnc_call) result; \
+ do { \
+ result = (fnc_call); \
+ } while (result == -1 && errno == EINTR); \
+ return result; \
+ })()
+
using android::dvr::BufferProducer;
using android::dvr::BufferConsumer;
using android::pdx::LocalHandle;
@@ -32,27 +40,27 @@
EXPECT_EQ(0, p->Post(LocalHandle(), kContext));
// Both consumers should be triggered.
- EXPECT_GE(0, HANDLE_EINTR(p->Poll(0)));
- EXPECT_LT(0, HANDLE_EINTR(c->Poll(10)));
- EXPECT_LT(0, HANDLE_EINTR(c2->Poll(10)));
+ EXPECT_GE(0, RETRY_EINTR(p->Poll(0)));
+ EXPECT_LT(0, RETRY_EINTR(c->Poll(10)));
+ EXPECT_LT(0, RETRY_EINTR(c2->Poll(10)));
uint64_t context;
LocalHandle fence;
EXPECT_LE(0, c->Acquire(&fence, &context));
EXPECT_EQ(kContext, context);
- EXPECT_GE(0, HANDLE_EINTR(c->Poll(0)));
+ EXPECT_GE(0, RETRY_EINTR(c->Poll(0)));
EXPECT_LE(0, c2->Acquire(&fence, &context));
EXPECT_EQ(kContext, context);
- EXPECT_GE(0, HANDLE_EINTR(c2->Poll(0)));
+ EXPECT_GE(0, RETRY_EINTR(c2->Poll(0)));
EXPECT_EQ(0, c->Release(LocalHandle()));
- EXPECT_GE(0, HANDLE_EINTR(p->Poll(0)));
+ EXPECT_GE(0, RETRY_EINTR(p->Poll(0)));
EXPECT_EQ(0, c2->Discard());
- EXPECT_LE(0, HANDLE_EINTR(p->Poll(0)));
+ EXPECT_LE(0, RETRY_EINTR(p->Poll(0)));
EXPECT_EQ(0, p->Gain(&fence));
- EXPECT_GE(0, HANDLE_EINTR(p->Poll(0)));
+ EXPECT_GE(0, RETRY_EINTR(p->Poll(0)));
}
TEST_F(LibBufferHubTest, TestWithCustomMetadata) {
@@ -69,7 +77,7 @@
Metadata m = {1, 3};
EXPECT_EQ(0, p->Post(LocalHandle(), m));
- EXPECT_LE(0, HANDLE_EINTR(c->Poll(10)));
+ EXPECT_LE(0, RETRY_EINTR(c->Poll(10)));
LocalHandle fence;
Metadata m2 = {};
@@ -78,7 +86,7 @@
EXPECT_EQ(m.field2, m2.field2);
EXPECT_EQ(0, c->Release(LocalHandle()));
- EXPECT_LT(0, HANDLE_EINTR(p->Poll(0)));
+ EXPECT_LT(0, RETRY_EINTR(p->Poll(0)));
}
TEST_F(LibBufferHubTest, TestPostWithWrongMetaSize) {
@@ -95,7 +103,7 @@
int64_t sequence = 3;
EXPECT_NE(0, p->Post(LocalHandle(), sequence));
- EXPECT_GE(0, HANDLE_EINTR(c->Poll(10)));
+ EXPECT_GE(0, RETRY_EINTR(c->Poll(10)));
}
TEST_F(LibBufferHubTest, TestAcquireWithWrongMetaSize) {
diff --git a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
index b6ff5b6..cefde7b 100644
--- a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
+++ b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
@@ -71,6 +71,15 @@
}
using Client::event_fd;
+
+ Status<int> GetEventMask(int events) {
+ if (auto* client_channel = GetChannel()) {
+ return client_channel->GetEventMask(events);
+ } else {
+ return pdx::ErrorStatus(EINVAL);
+ }
+ }
+
native_handle_t* native_handle() const {
return const_cast<native_handle_t*>(slices_[0].handle());
}
@@ -158,8 +167,9 @@
int Post(const LocalHandle& ready_fence) {
return Post(ready_fence, nullptr, 0);
}
- template <typename Meta, typename = typename std::enable_if<
- !std::is_void<Meta>::value>::type>
+ template <
+ typename Meta,
+ typename = typename std::enable_if<!std::is_void<Meta>::value>::type>
int Post(const LocalHandle& ready_fence, const Meta& meta) {
return Post(ready_fence, &meta, sizeof(meta));
}
diff --git a/libs/vr/libbufferhub/include/private/dvr/native_buffer.h b/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
index f6c24d9..afed052 100644
--- a/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
+++ b/libs/vr/libbufferhub/include/private/dvr/native_buffer.h
@@ -4,8 +4,7 @@
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <android/native_window.h>
-#include <base/logging.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <system/window.h>
#include <ui/ANativeObjectBase.h>
#include <utils/RefBase.h>
@@ -181,7 +180,7 @@
ANativeWindowBuffer::stride = buffer_->stride();
ANativeWindowBuffer::format = buffer_->format();
ANativeWindowBuffer::usage = buffer_->usage();
- CHECK(buffer_->slice_count() > index);
+ LOG_ALWAYS_FATAL_IF(buffer_->slice_count() <= index);
handle = buffer_->slice(index)->handle();
}
diff --git a/libs/vr/libbufferhub/ion_buffer.cpp b/libs/vr/libbufferhub/ion_buffer.cpp
index 7d20049..4db2164 100644
--- a/libs/vr/libbufferhub/ion_buffer.cpp
+++ b/libs/vr/libbufferhub/ion_buffer.cpp
@@ -1,6 +1,6 @@
#include <private/dvr/ion_buffer.h>
-#include <cutils/log.h>
+#include <log/log.h>
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
diff --git a/libs/vr/libbufferhubqueue/Android.mk b/libs/vr/libbufferhubqueue/Android.mk
index 46b83e7..3ed7ff2 100644
--- a/libs/vr/libbufferhubqueue/Android.mk
+++ b/libs/vr/libbufferhubqueue/Android.mk
@@ -25,7 +25,6 @@
staticLibraries := \
libbufferhub \
- libchrome \
libdvrcommon \
libpdx_default_transport \
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
index 4fbfcf6..0576b21 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_client.cpp
@@ -1,6 +1,7 @@
#include "include/private/dvr/buffer_hub_queue_client.h"
-#include <base/logging.h>
+#include <inttypes.h>
+#include <log/log.h>
#include <sys/epoll.h>
#include <array>
@@ -43,8 +44,8 @@
void BufferHubQueue::Initialize() {
int ret = epoll_fd_.Create();
if (ret < 0) {
- LOG(ERROR) << "BufferHubQueue::BufferHubQueue: Failed to create epoll fd:"
- << strerror(-ret);
+ ALOGE("BufferHubQueue::BufferHubQueue: Failed to create epoll fd: %s",
+ strerror(-ret));
return;
}
@@ -53,8 +54,8 @@
BufferHubQueue::kEpollQueueEventIndex)}};
ret = epoll_fd_.Control(EPOLL_CTL_ADD, event_fd(), &event);
if (ret < 0) {
- LOG(ERROR) << "Failed to register ConsumerQueue into epoll event: "
- << strerror(-ret);
+ ALOGE("Failed to register ConsumerQueue into epoll event: %s",
+ strerror(-ret));
}
}
@@ -63,13 +64,13 @@
InvokeRemoteMethod<BufferHubRPC::CreateConsumerQueue>();
if (!status) {
- LOG(ERROR) << "Cannot create ConsumerQueue: " << status.GetErrorMessage();
+ ALOGE("Cannot create ConsumerQueue: %s", status.GetErrorMessage().c_str());
return nullptr;
}
auto return_value = status.take();
- VLOG(1) << "CreateConsumerQueue: meta_size_bytes=" << return_value.second;
+ ALOGD("CreateConsumerQueue: meta_size_bytes=%zu", return_value.second);
return ConsumerQueue::Create(std::move(return_value.first),
return_value.second);
}
@@ -81,12 +82,12 @@
int ret = epoll_fd_.Wait(events.data(), events.size(), timeout);
if (ret == 0) {
- VLOG(1) << "Wait on epoll returns nothing before timeout.";
+ ALOGD("Wait on epoll returns nothing before timeout.");
return false;
}
if (ret < 0 && ret != -EINTR) {
- LOG(ERROR) << "Failed to wait for buffers:" << strerror(-ret);
+ ALOGE("Failed to wait for buffers: %s", strerror(-ret));
return false;
}
@@ -98,13 +99,13 @@
for (int i = 0; i < num_events; i++) {
int64_t index = static_cast<int64_t>(events[i].data.u64);
- VLOG(1) << "New BufferHubQueue event " << i << ": index=" << index;
+ ALOGD("New BufferHubQueue event %d: index=%" PRId64, i, index);
if (is_buffer_event_index(index) && (events[i].events & EPOLLIN)) {
auto buffer = buffers_[index];
ret = OnBufferReady(buffer);
if (ret < 0) {
- LOG(ERROR) << "Failed to set buffer ready:" << strerror(-ret);
+ ALOGE("Failed to set buffer ready: %s", strerror(-ret));
continue;
}
Enqueue(buffer, index);
@@ -113,18 +114,18 @@
// This maybe caused by producer replacing an exising buffer slot.
// Currently the epoll FD is cleaned up when the replacement consumer
// client is imported.
- LOG(WARNING) << "Receives EPOLLHUP at slot: " << index;
+ ALOGW("Receives EPOLLHUP at slot: %" PRId64, index);
} else if (is_queue_event_index(index) && (events[i].events & EPOLLIN)) {
// Note that after buffer imports, if |count()| still returns 0, epoll
// wait will be tried again to acquire the newly imported buffer.
ret = OnBufferAllocated();
if (ret < 0) {
- LOG(ERROR) << "Failed to import buffer:" << strerror(-ret);
+ ALOGE("Failed to import buffer: %s", strerror(-ret));
continue;
}
} else {
- LOG(WARNING) << "Unknown event " << i << ": u64=" << index
- << ": events=" << events[i].events;
+ ALOGW("Unknown event %d: u64=%" PRId64 ": events=%" PRIu32, i, index,
+ events[i].events);
}
}
}
@@ -137,8 +138,8 @@
if (is_full()) {
// TODO(jwcai) Move the check into Producer's AllocateBuffer and consumer's
// import buffer.
- LOG(ERROR) << "BufferHubQueue::AddBuffer queue is at maximum capacity: "
- << capacity_;
+ ALOGE("BufferHubQueue::AddBuffer queue is at maximum capacity: %zu",
+ capacity_);
return -E2BIG;
}
@@ -152,9 +153,8 @@
epoll_event event = {.events = EPOLLIN | EPOLLET, .data = {.u64 = slot}};
const int ret = epoll_fd_.Control(EPOLL_CTL_ADD, buf->event_fd(), &event);
if (ret < 0) {
- LOG(ERROR)
- << "BufferHubQueue::AddBuffer: Failed to add buffer to epoll set:"
- << strerror(-ret);
+ ALOGE("BufferHubQueue::AddBuffer: Failed to add buffer to epoll set: %s",
+ strerror(-ret));
return ret;
}
@@ -166,15 +166,16 @@
int BufferHubQueue::DetachBuffer(size_t slot) {
auto& buf = buffers_[slot];
if (buf == nullptr) {
- LOG(ERROR) << "BufferHubQueue::DetachBuffer: Invalid slot: " << slot;
+ ALOGE("BufferHubQueue::DetachBuffer: Invalid slot: %zu", slot);
return -EINVAL;
}
const int ret = epoll_fd_.Control(EPOLL_CTL_DEL, buf->event_fd(), nullptr);
if (ret < 0) {
- LOG(ERROR) << "BufferHubQueue::DetachBuffer: Failed to detach buffer from "
- "epoll set:"
- << strerror(-ret);
+ ALOGE(
+ "BufferHubQueue::DetachBuffer: Failed to detach buffer from epoll set: "
+ "%s",
+ strerror(-ret));
return ret;
}
@@ -186,7 +187,7 @@
void BufferHubQueue::Enqueue(std::shared_ptr<BufferHubBuffer> buf,
size_t slot) {
if (count() == capacity_) {
- LOG(ERROR) << "Buffer queue is full!";
+ ALOGE("Buffer queue is full!");
return;
}
@@ -206,7 +207,7 @@
std::shared_ptr<BufferHubBuffer> BufferHubQueue::Dequeue(int timeout,
size_t* slot,
void* meta) {
- VLOG(1) << "Dequeue: count=" << count() << ", timeout=" << timeout;
+ ALOGD("Dequeue: count=%zu, timeout=%d", count(), timeout);
if (count() == 0 && !WaitForBuffers(timeout))
return nullptr;
@@ -224,7 +225,7 @@
available_buffers_.PopFront();
if (!buf) {
- LOG(ERROR) << "Dequeue: Buffer to be dequeued is nullptr";
+ ALOGE("Dequeue: Buffer to be dequeued is nullptr");
return nullptr;
}
@@ -250,9 +251,8 @@
meta_size_, usage_set_mask, usage_clear_mask, usage_deny_set_mask,
usage_deny_clear_mask);
if (!status) {
- LOG(ERROR)
- << "ProducerQueue::ProducerQueue: Failed to create producer queue: %s"
- << status.GetErrorMessage();
+ ALOGE("ProducerQueue::ProducerQueue: Failed to create producer queue: %s",
+ status.GetErrorMessage().c_str());
Close(-status.error());
return;
}
@@ -261,13 +261,13 @@
int ProducerQueue::AllocateBuffer(int width, int height, int format, int usage,
size_t slice_count, size_t* out_slot) {
if (out_slot == nullptr) {
- LOG(ERROR) << "Parameter out_slot cannot be null.";
+ ALOGE("Parameter out_slot cannot be null.");
return -EINVAL;
}
if (is_full()) {
- LOG(ERROR) << "ProducerQueue::AllocateBuffer queue is at maximum capacity: "
- << capacity();
+ ALOGE("ProducerQueue::AllocateBuffer queue is at maximum capacity: %zu",
+ capacity());
return -E2BIG;
}
@@ -277,21 +277,22 @@
InvokeRemoteMethod<BufferHubRPC::ProducerQueueAllocateBuffers>(
width, height, format, usage, slice_count, kBufferCount);
if (!status) {
- LOG(ERROR) << "ProducerQueue::AllocateBuffer failed to create producer "
- "buffer through BufferHub.";
+ ALOGE(
+ "ProducerQueue::AllocateBuffer failed to create producer buffer "
+ "through BufferHub.");
return -status.error();
}
auto buffer_handle_slots = status.take();
- CHECK_EQ(buffer_handle_slots.size(), kBufferCount)
- << "BufferHubRPC::ProducerQueueAllocateBuffers should return one and "
- "only one buffer handle.";
+ LOG_ALWAYS_FATAL_IF(buffer_handle_slots.size() != kBufferCount,
+ "BufferHubRPC::ProducerQueueAllocateBuffers should "
+ "return one and only one buffer handle.");
// We only allocate one buffer at a time.
auto& buffer_handle = buffer_handle_slots[0].first;
size_t buffer_slot = buffer_handle_slots[0].second;
- VLOG(1) << "ProducerQueue::AllocateBuffer, new buffer, channel_handle: "
- << buffer_handle.value();
+ ALOGD("ProducerQueue::AllocateBuffer, new buffer, channel_handle: %d",
+ buffer_handle.value());
*out_slot = buffer_slot;
return AddBuffer(BufferProducer::Import(std::move(buffer_handle)),
@@ -314,9 +315,10 @@
Status<int> status =
InvokeRemoteMethod<BufferHubRPC::ProducerQueueDetachBuffer>(slot);
if (!status) {
- LOG(ERROR) << "ProducerQueue::DetachBuffer failed to detach producer "
- "buffer through BufferHub, error: "
- << status.GetErrorMessage();
+ ALOGE(
+ "ProducerQueue::DetachBuffer failed to detach producer buffer through "
+ "BufferHub, error: %s",
+ status.GetErrorMessage().c_str());
return -status.error();
}
@@ -344,9 +346,10 @@
Status<std::vector<std::pair<LocalChannelHandle, size_t>>> status =
InvokeRemoteMethod<BufferHubRPC::ConsumerQueueImportBuffers>();
if (!status) {
- LOG(ERROR) << "ConsumerQueue::ImportBuffers failed to import consumer "
- "buffer through BufferBub, error: "
- << status.GetErrorMessage();
+ ALOGE(
+ "ConsumerQueue::ImportBuffers failed to import consumer buffer through "
+ "BufferHub, error: %s",
+ status.GetErrorMessage().c_str());
return -status.error();
}
@@ -355,15 +358,15 @@
auto buffer_handle_slots = status.take();
for (auto& buffer_handle_slot : buffer_handle_slots) {
- VLOG(1) << "ConsumerQueue::ImportBuffers, new buffer, buffer_handle: "
- << buffer_handle_slot.first.value();
+ ALOGD("ConsumerQueue::ImportBuffers, new buffer, buffer_handle: %d",
+ buffer_handle_slot.first.value());
std::unique_ptr<BufferConsumer> buffer_consumer =
BufferConsumer::Import(std::move(buffer_handle_slot.first));
int ret = AddBuffer(std::move(buffer_consumer), buffer_handle_slot.second);
if (ret < 0) {
- LOG(ERROR) << "ConsumerQueue::ImportBuffers failed to add buffer, ret: "
- << strerror(-ret);
+ ALOGE("ConsumerQueue::ImportBuffers failed to add buffer, ret: %s",
+ strerror(-ret));
last_error = ret;
continue;
} else {
@@ -384,9 +387,10 @@
size_t* slot, void* meta,
size_t meta_size) {
if (meta_size != meta_size_) {
- LOG(ERROR) << "metadata size (" << meta_size
- << ") for the dequeuing buffer does not match metadata size ("
- << meta_size_ << ") for the queue.";
+ ALOGE(
+ "metadata size (%zu) for the dequeuing buffer does not match metadata "
+ "size (%zu) for the queue.",
+ meta_size, meta_size_);
return nullptr;
}
auto buf = BufferHubQueue::Dequeue(timeout, slot, meta);
@@ -402,11 +406,11 @@
int ConsumerQueue::OnBufferAllocated() {
const int ret = ImportBuffers();
if (ret == 0) {
- LOG(WARNING) << "No new buffer can be imported on buffer allocated event.";
+ ALOGW("No new buffer can be imported on buffer allocated event.");
} else if (ret < 0) {
- LOG(ERROR) << "Failed to import buffers on buffer allocated event.";
+ ALOGE("Failed to import buffers on buffer allocated event.");
}
- VLOG(1) << "Imported " << ret << " consumer buffers.";
+ ALOGD("Imported %d consumer buffers.", ret);
return ret;
}
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
index 3fc0600..a108042 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_core.cpp
@@ -1,5 +1,7 @@
#include "include/private/dvr/buffer_hub_queue_core.h"
+#include <log/log.h>
+
namespace android {
namespace dvr {
@@ -14,9 +16,9 @@
std::shared_ptr<BufferHubQueueCore> BufferHubQueueCore::Create(
const std::shared_ptr<ProducerQueue>& producer) {
if (producer->metadata_size() != sizeof(BufferMetadata)) {
- LOG(ERROR)
- << "BufferHubQueueCore::Create producer's metadata size is "
- << "different than the size of BufferHubQueueCore::BufferMetadata";
+ ALOGE(
+ "BufferHubQueueCore::Create producer's metadata size is different than "
+ "the size of BufferHubQueueCore::BufferMetadata");
return nullptr;
}
@@ -39,18 +41,18 @@
// bookkeeping.
if (producer_->AllocateBuffer(width, height, format, usage, slice_count,
&slot) < 0) {
- LOG(ERROR) << "Failed to allocate new buffer in BufferHub.";
+ ALOGE("Failed to allocate new buffer in BufferHub.");
return NO_MEMORY;
}
auto buffer_producer = producer_->GetBuffer(slot);
- CHECK(buffer_producer != nullptr) << "Failed to get buffer producer at slot: "
- << slot;
+ LOG_ALWAYS_FATAL_IF(buffer_producer == nullptr,
+ "Failed to get buffer producer at slot: %zu", slot);
// Allocating a new buffer, |buffers_[slot]| should be in initial state.
- CHECK(buffers_[slot].mGraphicBuffer == nullptr) << "AllocateBuffer: slot "
- << slot << " is not empty.";
+ LOG_ALWAYS_FATAL_IF(buffers_[slot].mGraphicBuffer != nullptr,
+ "AllocateBuffer: slot %zu is not empty.", slot);
// Create new GraphicBuffer based on the newly created |buffer_producer|. Here
// we have to cast |buffer_handle_t| to |native_handle_t|, it's OK because
@@ -65,8 +67,8 @@
const_cast<native_handle_t*>(buffer_producer->buffer()->handle()),
false));
- CHECK_EQ(NO_ERROR, graphic_buffer->initCheck())
- << "Failed to init GraphicBuffer.";
+ LOG_ALWAYS_FATAL_IF(NO_ERROR != graphic_buffer->initCheck(),
+ "Failed to init GraphicBuffer.");
buffers_[slot].mBufferProducer = buffer_producer;
buffers_[slot].mGraphicBuffer = graphic_buffer;
@@ -77,8 +79,8 @@
// Detach the buffer producer via BufferHubRPC.
int ret = producer_->DetachBuffer(slot);
if (ret < 0) {
- LOG(ERROR) << "BufferHubQueueCore::DetachBuffer failed through RPC, ret="
- << strerror(-ret);
+ ALOGE("BufferHubQueueCore::DetachBuffer failed through RPC, ret=%s",
+ strerror(-ret));
return ret;
}
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
index 93d7307..752e8c4 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
@@ -1,5 +1,8 @@
#include "include/private/dvr/buffer_hub_queue_producer.h"
+#include <inttypes.h>
+#include <log/log.h>
+
namespace android {
namespace dvr {
@@ -9,18 +12,17 @@
status_t BufferHubQueueProducer::requestBuffer(int slot,
sp<GraphicBuffer>* buf) {
- VLOG(1) << "requestBuffer: slot=" << slot;;
+ ALOGD("requestBuffer: slot=%d", slot);
std::unique_lock<std::mutex> lock(core_->mutex_);
if (slot < 0 || slot >= req_buffer_count_) {
- LOG(ERROR) << "requestBuffer: slot index " << slot << " out of range [0, "
- << req_buffer_count_ << ")";
+ ALOGE("requestBuffer: slot index %d out of range [0, %d)", slot,
+ req_buffer_count_);
return BAD_VALUE;
} else if (!core_->buffers_[slot].mBufferState.isDequeued()) {
- LOG(ERROR) << "requestBuffer: slot " << slot
- << " is not owned by the producer (state = "
- << core_->buffers_[slot].mBufferState.string() << " )";
+ ALOGE("requestBuffer: slot %d is not owned by the producer (state = %s)",
+ slot, core_->buffers_[slot].mBufferState.string());
return BAD_VALUE;
}
@@ -31,17 +33,16 @@
status_t BufferHubQueueProducer::setMaxDequeuedBufferCount(
int max_dequeued_buffers) {
- VLOG(1) << "setMaxDequeuedBufferCount: max_dequeued_buffers="
- << max_dequeued_buffers;
+ ALOGD("setMaxDequeuedBufferCount: max_dequeued_buffers=%d",
+ max_dequeued_buffers);
std::unique_lock<std::mutex> lock(core_->mutex_);
if (max_dequeued_buffers <= 0 ||
max_dequeued_buffers >
static_cast<int>(BufferHubQueue::kMaxQueueCapacity)) {
- LOG(ERROR) << "setMaxDequeuedBufferCount: " << max_dequeued_buffers
- << " out of range (0, " << BufferHubQueue::kMaxQueueCapacity
- << "]";
+ ALOGE("setMaxDequeuedBufferCount: %d out of range (0, %zu]",
+ max_dequeued_buffers, BufferHubQueue::kMaxQueueCapacity);
return BAD_VALUE;
}
@@ -50,7 +51,7 @@
}
status_t BufferHubQueueProducer::setAsyncMode(bool /* async */) {
- LOG(ERROR) << "BufferHubQueueProducer::setAsyncMode not implemented.";
+ ALOGE("BufferHubQueueProducer::setAsyncMode not implemented.");
return INVALID_OPERATION;
}
@@ -60,8 +61,8 @@
PixelFormat format,
uint32_t usage,
FrameEventHistoryDelta* /* outTimestamps */) {
- VLOG(1) << "dequeueBuffer: w=" << width << ", h=" << height
- << " format=" << format << ", usage=" << usage;
+ ALOGD("dequeueBuffer: w=%u, h=%u, format=%d, usage=%u", width, height, format,
+ usage);
status_t ret;
std::unique_lock<std::mutex> lock(core_->mutex_);
@@ -94,13 +95,12 @@
// Needs reallocation.
// TODO(jwcai) Consider use VLOG instead if we find this log is not useful.
- LOG(INFO) << "dequeueBuffer,: requested buffer (w=" << width
- << ", h=" << height << ", format=" << format
- << ") is different from the buffer returned at slot: " << slot
- << " (w=" << buffer_producer->width()
- << ", h=" << buffer_producer->height()
- << ", format=" << buffer_producer->format()
- << "). Need re-allocattion.";
+ ALOGI(
+ "dequeueBuffer: requested buffer (w=%u, h=%u, format=%d) is different "
+ "from the buffer returned at slot: %zu (w=%d, h=%d, format=%d). Need "
+ "re-allocation.",
+ width, height, format, slot, buffer_producer->width(),
+ buffer_producer->height(), buffer_producer->format());
// Mark the slot as reallocating, so that later we can set
// BUFFER_NEEDS_REALLOCATION when the buffer actually get dequeued.
core_->buffers_[slot].mIsReallocating = true;
@@ -125,13 +125,13 @@
// BufferHubQueue).
// TODO(jwcai) Clean this up, make mBufferState compatible with BufferHub's
// model.
- CHECK(core_->buffers_[slot].mBufferState.isFree() ||
- core_->buffers_[slot].mBufferState.isQueued())
- << "dequeueBuffer: slot " << slot << " is not free or queued.";
+ LOG_ALWAYS_FATAL_IF(!core_->buffers_[slot].mBufferState.isFree() &&
+ !core_->buffers_[slot].mBufferState.isQueued(),
+ "dequeueBuffer: slot %zu is not free or queued.", slot);
core_->buffers_[slot].mBufferState.freeQueued();
core_->buffers_[slot].mBufferState.dequeue();
- VLOG(1) << "dequeueBuffer: slot=" << slot;
+ ALOGD("dequeueBuffer: slot=%zu", slot);
// TODO(jwcai) Handle fence properly. |BufferHub| has full fence support, we
// just need to exopose that through |BufferHubQueue| once we need fence.
@@ -148,13 +148,13 @@
}
status_t BufferHubQueueProducer::detachBuffer(int /* slot */) {
- LOG(ERROR) << "BufferHubQueueProducer::detachBuffer not implemented.";
+ ALOGE("BufferHubQueueProducer::detachBuffer not implemented.");
return INVALID_OPERATION;
}
status_t BufferHubQueueProducer::detachNextBuffer(
sp<GraphicBuffer>* /* out_buffer */, sp<Fence>* /* out_fence */) {
- LOG(ERROR) << "BufferHubQueueProducer::detachNextBuffer not implemented.";
+ ALOGE("BufferHubQueueProducer::detachNextBuffer not implemented.");
return INVALID_OPERATION;
}
@@ -163,14 +163,14 @@
// With this BufferHub backed implementation, we assume (for now) all buffers
// are allocated and owned by the BufferHub. Thus the attempt of transfering
// ownership of a buffer to the buffer queue is intentionally unsupported.
- LOG(FATAL) << "BufferHubQueueProducer::attachBuffer not supported.";
+ LOG_ALWAYS_FATAL("BufferHubQueueProducer::attachBuffer not supported.");
return INVALID_OPERATION;
}
status_t BufferHubQueueProducer::queueBuffer(int slot,
const QueueBufferInput& input,
QueueBufferOutput* /* output */) {
- VLOG(1) << "queueBuffer: slot " << slot;
+ ALOGD("queueBuffer: slot %d", slot);
int64_t timestamp;
sp<Fence> fence;
@@ -186,7 +186,7 @@
&scaling_mode, &transform, &fence);
if (fence == nullptr) {
- LOG(ERROR) << "queueBuffer: fence is NULL";
+ ALOGE("queueBuffer: fence is NULL");
return BAD_VALUE;
}
@@ -194,13 +194,12 @@
std::unique_lock<std::mutex> lock(core_->mutex_);
if (slot < 0 || slot >= req_buffer_count_) {
- LOG(ERROR) << "queueBuffer: slot index " << slot << " out of range [0, "
- << req_buffer_count_ << ")";
+ ALOGE("queueBuffer: slot index %d out of range [0, %d)", slot,
+ req_buffer_count_);
return BAD_VALUE;
} else if (!core_->buffers_[slot].mBufferState.isDequeued()) {
- LOG(ERROR) << "queueBuffer: slot " << slot
- << " is not owned by the producer (state = "
- << core_->buffers_[slot].mBufferState.string() << " )";
+ ALOGE("queueBuffer: slot %d is not owned by the producer (state = %s)",
+ slot, core_->buffers_[slot].mBufferState.string());
return BAD_VALUE;
}
@@ -218,21 +217,20 @@
status_t BufferHubQueueProducer::cancelBuffer(int slot,
const sp<Fence>& fence) {
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
std::unique_lock<std::mutex> lock(core_->mutex_);
if (slot < 0 || slot >= req_buffer_count_) {
- LOG(ERROR) << "cancelBuffer: slot index " << slot << " out of range [0, "
- << req_buffer_count_ << ")";
+ ALOGE("cancelBuffer: slot index %d out of range [0, %d)", slot,
+ req_buffer_count_);
return BAD_VALUE;
} else if (!core_->buffers_[slot].mBufferState.isDequeued()) {
- LOG(ERROR) << "cancelBuffer: slot " << slot
- << " is not owned by the producer (state = "
- << core_->buffers_[slot].mBufferState.string() << " )";
+ ALOGE("cancelBuffer: slot %d is not owned by the producer (state = %s)",
+ slot, core_->buffers_[slot].mBufferState.string());
return BAD_VALUE;
} else if (fence == NULL) {
- LOG(ERROR) << "cancelBuffer: fence is NULL";
+ ALOGE("cancelBuffer: fence is NULL");
return BAD_VALUE;
}
@@ -240,18 +238,18 @@
core_->producer_->Enqueue(buffer_producer, slot);
core_->buffers_[slot].mBufferState.cancel();
core_->buffers_[slot].mFence = fence;
- VLOG(1) << "cancelBuffer: slot " << slot;
+ ALOGD("cancelBuffer: slot %d", slot);
return NO_ERROR;
}
status_t BufferHubQueueProducer::query(int what, int* out_value) {
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
std::unique_lock<std::mutex> lock(core_->mutex_);
if (out_value == NULL) {
- LOG(ERROR) << "query: out_value was NULL";
+ ALOGE("query: out_value was NULL");
return BAD_VALUE;
}
@@ -277,7 +275,7 @@
return BAD_VALUE;
}
- VLOG(1) << "query: key=" << what << ", v=" << value;
+ ALOGD("query: key=%d, v=%d", what, value);
*out_value = value;
return NO_ERROR;
}
@@ -287,14 +285,14 @@
bool /* producer_controlled_by_app */, QueueBufferOutput* /* output */) {
// Consumer interaction are actually handled by buffer hub, and we need
// to maintain consumer operations here. Hence |connect| is a NO-OP.
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
return NO_ERROR;
}
status_t BufferHubQueueProducer::disconnect(int /* api */, DisconnectMode /* mode */) {
// Consumer interaction are actually handled by buffer hub, and we need
// to maintain consumer operations here. Hence |disconnect| is a NO-OP.
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
return NO_ERROR;
}
@@ -303,7 +301,7 @@
if (stream != NULL) {
// TODO(jwcai) Investigate how is is used, maybe use BufferHubBuffer's
// metadata.
- LOG(ERROR) << "SidebandStream is not currently supported.";
+ ALOGE("SidebandStream is not currently supported.");
return INVALID_OPERATION;
}
return NO_ERROR;
@@ -316,17 +314,17 @@
// TODO(jwcai) |allocateBuffers| aims to preallocate up to the maximum number
// of buffers permitted by the current BufferQueue configuration (aka
// |req_buffer_count_|).
- LOG(ERROR) << "BufferHubQueueProducer::allocateBuffers not implemented.";
+ ALOGE("BufferHubQueueProducer::allocateBuffers not implemented.");
}
status_t BufferHubQueueProducer::allowAllocation(bool /* allow */) {
- LOG(ERROR) << "BufferHubQueueProducer::allowAllocation not implemented.";
+ ALOGE("BufferHubQueueProducer::allowAllocation not implemented.");
return INVALID_OPERATION;
}
status_t BufferHubQueueProducer::setGenerationNumber(
uint32_t generation_number) {
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
std::unique_lock<std::mutex> lock(core_->mutex_);
core_->generation_number_ = generation_number;
@@ -337,23 +335,23 @@
// BufferHub based implementation could have one to many producer/consumer
// relationship, thus |getConsumerName| from the producer side does not
// make any sense.
- LOG(ERROR) << "BufferHubQueueProducer::getConsumerName not supported.";
+ ALOGE("BufferHubQueueProducer::getConsumerName not supported.");
return String8("BufferHubQueue::DummyConsumer");
}
status_t BufferHubQueueProducer::setSharedBufferMode(
bool /* shared_buffer_mode */) {
- LOG(ERROR) << "BufferHubQueueProducer::setSharedBufferMode not implemented.";
+ ALOGE("BufferHubQueueProducer::setSharedBufferMode not implemented.");
return INVALID_OPERATION;
}
status_t BufferHubQueueProducer::setAutoRefresh(bool /* auto_refresh */) {
- LOG(ERROR) << "BufferHubQueueProducer::setAutoRefresh not implemented.";
+ ALOGE("BufferHubQueueProducer::setAutoRefresh not implemented.");
return INVALID_OPERATION;
}
status_t BufferHubQueueProducer::setDequeueTimeout(nsecs_t timeout) {
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
std::unique_lock<std::mutex> lock(core_->mutex_);
core_->dequeue_timeout_ms_ = static_cast<int>(timeout / (1000 * 1000));
@@ -363,17 +361,17 @@
status_t BufferHubQueueProducer::getLastQueuedBuffer(
sp<GraphicBuffer>* /* out_buffer */, sp<Fence>* /* out_fence */,
float /*out_transform_matrix*/[16]) {
- LOG(ERROR) << "BufferHubQueueProducer::getLastQueuedBuffer not implemented.";
+ ALOGE("BufferHubQueueProducer::getLastQueuedBuffer not implemented.");
return INVALID_OPERATION;
}
void BufferHubQueueProducer::getFrameTimestamps(
FrameEventHistoryDelta* /*outDelta*/) {
- LOG(ERROR) << "BufferHubQueueProducer::getFrameTimestamps not implemented.";
+ ALOGE("BufferHubQueueProducer::getFrameTimestamps not implemented.");
}
status_t BufferHubQueueProducer::getUniqueId(uint64_t* out_id) const {
- VLOG(1) << (__FUNCTION__);
+ ALOGD(__FUNCTION__);
*out_id = core_->unique_id_;
return NO_ERROR;
@@ -382,7 +380,7 @@
IBinder* BufferHubQueueProducer::onAsBinder() {
// BufferHubQueueProducer is a non-binder implementation of
// IGraphicBufferProducer.
- LOG(WARNING) << "BufferHubQueueProducer::onAsBinder is not supported.";
+ ALOGW("BufferHubQueueProducer::onAsBinder is not supported.");
return nullptr;
}
diff --git a/libs/vr/libdisplay/Android.mk b/libs/vr/libdisplay/Android.mk
index 670bdcd..f0e62df 100644
--- a/libs/vr/libdisplay/Android.mk
+++ b/libs/vr/libdisplay/Android.mk
@@ -49,7 +49,6 @@
libsync
staticLibraries := \
- libchrome \
libbufferhub \
libbufferhubqueue \
libdvrcommon \
diff --git a/libs/vr/libdisplay/display_client.cpp b/libs/vr/libdisplay/display_client.cpp
index cfb346d..54098e8 100644
--- a/libs/vr/libdisplay/display_client.cpp
+++ b/libs/vr/libdisplay/display_client.cpp
@@ -1,7 +1,7 @@
#include "include/private/dvr/display_client.h"
-#include <cutils/log.h>
#include <cutils/native_handle.h>
+#include <log/log.h>
#include <pdx/default_transport/client_channel.h>
#include <pdx/default_transport/client_channel_factory.h>
#include <pdx/status.h>
diff --git a/libs/vr/libdisplay/frame_history.cpp b/libs/vr/libdisplay/frame_history.cpp
index 67e4a09..154afbe 100644
--- a/libs/vr/libdisplay/frame_history.cpp
+++ b/libs/vr/libdisplay/frame_history.cpp
@@ -1,7 +1,7 @@
#include <private/dvr/frame_history.h>
-#include <cutils/log.h>
#include <errno.h>
+#include <log/log.h>
#include <sync/sync.h>
#include <pdx/file_handle.h>
diff --git a/libs/vr/libdisplay/gl_fenced_flush.cpp b/libs/vr/libdisplay/gl_fenced_flush.cpp
index 64b2e99..c70d554 100644
--- a/libs/vr/libdisplay/gl_fenced_flush.cpp
+++ b/libs/vr/libdisplay/gl_fenced_flush.cpp
@@ -6,7 +6,7 @@
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
-#include <base/logging.h>
+#include <log/log.h>
using android::pdx::LocalHandle;
@@ -22,14 +22,14 @@
eglCreateSyncKHR(display, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
glFlush();
if (sync_point == EGL_NO_SYNC_KHR) {
- LOG(ERROR) << "sync_point == EGL_NO_SYNC_KHR";
+ ALOGE("sync_point == EGL_NO_SYNC_KHR");
return LocalHandle();
}
EGLint fence_fd = eglDupNativeFenceFDANDROID(display, sync_point);
eglDestroySyncKHR(display, sync_point);
if (fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
- LOG(ERROR) << "fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID";
+ ALOGE("fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID");
return LocalHandle();
}
return LocalHandle(fence_fd);
diff --git a/libs/vr/libdisplay/graphics.cpp b/libs/vr/libdisplay/graphics.cpp
index d599616..d0557a9 100644
--- a/libs/vr/libdisplay/graphics.cpp
+++ b/libs/vr/libdisplay/graphics.cpp
@@ -1,10 +1,11 @@
#include <dvr/graphics.h>
+#include <inttypes.h>
#include <sys/timerfd.h>
#include <array>
#include <vector>
-#include <cutils/log.h>
+#include <log/log.h>
#include <utils/Trace.h>
#ifndef VK_USE_PLATFORM_ANDROID_KHR
@@ -372,8 +373,8 @@
case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT:
break;
default:
- ALOGE("Invalid display surface parameter: key=%d value=%ld", p->key,
- p->value);
+ ALOGE("Invalid display surface parameter: key=%d value=%" PRId64,
+ p->key, p->value);
return nullptr;
}
}
@@ -583,7 +584,8 @@
static int LockBuffer_DEPRECATED(ANativeWindow* window,
ANativeWindowBuffer* buffer);
- DISALLOW_COPY_AND_ASSIGN(DvrGraphicsContext);
+ DvrGraphicsContext(const DvrGraphicsContext&) = delete;
+ void operator=(const DvrGraphicsContext&) = delete;
};
DvrGraphicsContext::DvrGraphicsContext()
@@ -743,8 +745,8 @@
// so that anyone who tries to bind an FBO to context->texture_id
// will not get an incomplete buffer.
context->current_buffer = context->buffer_queue->Dequeue();
- CHECK(context->gl.texture_count ==
- context->current_buffer->buffer()->slice_count());
+ LOG_ALWAYS_FATAL_IF(context->gl.texture_count !=
+ context->current_buffer->buffer()->slice_count());
for (int i = 0; i < context->gl.texture_count; ++i) {
glBindTexture(context->gl.texture_target_type, context->gl.texture_id[i]);
glEGLImageTargetTexture2DOES(context->gl.texture_target_type,
@@ -794,12 +796,12 @@
result = vkCreateAndroidSurfaceKHR(
context->vk.instance, &android_surface_ci,
context->vk.allocation_callbacks, &context->vk.surface);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
VkBool32 surface_supports_present = VK_FALSE;
result = vkGetPhysicalDeviceSurfaceSupportKHR(
context->vk.physical_device, context->vk.present_queue_family,
context->vk.surface, &surface_supports_present);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
if (!surface_supports_present) {
ALOGE("Error: provided queue family (%u) does not support presentation",
context->vk.present_queue_family);
@@ -809,21 +811,22 @@
result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
context->vk.physical_device, context->vk.surface,
&surface_capabilities);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
// Determine the swapchain image format.
uint32_t device_surface_format_count = 0;
result = vkGetPhysicalDeviceSurfaceFormatsKHR(
context->vk.physical_device, context->vk.surface,
&device_surface_format_count, nullptr);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
std::vector<VkSurfaceFormatKHR> device_surface_formats(
device_surface_format_count);
result = vkGetPhysicalDeviceSurfaceFormatsKHR(
context->vk.physical_device, context->vk.surface,
&device_surface_format_count, device_surface_formats.data());
- CHECK_EQ(result, VK_SUCCESS);
- CHECK_GT(device_surface_format_count, 0U);
- CHECK_NE(device_surface_formats[0].format, VK_FORMAT_UNDEFINED);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(device_surface_format_count == 0U);
+ LOG_ALWAYS_FATAL_IF(device_surface_formats[0].format ==
+ VK_FORMAT_UNDEFINED);
VkSurfaceFormatKHR present_surface_format = device_surface_formats[0];
// Determine the swapchain present mode.
// TODO(cort): query device_present_modes to make sure MAILBOX is supported.
@@ -832,19 +835,19 @@
result = vkGetPhysicalDeviceSurfacePresentModesKHR(
context->vk.physical_device, context->vk.surface,
&device_present_mode_count, nullptr);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
std::vector<VkPresentModeKHR> device_present_modes(
device_present_mode_count);
result = vkGetPhysicalDeviceSurfacePresentModesKHR(
context->vk.physical_device, context->vk.surface,
&device_present_mode_count, device_present_modes.data());
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
VkPresentModeKHR present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
// Extract presentation surface extents, image count, transform, usages,
// etc.
- LOG_ASSERT(
- static_cast<int>(surface_capabilities.currentExtent.width) != -1 &&
- static_cast<int>(surface_capabilities.currentExtent.height) != -1);
+ LOG_ALWAYS_FATAL_IF(
+ static_cast<int>(surface_capabilities.currentExtent.width) == -1 ||
+ static_cast<int>(surface_capabilities.currentExtent.height) == -1);
VkExtent2D swapchain_extent = surface_capabilities.currentExtent;
uint32_t desired_image_count = surface_capabilities.minImageCount;
@@ -856,8 +859,8 @@
surface_capabilities.currentTransform;
VkImageUsageFlags image_usage_flags =
surface_capabilities.supportedUsageFlags;
- CHECK_NE(surface_capabilities.supportedCompositeAlpha,
- static_cast<VkFlags>(0));
+ LOG_ALWAYS_FATAL_IF(surface_capabilities.supportedCompositeAlpha ==
+ static_cast<VkFlags>(0));
VkCompositeAlphaFlagBitsKHR composite_alpha =
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
if (!(surface_capabilities.supportedCompositeAlpha &
@@ -889,18 +892,18 @@
result = vkCreateSwapchainKHR(context->vk.device, &swapchain_ci,
context->vk.allocation_callbacks,
&context->vk.swapchain);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
// Create swapchain image views
uint32_t image_count = 0;
result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
&image_count, nullptr);
- CHECK_EQ(result, VK_SUCCESS);
- CHECK_GT(image_count, 0U);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(image_count == 0U);
context->vk.swapchain_images.resize(image_count);
result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
&image_count,
context->vk.swapchain_images.data());
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
context->vk.swapchain_image_views.resize(image_count);
VkImageViewCreateInfo image_view_ci = {};
image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
@@ -923,7 +926,7 @@
result = vkCreateImageView(context->vk.device, &image_view_ci,
context->vk.allocation_callbacks,
&context->vk.swapchain_image_views[i]);
- CHECK_EQ(result, VK_SUCCESS);
+ LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
}
// Fill in any requested output parameters.
for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
@@ -950,7 +953,7 @@
// by the Vulkan path.
int DvrGraphicsContext::Post(android::dvr::NativeBufferProducer* buffer,
int fence_fd) {
- LOG_ASSERT(graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(graphics_api != DVR_GRAPHICS_API_VULKAN);
ATRACE_NAME(__PRETTY_FUNCTION__);
ALOGI_IF(TRACE, "DvrGraphicsContext::Post: buffer_id=%d, fence_fd=%d",
buffer->buffer()->id(), fence_fd);
@@ -967,7 +970,7 @@
ALOGI_IF(TRACE, "SetSwapInterval: window=%p interval=%d", window, interval);
DvrGraphicsContext* self = getSelf(window);
(void)self;
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
return android::NO_ERROR;
}
@@ -977,7 +980,7 @@
ATRACE_NAME(__PRETTY_FUNCTION__);
DvrGraphicsContext* self = getSelf(window);
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
std::lock_guard<std::mutex> autolock(self->lock_);
if (!self->current_buffer) {
@@ -997,7 +1000,7 @@
ALOGI_IF(TRACE, "NativeWindow::QueueBuffer: fence_fd=%d", fence_fd);
DvrGraphicsContext* self = getSelf(window);
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
std::lock_guard<std::mutex> autolock(self->lock_);
android::dvr::NativeBufferProducer* native_buffer =
@@ -1007,7 +1010,7 @@
if (self->buffer_already_posted) {
// Check that the buffer is the one we expect, but handle it if this happens
// in production by allowing this buffer to post on top of the previous one.
- DCHECK(native_buffer == self->current_buffer);
+ LOG_FATAL_IF(native_buffer != self->current_buffer);
if (native_buffer == self->current_buffer) {
do_post = false;
if (fence_fd >= 0)
@@ -1031,7 +1034,7 @@
ALOGI_IF(TRACE, "DvrGraphicsContext::CancelBuffer: fence_fd: %d", fence_fd);
DvrGraphicsContext* self = getSelf(window);
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
std::lock_guard<std::mutex> autolock(self->lock_);
android::dvr::NativeBufferProducer* native_buffer =
@@ -1042,7 +1045,7 @@
if (self->buffer_already_posted) {
// Check that the buffer is the one we expect, but handle it if this happens
// in production by returning this buffer to the buffer queue.
- DCHECK(native_buffer == self->current_buffer);
+ LOG_FATAL_IF(native_buffer != self->current_buffer);
if (native_buffer == self->current_buffer) {
do_enqueue = false;
}
@@ -1061,7 +1064,7 @@
int DvrGraphicsContext::Query(const ANativeWindow* window, int what,
int* value) {
DvrGraphicsContext* self = getSelf(const_cast<ANativeWindow*>(window));
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
std::lock_guard<std::mutex> autolock(self->lock_);
switch (what) {
@@ -1100,7 +1103,7 @@
int DvrGraphicsContext::Perform(ANativeWindow* window, int operation, ...) {
DvrGraphicsContext* self = getSelf(window);
- LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
std::lock_guard<std::mutex> autolock(self->lock_);
va_list args;
@@ -1231,7 +1234,7 @@
float32x4_t render_pose_orientation,
float32x4_t render_pose_translation) {
ATRACE_NAME("dvrBeginRenderFrameEds");
- LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+ LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
CHECK_GL();
// Grab a buffer from the queue and set its pose.
if (!graphics_context->current_buffer) {
@@ -1270,7 +1273,8 @@
uint32_t* swapchain_image_index,
VkImageView* swapchain_image_view) {
ATRACE_NAME("dvrBeginRenderFrameEds");
- LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api !=
+ DVR_GRAPHICS_API_VULKAN);
// Acquire a swapchain image. This calls Dequeue() internally.
VkResult result = vkAcquireNextImageKHR(
@@ -1313,7 +1317,7 @@
return -EPERM;
}
if (num_views > DVR_GRAPHICS_SURFACE_MAX_VIEWS) {
- LOG(ERROR) << "dvrBeginRenderFrameLateLatch called with too many views.";
+ ALOGE("dvrBeginRenderFrameLateLatch called with too many views.");
return -EINVAL;
}
dvrBeginRenderFrameEds(graphics_context, DVR_POSE_LATE_LATCH,
@@ -1424,7 +1428,7 @@
ATRACE_NAME("dvrGraphicsPostEarly");
ALOGI_IF(TRACE, "dvrGraphicsPostEarly");
- LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+ LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
// Note that this function can be called before or after
// dvrBeginRenderFrame.
@@ -1445,7 +1449,7 @@
}
int dvrPresent(DvrGraphicsContext* graphics_context) {
- LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+ LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
std::array<char, 128> buf;
snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
@@ -1482,7 +1486,8 @@
int dvrPresentVk(DvrGraphicsContext* graphics_context,
VkSemaphore submit_semaphore, uint32_t swapchain_image_index) {
- LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api !=
+ DVR_GRAPHICS_API_VULKAN);
std::array<char, 128> buf;
snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
@@ -1549,7 +1554,7 @@
auto display_surface = graphics_context->display_surface;
// A DisplaySurface must be created prior to the creation of a
// VideoMeshSurface.
- LOG_ASSERT(display_surface != nullptr);
+ LOG_ALWAYS_FATAL_IF(display_surface == nullptr);
LocalChannelHandle surface_handle = display_surface->CreateVideoMeshSurface();
if (!surface_handle.valid()) {
diff --git a/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h b/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h
index a2659a6..e52d0b9 100644
--- a/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h
+++ b/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h
@@ -1,7 +1,6 @@
#ifndef ANDROID_DVR_VIDEO_MESH_SURFACE_CLIENT_H_
#define ANDROID_DVR_VIDEO_MESH_SURFACE_CLIENT_H_
-#include <base/macros.h>
#include <private/dvr/buffer_hub_queue_client.h>
#include <private/dvr/display_client.h>
diff --git a/libs/vr/libdisplay/late_latch.cpp b/libs/vr/libdisplay/late_latch.cpp
index 3681e10..b1a1589 100644
--- a/libs/vr/libdisplay/late_latch.cpp
+++ b/libs/vr/libdisplay/late_latch.cpp
@@ -6,7 +6,7 @@
#include <iostream>
#include <string>
-#include <base/logging.h>
+#include <log/log.h>
#include <private/dvr/clock_ns.h>
#include <private/dvr/debug.h>
#include <private/dvr/graphics/gpu_profiler.h>
@@ -20,7 +20,6 @@
#ifndef LOG_TAG
#define LOG_TAG "latelatch"
#endif
-#include <cutils/log.h>
#define PE(str, ...) \
fprintf(stderr, "[%s:%d] " str, __FILE__, __LINE__, ##__VA_ARGS__); \
@@ -268,18 +267,18 @@
LocalHandle pose_buffer_fd;
pose_client_ = dvrPoseCreate();
if (!pose_client_) {
- LOG(ERROR) << "LateLatch Error: failed to create pose client";
+ ALOGE("LateLatch Error: failed to create pose client");
} else {
int ret = privateDvrPoseGetRingBufferFd(pose_client_, &pose_buffer_fd);
if (ret < 0) {
- LOG(ERROR) << "LateLatch Error: failed to get pose ring buffer";
+ ALOGE("LateLatch Error: failed to get pose ring buffer");
}
}
glGenBuffers(1, &pose_buffer_object_);
glGenBuffers(1, &metadata_buffer_id_);
if (!glBindSharedBufferQCOM) {
- LOG(ERROR) << "Error: Missing gralloc buffer extension, no pose data";
+ ALOGE("Error: Missing gralloc buffer extension, no pose data");
} else {
if (pose_buffer_fd) {
glBindBuffer(GL_SHADER_STORAGE_BUFFER, pose_buffer_object_);
@@ -346,7 +345,7 @@
}
void LateLatch::AddLateLatch(const LateLatchInput& data) const {
- CHECK(is_app_late_latch_);
+ LOG_ALWAYS_FATAL_IF(!is_app_late_latch_);
CHECK_GL();
late_latch_program_.Use();
@@ -361,7 +360,7 @@
if (adata)
*adata = data;
else
- LOG(ERROR) << "Error: LateLatchInput gl mapping is null";
+ ALOGE("Error: LateLatchInput gl mapping is null");
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, input_buffer_id_);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
@@ -410,7 +409,7 @@
void LateLatch::AddEdsLateLatch(const LateLatchInput& data,
GLuint render_pose_buffer_object) const {
- CHECK(!is_app_late_latch_);
+ LOG_ALWAYS_FATAL_IF(is_app_late_latch_);
late_latch_program_.Use();
// Fall back on internal buffer when none is provided.
diff --git a/libs/vr/libdisplay/native_buffer_queue.cpp b/libs/vr/libdisplay/native_buffer_queue.cpp
index 2d1e23d..8dd0ee0 100644
--- a/libs/vr/libdisplay/native_buffer_queue.cpp
+++ b/libs/vr/libdisplay/native_buffer_queue.cpp
@@ -1,7 +1,6 @@
#include "include/private/dvr/native_buffer_queue.h"
-#include <base/logging.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <sys/epoll.h>
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
@@ -23,7 +22,7 @@
: surface_(surface),
buffers_(capacity),
buffer_queue_(capacity) {
- CHECK(surface);
+ LOG_ALWAYS_FATAL_IF(!surface);
epoll_fd_ = epoll_create(64);
if (epoll_fd_ < 0) {
@@ -34,7 +33,7 @@
// The kSurfaceBufferMaxCount must be >= the capacity so that shader code
// can bind surface buffer array data.
- CHECK(kSurfaceBufferMaxCount >= capacity);
+ LOG_ALWAYS_FATAL_IF(kSurfaceBufferMaxCount < capacity);
for (size_t i = 0; i < capacity; i++) {
uint32_t buffer_index = 0;
diff --git a/libs/vr/libdisplay/native_window.cpp b/libs/vr/libdisplay/native_window.cpp
index 63c81ed..24ecd8a 100644
--- a/libs/vr/libdisplay/native_window.cpp
+++ b/libs/vr/libdisplay/native_window.cpp
@@ -1,7 +1,6 @@
#include <EGL/egl.h>
#include <android/native_window.h>
-#include <base/logging.h>
#include <cutils/native_handle.h>
#include <errno.h>
#include <pthread.h>
@@ -17,7 +16,7 @@
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <memory>
#include <mutex>
@@ -177,7 +176,7 @@
if (self->next_buffer_already_posted_) {
// Check that the buffer is the one we expect, but handle it if this happens
// in production by allowing this buffer to post on top of the previous one.
- DCHECK(native_buffer == self->next_post_buffer_.get());
+ LOG_FATAL_IF(native_buffer != self->next_post_buffer_.get());
if (native_buffer == self->next_post_buffer_.get()) {
do_post = false;
if (fence_fd >= 0)
@@ -210,7 +209,7 @@
if (self->next_buffer_already_posted_) {
// Check that the buffer is the one we expect, but handle it if this happens
// in production by returning this buffer to the buffer queue.
- DCHECK(native_buffer == self->next_post_buffer_.get());
+ LOG_FATAL_IF(native_buffer != self->next_post_buffer_.get());
if (native_buffer == self->next_post_buffer_.get()) {
do_enqueue = false;
}
diff --git a/libs/vr/libdisplay/screenshot_client.cpp b/libs/vr/libdisplay/screenshot_client.cpp
index 78f5e0f..3ad0c68 100644
--- a/libs/vr/libdisplay/screenshot_client.cpp
+++ b/libs/vr/libdisplay/screenshot_client.cpp
@@ -1,6 +1,6 @@
#include "include/private/dvr/screenshot_client.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <mutex>
diff --git a/libs/vr/libdisplay/vsync_client.cpp b/libs/vr/libdisplay/vsync_client.cpp
index c4cad50..c928a08 100644
--- a/libs/vr/libdisplay/vsync_client.cpp
+++ b/libs/vr/libdisplay/vsync_client.cpp
@@ -1,6 +1,6 @@
#include "include/private/dvr/vsync_client.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <pdx/default_transport/client_channel_factory.h>
#include <private/dvr/display_rpc.h>
@@ -26,7 +26,10 @@
status.GetErrorMessage().c_str());
return -status.error();
}
- *timestamp_ns = status.get();
+
+ if (timestamp_ns != nullptr) {
+ *timestamp_ns = status.get();
+ }
return 0;
}
diff --git a/libs/vr/libdvrcommon/Android.mk b/libs/vr/libdvrcommon/Android.mk
index 72afab2..80eb3a6 100644
--- a/libs/vr/libdvrcommon/Android.mk
+++ b/libs/vr/libdvrcommon/Android.mk
@@ -36,7 +36,6 @@
libhardware
staticLibraries := \
- libchrome \
libpdx_default_transport \
include $(CLEAR_VARS)
diff --git a/libs/vr/libdvrcommon/frame_time_history.cpp b/libs/vr/libdvrcommon/frame_time_history.cpp
index 796498c..d4718a9 100644
--- a/libs/vr/libdvrcommon/frame_time_history.cpp
+++ b/libs/vr/libdvrcommon/frame_time_history.cpp
@@ -1,6 +1,6 @@
#include <private/dvr/frame_time_history.h>
-#include <cutils/log.h>
+#include <log/log.h>
namespace android {
namespace dvr {
diff --git a/libs/vr/libdvrcommon/include/private/dvr/debug.h b/libs/vr/libdvrcommon/include/private/dvr/debug.h
index 7db681a..c31a385 100644
--- a/libs/vr/libdvrcommon/include/private/dvr/debug.h
+++ b/libs/vr/libdvrcommon/include/private/dvr/debug.h
@@ -4,15 +4,15 @@
#include <GLES3/gl3.h>
#include <math.h>
-#include <base/logging.h>
+#include <log/log.h>
#ifndef NDEBUG
-#define CHECK_GL() \
- do { \
- const GLenum err = glGetError(); \
- if (err != GL_NO_ERROR) { \
- LOG(ERROR) << "OpenGL error " << err; \
- } \
+#define CHECK_GL() \
+ do { \
+ const GLenum err = glGetError(); \
+ if (err != GL_NO_ERROR) { \
+ ALOGE("OpenGL error %d", err); \
+ } \
} while (0)
#define CHECK_GL_FBO() \
@@ -22,10 +22,10 @@
case GL_FRAMEBUFFER_COMPLETE: \
break; \
case GL_FRAMEBUFFER_UNSUPPORTED: \
- LOG(ERROR) << "GL_FRAMEBUFFER_UNSUPPORTED"; \
+ ALOGE("GL_FRAMEBUFFER_UNSUPPORTED"); \
break; \
default: \
- LOG(ERROR) << "FBO user error: " << status; \
+ ALOGE("FBO user error: %d", status); \
break; \
} \
} while (0)
diff --git a/libs/vr/libdvrcommon/include/private/dvr/epoll_file_descriptor.h b/libs/vr/libdvrcommon/include/private/dvr/epoll_file_descriptor.h
index 8df741c..91e12c5 100644
--- a/libs/vr/libdvrcommon/include/private/dvr/epoll_file_descriptor.h
+++ b/libs/vr/libdvrcommon/include/private/dvr/epoll_file_descriptor.h
@@ -2,7 +2,7 @@
#define LIBS_VR_LIBDVRCOMMON_INCLUDE_PRIVATE_DVR_EPOLL_FILE_DESCRIPTOR_H_
#include <android-base/unique_fd.h>
-#include <base/logging.h>
+#include <log/log.h>
#include <sys/epoll.h>
namespace android {
@@ -24,7 +24,7 @@
int Create() {
if (IsValid()) {
- LOG(WARNING) << "epoll fd has already been created.";
+ ALOGW("epoll fd has already been created.");
return -EALREADY;
}
diff --git a/libs/vr/libdvrcommon/include/private/dvr/log_helpers.h b/libs/vr/libdvrcommon/include/private/dvr/log_helpers.h
index c9f7f8e..12ef622 100644
--- a/libs/vr/libdvrcommon/include/private/dvr/log_helpers.h
+++ b/libs/vr/libdvrcommon/include/private/dvr/log_helpers.h
@@ -2,8 +2,8 @@
#define ANDROID_DVR_LOG_HELPERS_H_
#include <iomanip>
+#include <ostream>
-#include <base/logging.h>
#include <private/dvr/eigen.h>
#include <private/dvr/field_of_view.h>
@@ -32,7 +32,8 @@
template <typename T>
inline std::ostream& operator<<(std::ostream& out,
const Eigen::AffineMatrix<T, 4>& mat) {
- out << std::setfill(' ') << std::setprecision(4) << std::fixed << std::showpos;
+ out << std::setfill(' ') << std::setprecision(4) << std::fixed
+ << std::showpos;
out << "\nmat4[";
out << std::setw(10) << mat(0, 0) << " " << std::setw(10) << mat(0, 1) << " "
<< std::setw(10) << mat(0, 2) << " " << std::setw(10) << mat(0, 3);
diff --git a/libs/vr/libdvrcommon/revision.cpp b/libs/vr/libdvrcommon/revision.cpp
index ae8603f..7925f65 100644
--- a/libs/vr/libdvrcommon/revision.cpp
+++ b/libs/vr/libdvrcommon/revision.cpp
@@ -9,7 +9,7 @@
#include <sys/types.h>
#include <unistd.h>
-#include <base/logging.h>
+#include <log/log.h>
#include "revision_path.h"
@@ -74,16 +74,16 @@
fd = open(dvr_product_revision_file_path(), O_RDONLY);
if (fd < 0) {
- PLOG(ERROR) << "Could not open '" << dvr_product_revision_file_path()
- << "' to get product revision";
+ ALOGE("Could not open '%s' to get product revision: %s",
+ dvr_product_revision_file_path(), strerror(errno));
global_product_revision_processed = true;
return;
}
read_rc = read(fd, global_product_revision_str, kProductRevisionStringSize);
if (read_rc <= 0) {
- PLOG(ERROR) << "Could not read from '" << dvr_product_revision_file_path()
- << "'";
+ ALOGE("Could not read from '%s': %s", dvr_product_revision_file_path(),
+ strerror(errno));
global_product_revision_processed = true;
return;
}
@@ -102,8 +102,8 @@
global_product = product_revision->product;
global_revision = product_revision->revision;
} else {
- LOG(ERROR) << "Unable to match '" << global_product_revision_str
- << "' to a product/revision.";
+ ALOGE("Unable to match '%s' to a product/revision.",
+ global_product_revision_str);
}
global_product_revision_processed = true;
diff --git a/libs/vr/libdvrgraphics/Android.mk b/libs/vr/libdvrgraphics/Android.mk
index 3d84319..b95b18e 100644
--- a/libs/vr/libdvrgraphics/Android.mk
+++ b/libs/vr/libdvrgraphics/Android.mk
@@ -13,7 +13,6 @@
$(LOCAL_PATH)/include
staticLibraries := \
- libchrome \
libbufferhub \
libdvrcommon \
libpdx_default_transport \
diff --git a/libs/vr/libdvrgraphics/blur.cpp b/libs/vr/libdvrgraphics/blur.cpp
index 7365b0e..90e271e 100644
--- a/libs/vr/libdvrgraphics/blur.cpp
+++ b/libs/vr/libdvrgraphics/blur.cpp
@@ -11,8 +11,7 @@
#include <string>
-#include <base/logging.h>
-#include <base/strings/string_number_conversions.h>
+#include <log/log.h>
#include <private/dvr/debug.h>
#include <private/dvr/graphics/egl_image.h>
#include <private/dvr/graphics/shader_program.h>
@@ -78,7 +77,7 @@
width_(w),
height_(h),
fbo_q_free_(1 + num_blur_outputs) {
- CHECK(num_blur_outputs > 0);
+ LOG_ALWAYS_FATAL_IF(num_blur_outputs <= 0);
source_fbo_ =
CreateFbo(w, h, source_texture, source_texture_target, is_external);
fbo_half_ = CreateFbo(w / 2, h / 2, 0, target_texture_target, is_external);
@@ -113,7 +112,7 @@
}
GLuint Blur::DrawBlur(GLuint source_texture) {
- CHECK(fbo_q_free_.GetSize() >= 2);
+ LOG_ALWAYS_FATAL_IF(fbo_q_free_.GetSize() < 2);
// Downsample to half w x half h.
glBindFramebuffer(GL_READ_FRAMEBUFFER, source_fbo_.fbo);
diff --git a/libs/vr/libdvrgraphics/gpu_profiler.cpp b/libs/vr/libdvrgraphics/gpu_profiler.cpp
index d252a34..49c515f 100644
--- a/libs/vr/libdvrgraphics/gpu_profiler.cpp
+++ b/libs/vr/libdvrgraphics/gpu_profiler.cpp
@@ -1,6 +1,6 @@
#include "include/private/dvr/graphics/gpu_profiler.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <private/dvr/clock_ns.h>
diff --git a/libs/vr/libdvrgraphics/shader_program.cpp b/libs/vr/libdvrgraphics/shader_program.cpp
index bf36eff..2d36600 100644
--- a/libs/vr/libdvrgraphics/shader_program.cpp
+++ b/libs/vr/libdvrgraphics/shader_program.cpp
@@ -3,15 +3,14 @@
#include <regex>
#include <sstream>
-#include <base/logging.h>
-#include <base/strings/string_util.h>
+#include <log/log.h>
namespace {
static bool CompileShader(GLuint shader, const std::string& shader_string) {
- std::string prefix = "";
- if (!base::StartsWith(shader_string, "#version",
- base::CompareCase::SENSITIVE)) {
+ std::string prefix;
+ const std::string kVersion = "#version";
+ if (shader_string.substr(0, kVersion.size()) != kVersion) {
prefix = "#version 310 es\n";
}
std::string string_with_prefix = prefix + shader_string;
@@ -24,8 +23,7 @@
if (!success) {
GLchar infoLog[512];
glGetShaderInfoLog(shader, 512, nullptr, infoLog);
- LOG(ERROR) << "Shader Failed to compile: " << *shader_str << " -- "
- << infoLog;
+ ALOGE("Shader Failed to compile: %s -- %s", *shader_str, infoLog);
return false;
}
return true;
@@ -43,7 +41,7 @@
if (!success) {
GLchar infoLog[512];
glGetProgramInfoLog(program, 512, nullptr, infoLog);
- LOG(ERROR) << "Shader failed to link: " << infoLog;
+ ALOGE("Shader failed to link: %s", infoLog);
return false;
}
@@ -60,7 +58,7 @@
if (!success) {
GLchar infoLog[512];
glGetProgramInfoLog(program, 512, nullptr, infoLog);
- LOG(ERROR) << "Shader failed to link: " << infoLog;
+ ALOGE("Shader failed to link: %s", infoLog);
return false;
}
diff --git a/libs/vr/libdvrgraphics/timer_query.cpp b/libs/vr/libdvrgraphics/timer_query.cpp
index dcc6216..23d2b7c 100644
--- a/libs/vr/libdvrgraphics/timer_query.cpp
+++ b/libs/vr/libdvrgraphics/timer_query.cpp
@@ -1,7 +1,7 @@
#include "include/private/dvr/graphics/timer_query.h"
#include <GLES2/gl2ext.h>
-#include <base/logging.h>
+#include <log/log.h>
namespace android {
namespace dvr {
@@ -38,7 +38,7 @@
double SyncTimerQuery::FlushAndGetTimeInMS() {
if (timer_.query_ == 0) {
- LOG(ERROR) << "Error: Only call FlushAndGetTimeInMS() once.";
+ ALOGE("Error: Only call FlushAndGetTimeInMS() once.");
return 0.0;
}
timer_.End();
@@ -51,7 +51,7 @@
GLint disjoint_occurred = 0;
glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_occurred);
if (disjoint_occurred) {
- LOG(ERROR) << "Disjoint occurred.";
+ ALOGE("Disjoint occurred.");
timer_.Delete();
return 0.0;
}
diff --git a/libs/vr/libeds/Android.mk b/libs/vr/libeds/Android.mk
index 0345f6d..373e68e 100644
--- a/libs/vr/libeds/Android.mk
+++ b/libs/vr/libeds/Android.mk
@@ -39,7 +39,6 @@
libvulkan
staticLibraries := \
- libchrome \
libdisplay \
libdvrcommon \
libdvrgraphics \
diff --git a/libs/vr/libeds/composite_hmd.cpp b/libs/vr/libeds/composite_hmd.cpp
index d29cd65..d6bf164 100644
--- a/libs/vr/libeds/composite_hmd.cpp
+++ b/libs/vr/libeds/composite_hmd.cpp
@@ -1,6 +1,7 @@
#include "include/private/dvr/composite_hmd.h"
-#include <base/logging.h>
+#include <log/log.h>
+
#include <private/dvr/numeric.h>
namespace android {
@@ -113,9 +114,9 @@
float meters_per_tan_angle = virtual_eye_to_screen_dist;
vec2 pixels_per_tan_angle = pixels_per_meter * meters_per_tan_angle;
- CHECK_NE(0.0f, display_width_meters);
- CHECK_NE(0.0f, display_height_meters);
- CHECK_NE(0.0f, virtual_eye_to_screen_dist);
+ LOG_ALWAYS_FATAL_IF(0.0f == display_width_meters);
+ LOG_ALWAYS_FATAL_IF(0.0f == display_height_meters);
+ LOG_ALWAYS_FATAL_IF(0.0f == virtual_eye_to_screen_dist);
// Height of lenses from the bottom of the screen.
float lens_y_center = 0;
diff --git a/libs/vr/libeds/distortion_renderer.cpp b/libs/vr/libeds/distortion_renderer.cpp
index a19843f..13090ca 100644
--- a/libs/vr/libeds/distortion_renderer.cpp
+++ b/libs/vr/libeds/distortion_renderer.cpp
@@ -8,7 +8,7 @@
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include <utils/Trace.h>
-#include <base/logging.h>
+#include <log/log.h>
#include <private/dvr/clock_ns.h>
#include <private/dvr/composite_hmd.h>
#include <private/dvr/debug.h>
@@ -303,12 +303,12 @@
vert_builder += "#define COMPOSITE_LAYER_2\n";
frag_builder += "#define COMPOSITE_LAYER_2\n";
} else {
- CHECK_EQ(num_layers, 1);
+ LOG_ALWAYS_FATAL_IF(num_layers != 1);
}
if (blend_with_previous_layer) {
// Check for unsupported shader combinations:
- CHECK_EQ(num_layers, 1);
- CHECK_EQ(use_alpha_vignette, false);
+ LOG_ALWAYS_FATAL_IF(num_layers != 1);
+ LOG_ALWAYS_FATAL_IF(use_alpha_vignette);
if (kUseFramebufferReadback)
frag_builder += "#define BLEND_WITH_PREVIOUS_LAYER\n";
}
@@ -320,7 +320,7 @@
vert_builder += vertex;
frag_builder += fragment;
pgm.Link(vert_builder, frag_builder);
- CHECK(pgm.IsUsable());
+ LOG_ALWAYS_FATAL_IF(!pgm.IsUsable());
pgm.Use();
@@ -343,7 +343,7 @@
projectionMatrix =
projectionMatrix * Eigen::AngleAxisf(rotation, vec3::UnitZ());
- CHECK(sizeof(mat4) == 4 * 4 * 4);
+ LOG_ALWAYS_FATAL_IF(sizeof(mat4) != 4 * 4 * 4);
glUniformMatrix4fv(uProjectionMatrix, 1, false, projectionMatrix.data());
}
@@ -367,8 +367,7 @@
if (eds_enabled_) {
// Late latch must be on if eds_enabled_ is true.
if (!late_latch_enabled) {
- LOG(ERROR) << "Cannot enable EDS without late latch. "
- << "Force enabling late latch.";
+ ALOGE("Cannot enable EDS without late latch. Force enabling late latch.");
late_latch_enabled = true;
}
}
@@ -633,11 +632,11 @@
PrepGlState(eye);
if (num_textures > kMaxLayers) {
- LOG(ERROR) << "Too many textures for DistortionRenderer";
+ ALOGE("Too many textures for DistortionRenderer");
num_textures = kMaxLayers;
}
- CHECK(num_textures == 1 || num_textures == 2);
+ LOG_ALWAYS_FATAL_IF(num_textures != 1 && num_textures != 2);
if (num_textures == 2) {
if (chromatic_aberration_correction_enabled_) {
@@ -776,7 +775,7 @@
bool DistortionRenderer::GetLastEdsPose(LateLatchOutput* out_data, int layer_id) const {
if (layer_id >= kMaxLayers) {
- LOG(ERROR) << "Accessing invalid layer " << layer_id << std::endl;
+ ALOGE("Accessing invalid layer %d", layer_id);
return false;
}
@@ -784,7 +783,7 @@
late_latch_[layer_id]->CaptureOutputData(out_data);
return true;
} else {
- LOG(ERROR) << "Late latch shader not enabled." << std::endl;
+ ALOGE("Late latch shader not enabled.");
return false;
}
}
diff --git a/libs/vr/libeds/eds_mesh.cpp b/libs/vr/libeds/eds_mesh.cpp
index 2c7dc2f..01a90cf 100644
--- a/libs/vr/libeds/eds_mesh.cpp
+++ b/libs/vr/libeds/eds_mesh.cpp
@@ -1,8 +1,8 @@
#include "include/private/dvr/eds_mesh.h"
+#include <log/log.h>
#include <math.h>
-#include <base/logging.h>
#include <private/dvr/types.h>
namespace {
@@ -105,7 +105,7 @@
// provided by |hmd| for |eye|.
EdsMesh BuildDistortionMesh(EyeType eye, int resolution,
const DistortionFunction& distortion_function) {
- CHECK_GT(resolution, 2);
+ LOG_ALWAYS_FATAL_IF(resolution <= 2);
// Number of indices produced by the strip method
// (see comment in ComputeDistortionMeshIndices):
diff --git a/libs/vr/libeds/lucid_pose_tracker.cpp b/libs/vr/libeds/lucid_pose_tracker.cpp
index c321bb0..5247020 100644
--- a/libs/vr/libeds/lucid_pose_tracker.cpp
+++ b/libs/vr/libeds/lucid_pose_tracker.cpp
@@ -1,7 +1,7 @@
#include "include/private/dvr/lucid_pose_tracker.h"
#define LOG_TAG "LucidPoseTracker"
-#include <cutils/log.h>
+#include <log/log.h>
#include <private/dvr/clock_ns.h>
diff --git a/libs/vr/libeds/tests/eds_app_tests.cpp b/libs/vr/libeds/tests/eds_app_tests.cpp
index 1742736..549d864 100644
--- a/libs/vr/libeds/tests/eds_app_tests.cpp
+++ b/libs/vr/libeds/tests/eds_app_tests.cpp
@@ -1,7 +1,6 @@
#include <EGL/egl.h>
#include <GLES2/gl2.h>
-#include <base/logging.h>
#include <dvr/graphics.h>
#include <dvr/pose_client.h>
#include <gtest/gtest.h>
diff --git a/libs/vr/libgvr/Android.mk b/libs/vr/libgvr/Android.mk
index 0fcf94b..be78605 100644
--- a/libs/vr/libgvr/Android.mk
+++ b/libs/vr/libgvr/Android.mk
@@ -68,14 +68,12 @@
libhardware \
liblog \
libsync \
- libevent \
libprotobuf-cpp-full
LOCAL_STATIC_LIBRARIES := \
libdisplay \
libbufferhub \
libbufferhubqueue \
- libchrome \
libdvrcommon \
libeds \
libdvrgraphics \
@@ -125,7 +123,6 @@
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
LOCAL_SRC_FILES := dummy_gvr_ext.cpp
-LOCAL_STATIC_LIBRARIES := libchrome
LOCAL_LDLIBS := -llog
LOCAL_MODULE_TAGS := optional
LOCAL_SHARED_LIBRARIES += libgvr
diff --git a/libs/vr/libgvr/dummy_gvr_ext.cpp b/libs/vr/libgvr/dummy_gvr_ext.cpp
index c507038..f73838d 100644
--- a/libs/vr/libgvr/dummy_gvr_ext.cpp
+++ b/libs/vr/libgvr/dummy_gvr_ext.cpp
@@ -1,4 +1,4 @@
-#include <base/logging.h>
+#include <log/log.h>
#include <vr/gvr/capi/include/gvr.h>
#include <vr/gvr/capi/include/gvr_ext.h>
#include <vr/gvr/capi/include/gvr_types.h>
@@ -14,8 +14,8 @@
gvr_mat4f gvr_get_6dof_head_pose_in_start_space(gvr_context* gvr,
uint32_t /* vsync_count */) {
- LOG(FATAL) << "gvr_get_6dof_head_pose_in_start_space is not implemented. "
- << "Use gvr_get_head_space_from_start_space_pose instead.";
+ LOG_ALWAYS_FATAL("gvr_get_6dof_head_pose_in_start_space is not implemented. "
+ "Use gvr_get_head_space_from_start_space_pose instead.");
return gvr_mat4f({{{1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 1.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 1.0f, 0.0f},
@@ -25,5 +25,5 @@
void gvr_wait_next_frame(gvr_swap_chain* /* swap_chain */,
int64_t /* sched_offset_nanos */,
gvr_frame_schedule* /* out_next_frame_schedule */) {
- LOG(FATAL) << "gvr_wait_next_frame is not implemented.";
+ LOG_ALWAYS_FATAL("gvr_wait_next_frame is not implemented.");
}
diff --git a/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr.so b/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr.so
index 1d0ba50..bfd5956 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr_audio.so b/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr_audio.so
index 905ca64..c3012b1 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr_audio.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_arm/libgvr_audio.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr.so b/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr.so
index d62f7ca..6608c25 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr_audio.so b/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr_audio.so
index e342f6a..b1d7690 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr_audio.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_arm64/libgvr_audio.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr.so b/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr.so
index 8092138..f7f7786 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr_audio.so b/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr_audio.so
index 3fe5b2c..97aec40 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr_audio.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_x86/libgvr_audio.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr.so b/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr.so
index 3bcf60e..2e2dbc1 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr_audio.so b/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr_audio.so
index 2f2d834..cd8d0e0 100644
--- a/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr_audio.so
+++ b/libs/vr/libgvr/prebuilt/lib/android_x86_64/libgvr_audio.so
Binary files differ
diff --git a/libs/vr/libgvr/prebuilt/lib/common_library.aar b/libs/vr/libgvr/prebuilt/lib/common_library.aar
index 13147fe..9c1fbd0 100644
--- a/libs/vr/libgvr/prebuilt/lib/common_library.aar
+++ b/libs/vr/libgvr/prebuilt/lib/common_library.aar
Binary files differ
diff --git a/libs/vr/libgvr/shim_gvr.cpp b/libs/vr/libgvr/shim_gvr.cpp
index 264952e..5eb6e3d 100644
--- a/libs/vr/libgvr/shim_gvr.cpp
+++ b/libs/vr/libgvr/shim_gvr.cpp
@@ -19,10 +19,10 @@
#endif
#endif
-#include <cutils/log.h>
#include <dvr/graphics.h>
#include <dvr/performance_client_api.h>
#include <dvr/pose_client.h>
+#include <log/log.h>
#include <private/dvr/buffer_hub_queue_core.h>
#include <private/dvr/buffer_hub_queue_producer.h>
#include <private/dvr/clock_ns.h>
@@ -512,7 +512,8 @@
}
bool gvr_is_feature_supported(const gvr_context* /*gvr*/, int32_t feature) {
- return feature == GVR_FEATURE_ASYNC_REPROJECTION;
+ return feature == GVR_FEATURE_ASYNC_REPROJECTION ||
+ feature == GVR_FEATURE_HEAD_POSE_6DOF;
}
/////////////////////////////////////////////////////////////////////////////
@@ -1326,14 +1327,14 @@
}
void* gvr_external_surface_get_surface(const gvr_external_surface* surface) {
- CHECK(surface->swap_chain != nullptr &&
- surface->swap_chain->context != nullptr &&
- surface->swap_chain->context->jni_env_ != nullptr)
- << "gvr_external_surface_get_surface: Surface must be constructed within "
- << "a JNIEnv. Check |gvr_create| call.";
+ LOG_ALWAYS_FATAL_IF(surface->swap_chain == nullptr ||
+ surface->swap_chain->context == nullptr ||
+ surface->swap_chain->context->jni_env_ == nullptr,
+ "gvr_external_surface_get_surface: Surface must be "
+ "constructed within a JNIEnv. Check |gvr_create| call.");
- CHECK(surface->video_surface != nullptr)
- << "gvr_external_surface_get_surface: Invalid surface.";
+ LOG_ALWAYS_FATAL_IF(surface->video_surface == nullptr,
+ "gvr_external_surface_get_surface: Invalid surface.");
std::shared_ptr<android::dvr::ProducerQueue> producer_queue =
surface->video_surface->client->GetProducerQueue();
diff --git a/libs/vr/libgvr/shim_gvr_controller.cpp b/libs/vr/libgvr/shim_gvr_controller.cpp
index 54bc270..0f55903 100644
--- a/libs/vr/libgvr/shim_gvr_controller.cpp
+++ b/libs/vr/libgvr/shim_gvr_controller.cpp
@@ -1,6 +1,6 @@
#define LOG_TAG "libgvr_controller_shim"
-#include <cutils/log.h>
+#include <log/log.h>
#include <vr/gvr/capi/include/gvr_controller.h>
#include <vr/gvr/capi/include/gvr_types.h>
diff --git a/libs/vr/libgvr/shim_gvr_private.cpp b/libs/vr/libgvr/shim_gvr_private.cpp
index 6ab6971..25a5110 100644
--- a/libs/vr/libgvr/shim_gvr_private.cpp
+++ b/libs/vr/libgvr/shim_gvr_private.cpp
@@ -1,6 +1,6 @@
#define LOG_TAG "libgvr_shim_private"
-#include <cutils/log.h>
+#include <log/log.h>
#include <private/dvr/display_rpc.h>
#include <private/dvr/internal_types.h>
#include <vr/gvr/capi/include/gvr.h>
@@ -42,7 +42,7 @@
serialized_viewer_params_size_bytes);
std::unique_ptr<proto::DeviceParams> device_params(new proto::DeviceParams);
if (!device_params->ParseFromString(serialized_device_params_string)) {
- LOG(ERROR) << "Invalid serialized Cardboard DeviceParams";
+ ALOGE("Invalid serialized Cardboard DeviceParams");
return false;
}
diff --git a/libs/vr/libimageio/include/private/dvr/image_io_logging.h b/libs/vr/libimageio/include/private/dvr/image_io_logging.h
index 3c0f2a5..ac78179 100644
--- a/libs/vr/libimageio/include/private/dvr/image_io_logging.h
+++ b/libs/vr/libimageio/include/private/dvr/image_io_logging.h
@@ -1,7 +1,7 @@
#ifndef LIB_LIBIMAGEIO_PRIVATE_DREAMOS_IMAGE_IO_LOGGING_H_
#define LIB_LIBIMAGEIO_PRIVATE_DREAMOS_IMAGE_IO_LOGGING_H_
-// This header acts as cutils/log.h if LOG_TO_STDERR is not defined.
+// This header acts as log/log.h if LOG_TO_STDERR is not defined.
// If LOG_TO_STDERR is defined, then android logging macros (such as ALOGE)
// would log to stderr. This is useful if the code is also being used/tested on
// a desktop.
@@ -33,7 +33,7 @@
#define ALOGV(fmt, ...) LogToStderr("VERBOSE", fmt, ##__VA_ARGS__)
#else // LOG_TO_STDERR
-#include <cutils/log.h>
+#include <log/log.h>
#endif // LOG_TO_STDERR
#endif // LIB_LIBIMAGEIO_PRIVATE_DREAMOS_IMAGE_IO_LOGGING_H_
diff --git a/libs/vr/libpdx/private/pdx/service.h b/libs/vr/libpdx/private/pdx/service.h
index 029e6bf..175cedf 100644
--- a/libs/vr/libpdx/private/pdx/service.h
+++ b/libs/vr/libpdx/private/pdx/service.h
@@ -1,8 +1,8 @@
#ifndef ANDROID_PDX_SERVICE_H_
#define ANDROID_PDX_SERVICE_H_
-#include <cutils/log.h>
#include <errno.h>
+#include <log/log.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
diff --git a/libs/vr/libpdx/service.cpp b/libs/vr/libpdx/service.cpp
index 0053af8..daf9af8 100644
--- a/libs/vr/libpdx/service.cpp
+++ b/libs/vr/libpdx/service.cpp
@@ -1,8 +1,8 @@
#define LOG_TAG "ServiceFramework"
#include "pdx/service.h"
-#include <cutils/log.h>
#include <fcntl.h>
+#include <log/log.h>
#include <utils/misc.h>
#include <algorithm>
diff --git a/libs/vr/libposepredictor/linear_pose_predictor.cpp b/libs/vr/libposepredictor/linear_pose_predictor.cpp
index a2ce2ca..de1b951 100644
--- a/libs/vr/libposepredictor/linear_pose_predictor.cpp
+++ b/libs/vr/libposepredictor/linear_pose_predictor.cpp
@@ -1,4 +1,4 @@
-#include <cutils/log.h>
+#include <log/log.h>
#include <private/dvr/linear_pose_predictor.h>
diff --git a/libs/vr/libsensor/Android.mk b/libs/vr/libsensor/Android.mk
index db1514d..8c7ad43 100644
--- a/libs/vr/libsensor/Android.mk
+++ b/libs/vr/libsensor/Android.mk
@@ -23,7 +23,6 @@
staticLibraries := \
libbufferhub \
- libchrome \
libdvrcommon \
libpdx_default_transport \
@@ -33,7 +32,6 @@
libhardware \
liblog \
libutils \
- libevent
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(sourceFiles)
diff --git a/libs/vr/libsensor/pose_client.cpp b/libs/vr/libsensor/pose_client.cpp
index a65d668..9eae3aa 100644
--- a/libs/vr/libsensor/pose_client.cpp
+++ b/libs/vr/libsensor/pose_client.cpp
@@ -3,8 +3,7 @@
#include <stdint.h>
-#include <base/logging.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <pdx/client.h>
#include <pdx/default_transport/client_channel_factory.h>
#include <pdx/file_handle.h>
@@ -18,6 +17,8 @@
using android::pdx::Status;
using android::pdx::Transaction;
+#define arraysize(x) (static_cast<int32_t>(std::extent<decltype(x)>::value))
+
namespace android {
namespace dvr {
@@ -64,8 +65,7 @@
int GetControllerPose(int32_t controller_id, uint32_t vsync_count,
DvrPoseAsync* out_pose) {
- if (controller_id < 0 ||
- controller_id >= static_cast<int32_t>(arraysize(controllers_))) {
+ if (controller_id < 0 || controller_id >= arraysize(controllers_)) {
return -EINVAL;
}
if (!controllers_[controller_id].mapped_pose_buffer) {
@@ -154,14 +154,14 @@
}
pose_buffer_.swap(buffer);
mapped_pose_buffer_ = static_cast<const DvrPoseRingBuffer*>(addr);
- LOG(INFO) << "Mapped pose data translation "
- << mapped_pose_buffer_->ring[0].translation[0] << ','
- << mapped_pose_buffer_->ring[0].translation[1] << ','
- << mapped_pose_buffer_->ring[0].translation[2] << ", quat "
- << mapped_pose_buffer_->ring[0].orientation[0] << ','
- << mapped_pose_buffer_->ring[0].orientation[1] << ','
- << mapped_pose_buffer_->ring[0].orientation[2] << ','
- << mapped_pose_buffer_->ring[0].orientation[3];
+ ALOGI("Mapped pose data translation %f,%f,%f quat %f,%f,%f,%f",
+ mapped_pose_buffer_->ring[0].translation[0],
+ mapped_pose_buffer_->ring[0].translation[1],
+ mapped_pose_buffer_->ring[0].translation[2],
+ mapped_pose_buffer_->ring[0].orientation[0],
+ mapped_pose_buffer_->ring[0].orientation[1],
+ mapped_pose_buffer_->ring[0].orientation[2],
+ mapped_pose_buffer_->ring[0].orientation[3]);
if (out_info) {
GetPoseRingBufferInfo(out_info);
}
@@ -169,8 +169,7 @@
}
int GetControllerRingBuffer(int32_t controller_id) {
- if (controller_id < 0 ||
- controller_id >= static_cast<int32_t>(arraysize(controllers_))) {
+ if (controller_id < 0 || controller_id >= arraysize(controllers_)) {
return -EINVAL;
}
ControllerClientState& client_state = controllers_[controller_id];
@@ -200,15 +199,15 @@
}
client_state.pose_buffer.swap(buffer);
client_state.mapped_pose_buffer = static_cast<const DvrPoseAsync*>(addr);
- LOG(INFO) << "Mapped controller " << controller_id
- << " pose data translation "
- << client_state.mapped_pose_buffer[0].translation[0] << ','
- << client_state.mapped_pose_buffer[0].translation[1] << ','
- << client_state.mapped_pose_buffer[0].translation[2] << ", quat "
- << client_state.mapped_pose_buffer[0].orientation[0] << ','
- << client_state.mapped_pose_buffer[0].orientation[1] << ','
- << client_state.mapped_pose_buffer[0].orientation[2] << ','
- << client_state.mapped_pose_buffer[0].orientation[3];
+ ALOGI(
+ "Mapped controller %d pose data translation %f,%f,%f quat %f,%f,%f,%f",
+ controller_id, client_state.mapped_pose_buffer[0].translation[0],
+ client_state.mapped_pose_buffer[0].translation[1],
+ client_state.mapped_pose_buffer[0].translation[2],
+ client_state.mapped_pose_buffer[0].orientation[0],
+ client_state.mapped_pose_buffer[0].orientation[1],
+ client_state.mapped_pose_buffer[0].orientation[2],
+ client_state.mapped_pose_buffer[0].orientation[3]);
return 0;
}
diff --git a/libs/vr/libsensor/sensor_client.cpp b/libs/vr/libsensor/sensor_client.cpp
index 1c240f5..04e88cc 100644
--- a/libs/vr/libsensor/sensor_client.cpp
+++ b/libs/vr/libsensor/sensor_client.cpp
@@ -1,7 +1,7 @@
#define LOG_TAG "SensorClient"
#include <private/dvr/sensor_client.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <poll.h>
#include <pdx/default_transport/client_channel_factory.h>
diff --git a/libs/vr/libsensor/tests/sensor_app_tests.cpp b/libs/vr/libsensor/tests/sensor_app_tests.cpp
index 8cd6f79..0f5bf00 100644
--- a/libs/vr/libsensor/tests/sensor_app_tests.cpp
+++ b/libs/vr/libsensor/tests/sensor_app_tests.cpp
@@ -2,10 +2,10 @@
#include <GLES2/gl2.h>
#include <math.h>
-#include <base/logging.h>
#include <dvr/graphics.h>
#include <dvr/pose_client.h>
#include <gtest/gtest.h>
+#include <log/log.h>
#include <private/dvr/types.h>
using android::dvr::vec4;
@@ -63,9 +63,9 @@
// startup anomalies.
if (i > 0) {
if (last_vsync_count == schedule.vsync_count)
- LOG(ERROR) << "vsync did not increment: " << schedule.vsync_count;
+ ALOGE("vsync did not increment: %u", schedule.vsync_count);
if (pose.timestamp_ns == last_pose.timestamp_ns)
- LOG(ERROR) << "timestamp did not change: " << pose.timestamp_ns;
+ ALOGE("timestamp did not change: %" PRIu64, pose.timestamp_ns);
// TODO(jbates) figure out why the bots are not passing this check.
// EXPECT_NE(last_vsync_count, schedule.vsync_count);
// EXPECT_NE(pose.timestamp_ns, last_pose.timestamp_ns);
@@ -112,9 +112,9 @@
// startup anomalies.
if (i > 0) {
if (last_vsync_count == schedule.vsync_count)
- LOG(ERROR) << "vsync did not increment: " << schedule.vsync_count;
+ ALOGE("vsync did not increment: %u", schedule.vsync_count);
if (pose.timestamp_ns == last_pose.timestamp_ns)
- LOG(ERROR) << "timestamp did not change: " << pose.timestamp_ns;
+ ALOGE("timestamp did not change: %" PRIu64, pose.timestamp_ns);
// TODO(jbates) figure out why the bots are not passing this check.
// EXPECT_NE(last_vsync_count, schedule.vsync_count);
// EXPECT_NE(pose.timestamp_ns, last_pose.timestamp_ns);
diff --git a/libs/vr/libvrflinger/Android.mk b/libs/vr/libvrflinger/Android.mk
new file mode 100644
index 0000000..6b5e7cc
--- /dev/null
+++ b/libs/vr/libvrflinger/Android.mk
@@ -0,0 +1,87 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+ acquired_buffer.cpp \
+ compositor.cpp \
+ debug_hud_data.cpp \
+ debug_hud_view.cpp \
+ display_manager_service.cpp \
+ display_service.cpp \
+ display_surface.cpp \
+ epoll_event_dispatcher.cpp \
+ hardware_composer.cpp \
+ screenshot_service.cpp \
+ surface_channel.cpp \
+ video_compositor.cpp \
+ video_mesh_surface.cpp \
+ vr_flinger.cpp \
+ vsync_service.cpp
+
+includeFiles := $(LOCAL_PATH)/include
+
+staticLibraries := \
+ libsurfaceflingerincludes \
+ libhwcomposer-command-buffer \
+ libbufferhub \
+ libbufferhubqueue \
+ libeds \
+ libdisplay \
+ libdvrcommon \
+ libdvrgraphics \
+ libperformance \
+ libsensor \
+ libpdx_default_transport \
+
+sharedLibraries := \
+ android.dvr.composer@1.0 \
+ android.hardware.graphics.allocator@2.0 \
+ android.hardware.graphics.composer@2.1 \
+ libbinder \
+ libbase \
+ libcutils \
+ liblog \
+ libhardware \
+ libutils \
+ libEGL \
+ libGLESv1_CM \
+ libGLESv2 \
+ libvulkan \
+ libui \
+ libgui \
+ libsync \
+ libhidlbase \
+ libhidltransport \
+ libfmq \
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(sourceFiles)
+LOCAL_C_INCLUDES := $(includeFiles)
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(includeFiles)
+
+LOCAL_CFLAGS += -DLOG_TAG=\"vr_flinger\"
+LOCAL_CFLAGS += -DTRACE=0
+LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_GRAPHICS
+LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES -DEGL_EGLEXT_PROTOTYPES
+ifeq ($(TARGET_USES_QCOM_BSP), true)
+ LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
+ LOCAL_C_INCLUDES += hardware/qcom/display/libqdutils
+ LOCAL_SHARED_LIBRARIES += libqdutils
+endif
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_WHOLE_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_MODULE := libvrflinger
+include $(BUILD_STATIC_LIBRARY)
diff --git a/libs/vr/libvrflinger/acquired_buffer.cpp b/libs/vr/libvrflinger/acquired_buffer.cpp
new file mode 100644
index 0000000..5a3aa7f
--- /dev/null
+++ b/libs/vr/libvrflinger/acquired_buffer.cpp
@@ -0,0 +1,100 @@
+#include "acquired_buffer.h"
+
+#include <log/log.h>
+#include <sync/sync.h>
+
+using android::pdx::LocalHandle;
+
+namespace android {
+namespace dvr {
+
+AcquiredBuffer::AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+ LocalHandle acquire_fence, uint64_t /*sequence*/)
+ : buffer_(buffer), acquire_fence_(std::move(acquire_fence)) {}
+
+AcquiredBuffer::AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+ int* error) {
+ LocalHandle fence;
+ const int ret = buffer->Acquire(&fence);
+
+ if (error)
+ *error = ret;
+
+ if (ret < 0) {
+ ALOGW("AcquiredBuffer::AcquiredBuffer: Failed to acquire buffer: %s",
+ strerror(-ret));
+ buffer_ = nullptr;
+ // Default construct sets acquire_fence_ to empty.
+ } else {
+ buffer_ = buffer;
+ acquire_fence_ = std::move(fence);
+ }
+}
+
+AcquiredBuffer::AcquiredBuffer(AcquiredBuffer&& other)
+ : buffer_(std::move(other.buffer_)),
+ acquire_fence_(std::move(other.acquire_fence_)) {}
+
+AcquiredBuffer::~AcquiredBuffer() { Release(LocalHandle(kEmptyFence)); }
+
+AcquiredBuffer& AcquiredBuffer::operator=(AcquiredBuffer&& other) {
+ if (this != &other) {
+ Release(LocalHandle(kEmptyFence));
+
+ buffer_ = std::move(other.buffer_);
+ acquire_fence_ = std::move(other.acquire_fence_);
+ }
+ return *this;
+}
+
+bool AcquiredBuffer::IsAvailable() const {
+ if (IsEmpty())
+ return false;
+
+ // Only check the fence if the acquire fence is not empty.
+ if (acquire_fence_) {
+ const int ret = sync_wait(acquire_fence_.Get(), 0);
+ ALOGD_IF(TRACE || (ret < 0 && errno != ETIME),
+ "AcquiredBuffer::IsAvailable: acquire_fence_=%d sync_wait()=%d "
+ "errno=%d.",
+ acquire_fence_.Get(), ret, ret < 0 ? errno : 0);
+ if (ret == 0) {
+ // The fence is completed, so to avoid further calls to sync_wait we close
+ // it here.
+ acquire_fence_.Close();
+ }
+ return ret == 0;
+ } else {
+ return true;
+ }
+}
+
+LocalHandle AcquiredBuffer::ClaimAcquireFence() {
+ return std::move(acquire_fence_);
+}
+
+std::shared_ptr<BufferConsumer> AcquiredBuffer::ClaimBuffer() {
+ return std::move(buffer_);
+}
+
+int AcquiredBuffer::Release(LocalHandle release_fence) {
+ if (buffer_) {
+ // Close the release fence since we can't transfer it with an async release.
+ release_fence.Close();
+ const int ret = buffer_->ReleaseAsync();
+ if (ret < 0) {
+ ALOGE("AcquiredBuffer::Release: Failed to release buffer %d: %s",
+ buffer_->id(), strerror(-ret));
+ if (ret != -ESHUTDOWN)
+ return ret;
+ }
+
+ buffer_ = nullptr;
+ acquire_fence_.Close();
+ }
+
+ return 0;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/acquired_buffer.h b/libs/vr/libvrflinger/acquired_buffer.h
new file mode 100644
index 0000000..050cd5f
--- /dev/null
+++ b/libs/vr/libvrflinger/acquired_buffer.h
@@ -0,0 +1,82 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
+
+#include <pdx/file_handle.h>
+#include <private/dvr/buffer_hub_client.h>
+
+#include <memory>
+
+namespace android {
+namespace dvr {
+
+// Manages the ACQUIRE/RELEASE ownership cycle of a BufferConsumer.
+class AcquiredBuffer {
+ public:
+ static constexpr int kEmptyFence = pdx::LocalHandle::kEmptyFileHandle;
+
+ AcquiredBuffer() : buffer_(nullptr), acquire_fence_(kEmptyFence) {}
+
+ // Constructs an AcquiredBuffer from a BufferConsumer pointer and an acquire
+ // fence. The BufferConsumer MUST be in the ACQUIRED state prior to calling
+ // this constructor; the constructor does not attempt to ACQUIRE the buffer
+ // itself.
+ AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+ pdx::LocalHandle acquire_fence, uint64_t sequence);
+
+ // Constructs an AcquiredBuffer from a BufferConsumer. The BufferConsumer MUST
+ // be in the POSTED state prior to calling this constructor, as this
+ // constructor attempts to ACQUIRE the buffer. If ACQUIRING the buffer fails
+ // this instance is left in the empty state. An optional error code is
+ // returned in |error|, which may be nullptr if not needed.
+ AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer, int* error);
+
+ // Move constructor. Behaves similarly to the move assignment operator below.
+ AcquiredBuffer(AcquiredBuffer&& other);
+
+ ~AcquiredBuffer();
+
+ // Move assignment operator. Moves the BufferConsumer and acquire fence from
+ // |other| into this instance after RELEASING the current BufferConsumer and
+ // closing the acquire fence. After the move |other| is left in the empty
+ // state.
+ AcquiredBuffer& operator=(AcquiredBuffer&& other);
+
+ // Accessors for the underlying BufferConsumer, the acquire fence, and the
+ // use-case specific sequence value from the acquisition (see
+ // dreamos/buffer_hub_client.h).
+ std::shared_ptr<BufferConsumer> buffer() const { return buffer_; }
+ int acquire_fence() const { return acquire_fence_.Get(); }
+
+ // When non-empty, returns true if the acquired fence was signaled (or if the
+ // fence is empty). Returns false when empty or if the fence is not signaled.
+ bool IsAvailable() const;
+
+ bool IsEmpty() const { return buffer_ == nullptr; }
+
+ // Returns the acquire fence, passing ownership to the caller.
+ pdx::LocalHandle ClaimAcquireFence();
+
+ // Returns the buffer, passing ownership to the caller. Caller is responsible
+ // for calling Release on the returned buffer.
+ std::shared_ptr<BufferConsumer> ClaimBuffer();
+
+ // Releases the BufferConsumer, passing the release fence in |release_fence|
+ // to the producer. On success, the BufferConsumer and acquire fence are set
+ // to empty state; if release fails, the BufferConsumer and acquire fence are
+ // left in place and a negative error code is returned.
+ int Release(pdx::LocalHandle release_fence);
+
+ private:
+ AcquiredBuffer(const AcquiredBuffer&) = delete;
+ void operator=(const AcquiredBuffer&) = delete;
+
+ std::shared_ptr<BufferConsumer> buffer_;
+ // Mutable so that the fence can be closed when it is determined to be
+ // signaled during IsAvailable().
+ mutable pdx::LocalHandle acquire_fence_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
diff --git a/libs/vr/libvrflinger/compositor.cpp b/libs/vr/libvrflinger/compositor.cpp
new file mode 100644
index 0000000..5a111d4
--- /dev/null
+++ b/libs/vr/libvrflinger/compositor.cpp
@@ -0,0 +1,873 @@
+#include "compositor.h"
+
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+
+#include <memory>
+
+#include <cutils/properties.h>
+
+#include <dvr/graphics.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/debug.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/dummy_native_window.h>
+#include <private/dvr/gl_fenced_flush.h>
+#include <private/dvr/graphics/blur.h>
+#include <private/dvr/graphics/gpu_profiler.h>
+#include <private/dvr/lucid_metrics.h>
+#include <private/dvr/native_buffer.h>
+#include <private/dvr/platform_defines.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "debug_hud_data.h"
+#include "debug_hud_view.h"
+#include "display_surface.h"
+
+#define BINNING_CONTROL_HINT_QCOM 0x8FB0
+
+// Accepted by the <hint> parameter of glHint:
+#define BINNING_QCOM 0x8FB1
+#define VISIBILITY_OPTIMIZED_BINNING_QCOM 0x8FB2
+#define RENDER_DIRECT_TO_FRAMEBUFFER_QCOM 0x8FB3
+
+#ifndef EGL_CONTEXT_MAJOR_VERSION
+#define EGL_CONTEXT_MAJOR_VERSION 0x3098
+#define EGL_CONTEXT_MINOR_VERSION 0x30FB
+#endif
+
+using android::pdx::LocalHandle;
+
+static const int kDistortionMeshResolution = 40;
+
+static std::shared_ptr<int64_t> eds_gpu_duration_ns =
+ std::make_shared<int64_t>(0);
+
+static constexpr char kDisableLensDistortionProp[] =
+ "persist.dreamos.disable_distort";
+
+static constexpr char kEnableEdsPoseSaveProp[] =
+ "persist.dreamos.save_eds_pose";
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// An implementation of ANativeWindowBuffer backed by a temporary IonBuffer.
+// Do not hold on to this kind of object, because the IonBuffer may become
+// invalid in other scopes.
+// Adapts an IonBuffer to the ANativeWindowBuffer interface for EGLImage
+// creation. The handle is borrowed, not owned, so the wrapped IonBuffer must
+// outlive this object — do not store instances beyond the current scope.
+class TemporaryNativeBuffer
+    : public ANativeObjectBase<ANativeWindowBuffer, TemporaryNativeBuffer,
+                               LightRefBase<TemporaryNativeBuffer>> {
+ public:
+  // Copies the geometry/format/usage fields and borrows the gralloc handle
+  // from |buffer|.
+  explicit TemporaryNativeBuffer(const IonBuffer* buffer) : BASE() {
+    ANativeWindowBuffer::width = buffer->width();
+    ANativeWindowBuffer::height = buffer->height();
+    ANativeWindowBuffer::stride = buffer->stride();
+    ANativeWindowBuffer::format = buffer->format();
+    ANativeWindowBuffer::usage = buffer->usage();
+    // TODO(eieio): Update NYC to support layer_count.
+    // ANativeWindowBuffer::layer_count = 1;
+    handle = buffer->handle();
+  }
+
+ private:
+  friend class android::LightRefBase<TemporaryNativeBuffer>;
+
+  TemporaryNativeBuffer(const TemporaryNativeBuffer&) = delete;
+  void operator=(TemporaryNativeBuffer&) = delete;
+};
+
+// Synchronously reads back the RGBA8 contents of |texture_id| through a
+// temporary FBO. This stalls the GPU pipeline; intended for debug/screenshot
+// paths only, not per-frame use.
+std::vector<uint8_t> ReadTextureRGBA(GLuint texture_id, int width, int height) {
+  std::vector<uint8_t> data(width * height * 4);
+  GLuint fbo;
+  glGenFramebuffers(1, &fbo);
+  glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+                         texture_id, 0);
+  // Using default GL_PACK_ALIGNMENT of 4 for the 4 byte source data.
+  glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data.data());
+  glBindFramebuffer(GL_FRAMEBUFFER, 0);
+  glDeleteFramebuffers(1, &fbo);
+  CHECK_GL();
+  return data;
+}
+
+} // namespace
+
+// Wraps one slice of a BufferConsumer as a GL texture via an EGLImage.
+// Instances are created and cached per (consumer, slice index) by AppFrame.
+class Compositor::Texture {
+ public:
+  Texture(std::shared_ptr<BufferConsumer> consumer, EGLDisplay display,
+          int index);
+  ~Texture();
+
+  std::shared_ptr<BufferConsumer> consumer() const { return consumer_; }
+  GLuint texture_id() const { return texture_id_; }
+  // Dimensions in pixels, taken from the wrapped native buffer.
+  vec2i size() const {
+    return vec2i(native_buffer_.get()->width, native_buffer_.get()->height);
+  }
+  // Slice index within the consumer buffer (used for per-eye views).
+  int index() const { return index_; }
+
+  // Creates the EGLImage and the GL texture bound to it. Must be called and
+  // must succeed before texture_id() is used. Returns false if EGLImage
+  // creation fails.
+  bool Initialize();
+
+ private:
+  Texture(const Texture&) = delete;
+  void operator=(const Texture&) = delete;
+
+  std::shared_ptr<BufferConsumer> consumer_;
+
+  android::sp<NativeBufferConsumer> native_buffer_;
+
+  EGLDisplay display_;
+  EGLImageKHR image_;
+  GLuint texture_id_;
+  int index_;
+};
+
+// Stores the consumer and target display; the GL/EGL objects are created
+// later in Initialize().
+Compositor::Texture::Texture(std::shared_ptr<BufferConsumer> consumer,
+                             EGLDisplay display, int index)
+    : consumer_(consumer),
+      display_(display),
+      image_(nullptr),
+      texture_id_(0),
+      index_(index) {}
+
+// Releases the GL texture and its backing EGLImage. Safe even if
+// Initialize() was never called or failed: texture id 0 is ignored by
+// glDeleteTextures, and destroying a null image fails harmlessly.
+Compositor::Texture::~Texture() {
+  glDeleteTextures(1, &texture_id_);
+  eglDestroyImageKHR(display_, image_);
+}
+
+// Builds the EGLImage from the consumer's native buffer and binds it to a
+// new GL texture with linear filtering and edge clamping. Returns false if
+// the EGLImage cannot be created.
+bool Compositor::Texture::Initialize() {
+  native_buffer_ = new NativeBufferConsumer(consumer_, index_);
+
+  CHECK_GL();
+  image_ = eglCreateImageKHR(
+      display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+      static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+  if (!image_) {
+    ALOGE("Failed to create EGLImage\n");
+    return false;
+  }
+
+  // Bind the image to a texture; sampling state matches the compositor's
+  // expectations (no mips, no wrapping).
+  glGenTextures(1, &texture_id_);
+  glActiveTexture(GL_TEXTURE0);
+  glBindTexture(GL_TEXTURE_2D, texture_id_);
+  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image_);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+  CHECK_GL();
+  return true;
+}
+
+// Members start empty; the GL/EGL resources are created in Initialize().
+Compositor::RenderTarget::RenderTarget()
+    : buffer_texture_id_(0),
+      buffer_framebuffer_id_(0),
+      buffer_image_(nullptr) {}
+
+Compositor::RenderTarget::~RenderTarget() { Destroy(); }
+
+// Releases the FBO, texture and EGLImage and resets members to the empty
+// state, so Initialize() can be called again. Idempotent: deleting id 0 or a
+// null image is harmless.
+void Compositor::RenderTarget::Destroy() {
+  glDeleteFramebuffers(1, &buffer_framebuffer_id_);
+  glDeleteTextures(1, &buffer_texture_id_);
+  eglDestroyImageKHR(eglGetDisplay(EGL_DEFAULT_DISPLAY), buffer_image_);
+  buffer_texture_id_ = 0;
+  buffer_framebuffer_id_ = 0;
+  buffer_image_ = nullptr;
+}
+
+// Allocates an IonBuffer suitable for scanout/composition and wires it up as
+// a GL framebuffer via an EGLImage-backed color texture. Must not be called
+// while already initialized (fatal check below); call Destroy() first.
+void Compositor::RenderTarget::Initialize(int width, int height) {
+  LOG_ALWAYS_FATAL_IF(buffer_texture_id_ || buffer_framebuffer_id_ ||
+                      buffer_image_);
+  constexpr int usage = GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER |
+                        GRALLOC_USAGE_HW_RENDER |
+                        GRALLOC_USAGE_QCOM_FRAMEBUFFER_COMPRESSION;
+  buffer_ = std::make_shared<IonBuffer>(width, height,
+                                        HAL_PIXEL_FORMAT_RGBA_8888, usage);
+
+  native_buffer_ = new NativeBuffer(buffer_);
+
+  // NOTE(review): the eglCreateImageKHR/glEGLImageTargetTexture2DOES results
+  // are not individually checked here; failures surface via the framebuffer
+  // completeness check below.
+  buffer_image_ = eglCreateImageKHR(
+      eglGetDisplay(EGL_DEFAULT_DISPLAY), EGL_NO_CONTEXT,
+      EGL_NATIVE_BUFFER_ANDROID,
+      static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+
+  glGenTextures(1, &buffer_texture_id_);
+  glBindTexture(GL_TEXTURE_2D, buffer_texture_id_);
+  CHECK_GL();
+
+  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, buffer_image_);
+  CHECK_GL();
+
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+  glBindTexture(GL_TEXTURE_2D, 0);
+
+  // Generate a framebuffer.
+  glGenFramebuffers(1, &buffer_framebuffer_id_);
+  glBindFramebuffer(GL_FRAMEBUFFER, buffer_framebuffer_id_);
+  CHECK_GL();
+
+  // Attach the color buffer
+  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+                         buffer_texture_id_, 0);
+  CHECK_GL();
+  GLenum result = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+  CHECK_GL();
+  if (result != GL_FRAMEBUFFER_COMPLETE) {
+    ALOGE("Framebuffer incomplete: %d", result);
+  }
+
+  // Clear the render target to black once. In direct render mode we never draw
+  // the corner pixels.
+  glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+  glClear(GL_COLOR_BUFFER_BIT);
+  glFlush();
+
+  glBindFramebuffer(GL_FRAMEBUFFER, 0);
+  CHECK_GL();
+}
+
+// Makes this render target the current draw framebuffer.
+void Compositor::RenderTarget::BindFramebuffer() {
+  glBindFramebuffer(GL_FRAMEBUFFER, buffer_framebuffer_id_);
+}
+
+// Discards the color attachment contents so tiled GPUs do not unresolve the
+// previous frame's pixels into tile memory. Caller must have bound this
+// target's framebuffer first.
+void Compositor::RenderTarget::DiscardColorAttachment() {
+  GLenum attachment = GL_COLOR_ATTACHMENT0;
+  glDiscardFramebufferEXT(GL_FRAMEBUFFER, 1, &attachment);
+  CHECK_GL();
+}
+
+// Wraps the GL uniform buffer object that aliases a surface's metadata
+// buffer so shaders can late-latch the render pose.
+class Compositor::RenderPoseBufferObject {
+ public:
+  // Binds |render_pose_buffer_fd| (the surface metadata gralloc buffer fd)
+  // as a shared uniform buffer. If the fd is empty only the plain buffer
+  // object is created.
+  RenderPoseBufferObject(LocalHandle&& render_pose_buffer_fd) {
+    // Create new pose tracking buffer for this surface.
+    glGenBuffers(1, &render_pose_buffer_object_);
+    glBindBuffer(GL_UNIFORM_BUFFER, render_pose_buffer_object_);
+    if (render_pose_buffer_fd) {
+      // The QCOM shared-buffer extension is required for late latching;
+      // abort loudly if the driver does not provide it. (The previous
+      // else/ALOGE fallback was unreachable because this check aborts.)
+      LOG_ALWAYS_FATAL_IF(!glBindSharedBufferQCOM,
+                          "Error: Missing gralloc buffer extension");
+      glBindSharedBufferQCOM(GL_UNIFORM_BUFFER,
+                             sizeof(DisplaySurfaceMetadata),
+                             render_pose_buffer_fd.Get());
+      CHECK_GL();
+    }
+    glBindBuffer(GL_UNIFORM_BUFFER, 0);
+  }
+
+  ~RenderPoseBufferObject() { glDeleteBuffers(1, &render_pose_buffer_object_); }
+
+  GLuint object_id() const { return render_pose_buffer_object_; }
+
+ private:
+  // Render pose buffer object. This contains an array of poses that corresponds
+  // with the surface buffers.
+  GLuint render_pose_buffer_object_;
+
+  RenderPoseBufferObject(const RenderPoseBufferObject&) = delete;
+  void operator=(const RenderPoseBufferObject&) = delete;
+};
+
+// Builds the default head mount metrics, honoring the
+// persist.dreamos.disable_distort system property: when set, lens
+// distortion correction is bypassed.
+HeadMountMetrics CreateDefaultHeadMountMetrics() {
+  if (property_get_bool(kDisableLensDistortionProp, 0))
+    return CreateUndistortedHeadMountMetrics();
+  return CreateHeadMountMetrics();
+}
+
+// Initializes all handles/flags to their empty values; real setup (EGL,
+// render targets, HUD, EDS renderer) happens in Initialize().
+Compositor::Compositor()
+    : head_mount_metrics_(CreateDefaultHeadMountMetrics()),
+      display_(0),
+      config_(0),
+      surface_(0),
+      context_(0),
+      active_render_target_(0),
+      is_render_direct_(false),
+      compute_fbo_(0),
+      compute_fbo_texture_(0),
+      hmd_metrics_requires_update_(false),
+      eds_pose_capture_enabled_(false) {}
+
+Compositor::~Compositor() {}
+
+// One-time setup: creates the EGL context, both double-buffered render
+// targets sized to the display, the EDS renderer and the debug HUD. Returns
+// false if EGL initialization fails.
+bool Compositor::Initialize(const DisplayMetrics& display_metrics) {
+  ATRACE_NAME("Compositor::Initialize");
+  if (!InitializeEGL())
+    return false;
+
+  display_metrics_ = display_metrics;
+  const int width = display_metrics_.GetSizePixels().x();
+  const int height = display_metrics_.GetSizePixels().y();
+
+  render_target_[0].Initialize(width, height);
+  render_target_[1].Initialize(width, height);
+
+  // EDS:
+  GpuProfiler::Get()->SetEnableGpuTracing(true);
+
+  // Pose capture is opt-in via the persist.dreamos.save_eds_pose property.
+  eds_pose_capture_enabled_ = property_get_bool(kEnableEdsPoseSaveProp, 0) == 1;
+
+  // Force-create the CompositeHmd and DistortionRenderer on first init.
+  CheckAndUpdateHeadMountMetrics(true);
+
+  debug_hud_.reset(new DebugHudView(*composite_hmd_.get()));
+  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+  return true;
+}
+
+// Thread-safe setter for new head mount metrics. The expensive mesh/renderer
+// rebuild is deferred to the draw loop (CheckAndUpdateHeadMountMetrics).
+void Compositor::UpdateHeadMountMetrics(
+    const HeadMountMetrics& head_mount_metrics) {
+  // Recalculating the mesh must be done in the draw loop, defer until then.
+  std::lock_guard<std::mutex> _lock(mutex_);
+  head_mount_metrics_ = head_mount_metrics;
+  hmd_metrics_requires_update_ = true;
+}
+
+// Rebuilds the CompositeHmd and DistortionRenderer when the metrics changed
+// (or when |force_update| is set, e.g. on first initialization). Runs on the
+// draw thread; requires a current GL context.
+void Compositor::CheckAndUpdateHeadMountMetrics(bool force_update) {
+  std::lock_guard<std::mutex> _lock(mutex_);
+  if (hmd_metrics_requires_update_ || force_update) {
+    hmd_metrics_requires_update_ = false;
+    composite_hmd_.reset(
+        new CompositeHmd(head_mount_metrics_, display_metrics_));
+    CHECK_GL();
+    eds_renderer_.reset(new DistortionRenderer(
+        *composite_hmd_.get(), display_metrics_.GetSizePixels(),
+        kDistortionMeshResolution, true, false, false, true, true));
+  }
+}
+
+// Creates the EGL display/config/surface and a high-priority ES 3.1 context,
+// makes it current, enables direct rendering, and builds the 1x1 placeholder
+// FBO used around compute shader passes. Returns false on any EGL failure.
+bool Compositor::InitializeEGL() {
+  ATRACE_NAME("Compositor::InitializeEGL");
+  display_ = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+  if (!display_) {
+    ALOGE("Failed to get egl display\n");
+    return false;
+  }
+
+  eglInitialize(display_, nullptr, nullptr);
+
+  EGLint attribs[] = {
+      EGL_BUFFER_SIZE,
+      32,
+      EGL_ALPHA_SIZE,
+      0,
+      EGL_BLUE_SIZE,
+      8,
+      EGL_RED_SIZE,
+      8,
+      EGL_GREEN_SIZE,
+      8,
+      EGL_DEPTH_SIZE,
+      0,
+      EGL_SURFACE_TYPE,
+      EGL_WINDOW_BIT,
+      EGL_RENDERABLE_TYPE,
+      EGL_OPENGL_ES2_BIT,
+      EGL_NONE,
+  };
+
+  EGLint num_configs;
+  if (!eglChooseConfig(display_, attribs, &config_, 1, &num_configs)) {
+    ALOGE("Couldn't find config");
+    return false;
+  }
+
+  std::unique_ptr<DummyNativeWindow> window(new DummyNativeWindow());
+
+  surface_ = eglCreateWindowSurface(display_, config_, window.get(), nullptr);
+  if (surface_ == EGL_NO_SURFACE) {
+    ALOGE("Failed to create egl surface");
+    return false;
+  }
+  // Intentionally leaked: the EGL surface references the native window for
+  // its lifetime. TODO(review): confirm ownership and reclaim on Shutdown.
+  window.release();
+
+  EGLint context_attribs[] = {EGL_CONTEXT_MAJOR_VERSION,
+                              3,
+                              EGL_CONTEXT_MINOR_VERSION,
+                              1,
+                              EGL_CONTEXT_PRIORITY_LEVEL_IMG,
+                              EGL_CONTEXT_PRIORITY_HIGH_IMG,
+                              EGL_NONE};
+  context_ = eglCreateContext(display_, config_, nullptr, context_attribs);
+  // Check creation explicitly rather than letting eglMakeCurrent fail with a
+  // misleading error.
+  if (context_ == EGL_NO_CONTEXT) {
+    ALOGE("Failed to create EGL context");
+    return false;
+  }
+  if (!eglMakeCurrent(display_, surface_, surface_, context_)) {
+    ALOGE("Failed to make EGL context current");
+    return false;
+  }
+
+  load_gl_extensions();
+
+  glEnable(BINNING_CONTROL_HINT_QCOM);
+  glHint(BINNING_CONTROL_HINT_QCOM, RENDER_DIRECT_TO_FRAMEBUFFER_QCOM);
+  is_render_direct_ = true;
+  CHECK_GL();
+
+  // Initialize the placeholder 1x1 framebuffer that we bind during compute
+  // shader instances to avoid accesses to other framebuffers.
+  glGenFramebuffers(1, &compute_fbo_);
+  glGenTextures(1, &compute_fbo_texture_);
+  glBindFramebuffer(GL_FRAMEBUFFER, compute_fbo_);
+  glBindTexture(GL_TEXTURE_2D, compute_fbo_texture_);
+  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+               nullptr);
+  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+                         compute_fbo_texture_, 0);
+  CHECK_GL();
+  CHECK_GL_FBO();
+  glBindTexture(GL_TEXTURE_2D, 0);
+  glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+  return true;
+}
+
+// Tears down everything Initialize() created: render targets, layers,
+// compute FBO, HUD, EDS renderer, and finally the EGL context and surface.
+// NOTE(review): the display is not terminated and no
+// eglMakeCurrent(EGL_NO_CONTEXT) is issued here — verify intended.
+void Compositor::Shutdown() {
+  render_target_[0].Destroy();
+  render_target_[1].Destroy();
+  layers_.clear();
+  glDeleteFramebuffers(1, &compute_fbo_);
+  glDeleteTextures(1, &compute_fbo_texture_);
+
+  debug_hud_.reset();
+  eds_renderer_.reset();
+
+  if (context_) {
+    eglDestroyContext(display_, context_);
+    context_ = 0;
+  }
+
+  if (surface_ != EGL_NO_SURFACE) {
+    eglDestroySurface(display_, surface_);
+    surface_ = EGL_NO_SURFACE;
+  }
+}
+
+void Compositor::RemoveAllBuffers() { layers_.clear(); }
+
+// Reconciles |layers_| against the current surface list: layers whose
+// surface is no longer present are removed. New surfaces are added lazily
+// when their first buffer is posted (see PostBuffer).
+void Compositor::UpdateSurfaces(
+    const std::vector<std::shared_ptr<DisplaySurface>>& surfaces) {
+  auto layer_is_stale = [&surfaces](const AppFrame& layer) {
+    return std::none_of(surfaces.begin(), surfaces.end(),
+                        [&layer](const std::shared_ptr<DisplaySurface>& s) {
+                          return s->surface_id() == layer.surface_id();
+                        });
+  };
+  layers_.erase(std::remove_if(layers_.begin(), layers_.end(), layer_is_stale),
+                layers_.end());
+}
+
+// New layers start detached (-1 surface id) with no blur and default
+// z-order; real state arrives via UpdateSurface().
+Compositor::AppFrame::AppFrame()
+    : surface_id_(-1),
+      blur_(0.0f),
+      z_order_(0),
+      vertical_flip_(false),
+      enable_cac_(true),
+      render_buffer_index_(0) {}
+
+Compositor::AppFrame::~AppFrame() {}
+
+// Returns the cached Texture for the current buffer at slice |index|,
+// creating and initializing it on first use. Returns nullptr if there is no
+// current buffer or EGLImage creation fails. The pointer is owned by the
+// cache — do not hold on to it.
+const Compositor::Texture* Compositor::AppFrame::GetGlTextureId(
+    EGLDisplay display, int index) {
+  auto buffer_consumer = buffer_.buffer();
+  if (!buffer_consumer) {
+    return nullptr;
+  }
+  // Cache lookup keyed on (consumer, slice index).
+  auto texture_it = std::find_if(
+      textures_.begin(), textures_.end(),
+      [buffer_consumer, index](const std::shared_ptr<Texture>& t) {
+        return t->consumer() == buffer_consumer && t->index() == index;
+      });
+
+  if (texture_it != textures_.end()) {
+    return (*texture_it).get();
+  }
+
+  textures_.push_back(
+      std::make_shared<Texture>(buffer_consumer, display, index));
+  if (!textures_.back()->Initialize()) {
+    // Roll back the failed entry so subsequent calls can retry.
+    textures_.pop_back();
+    return nullptr;
+  }
+  return textures_.back().get();
+}
+
+// Refreshes this layer's state (z-order, blur, flags) from |surface| and
+// acquires the newest posted buffer, dropping any older pending buffer to
+// catch up. Returns true if the z-order changed and the caller must re-sort
+// the layer list.
+bool Compositor::AppFrame::UpdateSurface(
+    const std::shared_ptr<DisplaySurface>& surface) {
+  int surface_id = surface->surface_id();
+  float blur = surface->manager_blur();
+  bool need_sort = false;
+  if (z_order_ != surface->layer_order()) {
+    need_sort = true;
+    z_order_ = surface->layer_order();
+  }
+
+  surface_id_ = surface_id;
+  // Lazily create the pose UBO the first time this surface is seen.
+  if (!render_pose_buffer_object_) {
+    render_pose_buffer_object_.reset(
+        new RenderPoseBufferObject(surface->GetMetadataBufferFd()));
+  }
+
+  blur_ = blur;
+  vertical_flip_ =
+      !!(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_VERTICAL_FLIP);
+  enable_cac_ =
+      !(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC);
+
+  AcquiredBuffer skipped_buffer;
+  AcquiredBuffer buffer =
+      surface->AcquireNewestAvailableBuffer(&skipped_buffer);
+  // An intermediate buffer was skipped to catch up: count it as a drop.
+  if (!skipped_buffer.IsEmpty()) {
+    DebugHudData::data.SkipLayerFrame(z_order_);
+    ATRACE_NAME("DropToCatchUp");
+    ATRACE_ASYNC_END("BufferPost", skipped_buffer.buffer()->id());
+  }
+  if (!buffer.IsEmpty()) {
+    DebugHudData::data.AddLayerFrame(z_order_);
+    // Buffer was already ready, so we don't need to wait on the fence.
+    buffer.ClaimAcquireFence().Close();
+    ATRACE_ASYNC_END("BufferPost", buffer.buffer()->id());
+
+    render_buffer_index_ = surface->GetRenderBufferIndex(buffer.buffer()->id());
+
+    // NOTE(review): |data| is only declared when TRACE is defined, yet the
+    // ALOGE_IF below references it unconditionally — this relies on TRACE
+    // always being a defined macro. Verify.
+#ifdef TRACE
+    const volatile DisplaySurfaceMetadata* data =
+        surface->GetMetadataBufferPtr();
+#endif
+    ALOGE_IF(TRACE, "read pose index %d %f %f", render_buffer_index_,
+             data->orientation[render_buffer_index_][0],
+             data->orientation[render_buffer_index_][1]);
+
+    // Move the new buffer over the old. AcquiredBuffer releases the old one.
+    buffer_ = std::move(buffer);
+  }
+  return need_sort;
+}
+
+// Merges any newly attached video mesh surfaces on |surface| into
+// |video_compositors_|, creating a VideoCompositor per new surface.
+void Compositor::AppFrame::UpdateVideoMeshSurface(
+    const std::shared_ptr<DisplaySurface>& surface) {
+  // Update |video_compositors_| with |video_surface|. Note that
+  // |UpdateVideoMeshSurface| should only be called on the PostThread before
+  // |DrawFrame| is called. Thus, no synchronization is required for
+  // |video_compositors_|.
+  if (!surface->video_mesh_surfaces_updated())
+    return;
+
+  // TODO(jwcai) The following loop handles adding new surfaces; video mesh
+  // removal logic shall be handled by listening to |OnChannelClose| event from
+  // DisplayService.
+  for (const auto& video_surface : surface->GetVideoMeshSurfaces()) {
+    // Here we assume number of |video_surface|s is relatively small, thus, the
+    // merge should be efficient enough.
+    auto video_compositor_it = std::find_if(
+        video_compositors_.begin(), video_compositors_.end(),
+        [video_surface](const std::shared_ptr<VideoCompositor>& c) {
+          return c->surface_id() == video_surface->surface_id();
+        });
+
+    if (video_compositor_it == video_compositors_.end()) {
+      // This video surface is new, create a new VideoCompositor.
+      video_compositors_.push_back(std::make_shared<VideoCompositor>(
+          video_surface, surface->GetMetadataBufferPtr()));
+    } else {
+      // A compositor in |video_compositors_| is already set up for this
+      // |video_surface|.
+      ALOGW("Duplicated video_mesh_surface: surface_id=%d",
+            video_surface->surface_id());
+    }
+  }
+}
+
+// Drops all cached blur instances (done when the source size changes).
+void Compositor::AppFrame::ResetBlurrers() { blurrers_.clear(); }
+
+// Takes ownership of the heap-allocated |blurrer|.
+void Compositor::AppFrame::AddBlurrer(Blur* blurrer) {
+  blurrers_.emplace_back(blurrer);
+}
+
+// Called on the post thread whenever |surface| has posted a new buffer.
+// Creates the layer for the surface on first use, refreshes its state, and
+// re-sorts the layer list when z-order changed or a layer was added.
+void Compositor::PostBuffer(const std::shared_ptr<DisplaySurface>& surface) {
+  int surface_id = surface->surface_id();
+
+  ALOGD_IF(TRACE, "Post surface %d", surface_id);
+
+  auto existing = std::find_if(layers_.begin(), layers_.end(),
+                               [surface_id](const AppFrame& frame) {
+                                 return frame.surface_id() == surface_id;
+                               });
+
+  bool need_sort = false;
+  AppFrame* frame;
+  if (existing == layers_.end()) {
+    layers_.emplace_back();
+    frame = &layers_.back();
+    need_sort = true;
+  } else {
+    frame = &*existing;
+  }
+
+  need_sort |= frame->UpdateSurface(surface);
+  frame->UpdateVideoMeshSurface(surface);
+
+  if (need_sort) {
+    std::stable_sort(layers_.begin(), layers_.end());
+  }
+}
+
+// Reads back layer |index|'s slice-0 texture as RGBA bytes, setting |width|
+// and |height| on success. Returns an empty vector for an invalid index or
+// texture failure. Expensive; debug use only.
+std::vector<uint8_t> Compositor::ReadLayerPixels(size_t index, int* width,
+                                                 int* height) {
+  if (index >= layers_.size()) {
+    return {};
+  }
+
+  const Texture* texture = layers_[index].GetGlTextureId(display_, 0);
+  if (!texture) {
+    return {};
+  }
+
+  *width = texture->size()[0];
+  *height = texture->size()[1];
+  return ReadTextureRGBA(texture->texture_id(), *width, *height);
+}
+
+// Reads back an arbitrary IonBuffer as RGBA bytes by wrapping it in a
+// temporary EGLImage-backed texture. Returns an empty vector if EGLImage
+// creation fails. Expensive (glFinish); debug use only.
+std::vector<uint8_t> Compositor::ReadBufferPixels(const IonBuffer* buffer) {
+  android::sp<TemporaryNativeBuffer> native_buffer =
+      new TemporaryNativeBuffer(buffer);
+
+  // Finish to make sure the GL driver has completed drawing of prior FBOs.
+  // Since we are creating an EGL image here, the driver will not know that
+  // there is a dependency on earlier GL draws.
+  glFinish();
+
+  EGLImageKHR image = eglCreateImageKHR(
+      display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+      static_cast<ANativeWindowBuffer*>(native_buffer.get()), nullptr);
+  if (!image) {
+    ALOGE("Failed to create EGLImage\n");
+    return {};
+  }
+
+  GLuint texture_id;
+  glGenTextures(1, &texture_id);
+  glActiveTexture(GL_TEXTURE0);
+  glBindTexture(GL_TEXTURE_2D, texture_id);
+  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+  int width = buffer->width();
+  int height = buffer->height();
+  std::vector<uint8_t> data = ReadTextureRGBA(texture_id, width, height);
+
+  // Clean up the temporary texture and image.
+  glBindTexture(GL_TEXTURE_2D, 0);
+  glDeleteTextures(1, &texture_id);
+  eglDestroyImageKHR(display_, image);
+  return data;
+}
+
+// Renders one composited frame for |target_vsync_count|: gathers up to
+// kMaxLayers layers' textures, applies optional blur, runs the late-latch
+// compute pass, and draws each eye with distortion/CAC into the next render
+// target. On success, |buffer_fence_fd| (if non-null) receives a fence for
+// the rendered frame. Runs on the draw thread with the GL context current.
+bool Compositor::DrawFrame(uint32_t target_vsync_count,
+                           LocalHandle* buffer_fence_fd) {
+  CheckAndUpdateHeadMountMetrics(false);
+
+  ATRACE_NAME("Compositor::DrawFrame");
+  GpuProfiler::Get()->PollGlTimerQueries();
+
+  if (buffer_fence_fd)
+    buffer_fence_fd->Close();
+
+  // Per-layer parameters gathered below; arrays are indexed by layer slot,
+  // texture_id additionally by eye.
+  int num_layers = 0;
+  const int kMaxLayers = 4;
+  GLuint texture_id[2][kMaxLayers] = {{0}};
+  GLuint render_pose_buffer_id[kMaxLayers] = {0};
+  uint32_t render_buffer_index[kMaxLayers] = {0};
+  bool vertical_flip[kMaxLayers] = {false};
+  bool separate_eye_textures[kMaxLayers] = {false};
+  bool enable_cac[kMaxLayers] = {};
+  CHECK_GL();
+  for (auto& layer : layers_) {
+    if (!layer.buffer().buffer()) {
+      ATRACE_NAME("no_buffer");
+      continue;
+    }
+
+    // Extract surface parameters.
+    render_buffer_index[num_layers] = layer.render_buffer_index();
+    render_pose_buffer_id[num_layers] =
+        layer.render_pose_buffer_object()->object_id();
+    vertical_flip[num_layers] = layer.vertical_flip();
+    enable_cac[num_layers] =
+        head_mount_metrics_.supports_chromatic_aberration_correction() &&
+        layer.enable_cac();
+
+    // Extract per-eye textures. These may be separate or joined (atlased).
+    vec2i size(0, 0);
+    int view_count = layer.buffer().buffer()->slice_count();
+    ALOGE_IF(view_count > 2, "Error: more than 2 views not supported");
+    view_count = std::min(2, view_count);
+    separate_eye_textures[num_layers] = (view_count > 1);
+    bool is_missing_texture = false;
+    for (int eye = 0; eye < 2; ++eye) {
+      // If view_count is 1, each eye texture is the 0th.
+      int view_index = (view_count == 2) ? eye : 0;
+      const Texture* texture = layer.GetGlTextureId(display_, view_index);
+      // Texture will be null if the EGL image creation fails (hopefully never).
+      if (!texture) {
+        is_missing_texture = true;
+        break;
+      }
+      // All views are currently expected to have the same size.
+      size = texture->size();
+      texture_id[eye][num_layers] = texture->texture_id();
+    }
+    if (is_missing_texture) {
+      continue;
+    }
+
+    // Perform blur if requested.
+    if (fabs(layer.blur()) > 0.001f) {
+      // No need for CAC on blurred layers.
+      enable_cac[num_layers] = false;
+      // (Re)create the blur instance when absent or the size changed.
+      if (layer.blurrer_count() < 1 || layer.blurrer(0)->width() != size[0] ||
+          layer.blurrer(0)->height() != size[1]) {
+        // Blur is created with the left eye texture, but the same instance
+        // can be used for the right eye as well.
+        layer.ResetBlurrers();
+        layer.AddBlurrer(new Blur(size[0], size[1], texture_id[0][num_layers],
+                                  GL_TEXTURE_2D, GL_TEXTURE_2D, true, display_,
+                                  view_count));
+      }
+      // Reset blur instances to prepare for drawing.
+      layer.blurrer(0)->StartFrame();
+      layer.blurrer(0)->set_scale(layer.blur());
+      // Perform blur and replace source texture with blurred output texture.
+      if (view_count == 1) {
+        // Single wide buffer for both eyes, blur both eyes in one operation.
+        texture_id[0][num_layers] = texture_id[1][num_layers] =
+            layer.blurrer(0)->DrawBlur(texture_id[0][num_layers]);
+      } else {
+        // Split eye buffers in a single frame, blur each framebuffer.
+        texture_id[0][num_layers] =
+            layer.blurrer(0)->DrawBlur(texture_id[0][num_layers]);
+        texture_id[1][num_layers] =
+            layer.blurrer(0)->DrawBlur(texture_id[1][num_layers]);
+      }
+    }
+
+    ++num_layers;
+    if (num_layers >= kMaxLayers)
+      break;
+  }
+
+  CHECK_GL();
+  // Set appropriate binning mode for the number of layers.
+  if (num_layers > 1 && is_render_direct_) {
+    is_render_direct_ = false;
+    glDisable(BINNING_CONTROL_HINT_QCOM);
+  } else if (num_layers <= 1 && !is_render_direct_) {
+    is_render_direct_ = true;
+    glEnable(BINNING_CONTROL_HINT_QCOM);
+    glHint(BINNING_CONTROL_HINT_QCOM, RENDER_DIRECT_TO_FRAMEBUFFER_QCOM);
+  }
+
+  // Workaround for GL driver bug that causes the currently bound FBO to be
+  // accessed during a compute shader pass (DoLateLatch below). Based on an
+  // analysis with systrace, the best pattern here was to run the compute shader
+  // with a *different* FBO than what will be drawn to afterward. So we bind
+  // a dummy 1x1 FBO here and discard it. If instead, the current render target
+  // is bound during the compute shader, the following draw calls will be forced
+  // into direct mode rendering.
+  glBindFramebuffer(GL_FRAMEBUFFER, compute_fbo_);
+  GLenum attachment = GL_COLOR_ATTACHMENT0;
+  glDiscardFramebufferEXT(GL_FRAMEBUFFER, 1, &attachment);
+
+  // Double buffer the render target. Get the render target we're drawing into,
+  // and update the active buffer to the next buffer.
+  RenderTarget& render_target = GetRenderTarget();
+  SetNextRenderTarget();
+
+  if (num_layers > 0) {
+    // This trace prints the EDS+Warp GPU overhead and prints every 5 seconds:
+    TRACE_GPU_PRINT("GPU EDS+Warp", 5 * 60);
+    CHECK_GL();
+    eds_renderer_->DoLateLatch(target_vsync_count, render_buffer_index,
+                               render_pose_buffer_id, vertical_flip,
+                               separate_eye_textures, num_layers);
+
+    render_target.BindFramebuffer();
+
+    // Discard to avoid unresolving the framebuffer during tiled rendering.
+    render_target.DiscardColorAttachment();
+
+    // For tiled mode rendering, we clear every frame to avoid garbage showing
+    // up in the parts of tiles that are not rendered.
+    if (!is_render_direct_) {
+      glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+      glClear(GL_COLOR_BUFFER_BIT);
+    }
+
+    // Draw both eyes; within each eye, layers back-to-front with video mesh
+    // quads rendered behind their owning layer.
+    for (int eye = kLeftEye; eye <= kRightEye; ++eye) {
+      eds_renderer_->PrepGlState(static_cast<EyeType>(eye));
+      for (int layer_i = 0; layer_i < num_layers; ++layer_i) {
+        bool blend_with_previous = layer_i > 0;
+        uint32_t current_buffer_index = render_buffer_index[layer_i];
+
+        // Render video mesh in the background of each graphics layer.
+        layers_[layer_i].ForEachVideoCompositor([this, eye, layer_i,
+                                                 current_buffer_index,
+                                                 &blend_with_previous](
+            const std::shared_ptr<VideoCompositor>& video_compositor) mutable {
+          eds_renderer_->DrawVideoQuad(
+              static_cast<EyeType>(eye), layer_i,
+              video_compositor->GetActiveTextureId(display_),
+              video_compositor->GetTransform(eye, current_buffer_index));
+          blend_with_previous = true;
+        });
+
+        // Apply distortion to frame submitted from the app's GL context.
+        eds_renderer_->SetChromaticAberrationCorrectionEnabled(
+            enable_cac[layer_i]);
+        eds_renderer_->ApplyDistortionCorrectionToTexture(
+            static_cast<EyeType>(eye), &texture_id[eye][layer_i],
+            &vertical_flip[layer_i], &separate_eye_textures[layer_i], &layer_i,
+            1, blend_with_previous, false);
+      }
+    }
+    eds_renderer_->ResetGlState(1);
+    CHECK_GL();
+  } else {
+    ALOGI("No buffers for compositing, clearing to black.");
+    render_target.BindFramebuffer();
+    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+  }
+
+  debug_hud_->Update();
+  debug_hud_->Draw();
+
+  // Fence marks completion of this frame's GL work for the consumer.
+  LocalHandle fence_fd = CreateGLSyncAndFlush(display_);
+
+  if (buffer_fence_fd)
+    *buffer_fence_fd = std::move(fence_fd);
+
+  if (eds_pose_capture_enabled_) {
+    std::lock_guard<std::mutex> _lock(mutex_);
+    eds_renderer_->GetLastEdsPose(&eds_pose_capture_);
+  }
+
+  return true;
+}
+
+// Copies the most recent captured EDS pose into |out_data|. Only available
+// when capture was enabled at Initialize time (persist.dreamos.save_eds_pose);
+// otherwise logs and returns false.
+bool Compositor::GetLastEdsPose(LateLatchOutput* out_data) {
+  if (!eds_pose_capture_enabled_) {
+    ALOGE("Eds pose capture is not enabled.");
+    return false;
+  }
+  std::lock_guard<std::mutex> _lock(mutex_);
+  *out_data = eds_pose_capture_;
+  return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/compositor.h b/libs/vr/libvrflinger/compositor.h
new file mode 100644
index 0000000..be26d31
--- /dev/null
+++ b/libs/vr/libvrflinger/compositor.h
@@ -0,0 +1,233 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
+
+#include <EGL/egl.h>
+#include <log/log.h>
+#include <utils/StrongPointer.h>
+
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <vector>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/display_metrics.h>
+#include <private/dvr/distortion_renderer.h>
+#include <private/dvr/frame_time_history.h>
+#include <private/dvr/ion_buffer.h>
+#include <private/dvr/native_buffer.h>
+
+#include "acquired_buffer.h"
+#include "video_compositor.h"
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+class Blur;
+class BufferConsumer;
+class CompositeHmd;
+class DebugHudView;
+class DisplaySurface;
+
+// This is a GPU compositor for software EDS and lens warp on buffers provided
+// by HardwareComposer.
+// This is a GPU compositor for software EDS and lens warp on buffers provided
+// by HardwareComposer.
+class Compositor {
+ public:
+  Compositor();
+  ~Compositor();
+
+  bool Initialize(const DisplayMetrics& display_metrics);
+  void UpdateHeadMountMetrics(const HeadMountMetrics& head_mount_metrics);
+  void Shutdown();
+
+  // Renders a frame with the latest buffers with EDS and warp applied.
+  // buffer_fence_fd can be used to get a fence for the rendered frame. It can
+  // be set to null if the fence isn't needed.
+  bool DrawFrame(uint32_t target_vsync_count,
+                 pdx::LocalHandle* buffer_fence_fd);
+
+  // Remove all buffers.
+  void RemoveAllBuffers();
+
+  // Synchronize compositor layers with in given surfaces.
+  void UpdateSurfaces(
+      const std::vector<std::shared_ptr<DisplaySurface>>& surfaces);
+
+  // This must be called for each surface before DrawFrame is called.
+  void PostBuffer(const std::shared_ptr<DisplaySurface>& surface);
+
+  std::shared_ptr<IonBuffer> GetBuffer() const {
+    return render_target_[active_render_target_].buffer();
+  }
+
+  // Returns the number of layers being rendered by the compositor.
+  size_t GetLayerCount() const { return layers_.size(); }
+
+  // Returns the source buffer at the given layer index or nullptr if none is
+  // available.
+  std::shared_ptr<BufferConsumer> PeekAtLayer(size_t index) const {
+    if (index >= GetLayerCount())
+      return nullptr;
+    return layers_[index].buffer().buffer();
+  }
+
+  // Expensive operation to transfer the pixels of the given layer index into
+  // unformatted memory and return as a RGBA buffer.
+  // On success, returns non-zero sized vector and sets width and height.
+  // On failure, returns empty vector.
+  std::vector<uint8_t> ReadLayerPixels(size_t index, int* width, int* height);
+
+  // Expensive operation to transfer the pixels of the given buffer into
+  // unformatted memory and return as a RGBA buffer.
+  // On success, returns non-zero sized vector.
+  // On failure, returns empty vector.
+  std::vector<uint8_t> ReadBufferPixels(const IonBuffer* buffer);
+
+  bool GetLastEdsPose(LateLatchOutput* out_data);
+
+  const HeadMountMetrics& head_mount_metrics() const {
+    return head_mount_metrics_;
+  }
+
+ private:
+  class Texture;
+  class RenderPoseBufferObject;
+
+  // A rendered frame from an application.
+  class AppFrame {
+   public:
+    AppFrame();
+    ~AppFrame();
+
+    AppFrame(AppFrame&& other) = default;
+    AppFrame& operator=(AppFrame&&) = default;
+
+    // Gets a GL texture object for the current buffer. The resulting texture
+    // object will be cached for future calls. Returns a pointer for temporary
+    // access - not meant to hold on to.
+    const Texture* GetGlTextureId(EGLDisplay display, int index);
+
+    bool operator<(const AppFrame& rhs) const {
+      return z_order_ < rhs.z_order_;
+    }
+    int z_order() const { return z_order_; }
+    // Return true if this surface z order has been changed.
+    bool UpdateSurface(const std::shared_ptr<DisplaySurface>& surface);
+    void UpdateVideoMeshSurface(const std::shared_ptr<DisplaySurface>& surface);
+    void ResetBlurrers();
+    void AddBlurrer(Blur* blurrer);
+
+    const AcquiredBuffer& buffer() const { return buffer_; }
+    int surface_id() const { return surface_id_; }
+    float blur() const { return blur_; }
+    bool vertical_flip() const { return vertical_flip_; }
+    bool enable_cac() const { return enable_cac_; }
+    size_t blurrer_count() const { return blurrers_.size(); }
+    // Bounds-checked accessor; returns nullptr for out-of-range indices.
+    // (Fixed: the previous check |blurrers_.size() < i| admitted i == size(),
+    // an out-of-bounds access.)
+    Blur* blurrer(size_t i) {
+      return i < blurrers_.size() ? blurrers_[i].get() : nullptr;
+    }
+    uint32_t render_buffer_index() const { return render_buffer_index_; }
+    const RenderPoseBufferObject* render_pose_buffer_object() const {
+      return render_pose_buffer_object_.get();
+    }
+
+    template <class A>
+    void ForEachVideoCompositor(A action) const {
+      for (auto& c : video_compositors_) {
+        action(c);
+      }
+    }
+
+   private:
+    int surface_id_;
+    float blur_;
+    int z_order_;
+    bool vertical_flip_;
+    bool enable_cac_;
+    std::vector<std::unique_ptr<Blur>> blurrers_;
+    AcquiredBuffer buffer_;
+    std::vector<std::shared_ptr<Texture>> textures_;
+    uint32_t render_buffer_index_;
+    std::unique_ptr<RenderPoseBufferObject> render_pose_buffer_object_;
+
+    // Active video mesh compositors
+    std::vector<std::shared_ptr<VideoCompositor>> video_compositors_;
+
+    AppFrame(const AppFrame& other) = delete;
+    AppFrame& operator=(const AppFrame&) = delete;
+  };
+
+  // One of the two double-buffered scanout targets the compositor draws into.
+  class RenderTarget {
+   public:
+    RenderTarget();
+    ~RenderTarget();
+
+    void Initialize(int width, int height);
+    void Destroy();
+    void BindFramebuffer();
+    void DiscardColorAttachment();
+
+    std::shared_ptr<IonBuffer> buffer() const { return buffer_; }
+
+   private:
+    std::shared_ptr<IonBuffer> buffer_;
+    android::sp<NativeBuffer> native_buffer_;
+
+    GLuint buffer_texture_id_;
+    GLuint buffer_framebuffer_id_;
+    EGLImageKHR buffer_image_;
+  };
+
+  Compositor(const Compositor&) = delete;
+  void operator=(const Compositor&) = delete;
+
+  bool InitializeEGL();
+
+  void UpdateHudToggle();
+  void PrintStatsHud();
+  void CheckAndUpdateHeadMountMetrics(bool force_update);
+
+  RenderTarget& GetRenderTarget() {
+    return render_target_[active_render_target_];
+  }
+
+  // Flips between the two render targets.
+  void SetNextRenderTarget() {
+    active_render_target_ = (active_render_target_ + 1) & 1;
+  }
+
+  std::vector<AppFrame> layers_;
+
+  DisplayMetrics display_metrics_;
+  HeadMountMetrics head_mount_metrics_;
+
+  EGLDisplay display_;
+  EGLConfig config_;
+  EGLSurface surface_;
+  EGLContext context_;
+  int active_render_target_;
+  RenderTarget render_target_[2];
+  bool is_render_direct_;
+
+  // FBO for compute shader.
+  GLuint compute_fbo_;
+  GLuint compute_fbo_texture_;
+
+  std::unique_ptr<DebugHudView> debug_hud_;
+
+  // EDS:
+  std::unique_ptr<CompositeHmd> composite_hmd_;
+  bool hmd_metrics_requires_update_;
+  std::unique_ptr<DistortionRenderer> eds_renderer_;
+
+  bool eds_pose_capture_enabled_;
+  // Guards head_mount_metrics_/hmd_metrics_requires_update_ and
+  // eds_pose_capture_.
+  std::mutex mutex_;
+  LateLatchOutput eds_pose_capture_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
diff --git a/libs/vr/libvrflinger/debug_hud_data.cpp b/libs/vr/libvrflinger/debug_hud_data.cpp
new file mode 100644
index 0000000..d387bba
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_data.cpp
@@ -0,0 +1,9 @@
+#include "debug_hud_data.h"
+
+namespace android {
+namespace dvr {
+
+// Storage for the global singleton declared in debug_hud_data.h.
+DebugHudData DebugHudData::data;
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/debug_hud_data.h b/libs/vr/libvrflinger/debug_hud_data.h
new file mode 100644
index 0000000..778169d
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_data.h
@@ -0,0 +1,110 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/frame_time_history.h>
+
+namespace android {
+namespace dvr {
+
+// Tracks debug stats for the displayd debug HUD. Unless otherwise noted,
+// there is no synchronization of data accesses to avoid performance impact.
+// All accesses to this data are on the displayd HWC post thread. Accesses from
+// other threads will need to be duly protected from races.
+// This is a lightweight struct to make it easy to add and remove
+// tracking data.
+struct DebugHudData {
+  // Maximum supported layers for the debug HUD.
+  enum { kMaxLayers = 4 };
+
+  // The global singleton HUD data instance.
+  static DebugHudData data;
+
+  // Tracks framerate and skipped frames.
+  struct FrameStats {
+    // Records the interval since the previous frame into the history.
+    // NOTE(review): the very first call records now - 0 (a huge sample)
+    // because last_frame_ts starts at 0 -- presumably FrameTimeHistory
+    // tolerates or discards outliers; verify.
+    void AddFrame() {
+      int64_t now = GetSystemClockNs();
+      frame_time.AddSample(now - last_frame_ts);
+      last_frame_ts = now;
+    }
+
+    // Counts a dropped frame; the drop still contributes a timing sample.
+    void SkipFrame() {
+      AddFrame();
+      ++drops;
+    }
+
+    int drops = 0;                // Total dropped frames since last reset.
+    int64_t last_frame_ts = 0;    // Timestamp (ns) of the previous frame.
+    FrameTimeHistory frame_time;  // Rolling frame-interval history.
+  };
+
+  // Debug data for compositor layers (applications, system UI, etc).
+  struct LayerData {
+    // Clears both the stats and the layer configuration.
+    void Reset() {
+      ResetStats();
+      width = 0;
+      height = 0;
+      is_separate = false;
+    }
+
+    // Clears only the drop counter; the frame-time history is kept.
+    void ResetStats() { frame_stats.drops = 0; }
+
+    FrameStats frame_stats;
+    int width = 0;
+    int height = 0;
+    bool is_separate = false;
+  };
+
+  // Resets the stats.
+  void ResetStats() {
+    hwc_frame_stats.drops = 0;
+    hwc_latency = 0;
+    for (auto& l : layer_data)
+      l.ResetStats();
+  }
+
+  // Resets the layer configuration.
+  void ResetLayers() {
+    num_layers = 0;
+    for (auto& l : layer_data)
+      l.Reset();
+  }
+
+  // Tracks a frame arrival for the given layer. Indices >= kMaxLayers are
+  // silently ignored.
+  void AddLayerFrame(size_t layer) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].frame_stats.AddFrame();
+    }
+  }
+
+  // Tracks a frame skip/drop for the given layer.
+  void SkipLayerFrame(size_t layer) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].frame_stats.SkipFrame();
+    }
+  }
+
+  // Sets the resolution and other details of the layer.
+  void SetLayerInfo(size_t layer, int width, int height, bool is_separate) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].width = width;
+      layer_data[layer].height = height;
+      layer_data[layer].is_separate = is_separate;
+    }
+  }
+
+  FrameStats hwc_frame_stats;
+  // HWC post latency. DebugHudView::Draw divides this by 1e6 to display
+  // milliseconds, implying nanoseconds -- confirm at the producer.
+  int64_t hwc_latency = 0;
+  // Highest layer index observed + 1; never exceeds kMaxLayers.
+  size_t num_layers = 0;
+  LayerData layer_data[kMaxLayers];
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
diff --git a/libs/vr/libvrflinger/debug_hud_view.cpp b/libs/vr/libvrflinger/debug_hud_view.cpp
new file mode 100644
index 0000000..4936ac6
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_view.cpp
@@ -0,0 +1,91 @@
+#include "debug_hud_view.h"
+
+#include <dvr/pose_client.h>
+
+#include "debug_hud_data.h"
+
+namespace android {
+namespace dvr {
+
+DebugHudView::DebugHudView(const CompositeHmd& hmd) {
+  // The pose client drives the upside-down toggle gesture in Update(); if
+  // creation fails, Update() becomes a no-op.
+  pose_client_ = dvrPoseCreate();
+
+  display_size_ = hmd.GetDisplayMetrics().GetSizePixels();
+  vec2 display_size_meters = hmd.GetDisplayMetrics().GetSizeMeters();
+  // Inter-lens distance in normalized screen space, used by Draw() to
+  // position the HUD text.
+  inter_lens_dist_screen_space_ =
+      2.0f * hmd.GetHeadMountMetrics().GetInterLensDistance() /
+      std::max(display_size_meters[0], display_size_meters[1]);
+}
+
+DebugHudView::~DebugHudView() {
+  // Release the pose client if we ever acquired one.
+  if (pose_client_ != nullptr) {
+    dvrPoseDestroy(pose_client_);
+    pose_client_ = nullptr;
+  }
+}
+
+// Polls the head pose and toggles the HUD after the headset has been held
+// upside down for many consecutive Update() calls.
+void DebugHudView::Update() {
+  // Check for gesture that enables the debug stats HUD.
+  if (!pose_client_)
+    return;
+  DvrPoseAsync pose;
+  // NOTE(review): the return value of dvrPoseGet() is ignored; if the call
+  // can fail, |pose| is read uninitialized below -- confirm it always fills
+  // the output struct.
+  dvrPoseGet(pose_client_, 0, &pose);
+  float32x4_t q = pose.orientation;
+  // Stored order is (x, y, z, w); the quat constructor takes w first.
+  quat orientation(q[3], q[0], q[1], q[2]);
+  vec3 up = orientation * vec3(0, 1, 0);
+  // Upside down == world-up maps to strongly negative local Y.
+  if (up[1] < -0.8f) {
+    ++switch_timer_;
+  } else {
+    switch_timer_ = 0;
+  }
+  // A few seconds upside down => toggle stats HUD.
+  if (switch_timer_ > 200) {
+    switch_timer_ = 0;
+    enabled_ = !enabled_;
+    DebugHudData::data.ResetStats();
+    ALOGE("Toggle debug stats HUD: %s", enabled_ ? "ON" : "OFF");
+  }
+}
+
+// Renders the stats overlay (one HWC summary line plus one line per active
+// layer) into the currently bound framebuffer. No-op while disabled.
+void DebugHudView::Draw() {
+  if (!enabled_)
+    return;
+  // Lazily create the text renderer sized to the full display.
+  if (!debug_text_)
+    debug_text_.reset(new DebugText(400, display_size_[0], display_size_[1]));
+
+  const DebugHudData& data = DebugHudData::data;
+  // Format each layer's line into a fixed-size scratch row; snprintf
+  // truncates and terminates at layer_char_count.
+  const size_t layer_char_count = 50;
+  char layer_data[DebugHudData::kMaxLayers][layer_char_count];
+  for (size_t i = 0; i < data.num_layers; ++i) {
+    float fps = data.layer_data[i].frame_stats.frame_time.GetAverageFps();
+    snprintf(layer_data[i], layer_char_count,
+             "Layer %d %dx%d%s FPS: %.2f Drops: %d\n", static_cast<int>(i),
+             data.layer_data[i].width, data.layer_data[i].height,
+             data.layer_data[i].is_separate ? "x2" : "", fps,
+             data.layer_data[i].frame_stats.drops);
+  }
+
+  float hwc_fps = data.hwc_frame_stats.frame_time.GetAverageFps();
+
+  // Assemble the final text: HWC summary first, then the per-layer lines;
+  // 400 chars matches the DebugText capacity above. hwc_latency is shown in
+  // milliseconds (ns / 1e6).
+  char text[400];
+  float hwc_latency_ms = static_cast<float>(data.hwc_latency) / 1000000.0f;
+  snprintf(text, sizeof(text), "HWC FPS: %.2f Latency: %.3f ms Skips: %d\n",
+           hwc_fps, hwc_latency_ms, data.hwc_frame_stats.drops);
+
+  for (size_t i = 0; i < data.num_layers; ++i) {
+    strncat(text, layer_data[i], sizeof(text) - strlen(text) - 1);
+  }
+
+  // Ensure text termination.
+  text[sizeof(text) - 1] = '\0';
+
+  glViewport(0, 0, display_size_[0], display_size_[1]);
+  glEnable(GL_BLEND);
+  // No stereo, because you can see the HUD OK in one eye. Stereo actually
+  // makes it more difficult to focus sometimes. To enable stereo:
+  // replace the second to last parameter with inter_lens_dist_screen_space_.
+  debug_text_->Draw(0.0f, -0.7f * inter_lens_dist_screen_space_, text, 0.0f, 1);
+  glDisable(GL_BLEND);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/debug_hud_view.h b/libs/vr/libvrflinger/debug_hud_view.h
new file mode 100644
index 0000000..50f38a8
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_view.h
@@ -0,0 +1,48 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
+
+#include <stdint.h>
+
+#include <utils/Log.h>
+
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/graphics/debug_text.h>
+
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+class CompositeHmd;
+
+// The view and the controller for the displayd debug HUD.
+// The HUD is enabled and disabled by internally tracking the head pose.
+// When the head pose is upside down for ~3 seconds, the enabled state toggles.
+// See DebugHudData for the data that is reported.
+class DebugHudView {
+ public:
+  DebugHudView(const CompositeHmd& hmd);
+  ~DebugHudView();
+
+  // Updates HUD state.
+  void Update();
+
+  // Draws HUD into the current framebuffer if it is currently enabled.
+  void Draw();
+
+ private:
+  DebugHudView(const DebugHudView&) = delete;
+  DebugHudView& operator=(const DebugHudView&) = delete;
+
+  // Owned pose client handle; destroyed in the destructor.
+  DvrPose* pose_client_ = nullptr;
+  // Full display size in pixels.
+  vec2i display_size_;
+  bool enabled_ = false;
+  // Consecutive upside-down Update() calls observed so far.
+  int switch_timer_ = 0;
+  float inter_lens_dist_screen_space_ = 0.0f;
+  // Lazily created on first Draw() while enabled.
+  std::unique_ptr<DebugText> debug_text_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
diff --git a/libs/vr/libvrflinger/display_manager_service.cpp b/libs/vr/libvrflinger/display_manager_service.cpp
new file mode 100644
index 0000000..6730ba8
--- /dev/null
+++ b/libs/vr/libvrflinger/display_manager_service.cpp
@@ -0,0 +1,225 @@
+#include "display_manager_service.h"
+
+#include <pdx/channel_handle.h>
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/display_rpc.h>
+#include <sys/poll.h>
+
+#include <array>
+
+using android::pdx::Channel;
+using android::pdx::LocalChannelHandle;
+using android::pdx::Message;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::IfAnyOf;
+
+namespace {
+
+// As a first line of defense, the display manager endpoint is only accessible
+// to the user and group.
+
+// TODO(dnicoara): Remove read/write permission for others. This is in here just
+// to allow us to experiment with cast functionality from a plain old app.
+constexpr mode_t kDisplayManagerEndpointFileMode =
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+
+constexpr size_t kMaxSurfacesPerRequest = 32;
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+// Sets (pending=true) or clears (pending=false) POLLIN on this channel so the
+// remote display manager wakes up and re-queries state. Failures are logged
+// but otherwise ignored.
+void DisplayManager::SetNotificationsPending(bool pending) {
+  int ret = service_->ModifyChannelEvents(channel_id_, pending ? 0 : POLLIN,
+                                          pending ? POLLIN : 0);
+  ALOGE_IF(ret < 0,
+           "DisplayManager::SetNotificationPending: Failed to modify channel "
+           "events: %s",
+           strerror(-ret));
+}
+
+DisplayManagerService::DisplayManagerService(
+    const std::shared_ptr<DisplayService>& display_service)
+    : BASE("DisplayManagerService",
+           Endpoint::Create(DisplayManagerRPC::kClientPath,
+                            kDisplayManagerEndpointFileMode)),
+      display_service_(display_service) {
+  // Route display-service surface changes back into this service so the
+  // registered display manager can be notified.
+  display_service_->SetDisplayConfigurationUpdateNotifier(
+      std::bind(&DisplayManagerService::OnDisplaySurfaceChange, this));
+}
+
+// Accepts a connection from the display manager client. Only a single
+// display manager may be registered at a time; additional opens fail.
+std::shared_ptr<pdx::Channel> DisplayManagerService::OnChannelOpen(
+    pdx::Message& message) {
+  // Prevent more than one display manager from registering at a time.
+  if (display_manager_)
+    REPLY_ERROR_RETURN(message, EPERM, nullptr);
+
+  display_manager_ =
+      std::make_shared<DisplayManager>(this, message.GetChannelId());
+  return display_manager_;
+}
+
+void DisplayManagerService::OnChannelClose(
+    pdx::Message& /*message*/, const std::shared_ptr<pdx::Channel>& channel) {
+  // The registered display manager disconnected; clear the slot so a new
+  // display manager may register on a fresh channel.
+  if (channel == display_manager_)
+    display_manager_.reset();
+}
+
+// Dispatches display-manager RPC opcodes to the corresponding handlers.
+// Unrecognized opcodes fall through to the default service handler.
+int DisplayManagerService::HandleMessage(pdx::Message& message) {
+  auto channel = std::static_pointer_cast<DisplayManager>(message.GetChannel());
+
+  switch (message.GetOp()) {
+    case DisplayManagerRPC::GetSurfaceList::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::GetSurfaceList>(
+          *this, &DisplayManagerService::OnGetSurfaceList, message);
+      return 0;
+
+    case DisplayManagerRPC::GetSurfaceBuffers::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::GetSurfaceBuffers>(
+          *this, &DisplayManagerService::OnGetSurfaceBuffers, message);
+      return 0;
+
+    case DisplayManagerRPC::UpdateSurfaces::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::UpdateSurfaces>(
+          *this, &DisplayManagerService::OnUpdateSurfaces, message);
+      return 0;
+
+    default:
+      return Service::DefaultHandleMessage(message);
+  }
+}
+
+// Builds a snapshot of all display surfaces (id, pid, type, plus the client-
+// and manager-owned attribute sets) for the display manager, then clears the
+// pending-notification flag since the manager has now observed the state.
+std::vector<DisplaySurfaceInfo> DisplayManagerService::OnGetSurfaceList(
+    pdx::Message& /*message*/) {
+  std::vector<DisplaySurfaceInfo> items;
+
+  display_service_->ForEachDisplaySurface([&items](
+      const std::shared_ptr<DisplaySurface>& surface) mutable {
+    DisplaySurfaceInfo item;
+
+    item.surface_id = surface->surface_id();
+    item.process_id = surface->process_id();
+    item.type = surface->type();
+    item.flags = 0;  // TODO(eieio)
+    // Client-side Blur is reported as a constant 0; only the manager
+    // attribute carries a real blur value.
+    item.client_attributes = DisplaySurfaceAttributes{
+        {DisplaySurfaceAttributeEnum::Visible,
+         DisplaySurfaceAttributeValue{surface->client_visible()}},
+        {DisplaySurfaceAttributeEnum::ZOrder,
+         DisplaySurfaceAttributeValue{surface->client_z_order()}},
+        {DisplaySurfaceAttributeEnum::Blur, DisplaySurfaceAttributeValue{0.f}}};
+    item.manager_attributes = DisplaySurfaceAttributes{
+        {DisplaySurfaceAttributeEnum::Visible,
+         DisplaySurfaceAttributeValue{surface->manager_visible()}},
+        {DisplaySurfaceAttributeEnum::ZOrder,
+         DisplaySurfaceAttributeValue{surface->manager_z_order()}},
+        {DisplaySurfaceAttributeEnum::Blur,
+         DisplaySurfaceAttributeValue{surface->manager_blur()}}};
+
+    items.push_back(item);
+  });
+
+  // The fact that we're in the message handler implies that display_manager_ is
+  // not nullptr. No check required, unless this service becomes multi-threaded.
+  display_manager_->SetNotificationsPending(false);
+
+  return items;
+}
+
+// Returns channel handles for all buffer consumers of the given surface.
+// Replies with ENOENT if the surface id is unknown, or with the negated
+// GetConsumers() error on failure.
+std::vector<LocalChannelHandle> DisplayManagerService::OnGetSurfaceBuffers(
+    pdx::Message& message, int surface_id) {
+  std::shared_ptr<DisplaySurface> surface =
+      display_service_->GetDisplaySurface(surface_id);
+  if (!surface)
+    REPLY_ERROR_RETURN(message, ENOENT, {});
+
+  std::vector<LocalChannelHandle> consumers;
+  int ret = surface->GetConsumers(&consumers);
+  if (ret < 0) {
+    // Log tag fixed to match the actual method name (was
+    // "OnGetDisplaySurfaceBuffers", which does not exist).
+    ALOGE(
+        "DisplayManagerService::OnGetSurfaceBuffers: Failed to get "
+        "consumers for surface %d: %s",
+        surface_id, strerror(-ret));
+    REPLY_ERROR_RETURN(message, -ret, {});
+  }
+
+  return consumers;
+}
+
+// Applies manager-side attribute updates to the listed surfaces and then
+// reconfigures the active display layers. Returns -ENOENT if any surface id
+// is unknown. NOTE(review): updates already applied to earlier surfaces in
+// the batch are not rolled back in that case. Attributes with incompatible
+// value types are logged and skipped, not treated as errors.
+int DisplayManagerService::OnUpdateSurfaces(
+    pdx::Message& /*message*/,
+    const std::map<int, DisplaySurfaceAttributes>& updates) {
+  for (const auto& surface_update : updates) {
+    const int surface_id = surface_update.first;
+    const DisplaySurfaceAttributes& attributes = surface_update.second;
+
+    std::shared_ptr<DisplaySurface> surface =
+        display_service_->GetDisplaySurface(surface_id);
+
+    if (!surface)
+      return -ENOENT;
+
+    for (const auto& attribute : attributes) {
+      const auto& key = attribute.first;
+      const auto* variant = &attribute.second;
+      bool invalid_value = false;
+      // Each case accepts only the value types listed in IfAnyOf<>; a variant
+      // holding any other type sets invalid_value.
+      switch (key) {
+        case DisplaySurfaceAttributeEnum::ZOrder:
+          invalid_value =
+              !IfAnyOf<int32_t>::Call(variant, [&surface](const auto& value) {
+                surface->ManagerSetZOrder(value);
+              });
+          break;
+        case DisplaySurfaceAttributeEnum::Visible:
+          invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+              variant, [&surface](const auto& value) {
+                surface->ManagerSetVisible(value);
+              });
+          break;
+        case DisplaySurfaceAttributeEnum::Blur:
+          invalid_value = !IfAnyOf<int32_t, int64_t, float>::Call(
+              variant, [&surface](const auto& value) {
+                surface->ManagerSetBlur(value);
+              });
+          break;
+        default:
+          ALOGW(
+              "DisplayManagerService::OnUpdateSurfaces: Attempt to set invalid "
+              "attribute %u on surface %d",
+              key, surface_id);
+          break;
+      }
+
+      if (invalid_value) {
+        ALOGW(
+            "DisplayManagerService::OnUpdateSurfaces: Failed to set display "
+            "surface attribute '%s' because of incompatible type: %d",
+            DisplaySurfaceAttributeEnum::ToString(key).c_str(),
+            variant->index());
+      }
+    }
+  }
+
+  // Reconfigure the display layers for any active surface changes.
+  display_service_->UpdateActiveDisplaySurfaces();
+  return 0;
+}
+
+// Callback from the display service when the surface set changes. With a
+// display manager registered, just raise its pending flag; otherwise fall
+// back to making every surface visible directly.
+void DisplayManagerService::OnDisplaySurfaceChange() {
+  if (display_manager_) {
+    display_manager_->SetNotificationsPending(true);
+  } else {
+    // If there isn't a display manager registered, default all display surfaces
+    // to visible.
+    display_service_->ForEachDisplaySurface(
+        [](const std::shared_ptr<DisplaySurface>& surface) {
+          surface->ManagerSetVisible(true);
+        });
+    display_service_->UpdateActiveDisplaySurfaces();
+  }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_manager_service.h b/libs/vr/libvrflinger/display_manager_service.h
new file mode 100644
index 0000000..46401fa
--- /dev/null
+++ b/libs/vr/libvrflinger/display_manager_service.h
@@ -0,0 +1,73 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
+
+#include <pdx/service.h>
+#include <private/dvr/display_rpc.h>
+
+#include "display_service.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayManagerService;
+
+// The display manager is a client of the display manager service. This class
+// represents the connected client that the display manager service sends
+// notifications to.
+class DisplayManager : public pdx::Channel {
+ public:
+  DisplayManager(DisplayManagerService* service, int channel_id)
+      : service_(service), channel_id_(channel_id) {}
+
+  int channel_id() const { return channel_id_; }
+
+  // Sets or clears the channel event mask to indicate pending events that the
+  // display manager on the other end of the channel should read and handle.
+  // When |pending| is true the POLLIN bit is set in the event mask; when
+  // |pending| is false the POLLIN bit is cleared in the event mask.
+  void SetNotificationsPending(bool pending);
+
+ private:
+  DisplayManager(const DisplayManager&) = delete;
+  void operator=(const DisplayManager&) = delete;
+
+  // Non-owning back-pointer to the service that created this channel object.
+  DisplayManagerService* service_;
+  int channel_id_;
+};
+
+// The display manager service marshalls state and events from the display
+// service to the display manager.
+class DisplayManagerService : public pdx::ServiceBase<DisplayManagerService> {
+ public:
+  std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& message) override;
+  void OnChannelClose(pdx::Message& message,
+                      const std::shared_ptr<pdx::Channel>& channel) override;
+  int HandleMessage(pdx::Message& message) override;
+
+ private:
+  friend BASE;
+
+  explicit DisplayManagerService(
+      const std::shared_ptr<DisplayService>& display_service);
+
+  // RPC handlers; see the implementation for the per-opcode semantics.
+  std::vector<DisplaySurfaceInfo> OnGetSurfaceList(pdx::Message& message);
+  std::vector<pdx::LocalChannelHandle> OnGetSurfaceBuffers(
+      pdx::Message& message, int surface_id);
+  int OnUpdateSurfaces(pdx::Message& message,
+                       const std::map<int, DisplaySurfaceAttributes>& updates);
+
+  // Called by the display service to indicate changes to display surfaces that
+  // the display manager should evaluate.
+  void OnDisplaySurfaceChange();
+
+  DisplayManagerService(const DisplayManagerService&) = delete;
+  void operator=(const DisplayManagerService&) = delete;
+
+  std::shared_ptr<DisplayService> display_service_;
+  // The single registered display manager channel, or null when none is
+  // connected.
+  std::shared_ptr<DisplayManager> display_manager_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
diff --git a/libs/vr/libvrflinger/display_service.cpp b/libs/vr/libvrflinger/display_service.cpp
new file mode 100644
index 0000000..c464c98
--- /dev/null
+++ b/libs/vr/libvrflinger/display_service.cpp
@@ -0,0 +1,332 @@
+#include "display_service.h"
+
+#include <vector>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <pdx/rpc/remote_method.h>
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/lucid_metrics.h>
+#include <private/dvr/numeric.h>
+#include <private/dvr/polynomial_radial_distortion.h>
+#include <private/dvr/types.h>
+
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::WrapBuffer;
+
+namespace android {
+namespace dvr {
+
+// Default construction forwards a null composer pointer to the main
+// constructor.
+DisplayService::DisplayService() : DisplayService(nullptr) {}
+
+DisplayService::DisplayService(Hwc2::Composer* hidl)
+    : BASE("DisplayService", Endpoint::Create(DisplayRPC::kClientPath)),
+      hardware_composer_(hidl) {}
+
+// Returns the hardware composer's debug dump, at most |max_length| chars.
+// The buffer is one byte larger than |max_length| and zero-filled so the
+// std::string constructor always finds a terminating NUL, even if Dump()
+// writes all |max_length| bytes without terminating (the original could read
+// past the end of the buffer in that case).
+std::string DisplayService::DumpState(size_t max_length) {
+  std::vector<char> buffer(max_length + 1, '\0');
+  uint32_t max_len_p = static_cast<uint32_t>(max_length);
+  hardware_composer_.Dump(buffer.data(), &max_len_p);
+  return std::string(buffer.data());
+}
+
+// Hides a display surface when its owning client channel closes, then
+// notifies listeners so the layer configuration can be re-evaluated.
+void DisplayService::OnChannelClose(pdx::Message& /*message*/,
+                                    const std::shared_ptr<Channel>& channel) {
+  auto surface = std::static_pointer_cast<SurfaceChannel>(channel);
+  if (surface && surface->type() == SurfaceTypeEnum::Normal) {
+    auto display_surface = std::static_pointer_cast<DisplaySurface>(surface);
+    display_surface->ManagerSetVisible(false);
+    display_surface->ClientSetVisible(false);
+    NotifyDisplayConfigurationUpdate();
+  }
+  // TODO(jwcai) Handle ChannelClose of VideoMeshSurface.
+}
+
+// First-level dispatch for display service messages. Directly handles messages
+// that are independent of the display surface (metrics, creation) and routes
+// surface-specific messages to the per-instance handlers. Opcodes not listed
+// here fall through to the base Service implementation.
+int DisplayService::HandleMessage(pdx::Message& message) {
+  auto channel = message.GetChannel<SurfaceChannel>();
+
+  switch (message.GetOp()) {
+    case DisplayRPC::GetMetrics::Opcode:
+      DispatchRemoteMethod<DisplayRPC::GetMetrics>(
+          *this, &DisplayService::OnGetMetrics, message);
+      return 0;
+
+    case DisplayRPC::GetEdsCapture::Opcode:
+      DispatchRemoteMethod<DisplayRPC::GetEdsCapture>(
+          *this, &DisplayService::OnGetEdsCapture, message);
+      return 0;
+
+    case DisplayRPC::CreateSurface::Opcode:
+      DispatchRemoteMethod<DisplayRPC::CreateSurface>(
+          *this, &DisplayService::OnCreateSurface, message);
+      return 0;
+
+    case DisplayRPC::EnterVrMode::Opcode:
+      DispatchRemoteMethod<DisplayRPC::EnterVrMode>(
+          *this, &DisplayService::OnEnterVrMode, message);
+      return 0;
+
+    case DisplayRPC::ExitVrMode::Opcode:
+      DispatchRemoteMethod<DisplayRPC::ExitVrMode>(
+          *this, &DisplayService::OnExitVrMode, message);
+      return 0;
+
+    case DisplayRPC::SetViewerParams::Opcode:
+      DispatchRemoteMethod<DisplayRPC::SetViewerParams>(
+          *this, &DisplayService::OnSetViewerParams, message);
+      return 0;
+
+    // Direct the surface specific messages to the surface instance.
+    case DisplayRPC::AllocateBuffer::Opcode:
+    case DisplayRPC::SetAttributes::Opcode:
+    case DisplayRPC::GetMetadataBuffer::Opcode:
+    case DisplayRPC::CreateVideoMeshSurface::Opcode:
+    case DisplayRPC::VideoMeshSurfaceCreateProducerQueue::Opcode:
+      return HandleSurfaceMessage(message);
+
+    default:
+      return Service::HandleMessage(message);
+  }
+}
+
+// Computes system display metrics from the hardware composer's display info
+// and the compositor's current head-mount metrics. Replies with EINVAL when
+// no compositor is available.
+SystemDisplayMetrics DisplayService::OnGetMetrics(pdx::Message& message) {
+  const Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+
+  HeadMountMetrics head_mount = compositor->head_mount_metrics();
+  CompositeHmd hmd(head_mount, hardware_composer_.GetHmdDisplayMetrics());
+  vec2i distorted_render_size = hmd.GetRecommendedRenderTargetSize();
+  FieldOfView left_fov = hmd.GetEyeFov(kLeftEye);
+  FieldOfView right_fov = hmd.GetEyeFov(kRightEye);
+
+  SystemDisplayMetrics metrics;
+
+  metrics.display_native_width = GetDisplayMetrics().width;
+  metrics.display_native_height = GetDisplayMetrics().height;
+  metrics.display_x_dpi = GetDisplayMetrics().dpi.x;
+  metrics.display_y_dpi = GetDisplayMetrics().dpi.y;
+  metrics.distorted_width = distorted_render_size[0];
+  metrics.distorted_height = distorted_render_size[1];
+  metrics.vsync_period_ns =
+      hardware_composer_.native_display_metrics().vsync_period_ns;
+  // IPD in millimeters is left at 0 here; the inter-lens distance is reported
+  // separately in meters below.
+  metrics.hmd_ipd_mm = 0;
+  metrics.inter_lens_distance_m = head_mount.GetInterLensDistance();
+  // FOV arrays are ordered left, right, bottom, top.
+  metrics.left_fov_lrbt[0] = left_fov.GetLeft();
+  metrics.left_fov_lrbt[1] = left_fov.GetRight();
+  metrics.left_fov_lrbt[2] = left_fov.GetBottom();
+  metrics.left_fov_lrbt[3] = left_fov.GetTop();
+  metrics.right_fov_lrbt[0] = right_fov.GetLeft();
+  metrics.right_fov_lrbt[1] = right_fov.GetRight();
+  metrics.right_fov_lrbt[2] = right_fov.GetBottom();
+  metrics.right_fov_lrbt[3] = right_fov.GetTop();
+
+  return metrics;
+}
+
+// Creates a new DisplaySurface and associates it with this channel. This may
+// only be done once per channel; the channel id doubles as the surface id.
+int DisplayService::OnCreateSurface(pdx::Message& message, int width,
+                                    int height, int format, int usage,
+                                    DisplaySurfaceFlags flags) {
+  // A surface may only be created once per channel.
+  if (message.GetChannel())
+    return -EINVAL;
+
+  ALOGI_IF(TRACE, "DisplayService::OnCreateSurface: cid=%d",
+           message.GetChannelId());
+
+  // Use the channel id as the unique surface id.
+  const int surface_id = message.GetChannelId();
+  const int process_id = message.GetProcessId();
+
+  ALOGI_IF(TRACE,
+           "DisplayService::OnCreateSurface: surface_id=%d process_id=%d "
+           "width=%d height=%d format=%x usage=%x flags=%x",
+           surface_id, process_id, width, height, format, usage, flags);
+
+  // TODO(eieio,jbates): Validate request parameters.
+  auto channel = std::make_shared<DisplaySurface>(
+      this, surface_id, process_id, width, height, format, usage, flags);
+
+  // Attach the surface to the channel and let listeners (e.g. the display
+  // manager service) know the surface set changed.
+  message.SetChannel(channel);
+  NotifyDisplayConfigurationUpdate();
+  return 0;
+}
+
+// Returns the last EDS late-latch pose capture serialized as a raw byte
+// buffer holding a LateLatchOutput struct. Replies with EINVAL when no
+// compositor is available and EPERM when the compositor cannot provide the
+// last EDS pose.
+DisplayRPC::ByteBuffer DisplayService::OnGetEdsCapture(pdx::Message& message) {
+  Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+
+  std::vector<std::uint8_t> buffer(sizeof(LateLatchOutput));
+
+  if (!compositor->GetLastEdsPose(
+          reinterpret_cast<LateLatchOutput*>(buffer.data()))) {
+    REPLY_ERROR_RETURN(message, EPERM, {});
+  }
+
+  return WrapBuffer(std::move(buffer));
+}
+
+// Resumes the hardware composer when a client enters VR mode.
+int DisplayService::OnEnterVrMode(pdx::Message& /*message*/) {
+  hardware_composer_.Resume();
+  return 0;
+}
+
+// Suspends the hardware composer on VR mode exit.
+int DisplayService::OnExitVrMode(pdx::Message& /*message*/) {
+  hardware_composer_.Suspend();
+  return 0;
+}
+
+// Rebuilds the head-mount metrics from client-supplied viewer parameters and
+// hands them to the compositor. Replies with EINVAL when no compositor is
+// available.
+void DisplayService::OnSetViewerParams(pdx::Message& message,
+                                       const ViewerParams& view_params) {
+  Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL);
+
+  // Default to a symmetric 55-degree FOV when no angles were supplied.
+  FieldOfView left(55.0f, 55.0f, 55.0f, 55.0f);
+  FieldOfView right(55.0f, 55.0f, 55.0f, 55.0f);
+  if (view_params.left_eye_field_of_view_angles.size() >= 4) {
+    left = FieldOfView(ToRad(view_params.left_eye_field_of_view_angles[0]),
+                       ToRad(view_params.left_eye_field_of_view_angles[1]),
+                       ToRad(view_params.left_eye_field_of_view_angles[2]),
+                       ToRad(view_params.left_eye_field_of_view_angles[3]));
+    // NOTE(review): the right eye is built from the *left* eye angles with
+    // the left/right entries swapped, i.e. mirrored. Presumably intentional
+    // for symmetric viewers -- verify no separate right-eye angle field is
+    // expected here.
+    right = FieldOfView(ToRad(view_params.left_eye_field_of_view_angles[1]),
+                        ToRad(view_params.left_eye_field_of_view_angles[0]),
+                        ToRad(view_params.left_eye_field_of_view_angles[2]),
+                        ToRad(view_params.left_eye_field_of_view_angles[3]));
+  }
+
+  std::shared_ptr<ColorChannelDistortion> red_distortion;
+  std::shared_ptr<ColorChannelDistortion> green_distortion;
+  std::shared_ptr<ColorChannelDistortion> blue_distortion;
+
+  // We should always have a red distortion.
+  LOG_FATAL_IF(view_params.distortion_coefficients_r.empty());
+  red_distortion = std::make_shared<PolynomialRadialDistortion>(
+      view_params.distortion_coefficients_r);
+
+  // Green and blue distortions are optional; null means no per-channel
+  // distortion for that channel.
+  if (!view_params.distortion_coefficients_g.empty()) {
+    green_distortion = std::make_shared<PolynomialRadialDistortion>(
+        view_params.distortion_coefficients_g);
+  }
+
+  if (!view_params.distortion_coefficients_b.empty()) {
+    blue_distortion = std::make_shared<PolynomialRadialDistortion>(
+        view_params.distortion_coefficients_b);
+  }
+
+  HeadMountMetrics::EyeOrientation left_orientation =
+      HeadMountMetrics::EyeOrientation::kCCW0Degrees;
+  HeadMountMetrics::EyeOrientation right_orientation =
+      HeadMountMetrics::EyeOrientation::kCCW0Degrees;
+
+  if (view_params.eye_orientations.size() > 1) {
+    left_orientation = static_cast<HeadMountMetrics::EyeOrientation>(
+        view_params.eye_orientations[0]);
+    right_orientation = static_cast<HeadMountMetrics::EyeOrientation>(
+        view_params.eye_orientations[1]);
+  }
+
+  HeadMountMetrics head_mount_metrics(
+      view_params.inter_lens_distance, view_params.tray_to_lens_distance,
+      view_params.screen_to_lens_distance,
+      static_cast<HeadMountMetrics::VerticalAlignment>(
+          view_params.vertical_alignment),
+      left, right, red_distortion, green_distortion, blue_distortion,
+      left_orientation, right_orientation,
+      view_params.screen_center_to_lens_distance);
+
+  compositor->UpdateHeadMountMetrics(head_mount_metrics);
+}
+
+// Calls the message handler for the DisplaySurface associated with this
+// channel. Replies with EINVAL when the channel has no surface attached yet
+// (i.e. CreateSurface was never called on it).
+int DisplayService::HandleSurfaceMessage(pdx::Message& message) {
+  auto surface = std::static_pointer_cast<SurfaceChannel>(message.GetChannel());
+  ALOGW_IF(!surface,
+           "DisplayService::HandleSurfaceMessage: surface is nullptr!");
+
+  if (surface)
+    return surface->HandleMessage(message);
+  else
+    REPLY_ERROR_RETURN(message, EINVAL, 0);
+}
+
+// Looks up the surface whose id equals its channel id; returns null if no
+// such channel exists. NOTE(review): this is an unchecked static cast -- it
+// assumes every channel id passed here belongs to a DisplaySurface channel;
+// verify non-surface channel types can't reach this path.
+std::shared_ptr<DisplaySurface> DisplayService::GetDisplaySurface(
+    int surface_id) const {
+  return std::static_pointer_cast<DisplaySurface>(GetChannel(surface_id));
+}
+
+// Returns all channels as DisplaySurface pointers.
+std::vector<std::shared_ptr<DisplaySurface>>
+DisplayService::GetDisplaySurfaces() const {
+  return GetChannels<DisplaySurface>();
+}
+
+// Collects the subset of display surfaces that report IsVisible().
+std::vector<std::shared_ptr<DisplaySurface>>
+DisplayService::GetVisibleDisplaySurfaces() const {
+  std::vector<std::shared_ptr<DisplaySurface>> visible_surfaces;
+
+  ForEachDisplaySurface(
+      [&](const std::shared_ptr<DisplaySurface>& surface) mutable {
+        if (surface->IsVisible())
+          visible_surfaces.push_back(surface);
+      });
+
+  return visible_surfaces;
+}
+
+// Recomputes the ordered set of visible surfaces, assigns blur, and pushes
+// the result to the hardware composer. Returns its SetDisplaySurfaces result.
+int DisplayService::UpdateActiveDisplaySurfaces() {
+  auto visible_surfaces = GetVisibleDisplaySurfaces();
+
+  // Sort the surfaces based on manager z order first, then client z order.
+  std::sort(visible_surfaces.begin(), visible_surfaces.end(),
+            [](const std::shared_ptr<DisplaySurface>& a,
+               const std::shared_ptr<DisplaySurface>& b) {
+              return a->manager_z_order() != b->manager_z_order()
+                         ? a->manager_z_order() < b->manager_z_order()
+                         : a->client_z_order() < b->client_z_order();
+            });
+
+  ALOGD_IF(TRACE,
+           "DisplayService::UpdateActiveDisplaySurfaces: %zd visible surfaces",
+           visible_surfaces.size());
+
+  // TODO(jbates) Have the shell manage blurred layers.
+  // Walk from topmost surface down: every surface below one that requested
+  // blur_behind gets full blur, except surfaces that opt out via
+  // exclude_from_blur.
+  bool blur_requested = false;
+  auto end = visible_surfaces.crend();
+  for (auto it = visible_surfaces.crbegin(); it != end; ++it) {
+    auto surface = *it;
+    // Surfaces with exclude_from_blur==true are not blurred
+    // and are excluded from blur computation of other layers.
+    if (surface->client_exclude_from_blur()) {
+      surface->ManagerSetBlur(0.0f);
+      continue;
+    }
+    surface->ManagerSetBlur(blur_requested ? 1.0f : 0.0f);
+    if (surface->client_blur_behind())
+      blur_requested = true;
+  }
+  return hardware_composer_.SetDisplaySurfaces(std::move(visible_surfaces));
+}
+
+// Registers the callback invoked whenever the surface configuration changes.
+void DisplayService::SetDisplayConfigurationUpdateNotifier(
+    DisplayConfigurationUpdateNotifier update_notifier) {
+  update_notifier_ = update_notifier;
+}
+
+// Invokes the registered notifier, if any, to signal that the set of display
+// surfaces (or their attributes) changed.
+void DisplayService::NotifyDisplayConfigurationUpdate() {
+  if (update_notifier_)
+    update_notifier_();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_service.h b/libs/vr/libvrflinger/display_service.h
new file mode 100644
index 0000000..ebd97de
--- /dev/null
+++ b/libs/vr/libvrflinger/display_service.h
@@ -0,0 +1,107 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
+
+#include <pdx/service.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/late_latch.h>
+
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "acquired_buffer.h"
+#include "display_surface.h"
+#include "epoll_event_dispatcher.h"
+#include "hardware_composer.h"
+
+namespace android {
+namespace dvr {
+
+// DisplayService implements the displayd display service over ServiceFS.
+class DisplayService : public pdx::ServiceBase<DisplayService> {
+ public:
+ // Returns a human-readable dump of service state, truncated to max_length.
+ std::string DumpState(size_t max_length) override;
+
+ void OnChannelClose(pdx::Message& message,
+ const std::shared_ptr<pdx::Channel>& channel) override;
+ int HandleMessage(pdx::Message& message) override;
+
+ // Surface lookup/query helpers. GetVisibleDisplaySurfaces() returns only
+ // surfaces that both the client and the display manager marked visible.
+ std::shared_ptr<DisplaySurface> GetDisplaySurface(int surface_id) const;
+ std::vector<std::shared_ptr<DisplaySurface>> GetDisplaySurfaces() const;
+ std::vector<std::shared_ptr<DisplaySurface>> GetVisibleDisplaySurfaces()
+ const;
+
+ // Updates the list of actively displayed surfaces. This must be called after
+ // any change to client/manager attributes that affect visibility or z order.
+ int UpdateActiveDisplaySurfaces();
+
+ // Applies |action| to every channel that is a Normal (display) surface.
+ template <class A>
+ void ForEachDisplaySurface(A action) const {
+ ForEachChannel([action](const ChannelIterator::value_type& pair) mutable {
+ auto surface = std::static_pointer_cast<SurfaceChannel>(pair.second);
+ if (surface->type() == SurfaceTypeEnum::Normal)
+ action(std::static_pointer_cast<DisplaySurface>(surface));
+ });
+ }
+
+ // Callback fired from NotifyDisplayConfigurationUpdate().
+ using DisplayConfigurationUpdateNotifier = std::function<void(void)>;
+ void SetDisplayConfigurationUpdateNotifier(
+ DisplayConfigurationUpdateNotifier notifier);
+
+ // Forwards the vsync callback registration to the hardware composer.
+ using VSyncCallback = HardwareComposer::VSyncCallback;
+ void SetVSyncCallback(VSyncCallback callback) {
+ hardware_composer_.SetVSyncCallback(callback);
+ }
+
+ HWCDisplayMetrics GetDisplayMetrics() {
+ return hardware_composer_.display_metrics();
+ }
+
+ // Resumes or suspends the hardware composer.
+ void SetActive(bool activated) {
+ if (activated) {
+ hardware_composer_.Resume();
+ } else {
+ hardware_composer_.Suspend();
+ }
+ }
+
+ private:
+ friend BASE;
+ friend DisplaySurface;
+
+ friend class VrDisplayStateService;
+
+ DisplayService();
+ DisplayService(android::Hwc2::Composer* hidl);
+
+ // RPC handlers dispatched from HandleMessage().
+ SystemDisplayMetrics OnGetMetrics(pdx::Message& message);
+ int OnCreateSurface(pdx::Message& message, int width, int height,
+ int format, int usage, DisplaySurfaceFlags flags);
+
+ DisplayRPC::ByteBuffer OnGetEdsCapture(pdx::Message& message);
+
+ int OnEnterVrMode(pdx::Message& message);
+ int OnExitVrMode(pdx::Message& message);
+ void OnSetViewerParams(pdx::Message& message, const ViewerParams& view_params);
+
+ // Called by DisplaySurface to signal that a surface property has changed and
+ // the display manager should be notified.
+ void NotifyDisplayConfigurationUpdate();
+
+ int HandleSurfaceMessage(pdx::Message& message);
+
+ DisplayService(const DisplayService&) = delete;
+ void operator=(const DisplayService&) = delete;
+
+ // Epoll dispatcher shared with DisplaySurface for consumer events.
+ EpollEventDispatcher dispatcher_;
+ HardwareComposer hardware_composer_;
+ DisplayConfigurationUpdateNotifier update_notifier_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
diff --git a/libs/vr/libvrflinger/display_surface.cpp b/libs/vr/libvrflinger/display_surface.cpp
new file mode 100644
index 0000000..e1729f8
--- /dev/null
+++ b/libs/vr/libvrflinger/display_surface.cpp
@@ -0,0 +1,445 @@
+#include "display_surface.h"
+
+#include <utils/Trace.h>
+
+#include <private/dvr/platform_defines.h>
+
+#include "display_service.h"
+#include "hardware_composer.h"
+
+#define LOCAL_TRACE 1
+
+using android::pdx::BorrowedChannelHandle;
+using android::pdx::LocalChannelHandle;
+using android::pdx::Message;
+using android::pdx::RemoteChannelHandle;
+using android::pdx::Status;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::IfAnyOf;
+
+namespace android {
+namespace dvr {
+
+// Constructs the service-side state for one client display surface channel.
+// Buffer geometry/format/usage/flags are fixed at creation time; visibility
+// and z-order attributes start hidden/zero until set by the client and the
+// display manager.
+DisplaySurface::DisplaySurface(DisplayService* service, int surface_id,
+ int process_id, int width, int height,
+ int format, int usage, int flags)
+ : SurfaceChannel(service, surface_id, SurfaceTypeEnum::Normal,
+ sizeof(DisplaySurfaceMetadata)),
+ process_id_(process_id),
+ posted_buffers_(kMaxPostedBuffers),
+ video_mesh_surfaces_updated_(false),
+ width_(width),
+ height_(height),
+ format_(format),
+ usage_(usage),
+ flags_(flags),
+ client_visible_(false),
+ client_z_order_(0),
+ client_exclude_from_blur_(false),
+ client_blur_behind_(false),
+ manager_visible_(false),
+ manager_z_order_(0),
+ manager_blur_(0.0f),
+ allocated_buffer_index_(0),
+ layer_order_(0) {}
+
+DisplaySurface::~DisplaySurface() {
+ ALOGD_IF(LOCAL_TRACE,
+ "DisplaySurface::~DisplaySurface: surface_id=%d process_id=%d",
+ surface_id(), process_id_);
+}
+
+// Manager-side attribute setters, called by the display manager. Each takes
+// lock_ because the attributes are read concurrently by other threads.
+void DisplaySurface::ManagerSetVisible(bool visible) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ manager_visible_ = visible;
+}
+
+void DisplaySurface::ManagerSetZOrder(int z_order) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ manager_z_order_ = z_order;
+}
+
+void DisplaySurface::ManagerSetBlur(float blur) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ manager_blur_ = blur;
+}
+
+// Client-side attribute setters, invoked via OnClientSetAttributes().
+void DisplaySurface::ClientSetVisible(bool visible) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ client_visible_ = visible;
+}
+
+void DisplaySurface::ClientSetZOrder(int z_order) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ client_z_order_ = z_order;
+}
+
+void DisplaySurface::ClientSetExcludeFromBlur(bool exclude_from_blur) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ client_exclude_from_blur_ = exclude_from_blur;
+}
+
+void DisplaySurface::ClientSetBlurBehind(bool blur_behind) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ client_blur_behind_ = blur_behind;
+}
+
+// Returns the number of buffer consumers currently attached to this surface.
+size_t DisplaySurface::GetBufferCount() const {
+ std::lock_guard<std::mutex> autolock(lock_);
+ return buffers_.size();
+}
+
+// Returns a snapshot of all buffer consumers attached to this surface.
+std::vector<std::shared_ptr<BufferConsumer>> DisplaySurface::GetBuffers() {
+  std::lock_guard<std::mutex> autolock(lock_);
+  // Reserve rather than size-construct: constructing the vector with
+  // buffers_.size() elements and then push_back'ing would produce a result
+  // with buffers_.size() leading null pointers before the real consumers.
+  std::vector<std::shared_ptr<BufferConsumer>> return_vector;
+  return_vector.reserve(buffers_.size());
+
+  // Iterate by const reference to avoid copying each map entry (and the
+  // shared_ptr refcount churn that goes with it).
+  for (const auto& pair : buffers_) {
+    return_vector.push_back(pair.second);
+  }
+
+  return return_vector;
+}
+
+// Acquires the newest available posted buffer, skipping at most one older
+// available buffer to catch up after a missed frame. A skipped buffer is
+// returned through |skipped_buffer| (may be null). Returns an empty
+// AcquiredBuffer when nothing is available.
+AcquiredBuffer DisplaySurface::AcquireNewestAvailableBuffer(
+ AcquiredBuffer* skipped_buffer) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ AcquiredBuffer buffer;
+ int frames = 0;
+ // Basic latency stopgap for when the application misses a frame:
+ // If the application recovers on the 2nd or 3rd (etc) frame after
+ // missing, this code will skip frames to catch up by checking if
+ // the next frame is also available.
+ while (!posted_buffers_.IsEmpty() && posted_buffers_.Front().IsAvailable()) {
+ // Capture the skipped buffer into the result parameter.
+ // Note that this API only supports skipping one buffer per vsync.
+ if (frames > 0 && skipped_buffer)
+ *skipped_buffer = std::move(buffer);
+ ++frames;
+ buffer = std::move(posted_buffers_.Front());
+ posted_buffers_.PopFront();
+ // Stop after examining two frames so at most one is skipped.
+ if (frames == 2)
+ break;
+ }
+ return buffer;
+}
+
+// Returns true if the oldest posted buffer is ready to be acquired.
+bool DisplaySurface::IsBufferAvailable() const {
+ std::lock_guard<std::mutex> autolock(lock_);
+ return !posted_buffers_.IsEmpty() && posted_buffers_.Front().IsAvailable();
+}
+
+// Returns true if any buffer has been posted, whether or not it is available.
+bool DisplaySurface::IsBufferPosted() const {
+ std::lock_guard<std::mutex> autolock(lock_);
+ return !posted_buffers_.IsEmpty();
+}
+
+// Acquires the oldest posted buffer unconditionally; logs and returns an
+// empty AcquiredBuffer when nothing is posted.
+AcquiredBuffer DisplaySurface::AcquireCurrentBuffer() {
+ std::lock_guard<std::mutex> autolock(lock_);
+ if (posted_buffers_.IsEmpty()) {
+ ALOGE("Error: attempt to acquire buffer when none are posted.");
+ return AcquiredBuffer();
+ }
+ AcquiredBuffer buffer = std::move(posted_buffers_.Front());
+ posted_buffers_.PopFront();
+ return buffer;
+}
+
+// Creates a fresh, independent consumer channel for every buffer attached to
+// this surface and returns them through |consumers|; the handles may be
+// passed to other processes over IPC. Returns 0 on success or a negated
+// errno value on failure, in which case |consumers| is left untouched.
+int DisplaySurface::GetConsumers(std::vector<LocalChannelHandle>* consumers) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  std::vector<LocalChannelHandle> items;
+  items.reserve(buffers_.size());
+
+  // Iterate by const reference: copying each map entry would needlessly bump
+  // and drop the consumer's shared_ptr refcount.
+  for (const auto& pair : buffers_) {
+    const auto& buffer = pair.second;
+
+    Status<LocalChannelHandle> consumer_channel = buffer->CreateConsumer();
+    if (!consumer_channel) {
+      ALOGE(
+          "DisplaySurface::GetConsumers: Failed to get a new consumer for "
+          "buffer %d: %s",
+          buffer->id(), consumer_channel.GetErrorMessage().c_str());
+      return -consumer_channel.error();
+    }
+
+    items.push_back(consumer_channel.take());
+  }
+
+  *consumers = std::move(items);
+  return 0;
+}
+
+// Dispatches display-surface RPC opcodes to their handlers; any opcode not
+// recognized here falls through to SurfaceChannel::HandleMessage().
+int DisplaySurface::HandleMessage(pdx::Message& message) {
+ switch (message.GetOp()) {
+ case DisplayRPC::SetAttributes::Opcode:
+ DispatchRemoteMethod<DisplayRPC::SetAttributes>(
+ *this, &DisplaySurface::OnClientSetAttributes, message);
+ break;
+
+ case DisplayRPC::AllocateBuffer::Opcode:
+ DispatchRemoteMethod<DisplayRPC::AllocateBuffer>(
+ *this, &DisplaySurface::OnAllocateBuffer, message);
+ break;
+
+ case DisplayRPC::CreateVideoMeshSurface::Opcode:
+ DispatchRemoteMethod<DisplayRPC::CreateVideoMeshSurface>(
+ *this, &DisplaySurface::OnCreateVideoMeshSurface, message);
+ break;
+
+ default:
+ return SurfaceChannel::HandleMessage(message);
+ }
+
+ return 0;
+}
+
+// Applies a batch of client-controlled attributes. Each attribute value is a
+// variant; IfAnyOf<>::Call invokes the setter only when the variant holds one
+// of the listed types, otherwise the attribute is logged and skipped. Always
+// returns 0 — bad attributes are reported, not treated as errors.
+int DisplaySurface::OnClientSetAttributes(
+ pdx::Message& /*message*/, const DisplaySurfaceAttributes& attributes) {
+ for (const auto& attribute : attributes) {
+ const auto& key = attribute.first;
+ const auto* variant = &attribute.second;
+ bool invalid_value = false;
+ switch (key) {
+ case DisplaySurfaceAttributeEnum::ZOrder:
+ invalid_value = !IfAnyOf<int32_t, int64_t, float>::Call(
+ variant, [this](const auto& value) {
+ DisplaySurface::ClientSetZOrder(value);
+ });
+ break;
+ case DisplaySurfaceAttributeEnum::Visible:
+ invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+ variant, [this](const auto& value) {
+ DisplaySurface::ClientSetVisible(value);
+ });
+ break;
+ case DisplaySurfaceAttributeEnum::ExcludeFromBlur:
+ invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+ variant, [this](const auto& value) {
+ DisplaySurface::ClientSetExcludeFromBlur(value);
+ });
+ break;
+ case DisplaySurfaceAttributeEnum::BlurBehind:
+ invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+ variant, [this](const auto& value) {
+ DisplaySurface::ClientSetBlurBehind(value);
+ });
+ break;
+ default:
+ ALOGW(
+ "DisplaySurface::OnClientSetAttributes: Unrecognized attribute %d "
+ "surface_id=%d",
+ key, surface_id());
+ break;
+ }
+
+ if (invalid_value) {
+ ALOGW(
+ "DisplaySurface::OnClientSetAttributes: Failed to set display "
+ "surface attribute '%s' because of incompatible type: %d",
+ DisplaySurfaceAttributeEnum::ToString(key).c_str(), variant->index());
+ }
+ }
+
+ // Attributes affect visibility/z order; let the display manager know.
+ service()->NotifyDisplayConfigurationUpdate();
+ return 0;
+}
+
+// Allocates a new buffer for the DisplaySurface associated with this channel.
+// On success returns the buffer's allocation index paired with the producer
+// channel handle handed back to the client; on failure replies to |message|
+// with an error and returns an empty pair.
+std::pair<uint32_t, LocalChannelHandle> DisplaySurface::OnAllocateBuffer(
+ pdx::Message& message) {
+ // Inject flag to enable framebuffer compression for the application buffers.
+ // TODO(eieio,jbates): Make this configurable per hardware platform.
+ const int usage = usage_ | GRALLOC_USAGE_QCOM_FRAMEBUFFER_COMPRESSION;
+ // Two slices are allocated when the surface carries separate geometry.
+ const int slice_count =
+ (flags_ & static_cast<int>(DisplaySurfaceFlagsEnum::SeparateGeometry))
+ ? 2
+ : 1;
+
+ ALOGI_IF(
+ TRACE,
+ "DisplaySurface::OnAllocateBuffer: width=%d height=%d format=%x usage=%x "
+ "slice_count=%d",
+ width_, height_, format_, usage, slice_count);
+
+ // Create a producer buffer to hand back to the sender.
+ auto producer = BufferProducer::Create(width_, height_, format_, usage,
+ sizeof(uint64_t), slice_count);
+ if (!producer)
+ REPLY_ERROR_RETURN(message, EINVAL, {});
+
+ // Create and import a consumer attached to the producer.
+ Status<LocalChannelHandle> consumer_channel = producer->CreateConsumer();
+ if (!consumer_channel)
+ REPLY_ERROR_RETURN(message, consumer_channel.error(), {});
+
+ std::shared_ptr<BufferConsumer> consumer =
+ BufferConsumer::Import(consumer_channel.take());
+ if (!consumer)
+ REPLY_ERROR_RETURN(message, ENOMEM, {});
+
+ // Add the consumer to this surface.
+ int err = AddConsumer(consumer);
+ if (err < 0) {
+ ALOGE("DisplaySurface::OnAllocateBuffer: failed to add consumer: buffer=%d",
+ consumer->id());
+ REPLY_ERROR_RETURN(message, -err, {});
+ }
+
+ // Move the channel handle so that it doesn't get closed when the producer
+ // goes out of scope.
+ std::pair<uint32_t, LocalChannelHandle> return_value(
+ allocated_buffer_index_, std::move(producer->GetChannelHandle()));
+
+ // Save buffer index, associated with the buffer id so that it can be looked
+ // up later.
+ buffer_id_to_index_[consumer->id()] = allocated_buffer_index_;
+ ++allocated_buffer_index_;
+
+ return return_value;
+}
+
+// Creates a VideoMeshSurface channel layered on top of this display surface
+// and returns its channel handle to the client. Replies EINVAL when system
+// distortion is disabled on this surface and ENOMEM when the channel cannot
+// be pushed or registered.
+RemoteChannelHandle DisplaySurface::OnCreateVideoMeshSurface(
+    pdx::Message& message) {
+  if (flags_ & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION) {
+    // Error text fixed: "distorion" -> "distortion".
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: system distortion is "
+        "disabled on this display surface, cannot create VideoMeshSurface on "
+        "top of it.");
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+  }
+
+  int channel_id;
+  auto status = message.PushChannel(0, nullptr, &channel_id);
+  if (!status) {
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: failed to push channel: %s",
+        status.GetErrorMessage().c_str());
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+  }
+
+  auto surface = std::make_shared<VideoMeshSurface>(service(), channel_id);
+  const int ret = service()->SetChannel(channel_id, surface);
+  if (ret < 0) {
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: failed to set new video "
+        "mesh surface channel: %s",
+        strerror(-ret));
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+  }
+
+  {
+    // Queue the surface for pickup by the compositor and flag the update.
+    std::lock_guard<std::mutex> autolock(lock_);
+    pending_video_mesh_surfaces_.push_back(surface);
+    video_mesh_surfaces_updated_ = true;
+  }
+
+  return status.take();
+}
+
+// Attaches |consumer| to this surface: registers an edge-triggered epoll
+// handler for its channel events and records it in buffers_. Returns 0 on
+// success or a negative error code from the dispatcher. Runs on the displayd
+// message dispatch thread.
+int DisplaySurface::AddConsumer(
+ const std::shared_ptr<BufferConsumer>& consumer) {
+ ALOGD_IF(TRACE, "DisplaySurface::AddConsumer: buffer_id=%d", consumer->id());
+ // Add the consumer to the epoll dispatcher, edge-triggered.
+ int err = service()->dispatcher_.AddEventHandler(
+ consumer->event_fd(), EPOLLET | EPOLLIN | EPOLLHUP,
+ std::bind(&DisplaySurface::HandleConsumerEvents,
+ std::static_pointer_cast<DisplaySurface>(shared_from_this()),
+ consumer, std::placeholders::_1));
+ if (err) {
+ ALOGE(
+ "DisplaySurface::AddConsumer: failed to add epoll event handler for "
+ "consumer: %s",
+ strerror(-err));
+ return err;
+ }
+
+ // Add the consumer to the list of buffers for this surface.
+ std::lock_guard<std::mutex> autolock(lock_);
+ buffers_.insert(std::make_pair(consumer->id(), consumer));
+ return 0;
+}
+
+// Detaches |consumer|: unregisters its epoll handler and drops it from
+// buffers_ under lock_. Runs on the epoll event thread.
+void DisplaySurface::RemoveConsumer(
+ const std::shared_ptr<BufferConsumer>& consumer) {
+ ALOGD_IF(TRACE, "DisplaySurface::RemoveConsumer: buffer_id=%d",
+ consumer->id());
+ service()->dispatcher_.RemoveEventHandler(consumer->event_fd());
+
+ std::lock_guard<std::mutex> autolock(lock_);
+ buffers_.erase(consumer->id());
+}
+
+// Same as RemoveConsumer() but the caller must already hold lock_. Runs on
+// the epoll and display post threads.
+void DisplaySurface::RemoveConsumerUnlocked(
+ const std::shared_ptr<BufferConsumer>& consumer) {
+ ALOGD_IF(TRACE, "DisplaySurface::RemoveConsumerUnlocked: buffer_id=%d",
+ consumer->id());
+ service()->dispatcher_.RemoveEventHandler(consumer->event_fd());
+ buffers_.erase(consumer->id());
+}
+
+// Appends |consumer|'s newly posted buffer to the pending queue, evicting
+// the most recently queued entry if the queue is full. If acquiring the
+// buffer reveals the other end hung up (-EPIPE), the consumer is detached.
+// Runs on the epoll event thread.
+void DisplaySurface::OnPostConsumer(
+ const std::shared_ptr<BufferConsumer>& consumer) {
+ ATRACE_NAME("DisplaySurface::OnPostConsumer");
+ std::lock_guard<std::mutex> autolock(lock_);
+
+ if (posted_buffers_.IsFull()) {
+ ALOGE("Error: posted buffers full, overwriting");
+ posted_buffers_.PopBack();
+ }
+
+ int error;
+ posted_buffers_.Append(AcquiredBuffer(consumer, &error));
+
+ // Remove the consumer if the other end was closed.
+ if (posted_buffers_.Back().IsEmpty() && error == -EPIPE)
+ RemoveConsumerUnlocked(consumer);
+}
+
+// Epoll callback for consumer channel events. EPOLLHUP means the channel's
+// other end went away, so the consumer is detached; EPOLLIN signals that
+// buffer ownership transferred to the consumer, i.e. a frame was posted.
+void DisplaySurface::HandleConsumerEvents(
+ const std::shared_ptr<BufferConsumer>& consumer, int events) {
+ auto status = consumer->GetEventMask(events);
+ if (!status) {
+ ALOGW(
+ "DisplaySurface::HandleConsumerEvents: Failed to get event mask for "
+ "consumer: %s",
+ status.GetErrorMessage().c_str());
+ return;
+ }
+
+ events = status.get();
+ if (events & EPOLLHUP) {
+ ALOGD_IF(TRACE,
+ "DisplaySurface::HandleConsumerEvents: removing event handler for "
+ "buffer=%d",
+ consumer->id());
+ RemoveConsumer(consumer);
+ } else if (events & EPOLLIN) {
+ // BufferHub uses EPOLLIN to signal consumer ownership.
+ ALOGD_IF(TRACE,
+ "DisplaySurface::HandleConsumerEvents: posting buffer=%d for "
+ "process=%d",
+ consumer->id(), process_id_);
+
+ OnPostConsumer(consumer);
+ }
+}
+
+// Drains the pending video mesh surface list, promoting each weak reference
+// to a shared_ptr. Entries whose surface has already been destroyed are
+// logged and dropped. Clears the pending list and the updated flag.
+std::vector<std::shared_ptr<VideoMeshSurface>>
+DisplaySurface::GetVideoMeshSurfaces() {
+  std::lock_guard<std::mutex> autolock(lock_);
+
+  std::vector<std::shared_ptr<VideoMeshSurface>> locked_surfaces;
+  for (const auto& weak_surface : pending_video_mesh_surfaces_) {
+    auto strong_surface = weak_surface.lock();
+    if (!strong_surface) {
+      ALOGE("Unable to lock video mesh surface.");
+      continue;
+    }
+    locked_surfaces.push_back(strong_surface);
+  }
+
+  // The pending list has been consumed; reset the update flag as well.
+  pending_video_mesh_surfaces_.clear();
+  video_mesh_surfaces_updated_ = false;
+  return locked_surfaces;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_surface.h b/libs/vr/libvrflinger/display_surface.h
new file mode 100644
index 0000000..b7bcd97
--- /dev/null
+++ b/libs/vr/libvrflinger/display_surface.h
@@ -0,0 +1,211 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
+
+#include <pdx/file_handle.h>
+#include <pdx/service.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/ring_buffer.h>
+
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "acquired_buffer.h"
+#include "epoll_event_dispatcher.h"
+#include "surface_channel.h"
+#include "video_mesh_surface.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+// DisplaySurface is the service-side notion of a client display context. It is
+// responsible for managing display buffer format, geometry, and state, and
+// maintains the buffer consumers connected to the client.
+class DisplaySurface : public SurfaceChannel {
+ public:
+ DisplaySurface(DisplayService* service, int surface_id, int process_id,
+ int width, int height, int format, int usage, int flags);
+ ~DisplaySurface() override;
+
+ // Immutable parameters fixed at surface creation time.
+ int process_id() const { return process_id_; }
+ int width() const { return width_; }
+ int height() const { return height_; }
+ int format() const { return format_; }
+ int usage() const { return usage_; }
+ int flags() const { return flags_; }
+
+ // Attributes set by the client over IPC (see OnClientSetAttributes).
+ bool client_visible() const { return client_visible_; }
+ int client_z_order() const { return client_z_order_; }
+ bool client_exclude_from_blur() const { return client_exclude_from_blur_; }
+ bool client_blur_behind() const { return client_blur_behind_; }
+
+ // Attributes set by the display manager.
+ bool manager_visible() const { return manager_visible_; }
+ int manager_z_order() const { return manager_z_order_; }
+ float manager_blur() const { return manager_blur_; }
+
+ bool video_mesh_surfaces_updated() const {
+ return video_mesh_surfaces_updated_;
+ }
+
+ // Maps the surface metadata blob and returns a pointer to it, or nullptr
+ // when the metadata buffer could not be created or mapped.
+ volatile const DisplaySurfaceMetadata* GetMetadataBufferPtr() {
+ if (EnsureMetadataBuffer()) {
+ void* addr = nullptr;
+ metadata_buffer_->GetBlobReadWritePointer(metadata_size(), &addr);
+ return static_cast<const volatile DisplaySurfaceMetadata*>(addr);
+ } else {
+ return nullptr;
+ }
+ }
+
+ // Returns the allocation index recorded for buffer_id. NOTE(review): an
+ // unknown id default-inserts an index of 0 via operator[].
+ uint32_t GetRenderBufferIndex(int buffer_id) {
+ return buffer_id_to_index_[buffer_id];
+ }
+
+ size_t GetBufferCount() const;
+ std::vector<std::shared_ptr<BufferConsumer>> GetBuffers();
+
+ // Gets a new set of consumers for all of the surface's buffers. These
+ // consumers are independent from the consumers maintained internally to the
+ // surface and may be passed to other processes over IPC.
+ int GetConsumers(std::vector<pdx::LocalChannelHandle>* consumers);
+
+ // Applies |action| to each (buffer id, consumer) entry while holding lock_.
+ template <class A>
+ void ForEachBuffer(A action) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ std::for_each(buffers_.begin(), buffers_.end(), action);
+ }
+
+ bool IsBufferAvailable() const;
+ bool IsBufferPosted() const;
+ AcquiredBuffer AcquireCurrentBuffer();
+
+ // Get the newest buffer. Up to one buffer will be skipped. If a buffer is
+ // skipped, it will be stored in skipped_buffer if non null.
+ AcquiredBuffer AcquireNewestAvailableBuffer(AcquiredBuffer* skipped_buffer);
+
+ // Display manager interface to control visibility and z order.
+ void ManagerSetVisible(bool visible);
+ void ManagerSetZOrder(int z_order);
+ void ManagerSetBlur(float blur);
+
+ // A surface must be set visible by both the client and the display manager to
+ // be visible on screen.
+ bool IsVisible() const { return client_visible_ && manager_visible_; }
+
+ // A surface is blurred if the display manager requests it.
+ bool IsBlurred() const { return manager_blur_ > 0.0f; }
+
+ // Set by HardwareComposer to the current logical layer order of this surface.
+ void SetLayerOrder(int layer_order) { layer_order_ = layer_order; }
+ // Gets the unique z-order index of this surface among other visible surfaces.
+ // This is not the same as the hardware layer index, as not all display
+ // surfaces map directly to hardware layers. Lower layer orders should be
+ // composited underneath higher layer orders.
+ int layer_order() const { return layer_order_; }
+
+ // Lock all video mesh surfaces so that VideoMeshCompositor can access them.
+ std::vector<std::shared_ptr<VideoMeshSurface>> GetVideoMeshSurfaces();
+
+ private:
+ friend class DisplayService;
+
+ // The capacity of the pending buffer queue. Should be enough to hold all the
+ // buffers of this DisplaySurface, although in practice only 1 or 2 frames
+ // will be pending at a time.
+ static constexpr int kMaxPostedBuffers =
+ kSurfaceBufferMaxCount * kSurfaceViewMaxCount;
+
+ // Returns whether a frame is available without locking the mutex.
+ bool IsFrameAvailableNoLock() const;
+
+ // Handles epoll events for BufferHub consumers. Events are mainly generated
+ // by producers posting buffers ready for display. This handler runs on the
+ // epoll event thread.
+ void HandleConsumerEvents(const std::shared_ptr<BufferConsumer>& consumer,
+ int events);
+
+ // Dispatches display surface messages to the appropriate handlers. This
+ // handler runs on the displayd message dispatch thread.
+ int HandleMessage(pdx::Message& message) override;
+
+ // Sets display surface's client-controlled attributes.
+ int OnClientSetAttributes(pdx::Message& message,
+ const DisplaySurfaceAttributes& attributes);
+
+ // Allocates a buffer with the display surface geometry and settings and
+ // returns it to the client.
+ std::pair<uint32_t, pdx::LocalChannelHandle> OnAllocateBuffer(
+ pdx::Message& message);
+
+ // Creates a video mesh surface associated with this surface and returns it
+ // to the client.
+ pdx::RemoteChannelHandle OnCreateVideoMeshSurface(pdx::Message& message);
+
+ // Sets the current buffer for the display surface, discarding the previous
+ // buffer if it is not already claimed. Runs on the epoll event thread.
+ void OnPostConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Client interface (called through IPC) to set visibility and z order.
+ void ClientSetVisible(bool visible);
+ void ClientSetZOrder(int z_order);
+ void ClientSetExcludeFromBlur(bool exclude_from_blur);
+ void ClientSetBlurBehind(bool blur_behind);
+
+ // Runs on the displayd message dispatch thread.
+ int AddConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Runs on the epoll event thread.
+ void RemoveConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Runs on the epoll and display post thread.
+ void RemoveConsumerUnlocked(const std::shared_ptr<BufferConsumer>& consumer);
+
+ DisplaySurface(const DisplaySurface&) = delete;
+ void operator=(const DisplaySurface&) = delete;
+
+ int process_id_;
+
+ // Synchronizes access to mutable state below between message dispatch thread,
+ // epoll event thread, and frame post thread.
+ mutable std::mutex lock_;
+ std::unordered_map<int, std::shared_ptr<BufferConsumer>> buffers_;
+
+ // In a triple-buffered surface, up to kMaxPostedBuffers buffers may be
+ // posted and pending.
+ RingBuffer<AcquiredBuffer> posted_buffers_;
+
+ // Provides access to VideoMeshSurface. Here we don't want to increase
+ // the reference count immediately on allocation, will leave it into
+ // compositor's hand.
+ std::vector<std::weak_ptr<VideoMeshSurface>> pending_video_mesh_surfaces_;
+ volatile bool video_mesh_surfaces_updated_;
+
+ // Surface parameters.
+ int width_;
+ int height_;
+ int format_;
+ int usage_;
+ int flags_;
+ bool client_visible_;
+ int client_z_order_;
+ bool client_exclude_from_blur_;
+ bool client_blur_behind_;
+ bool manager_visible_;
+ int manager_z_order_;
+ float manager_blur_;
+ // The monotonically increasing index for allocated buffers in this surface.
+ uint32_t allocated_buffer_index_;
+ int layer_order_;
+
+ // Maps from the buffer id to the corresponding allocated buffer index.
+ std::unordered_map<int, uint32_t> buffer_id_to_index_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
diff --git a/libs/vr/libvrflinger/epoll_event_dispatcher.cpp b/libs/vr/libvrflinger/epoll_event_dispatcher.cpp
new file mode 100644
index 0000000..b37e76e
--- /dev/null
+++ b/libs/vr/libvrflinger/epoll_event_dispatcher.cpp
@@ -0,0 +1,142 @@
+#include "epoll_event_dispatcher.h"
+
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/prctl.h>
+
+#include <dvr/performance_client_api.h>
+
+namespace android {
+namespace dvr {
+
+// Sets up the epoll set, the wake-up eventfd, and the dispatch thread. Any
+// setup failure is logged and leaves the dispatcher inert: the thread is
+// started only after all fds are valid.
+EpollEventDispatcher::EpollEventDispatcher()
+ : exit_thread_(false), epoll_fd_(-1), event_fd_(-1) {
+ epoll_fd_ = epoll_create(64);
+ if (epoll_fd_ < 0) {
+ ALOGE("Failed to create epoll fd: %s", strerror(errno));
+ return;
+ }
+
+ event_fd_ = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (event_fd_ < 0) {
+ ALOGE("Failed to create event for epolling: %s", strerror(errno));
+ return;
+ }
+
+ // Add watch for eventfd. This should only watch for EPOLLIN, which gets set
+ // when eventfd_write occurs. Use "this" as a unique sentinel value to
+ // identify events from the event fd.
+ epoll_event event = {.events = EPOLLIN, .data = {.ptr = this}};
+ if (epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, event_fd_, &event) < 0) {
+ ALOGE("Failed to add eventfd to epoll set because: %s", strerror(errno));
+ return;
+ }
+
+ thread_ = std::thread(&EpollEventDispatcher::EventThread, this);
+}
+
+// Stops and joins the dispatch thread, then tears down the fds.
+EpollEventDispatcher::~EpollEventDispatcher() {
+  Stop();
+
+  // Join before closing: destroying a joinable std::thread calls
+  // std::terminate(), and the thread could otherwise race against the
+  // close() calls below while still polling the fds.
+  if (thread_.joinable())
+    thread_.join();
+
+  close(epoll_fd_);
+  close(event_fd_);
+}
+
+// Requests the dispatch thread to exit and wakes it via the eventfd. Note
+// that this does not join the thread.
+void EpollEventDispatcher::Stop() {
+ exit_thread_.store(true);
+ eventfd_write(event_fd_, 1);
+}
+
+// Registers |handler| to be invoked on the dispatch thread when |fd| signals
+// any event in |event_mask|. Returns 0 on success or -errno on failure.
+int EpollEventDispatcher::AddEventHandler(int fd, int event_mask,
+                                          Handler handler) {
+  std::lock_guard<std::mutex> lock(lock_);
+
+  // The epoll data pointer references the handler stored in the map, so the
+  // map entry must outlive the epoll registration.
+  epoll_event event;
+  event.events = event_mask;
+  event.data.ptr = &(handlers_[fd] = handler);
+
+  ALOGD_IF(
+      TRACE,
+      "EpollEventDispatcher::AddEventHandler: fd=%d event_mask=0x%x handler=%p",
+      fd, event_mask, event.data.ptr);
+
+  if (epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, fd, &event) < 0) {
+    const int error = -errno;
+    // Don't leave a stale handler behind when registration fails.
+    handlers_.erase(fd);
+    return error;
+  }
+  return 0;
+}
+
+// Unregisters |fd| from the epoll set. The handler object itself is erased
+// later by the dispatch thread (via removed_handlers_) so that a handler
+// possibly executing right now is not destroyed under it. Returns 0 on
+// success or -errno on failure.
+int EpollEventDispatcher::RemoveEventHandler(int fd) {
+ ALOGD_IF(TRACE, "EpollEventDispatcher::RemoveEventHandler: fd=%d", fd);
+ std::lock_guard<std::mutex> lock(lock_);
+
+ epoll_event dummy; // See BUGS in man 2 epoll_ctl.
+ if (epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, fd, &dummy) < 0) {
+ ALOGE("Failed to remove fd from epoll set because: %s", strerror(errno));
+ return -errno;
+ }
+
+ // If the fd was valid above, add it to the list of ids to remove.
+ removed_handlers_.push_back(fd);
+
+ // Wake up the event thread to clean up.
+ eventfd_write(event_fd_, 1);
+
+ return 0;
+}
+
+// Dispatch loop. Names the thread, raises it to the graphics scheduler
+// class, then services epoll events until Stop() is observed. Handler map
+// cleanup requested by RemoveEventHandler() happens at the end of each
+// wakeup, when no handler can be mid-dispatch.
+void EpollEventDispatcher::EventThread() {
+ prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("EpollEvent"), 0, 0, 0);
+
+ const int error = dvrSetSchedulerClass(0, "graphics");
+ LOG_ALWAYS_FATAL_IF(
+ error < 0,
+ "EpollEventDispatcher::EventThread: Failed to set scheduler class: %s",
+ strerror(-error));
+
+ const size_t kMaxNumEvents = 128;
+ epoll_event events[kMaxNumEvents];
+
+ while (!exit_thread_.load()) {
+ int num_events = epoll_wait(epoll_fd_, events, kMaxNumEvents, -1);
+ // EINTR is benign; any other failure ends the loop.
+ if (num_events < 0 && errno != EINTR)
+ break;
+
+ ALOGD_IF(TRACE, "EpollEventDispatcher::EventThread: num_events=%d",
+ num_events);
+
+ for (int i = 0; i < num_events; i++) {
+ ALOGD_IF(
+ TRACE,
+ "EpollEventDispatcher::EventThread: event %d: handler=%p events=0x%x",
+ i, events[i].data.ptr, events[i].events);
+
+ if (events[i].data.ptr == this) {
+ // Clear pending event on event_fd_. Serialize the read with respect to
+ // writes from other threads.
+ std::lock_guard<std::mutex> lock(lock_);
+ eventfd_t value;
+ eventfd_read(event_fd_, &value);
+ } else {
+ auto handler = reinterpret_cast<Handler*>(events[i].data.ptr);
+ if (handler)
+ (*handler)(events[i].events);
+ }
+ }
+
+ // Remove any handlers that have been posted for removal. This is done here
+ // instead of in RemoveEventHandler() to prevent races between the dispatch
+ // thread and the code requesting the removal. Handlers are guaranteed to
+ // stay alive between exiting epoll_wait() and the dispatch loop above.
+ std::lock_guard<std::mutex> lock(lock_);
+ for (auto handler_fd : removed_handlers_) {
+ ALOGD_IF(TRACE,
+ "EpollEventDispatcher::EventThread: removing handler: fd=%d",
+ handler_fd);
+ handlers_.erase(handler_fd);
+ }
+ removed_handlers_.clear();
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/epoll_event_dispatcher.h b/libs/vr/libvrflinger/epoll_event_dispatcher.h
new file mode 100644
index 0000000..43bca2e
--- /dev/null
+++ b/libs/vr/libvrflinger/epoll_event_dispatcher.h
@@ -0,0 +1,61 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
+
+#include <sys/epoll.h>
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace dvr {
+
+// Runs a dedicated thread that epoll-waits on registered file descriptors
+// and invokes the matching handler for each event.
+class EpollEventDispatcher {
+ public:
+ // Function type for event handlers. The handler receives a bitmask of the
+ // epoll events that occurred on the file descriptor associated with the
+ // handler.
+ using Handler = std::function<void(int)>;
+
+ // Creates the epoll set and eventfd and starts the dispatch thread.
+ EpollEventDispatcher();
+ ~EpollEventDispatcher();
+
+ // |handler| is called on the internal dispatch thread when |fd| is signaled
+ // by events in |event_mask|.
+ // Return 0 on success or a negative error code on failure.
+ int AddEventHandler(int fd, int event_mask, Handler handler);
+ int RemoveEventHandler(int fd);
+
+ // Signals the dispatch thread to exit and wakes it; does not join.
+ void Stop();
+
+ private:
+ void EventThread();
+
+ std::thread thread_;
+ std::atomic<bool> exit_thread_;
+
+ // Protects handlers_ and removed_handlers_ and serializes operations on
+ // epoll_fd_ and event_fd_.
+ std::mutex lock_;
+
+ // Maintains a map of fds to event handlers. This is primarily to keep any
+ // references alive that may be bound in the std::function instances. It is
+ // not used at dispatch time to avoid performance problems with different
+ // versions of std::unordered_map.
+ std::unordered_map<int, Handler> handlers_;
+
+ // List of fds to be removed from the map. The actual removal is performed
+ // by the event dispatch thread to avoid races.
+ std::vector<int> removed_handlers_;
+
+ // -1 when creation failed; see the constructor.
+ int epoll_fd_;
+ int event_fd_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
diff --git a/libs/vr/libvrflinger/hardware_composer.cpp b/libs/vr/libvrflinger/hardware_composer.cpp
new file mode 100644
index 0000000..cc08209
--- /dev/null
+++ b/libs/vr/libvrflinger/hardware_composer.cpp
@@ -0,0 +1,1576 @@
+#include "hardware_composer.h"
+
+#include <log/log.h>
+#include <cutils/properties.h>
+#include <cutils/sched_policy.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sync/sync.h>
+#include <sys/eventfd.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/system_properties.h>
+#include <sys/timerfd.h>
+#include <unistd.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+
+#include <dvr/performance_client_api.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/pose_client_internal.h>
+#include <private/dvr/sync_util.h>
+
+#include "debug_hud_data.h"
+#include "screenshot_service.h"
+
+using android::pdx::LocalHandle;
+
+namespace android {
+namespace dvr {
+
namespace {

// Upper bound on present fences still pending when we are about to submit a
// new frame to HWC; exceeding it means the display driver has begun queuing
// frames, and we drop the frame to catch up. Note that with smart displays
// (with RAM), the fence is signaled earlier than the next vsync, at the point
// when the DMA to the display completes. Currently we use a smart display and
// the EDS timing coincides with zero pending fences, so this is 0.
constexpr int kAllowedPendingFenceCount = 0;

// Skip the frame when we estimate missing vsync by more than this amount.
constexpr int64_t kFrameSkipThresholdNs = 4000000;  // 4ms

// Counter PostLayers() deficiency by requiring apps to produce a frame at
// least 2.5ms before vsync. See b/28881672.
constexpr int64_t kFrameTimeEstimateMin = 2500000;  // 2.5ms

constexpr size_t kDefaultDisplayConfigCount = 32;

constexpr float kMetersPerInch = 0.0254f;

const char kBacklightBrightnessSysFile[] =
    "/sys/class/leds/lcd-backlight/brightness";

const char kPrimaryDisplayVSyncEventFile[] =
    "/sys/class/graphics/fb0/vsync_event";

const char kPrimaryDisplayWaitPPEventFile[] = "/sys/class/graphics/fb0/wait_pp";

const char kDvrPerformanceProperty[] = "sys.dvr.performance";

const char kRightEyeOffsetProperty[] = "dreamos.right_eye_offset_ns";

// Best guess for the time the compositor will spend rendering the next
// frame. The cost of asynchronous EDS and lens warp is currently measured at
// 2.5ms for one layer and 7ms for two layers, but we guess a higher frame
// time to account for CPU overhead. This guess is only used before we've
// measured the actual time to render a frame for the current compositor
// configuration.
int64_t GuessFrameTime(int compositor_visible_layer_count) {
  if (compositor_visible_layer_count == 0)
    return 500000;  // .5ms
  if (compositor_visible_layer_count == 1)
    return 5000000;  // 5ms
  return 10500000;  // 10.5ms
}

// Time offset from a vsync to when the pose for that vsync should be
// predicted out to. For example, if scanout gets halfway through the frame at
// the halfway point between vsyncs, then this could be half the period. With
// global shutter displays, this should be changed to the offset to when
// illumination begins. Low persistence adds a frame of latency, so we predict
// to the center of the next frame: 1.5 vsync periods out.
inline int64_t GetPosePredictionTimeOffset(int64_t vsync_period_ns) {
  return (vsync_period_ns * 3) / 2;
}

}  // anonymous namespace
+
// Default constructor: delegates to the main constructor with a null composer
// interface. NOTE(review): hwc2_hidl_ is dereferenced without null checks in
// Resume() and elsewhere, so presumably callers using this constructor supply
// the composer by other means -- confirm against callers.
HardwareComposer::HardwareComposer()
    : HardwareComposer(nullptr) {
}
+
// Constructs the composer around the given HWC2 HIDL interface. All file
// descriptors start invalid and the post thread starts paused; Resume()
// performs the real hardware setup.
HardwareComposer::HardwareComposer(Hwc2::Composer* hwc2_hidl)
    : hwc2_hidl_(hwc2_hidl),
      display_transform_(HWC_TRANSFORM_NONE),
      display_surfaces_updated_(false),
      hardware_layers_need_update_(false),
      display_on_(false),
      active_layer_count_(0),
      gpu_layer_(nullptr),
      terminate_post_thread_event_fd_(-1),
      pause_post_thread_(true),
      backlight_brightness_fd_(-1),
      primary_display_vsync_event_fd_(-1),
      primary_display_wait_pp_fd_(-1),
      vsync_sleep_timer_fd_(-1),
      last_vsync_timestamp_(0),
      vsync_count_(0),
      frame_skip_count_(0),
      pose_client_(nullptr) {
  // Point layers_ at the elements of layer_storage_; presumably this lets the
  // pointer array be reordered without moving the Layer objects themselves.
  std::transform(layer_storage_.begin(), layer_storage_.end(), layers_.begin(),
                 [](auto& layer) { return &layer; });

  callbacks_ = new ComposerCallback;
}
+
// Suspends the composer (stopping the post thread and releasing display
// resources) if it is still running at destruction time.
HardwareComposer::~HardwareComposer(void) {
  if (!IsSuspended()) {
    Suspend();
  }
}
+
+bool HardwareComposer::Resume() {
+ std::lock_guard<std::mutex> autolock(layer_mutex_);
+
+ if (!IsSuspended()) {
+ ALOGE("HardwareComposer::Resume: HardwareComposer is already running.");
+ return false;
+ }
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ static const uint32_t attributes[] = {
+ HWC_DISPLAY_WIDTH, HWC_DISPLAY_HEIGHT, HWC_DISPLAY_VSYNC_PERIOD,
+ HWC_DISPLAY_DPI_X, HWC_DISPLAY_DPI_Y, HWC_DISPLAY_NO_ATTRIBUTE,
+ };
+
+ std::vector<Hwc2::Config> configs;
+ ret = (int32_t)hwc2_hidl_->getDisplayConfigs(HWC_DISPLAY_PRIMARY, &configs);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display configs");
+ return false;
+ }
+
+ uint32_t num_configs = configs.size();
+
+ for (size_t i = 0; i < num_configs; i++) {
+ ALOGI("HardwareComposer: cfg[%zd/%zd] = 0x%08x", i, num_configs,
+ configs[i]);
+
+ ret = GetDisplayMetrics(HWC_DISPLAY_PRIMARY, configs[i],
+ &native_display_metrics_);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display attributes %d", ret);
+ continue;
+ } else {
+ ret =
+ (int32_t)hwc2_hidl_->setActiveConfig(HWC_DISPLAY_PRIMARY, configs[i]);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to set display configuration; ret=%d",
+ ret);
+ continue;
+ }
+
+ break;
+ }
+ }
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Could not set a valid display configuration.");
+ return false;
+ }
+
+ // Set the display metrics but never use rotation to avoid the long latency of
+ // rotation processing in hwc.
+ display_transform_ = HWC_TRANSFORM_NONE;
+ display_metrics_ = native_display_metrics_;
+
+ ALOGI(
+ "HardwareComposer: primary display attributes: width=%d height=%d "
+ "vsync_period_ns=%d DPI=%dx%d",
+ native_display_metrics_.width, native_display_metrics_.height,
+ native_display_metrics_.vsync_period_ns, native_display_metrics_.dpi.x,
+ native_display_metrics_.dpi.y);
+
+ // Always turn off vsync when we start.
+ EnableVsync(false);
+
+ constexpr int format = HAL_PIXEL_FORMAT_RGBA_8888;
+ constexpr int usage =
+ GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER;
+
+ framebuffer_target_ = std::make_shared<IonBuffer>(
+ native_display_metrics_.width, native_display_metrics_.height, format,
+ usage);
+
+ // Associate each Layer instance with a hardware composer layer.
+ for (auto layer : layers_) {
+ layer->Initialize(hwc2_hidl_.get(), &native_display_metrics_);
+ }
+
+#if ENABLE_BACKLIGHT_BRIGHTNESS
+ // TODO(hendrikw): This isn't required at the moment. It's possible that there
+ // is another method to access this when needed.
+ // Open the backlight brightness control sysfs node.
+ backlight_brightness_fd_ = LocalHandle(kBacklightBrightnessSysFile, O_RDWR);
+ ALOGW_IF(!backlight_brightness_fd_,
+ "HardwareComposer: Failed to open backlight brightness control: %s",
+ strerror(errno));
+#endif // ENABLE_BACKLIGHT_BRIGHTNESS
+
+ // Open the vsync event node for the primary display.
+ // TODO(eieio): Move this into a platform-specific class.
+ primary_display_vsync_event_fd_ =
+ LocalHandle(kPrimaryDisplayVSyncEventFile, O_RDONLY);
+ ALOGE_IF(!primary_display_vsync_event_fd_,
+ "HardwareComposer: Failed to open vsync event node for primary "
+ "display: %s",
+ strerror(errno));
+
+ // Open the wait pingpong status node for the primary display.
+ // TODO(eieio): Move this into a platform-specific class.
+ primary_display_wait_pp_fd_ =
+ LocalHandle(kPrimaryDisplayWaitPPEventFile, O_RDONLY);
+ ALOGE_IF(
+ !primary_display_wait_pp_fd_,
+ "HardwareComposer: Failed to open wait_pp node for primary display: %s",
+ strerror(errno));
+
+ // Create a timerfd based on CLOCK_MONOTINIC.
+ vsync_sleep_timer_fd_.Reset(timerfd_create(CLOCK_MONOTONIC, 0));
+ LOG_ALWAYS_FATAL_IF(
+ !vsync_sleep_timer_fd_,
+ "HardwareComposer: Failed to create vsync sleep timerfd: %s",
+ strerror(errno));
+
+ // Connect to pose service.
+ pose_client_ = dvrPoseCreate();
+ ALOGE_IF(!pose_client_, "HardwareComposer: Failed to create pose client");
+
+ // Variables used to control the post thread state
+ pause_post_thread_ = false;
+ terminate_post_thread_event_fd_.Reset(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
+
+ LOG_ALWAYS_FATAL_IF(
+ !terminate_post_thread_event_fd_,
+ "HardwareComposer: Failed to create terminate PostThread event fd : %s",
+ strerror(errno));
+
+ // If get_id() is the default thread::id object, it has not been created yet
+ if (post_thread_.get_id() == std::thread::id()) {
+ post_thread_ = std::thread(&HardwareComposer::PostThread, this);
+ } else {
+ UpdateDisplayState();
+ thread_pause_semaphore_.notify_one();
+ }
+
+ return true;
+}
+
+bool HardwareComposer::Suspend() {
+ // Wait for any pending layer operations to finish
+ std::unique_lock<std::mutex> layer_lock(layer_mutex_);
+
+ if (IsSuspended()) {
+ ALOGE("HardwareComposer::Suspend: HardwareComposer is already suspended.");
+ return false;
+ }
+
+ PausePostThread();
+
+ EnableVsync(false);
+ SetPowerMode(HWC_DISPLAY_PRIMARY, HWC2_POWER_MODE_OFF);
+
+ backlight_brightness_fd_.Close();
+ primary_display_vsync_event_fd_.Close();
+ primary_display_wait_pp_fd_.Close();
+ vsync_sleep_timer_fd_.Close();
+ retire_fence_fds_.clear();
+ gpu_layer_ = nullptr;
+
+ // We have to destroy the layers before we close the hwc device
+ for (size_t i = 0; i < kMaxHardwareLayers; ++i) {
+ layers_[i]->Reset();
+ }
+
+ active_layer_count_ = 0;
+
+ framebuffer_target_.reset();
+
+ //hwc2_hidl_.reset();
+
+ if (pose_client_)
+ dvrPoseDestroy(pose_client_);
+
+ return true;
+}
+
// Requests the post thread to park: sets the pause flag, writes the terminate
// event fd to break the thread out of poll() in BlockUntilVSync(), then
// acquires thread_pause_mutex_ -- which the post thread holds while running
// and releases only while waiting on thread_pause_semaphore_ -- so this
// returns once the thread is actually parked. Finally closes the event fd.
void HardwareComposer::PausePostThread() {
  pause_post_thread_ = true;

  int error = eventfd_write(terminate_post_thread_event_fd_.Get(), 1);
  ALOGE_IF(error,
           "HardwareComposer::PausePostThread: could not write post "
           "thread termination event fd : %d",
           error);

  // Blocks until the post thread releases the mutex (i.e. is parked).
  std::unique_lock<std::mutex> wait_for_thread(thread_pause_mutex_);
  terminate_post_thread_event_fd_.Close();
}
+
// Derives HMD-oriented display metrics (physical pixel size, orientation,
// frame duration, border size) from the raw hwc display metrics.
DisplayMetrics HardwareComposer::GetHmdDisplayMetrics() const {
  vec2i screen_size(display_metrics_.width, display_metrics_.height);
  DisplayOrientation orientation =
      (display_metrics_.width > display_metrics_.height
           ? DisplayOrientation::kLandscape
           : DisplayOrientation::kPortrait);
  // hwc reports DPI scaled by 1000 (hence the division); convert to
  // dots-per-inch before computing the physical pixel pitch.
  float dpi_x = static_cast<float>(display_metrics_.dpi.x) / 1000.0f;
  float dpi_y = static_cast<float>(display_metrics_.dpi.y) / 1000.0f;
  float meters_per_pixel_x = kMetersPerInch / dpi_x;
  float meters_per_pixel_y = kMetersPerInch / dpi_y;
  vec2 meters_per_pixel(meters_per_pixel_x, meters_per_pixel_y);
  double frame_duration_s =
      static_cast<double>(display_metrics_.vsync_period_ns) / 1000000000.0;
  // TODO(hendrikw): Hard coding to 3mm. The Pixel is actually 4mm, but it
  //                 seems that their tray to lens distance is wrong too, which
  //                 offsets this, at least for the pixel.
  float border_size = 0.003f;
  return DisplayMetrics(screen_size, meters_per_pixel, border_size,
                        static_cast<float>(frame_duration_s), orientation);
}
+
// Validates the current layer stack on |display| with hwc. If hwc reports
// composition-type changes, they are accepted immediately. Returns the hwc
// error code (HWC2_ERROR_NONE on success).
int32_t HardwareComposer::Validate(hwc2_display_t display) {
  uint32_t num_types;
  uint32_t num_requests;
  int32_t error =
      (int32_t)hwc2_hidl_->validateDisplay(display, &num_types, &num_requests);

  if (error == HWC2_ERROR_HAS_CHANGES) {
    // TODO(skiazyk): We might need to inspect the requested changes first, but
    // so far it seems like we shouldn't ever hit a bad state.
    // error = hwc2_funcs_.accept_display_changes_fn_(hardware_composer_device_,
    // display);
    error = (int32_t)hwc2_hidl_->acceptDisplayChanges(display);
  }

  return error;
}
+
// Enables or disables vsync event generation on the primary display.
// Returns the hwc error code.
int32_t HardwareComposer::EnableVsync(bool enabled) {
  return (int32_t)hwc2_hidl_->setVsyncEnabled(
      HWC_DISPLAY_PRIMARY,
      (Hwc2::IComposerClient::Vsync)(enabled ? HWC2_VSYNC_ENABLE
                                             : HWC2_VSYNC_DISABLE));
}
+
// Presents the validated layer stack on |display| and, on success, queues the
// returned present fence in retire_fence_fds_ so PostLayers() can use it for
// frame-pacing decisions. Returns the hwc error code.
int32_t HardwareComposer::Present(hwc2_display_t display) {
  int32_t present_fence;
  int32_t error = (int32_t)hwc2_hidl_->presentDisplay(display, &present_fence);

  // According to the documentation, this fence is signaled at the time of
  // vsync/DMA for physical displays.
  if (error == HWC2_ERROR_NONE) {
    ATRACE_INT("HardwareComposer: VsyncFence", present_fence);
    retire_fence_fds_.emplace_back(present_fence);
  } else {
    ATRACE_INT("HardwareComposer: PresentResult", error);
  }

  return error;
}
+
// Sets the power mode on |display|, disabling vsync first when powering off,
// and tracks the logical display state in display_on_. Returns the hwc error
// code.
int32_t HardwareComposer::SetPowerMode(hwc2_display_t display,
                                       hwc2_power_mode_t mode) {
  if (mode == HWC2_POWER_MODE_OFF) {
    EnableVsync(false);
  }

  // Note: display_on_ reflects the requested mode for any display passed in,
  // even though vsync control above targets only the primary display.
  display_on_ = mode != HWC2_POWER_MODE_OFF;

  return (int32_t)hwc2_hidl_->setPowerMode(
      display, (Hwc2::IComposerClient::PowerMode)mode);
}
+
// Thin wrapper over the hwc getDisplayAttribute call; stores the attribute
// value in |out_value| and returns the hwc error code.
int32_t HardwareComposer::GetDisplayAttribute(hwc2_display_t display,
                                              hwc2_config_t config,
                                              hwc2_attribute_t attribute,
                                              int32_t* out_value) const {
  return (int32_t)hwc2_hidl_->getDisplayAttribute(
      display, config, (Hwc2::IComposerClient::Attribute)attribute, out_value);
}
+
+int32_t HardwareComposer::GetDisplayMetrics(
+ hwc2_display_t display, hwc2_config_t config,
+ HWCDisplayMetrics* out_metrics) const {
+ int32_t ret = HWC2_ERROR_NONE;
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_WIDTH,
+ &out_metrics->width);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display width");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_HEIGHT,
+ &out_metrics->height);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display height");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_VSYNC_PERIOD,
+ &out_metrics->vsync_period_ns);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display height");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_DPI_X,
+ &out_metrics->dpi.x);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display DPI X");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_DPI_Y,
+ &out_metrics->dpi.y);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display DPI Y");
+ return ret;
+ }
+
+ return HWC2_ERROR_NONE;
+}
+
+void HardwareComposer::Dump(char* buffer, uint32_t* out_size) {
+ std::string debug_str = hwc2_hidl_->dumpDebugInfo();
+ ALOGI("%s", debug_str.c_str());
+
+ if (buffer == nullptr) {
+ *out_size = debug_str.size();
+ } else {
+ std::copy(debug_str.begin(), debug_str.begin() + *out_size, buffer);
+ }
+}
+
// TODO(skiazyk): Figure out what to do with `is_geometry_changed`. There does
// not seem to be any equivalent in the HWC2 API, but that doesn't mean its not
// there.
//
// Submits the current frame to hwc: prepares the active layers, drops the
// frame if the display driver is backed up (pending retire fences or a frame
// already queued), then validates and presents the display and records
// release fences for post-frame bookkeeping.
void HardwareComposer::PostLayers(bool /*is_geometry_changed*/) {
  ATRACE_NAME("HardwareComposer::PostLayers");

  // Setup the hardware composer layers with current buffers.
  for (size_t i = 0; i < active_layer_count_; i++) {
    layers_[i]->Prepare();
  }

  // Now that we have taken in a frame from the application, we have a chance
  // to drop the frame before passing the frame along to HWC.
  // If the display driver has become backed up, we detect it here and then
  // react by skipping this frame to catch up latency.
  while (!retire_fence_fds_.empty() &&
         (!retire_fence_fds_.front() ||
          sync_wait(retire_fence_fds_.front().Get(), 0) == 0)) {
    // There are only 2 fences in here, no performance problem to shift the
    // array of ints.
    retire_fence_fds_.erase(retire_fence_fds_.begin());
  }

  const bool is_frame_pending = IsFramePendingInDriver();
  const bool is_fence_pending =
      retire_fence_fds_.size() > kAllowedPendingFenceCount;

  if (is_fence_pending || is_frame_pending) {
    ATRACE_INT("frame_skip_count", ++frame_skip_count_);

    ALOGW_IF(is_frame_pending, "Warning: frame already queued, dropping frame");
    ALOGW_IF(is_fence_pending,
             "Warning: dropping a frame to catch up with HWC (pending = %zd)",
             retire_fence_fds_.size());

    for (size_t i = 0; i < active_layer_count_; i++) {
      layers_[i]->Drop();
    }
    return;
  } else {
    // Make the transition more obvious in systrace when the frame skip happens
    // above.
    ATRACE_INT("frame_skip_count", 0);
  }

#if TRACE
  for (size_t i = 0; i < active_layer_count_; i++)
    ALOGI("HardwareComposer::PostLayers: dl[%zu] ctype=0x%08x", i,
          layers_[i]->GetCompositionType());
#endif

  int32_t ret = HWC2_ERROR_NONE;

  // Full-display region, sized from the framebuffer target.
  std::vector<Hwc2::IComposerClient::Rect> full_region(1);
  full_region[0].left = 0;
  full_region[0].top = 0;
  full_region[0].right = framebuffer_target_->width();
  full_region[0].bottom = framebuffer_target_->height();

  // NOTE(review): ret is still HWC2_ERROR_NONE at this point and full_region
  // is never passed to hwc -- the setClientTarget call this log (and the
  // region above) correspond to appears to be missing, so this ALOGE_IF can
  // never fire. Confirm whether the client-target setup was removed
  // intentionally.
  ALOGE_IF(ret, "Error setting client target : %d", ret);

  ret = Validate(HWC_DISPLAY_PRIMARY);
  if (ret) {
    ALOGE("HardwareComposer::Validate failed; ret=%d", ret);
    return;
  }

  ret = Present(HWC_DISPLAY_PRIMARY);
  if (ret) {
    ALOGE("HardwareComposer::Present failed; ret=%d", ret);
    return;
  }

  std::vector<Hwc2::Layer> out_layers;
  std::vector<int> out_fences;
  ret = (int32_t)hwc2_hidl_->getReleaseFences(HWC_DISPLAY_PRIMARY, &out_layers,
                                              &out_fences);
  uint32_t num_elements = out_layers.size();

  ALOGE_IF(ret, "HardwareComposer: GetReleaseFences failed; ret=%d", ret);

  // Perform post-frame bookkeeping. Unused layers are a no-op.
  for (size_t i = 0; i < num_elements; ++i) {
    for (size_t j = 0; j < active_layer_count_; ++j) {
      if (layers_[j]->GetLayerHandle() == out_layers[i]) {
        layers_[j]->Finish(out_fences[i]);
      }
    }
  }
}
+
// TODO(skiazyk): This is a work-around for the fact that we currently do not
// handle the case when new surfaces are introduced when displayd is not
// in an active state. A proper-solution will require re-structuring
// displayd a little, but hopefully this is sufficient for now.
// For example, could this be handled in |UpdateLayerSettings| instead?
//
// Powers the display and enables vsync when there are surfaces to show,
// forces the backlight on, and publishes the performance-mode system
// property.
void HardwareComposer::UpdateDisplayState() {
  const bool has_display_surfaces = display_surfaces_.size() > 0;

  if (has_display_surfaces) {
    int32_t ret = SetPowerMode(HWC_DISPLAY_PRIMARY, HWC2_POWER_MODE_ON);

    ALOGE_IF(ret, "HardwareComposer: Could not set power mode; ret=%d", ret);

    EnableVsync(true);
  }
  // TODO(skiazyk): We need to do something about accessing this directly,
  // supposedly there is a backlight service on the way.
  SetBacklightBrightness(255);

  // NOTE(review): SetPowerMode above already set display_on_ to true whenever
  // has_display_surfaces is true, so this condition appears unreachable in
  // that case -- confirm whether the timestamp read was meant to happen
  // before the power-mode change.
  if (!display_on_ && has_display_surfaces) {
    const int error = ReadVSyncTimestamp(&last_vsync_timestamp_);
    ALOGE_IF(error < 0,
             "HardwareComposer::SetDisplaySurfaces: Failed to read vsync "
             "timestamp: %s",
             strerror(-error));
  }

  // Trigger target-specific performance mode change.
  property_set(kDvrPerformanceProperty, display_on_ ? "performance" : "idle");
}
+
// Replaces the set of client surfaces to display. Detects whether the set of
// hardware (direct) layers changed -- as opposed to GPU-composited layers
// only -- so the post thread rebuilds hwc layers only when necessary, then
// assigns layer order and refreshes the display state if running. Always
// returns 0.
int HardwareComposer::SetDisplaySurfaces(
    std::vector<std::shared_ptr<DisplaySurface>> surfaces) {
  std::lock_guard<std::mutex> autolock(layer_mutex_);

  ALOGI("HardwareComposer::SetDisplaySurfaces: surface count=%zd",
        surfaces.size());

  // Figure out whether we need to update hardware layers. If this surface
  // change does not add or remove hardware layers we can avoid display hiccups
  // by gracefully updating only the GPU compositor layers.
  // hardware_layers_need_update_ is reset to false by the Post thread.
  int old_gpu_layer_count = 0;
  int new_gpu_layer_count = 0;
  // Look for new hardware layers and count new GPU layers.
  for (const auto& surface : surfaces) {
    if (!(surface->flags() &
          DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION))
      ++new_gpu_layer_count;
    else if (std::find(display_surfaces_.begin(), display_surfaces_.end(),
                       surface) == display_surfaces_.end())
      // This is a new hardware layer, we need to update.
      hardware_layers_need_update_ = true;
  }
  // Look for deleted hardware layers or compositor layers.
  for (const auto& surface : display_surfaces_) {
    if (!(surface->flags() &
          DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION))
      ++old_gpu_layer_count;
    else if (std::find(surfaces.begin(), surfaces.end(), surface) ==
             surfaces.end())
      // This is a deleted hardware layer, we need to update.
      hardware_layers_need_update_ = true;
  }
  // Check for compositor hardware layer transition.
  if ((!old_gpu_layer_count && new_gpu_layer_count) ||
      (old_gpu_layer_count && !new_gpu_layer_count))
    hardware_layers_need_update_ = true;

  display_surfaces_ = std::move(surfaces);
  display_surfaces_updated_ = true;

  // Set the chosen layer order for all surfaces.
  for (size_t i = 0; i < display_surfaces_.size(); ++i) {
    display_surfaces_[i]->SetLayerOrder(static_cast<int>(i));
  }

  // TODO(skiazyk): fix this so that it is handled seamlessly with dormant/non-
  // dormant state.
  if (!IsSuspended()) {
    UpdateDisplayState();
  }

  return 0;
}
+
// Reads the value of the display driver wait_pingpong state. Returns 0 or 1
// (the value of the state) on success or a negative error otherwise.
// TODO(eieio): This is pretty driver specific, this should be moved to a
// separate class eventually.
int HardwareComposer::ReadWaitPPState() {
  // Gracefully handle when the kernel does not support this feature.
  if (!primary_display_wait_pp_fd_)
    return 0;

  const int wait_pp_fd = primary_display_wait_pp_fd_.Get();
  int ret, error;

  // Rewind: the sysfs node is kept open across calls, so every read must
  // start from offset 0.
  ret = lseek(wait_pp_fd, 0, SEEK_SET);
  if (ret < 0) {
    error = errno;
    ALOGE("HardwareComposer::ReadWaitPPState: Failed to seek wait_pp fd: %s",
          strerror(error));
    return -error;
  }

  // The node reports the state as a single ASCII digit.
  char data = -1;
  ret = read(wait_pp_fd, &data, sizeof(data));
  if (ret < 0) {
    error = errno;
    ALOGE("HardwareComposer::ReadWaitPPState: Failed to read wait_pp state: %s",
          strerror(error));
    return -error;
  }

  switch (data) {
    case '0':
      return 0;
    case '1':
      return 1;
    default:
      ALOGE(
          "HardwareComposer::ReadWaitPPState: Unexpected value for wait_pp: %d",
          data);
      return -EINVAL;
  }
}
+
+// Reads the timestamp of the last vsync from the display driver.
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::ReadVSyncTimestamp(int64_t* timestamp) {
+ const int event_fd = primary_display_vsync_event_fd_.Get();
+ int ret, error;
+
+ // The driver returns data in the form "VSYNC=<timestamp ns>".
+ std::array<char, 32> data;
+ data.fill('\0');
+
+ // Seek back to the beginning of the event file.
+ ret = lseek(event_fd, 0, SEEK_SET);
+ if (ret < 0) {
+ error = errno;
+ ALOGE(
+ "HardwareComposer::ReadVSyncTimestamp: Failed to seek vsync event fd: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ // Read the vsync event timestamp.
+ ret = read(event_fd, data.data(), data.size());
+ if (ret < 0) {
+ error = errno;
+ ALOGE_IF(
+ error != EAGAIN,
+ "HardwareComposer::ReadVSyncTimestamp: Error while reading timestamp: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ ret = sscanf(data.data(), "VSYNC=%" PRIu64,
+ reinterpret_cast<uint64_t*>(timestamp));
+ if (ret < 0) {
+ error = errno;
+ ALOGE(
+ "HardwareComposer::ReadVSyncTimestamp: Error while parsing timestamp: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ return 0;
+}
+
// Blocks until the next vsync event is signaled by the display driver, or
// until the terminate event fd is written (see PausePostThread), whichever
// comes first. Returns 0 on success or a negated errno code on poll failure.
// TODO(eieio): This is pretty driver specific, this should be moved to a
// separate class eventually.
int HardwareComposer::BlockUntilVSync() {
  const int event_fd = primary_display_vsync_event_fd_.Get();
  pollfd pfd[2] = {
      {
          // Sysfs vsync node signals POLLPRI on a new event.
          .fd = event_fd, .events = POLLPRI, .revents = 0,
      },
      // This extra event fd is to ensure that we can break out of this loop to
      // pause the thread even when vsync is disabled, and thus no events on the
      // vsync fd are being generated.
      {
          .fd = terminate_post_thread_event_fd_.Get(),
          .events = POLLPRI | POLLIN,
          .revents = 0,
      },
  };
  int ret, error;
  // Retry when the wait is interrupted by a signal.
  do {
    ret = poll(pfd, 2, -1);
    error = errno;
    ALOGW_IF(ret < 0,
             "HardwareComposer::BlockUntilVSync: Error while waiting for vsync "
             "event: %s (%d)",
             strerror(error), error);
  } while (ret < 0 && error == EINTR);

  return ret < 0 ? -error : 0;
}
+
+// Waits for the next vsync and returns the timestamp of the vsync event. If
+// vsync already passed since the last call, returns the latest vsync timestamp
+// instead of blocking. This method updates the last_vsync_timeout_ in the
+// process.
+//
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::WaitForVSync(int64_t* timestamp) {
+ int error;
+
+ // Get the current timestamp and decide what to do.
+ while (true) {
+ int64_t current_vsync_timestamp;
+ error = ReadVSyncTimestamp(¤t_vsync_timestamp);
+ if (error < 0 && error != -EAGAIN)
+ return error;
+
+ if (error == -EAGAIN) {
+ // Vsync was turned off, wait for the next vsync event.
+ error = BlockUntilVSync();
+ if (error < 0)
+ return error;
+
+ // If a request to pause the post thread was given, exit immediately
+ if (IsSuspended()) {
+ return 0;
+ }
+
+ // Try again to get the timestamp for this new vsync interval.
+ continue;
+ }
+
+ // Check that we advanced to a later vsync interval.
+ if (TimestampGT(current_vsync_timestamp, last_vsync_timestamp_)) {
+ *timestamp = last_vsync_timestamp_ = current_vsync_timestamp;
+ return 0;
+ }
+
+ // See how close we are to the next expected vsync. If we're within 1ms,
+ // sleep for 1ms and try again.
+ const int64_t ns_per_frame = display_metrics_.vsync_period_ns;
+ const int64_t threshold_ns = 1000000;
+
+ const int64_t next_vsync_est = last_vsync_timestamp_ + ns_per_frame;
+ const int64_t distance_to_vsync_est = next_vsync_est - GetSystemClockNs();
+
+ if (distance_to_vsync_est > threshold_ns) {
+ // Wait for vsync event notification.
+ error = BlockUntilVSync();
+ if (error < 0)
+ return error;
+
+ // Again, exit immediately if the thread was requested to pause
+ if (IsSuspended()) {
+ return 0;
+ }
+ } else {
+ // Sleep for a short time before retrying.
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+}
+
+int HardwareComposer::SleepUntil(int64_t wakeup_timestamp) {
+ const int timer_fd = vsync_sleep_timer_fd_.Get();
+ const itimerspec wakeup_itimerspec = {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = NsToTimespec(wakeup_timestamp),
+ };
+ int ret =
+ timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &wakeup_itimerspec, nullptr);
+ int error = errno;
+ if (ret < 0) {
+ ALOGE("HardwareComposer::SleepUntil: Failed to set timerfd: %s",
+ strerror(error));
+ return -error;
+ }
+
+ // Wait for the timer by reading the expiration count.
+ uint64_t expiration_count;
+ ret = read(timer_fd, &expiration_count, sizeof(expiration_count));
+ if (ret < 0) {
+ ALOGE("HardwareComposer::SleepUntil: Failed to wait for timerfd: %s",
+ strerror(error));
+ return -error;
+ }
+
+ return 0;
+}
+
+void HardwareComposer::PostThread() {
+ // NOLINTNEXTLINE(runtime/int)
+ prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("PostThread"), 0, 0, 0);
+
+ std::unique_lock<std::mutex> thread_lock(thread_pause_mutex_);
+
+ // Set the scheduler to SCHED_FIFO with high priority.
+ int error = dvrSetSchedulerClass(0, "graphics:high");
+ LOG_ALWAYS_FATAL_IF(
+ error < 0,
+ "HardwareComposer::PostThread: Failed to set scheduler class: %s",
+ strerror(-error));
+ error = dvrSetCpuPartition(0, "/system/performance");
+ LOG_ALWAYS_FATAL_IF(
+ error < 0,
+ "HardwareComposer::PostThread: Failed to set cpu partition: %s",
+ strerror(-error));
+
+ // Force the layers to be setup at least once.
+ display_surfaces_updated_ = true;
+
+ // Initialize the GPU compositor.
+ LOG_ALWAYS_FATAL_IF(!compositor_.Initialize(GetHmdDisplayMetrics()),
+ "Failed to initialize the compositor");
+
+ const int64_t ns_per_frame = display_metrics_.vsync_period_ns;
+ const int64_t photon_offset_ns = GetPosePredictionTimeOffset(ns_per_frame);
+
+ // TODO(jbates) Query vblank time from device, when such an API is available.
+ // This value (6.3%) was measured on A00 in low persistence mode.
+ int64_t vblank_ns = ns_per_frame * 63 / 1000;
+ int64_t right_eye_photon_offset_ns = (ns_per_frame - vblank_ns) / 2;
+
+ // Check property for overriding right eye offset value.
+ right_eye_photon_offset_ns =
+ property_get_int64(kRightEyeOffsetProperty, right_eye_photon_offset_ns);
+
+ // The list of surfaces the compositor should attempt to render. This is set
+ // at the start of each frame.
+ std::vector<std::shared_ptr<DisplaySurface>> compositor_surfaces;
+ compositor_surfaces.reserve(2);
+
+ // Our history of frame times. This is used to get a better estimate of how
+ // long the next frame will take, to set a schedule for EDS.
+ FrameTimeHistory frame_time_history;
+
+ // The backlog is used to allow us to start rendering the next frame before
+ // the previous frame has finished, and still get an accurate measurement of
+ // frame duration.
+ std::vector<FrameTimeMeasurementRecord> frame_time_backlog;
+ constexpr int kFrameTimeBacklogMax = 2;
+ frame_time_backlog.reserve(kFrameTimeBacklogMax);
+
+ // Storage for retrieving fence info.
+ FenceInfoBuffer fence_info_buffer;
+
+ while (1) {
+ ATRACE_NAME("HardwareComposer::PostThread");
+
+ while (IsSuspended()) {
+ ALOGI("HardwareComposer::PostThread: Post thread pause requested.");
+ thread_pause_semaphore_.wait(thread_lock);
+ // The layers will need to be updated since they were deleted previously
+ display_surfaces_updated_ = true;
+ hardware_layers_need_update_ = true;
+ }
+
+ int64_t vsync_timestamp = 0;
+ {
+ std::array<char, 128> buf;
+ snprintf(buf.data(), buf.size(), "wait_vsync|vsync=%d|",
+ vsync_count_ + 1);
+ ATRACE_NAME(buf.data());
+
+ error = WaitForVSync(&vsync_timestamp);
+ ALOGE_IF(
+ error < 0,
+ "HardwareComposer::PostThread: Failed to wait for vsync event: %s",
+ strerror(-error));
+
+ // Don't bother processing this frame if a pause was requested
+ if (IsSuspended()) {
+ continue;
+ }
+ }
+
+ ++vsync_count_;
+
+ static double last_print_time = -1;
+ double current_time = GetSystemClockSec();
+ if (last_print_time < 0 || current_time - last_print_time > 3) {
+ last_print_time = current_time;
+ }
+
+ if (pose_client_) {
+ // Signal the pose service with vsync info.
+ // Display timestamp is in the middle of scanout.
+ privateDvrPoseNotifyVsync(pose_client_, vsync_count_,
+ vsync_timestamp + photon_offset_ns,
+ ns_per_frame, right_eye_photon_offset_ns);
+ }
+
+ bool layer_config_changed = UpdateLayerConfig(&compositor_surfaces);
+
+ if (layer_config_changed) {
+ frame_time_history.ResetWithSeed(
+ GuessFrameTime(compositor_surfaces.size()));
+ frame_time_backlog.clear();
+ } else {
+ UpdateFrameTimeHistory(&frame_time_backlog, kFrameTimeBacklogMax,
+ &fence_info_buffer, &frame_time_history);
+ }
+
+ // Get our current best estimate at how long the next frame will take to
+ // render, based on how long previous frames took to render. Use this
+ // estimate to decide when to wake up for EDS.
+ int64_t frame_time_estimate =
+ frame_time_history.GetSampleCount() == 0
+ ? GuessFrameTime(compositor_surfaces.size())
+ : frame_time_history.GetAverage();
+ frame_time_estimate = std::max(frame_time_estimate, kFrameTimeEstimateMin);
+ DebugHudData::data.hwc_latency = frame_time_estimate;
+
+ // Signal all of the vsync clients. Because absolute time is used for the
+ // wakeup time below, this can take a little time if necessary.
+ if (vsync_callback_)
+ vsync_callback_(HWC_DISPLAY_PRIMARY, vsync_timestamp, frame_time_estimate,
+ vsync_count_);
+
+ {
+ // Sleep until async EDS wakeup time.
+ ATRACE_NAME("sleep");
+
+ int64_t display_time_est = vsync_timestamp + ns_per_frame;
+ int64_t now = GetSystemClockNs();
+ int64_t frame_finish_time_est = now + frame_time_estimate;
+ int64_t sleep_time_ns = display_time_est - now - frame_time_estimate;
+
+ ATRACE_INT64("sleep_time_ns", sleep_time_ns);
+ if (frame_finish_time_est - display_time_est >= kFrameSkipThresholdNs) {
+ ATRACE_INT("frame_skip_count", ++frame_skip_count_);
+ ALOGE(
+ "HardwareComposer::PostThread: Missed frame schedule, drop "
+ "frame. Expected frame miss: %.1fms",
+ static_cast<double>(frame_finish_time_est - display_time_est) /
+ 1000000);
+
+ // There are several reasons we might skip a frame, but one possibility
+ // is we mispredicted the frame time. Clear out the frame time history.
+ frame_time_history.ResetWithSeed(
+ GuessFrameTime(compositor_surfaces.size()));
+ frame_time_backlog.clear();
+ DebugHudData::data.hwc_frame_stats.SkipFrame();
+
+ continue;
+ } else {
+ // Make the transition more obvious in systrace when the frame skip
+ // happens above.
+ ATRACE_INT("frame_skip_count", 0);
+ }
+
+ if (sleep_time_ns > 0) {
+ error = SleepUntil(display_time_est - frame_time_estimate);
+ ALOGE_IF(error < 0, "HardwareComposer::PostThread: Failed to sleep: %s",
+ strerror(-error));
+ }
+ }
+
+ DebugHudData::data.hwc_frame_stats.AddFrame();
+
+ int64_t frame_start_time = GetSystemClockNs();
+
+ // Setup the output buffer for the compositor. This needs to happen before
+ // you draw with the compositor.
+ if (gpu_layer_ != nullptr) {
+ gpu_layer_->UpdateDirectBuffer(compositor_.GetBuffer());
+ }
+
+ // Call PostLayers now before performing the GL code for the compositor to
+ // avoid missing the deadline that can cause the lower-level hwc to get
+ // permanently backed up.
+ PostLayers(layer_config_changed);
+
+ PostCompositorBuffers(compositor_surfaces);
+
+ if (gpu_layer_ != nullptr) {
+ // Note, with scanline racing, this draw is timed along with the post
+ // layers to finish just in time.
+ LocalHandle frame_fence_fd;
+ compositor_.DrawFrame(vsync_count_ + 1, &frame_fence_fd);
+ if (frame_fence_fd) {
+ LOG_ALWAYS_FATAL_IF(frame_time_backlog.size() >= kFrameTimeBacklogMax,
+ "Frame time backlog exceeds capacity");
+ frame_time_backlog.push_back(
+ {frame_start_time, std::move(frame_fence_fd)});
+ }
+ } else if (!layer_config_changed) {
+ frame_time_history.AddSample(GetSystemClockNs() - frame_start_time);
+ }
+
+ HandlePendingScreenshots();
+ }
+
+ // TODO(skiazyk): Currently the compositor is not fully releasing its EGL
+ // context, which seems to prevent the thread from exiting properly.
+ // This shouldn't be too hard to address, I just don't have time right now.
+ compositor_.Shutdown();
+}
+
+// Reconciles the hardware composer layer set and the GPU compositor surface
+// list with the display surfaces configured by the display manager.
+// Fills |compositor_surfaces| with surfaces that need GPU distortion
+// compositing (those without DISABLE_SYSTEM_DISTORTION). Returns true if the
+// surface set changed since the last call, false otherwise.
+bool HardwareComposer::UpdateLayerConfig(
+ std::vector<std::shared_ptr<DisplaySurface>>* compositor_surfaces) {
+ std::lock_guard<std::mutex> autolock(layer_mutex_);
+
+ // Fast path: nothing changed since the display manager's last update.
+ if (!display_surfaces_updated_)
+ return false;
+
+ display_surfaces_updated_ = false;
+ DebugHudData::data.ResetLayers();
+
+ // Update compositor layers.
+ {
+ ATRACE_NAME("UpdateLayerConfig_GpuLayers");
+ compositor_.UpdateSurfaces(display_surfaces_);
+ compositor_surfaces->clear();
+ for (size_t i = 0; i < display_surfaces_.size(); ++i) {
+ const auto& surface = display_surfaces_[i];
+ if (!(surface->flags() &
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION)) {
+ compositor_surfaces->push_back(surface);
+ }
+ }
+ }
+
+ // The surface set changed but the hardware layer assignment is still valid.
+ if (!hardware_layers_need_update_)
+ return true;
+
+ // Update hardware layers.
+
+ ATRACE_NAME("UpdateLayerConfig_HwLayers");
+ hardware_layers_need_update_ = false;
+
+ // Update the display layers in a non-destructive fashion.
+
+ // Create a map from surface id to hardware layer
+ std::map<int, Layer*> display_surface_layers;
+
+ // Keep only the layers whose surface is still in display_surfaces_.
+ for (size_t i = 0; i < active_layer_count_; ++i) {
+ auto layer = layers_[i];
+ int surface_id = layer->GetSurfaceId();
+
+ auto found =
+ std::find_if(display_surfaces_.begin(), display_surfaces_.end(),
+ [surface_id](const auto& surface) {
+ return surface->surface_id() == surface_id;
+ });
+
+ if (found != display_surfaces_.end()) {
+ display_surface_layers[surface_id] = layer;
+ }
+ }
+
+ // A GPU (distortion) layer is needed if any surface lacks the
+ // DISABLE_SYSTEM_DISTORTION flag.
+ bool has_gpu_layer = std::any_of(
+ display_surfaces_.begin(), display_surfaces_.end(),
+ [](const auto& surface) {
+ return !(surface->flags() &
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION);
+ });
+
+ if (!has_gpu_layer) {
+ gpu_layer_ = nullptr;
+ }
+
+ // A layer stays active if its surface is still present, or if it is the
+ // GPU layer (surface id < 0) and a GPU layer is still required.
+ auto is_layer_active = [&display_surface_layers, has_gpu_layer](auto layer) {
+ int surface_id = layer->GetSurfaceId();
+ if (surface_id >= 0) {
+ return display_surface_layers.count(surface_id) > 0;
+ } else {
+ return has_gpu_layer;
+ }
+ };
+
+ // Compress the in-use layers to the top of the list
+ auto part = std::partition(
+ layers_.begin(), layers_.begin() + active_layer_count_, is_layer_active);
+
+ size_t new_active_layer_count = part - layers_.begin();
+
+ // Clear any unused layers
+ for (size_t i = new_active_layer_count; i < active_layer_count_; ++i) {
+ layers_[i]->Reset();
+ }
+
+ active_layer_count_ = new_active_layer_count;
+
+ bool gpu_layer_applied = false;
+
+ // Create/update all of the hardware layers
+ for (size_t i = 0; i < display_surfaces_.size(); ++i) {
+ const auto& surface = display_surfaces_[i];
+ bool is_hw_surface =
+ surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION;
+ // The bottom-most layer is opaque; every layer above blends over it.
+ hwc2_blend_mode_t blending =
+ i == 0 ? HWC2_BLEND_MODE_NONE : HWC2_BLEND_MODE_COVERAGE;
+
+ DebugHudData::data.SetLayerInfo(
+ i, surface->width(), surface->height(),
+ !!(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2));
+
+ // All GPU-composited surfaces share one hardware layer; skip the rest.
+ if (!is_hw_surface && gpu_layer_applied) {
+ continue;
+ }
+
+ Layer* target_layer;
+ bool existing_layer = false;
+
+ if (is_hw_surface) {
+ auto it = display_surface_layers.find(surface->surface_id());
+
+ if (it != display_surface_layers.end()) {
+ target_layer = it->second;
+ existing_layer = true;
+ }
+ } else if (gpu_layer_ != nullptr) {
+ target_layer = gpu_layer_;
+ existing_layer = true;
+ }
+
+ if (!existing_layer) {
+ if (active_layer_count_ >= kMaxHardwareLayers) {
+ ALOGI("HardwareComposer: More than %d hardware layers requested.",
+ kMaxHardwareLayers);
+ break;
+ } else {
+ target_layer = layers_[active_layer_count_];
+ ++active_layer_count_;
+ }
+
+ ALOGD_IF(TRACE,
+ "HardwareComposer::UpdateLayerConfig: (new) surface_id=%d -> "
+ "layer=%zd",
+ surface->surface_id(), i);
+
+ if (is_hw_surface) {
+ target_layer->Setup(surface, blending, display_transform_,
+ HWC2_COMPOSITION_DEVICE, i);
+ } else {
+ gpu_layer_ = target_layer;
+ target_layer->Setup(compositor_.GetBuffer(), blending,
+ display_transform_, HWC2_COMPOSITION_DEVICE, i);
+ }
+ } else {
+ ALOGD_IF(TRACE,
+ "HardwareComposer::UpdateLayerConfig: (retained) surface_id=%d "
+ "-> layer=%zd",
+ surface->surface_id(), i);
+
+ // Retained layer: only blending and z-order may have changed.
+ target_layer->SetBlending(blending);
+ target_layer->SetZOrderIndex(i);
+ target_layer->UpdateLayerSettings();
+ }
+
+ gpu_layer_applied = !is_hw_surface;
+ }
+
+ ALOGD_IF(TRACE, "HardwareComposer::UpdateLayerConfig: %zd active layers",
+ active_layer_count_);
+
+ return true;
+}
+
+// Hands each GPU-composited surface's buffer to the compositor for this frame.
+void HardwareComposer::PostCompositorBuffers(
+ const std::vector<std::shared_ptr<DisplaySurface>>& compositor_surfaces) {
+ ATRACE_NAME("PostCompositorBuffers");
+ for (const auto& surface : compositor_surfaces) {
+ compositor_.PostBuffer(surface);
+ }
+}
+
+// Drains completed frames from |backlog| into |history| (oldest first,
+// stopping at the first unfinished frame). If the backlog is still at
+// capacity afterwards, force-retires the oldest entry using wall-clock time
+// so the history keeps advancing.
+void HardwareComposer::UpdateFrameTimeHistory(
+ std::vector<FrameTimeMeasurementRecord>* backlog, int backlog_max,
+ FenceInfoBuffer* fence_info_buffer, FrameTimeHistory* history) {
+ while (!backlog->empty()) {
+ const auto& frame_time_record = backlog->front();
+ int64_t end_time = 0;
+ bool frame_finished = CheckFrameFinished(frame_time_record.fence.Get(),
+ fence_info_buffer, &end_time);
+ if (frame_finished) {
+ int64_t frame_duration = end_time - frame_time_record.start_time;
+ history->AddSample(frame_duration);
+ // Our backlog is tiny (2 elements), so erasing from the front is ok
+ backlog->erase(backlog->begin());
+ } else {
+ break;
+ }
+ }
+
+ if (backlog->size() == static_cast<size_t>(backlog_max)) {
+ // Yikes, something must've gone wrong if our oldest frame hasn't finished
+ // yet. Give up on waiting for it.
+ const auto& stale_frame_time_record = backlog->front();
+ int64_t frame_duration =
+ GetSystemClockNs() - stale_frame_time_record.start_time;
+ backlog->erase(backlog->begin());
+ history->AddSample(frame_duration);
+ ALOGW("Frame didn't finish after %.1fms",
+ static_cast<double>(frame_duration) / 1000000);
+ }
+}
+
+// Polls |frame_fence_fd| without blocking (sync_wait with 0 timeout).
+// Returns true only if the fence has signaled AND its signaled timestamp was
+// successfully read into |timestamp|; false while the frame is still pending
+// or on error.
+bool HardwareComposer::CheckFrameFinished(int frame_fence_fd,
+ FenceInfoBuffer* fence_info_buffer,
+ int64_t* timestamp) {
+ int result = -1;
+ int sync_wait = sync_wait(frame_fence_fd, 0);
+ if (sync_result == 0) {
+ result =
+ GetFenceSignaledTimestamp(frame_fence_fd, fence_info_buffer, timestamp);
+ if (result < 0) {
+ ALOGE("Failed getting signaled timestamp from fence");
+ }
+ // NOTE(review): a timeout is expected to leave errno == ETIME and is not
+ // logged — confirm against the libsync sync_wait contract.
+ } else if (errno != ETIME) {
+ ALOGE("sync_wait on frame fence failed");
+ }
+ return result >= 0;
+}
+
+// Services any pending screenshot request against the current layer stack.
+// No-op when there are no active layers.
+void HardwareComposer::HandlePendingScreenshots() {
+ // Take a screenshot of the requested layer, if available.
+ // TODO(eieio): Look into using virtual displays to composite the layer stack
+ // into a single output buffer that can be returned to the screenshot clients.
+ if (active_layer_count_ > 0) {
+ if (auto screenshot_service = ScreenshotService::GetInstance()) {
+ if (screenshot_service->IsScreenshotRequestPending()) {
+ ATRACE_NAME("screenshot");
+ screenshot_service->TakeIfNeeded(layers_, compositor_);
+ }
+ } else {
+ ALOGW(
+ "HardwareComposer::HandlePendingScreenshots: Failed to get "
+ "screenshot service!");
+ }
+ }
+}
+
+// Installs the callback invoked from the post thread on each vsync.
+void HardwareComposer::SetVSyncCallback(VSyncCallback callback) {
+ vsync_callback_ = callback;
+}
+
+// HWC2 refresh callback; currently unimplemented.
+void HardwareComposer::HwcRefresh(hwc2_callback_data_t /*data*/,
+ hwc2_display_t /*display*/) {
+ // TODO(eieio): implement invalidate callbacks.
+}
+
+// HWC2 vsync callback; intentionally a no-op (see comment below).
+void HardwareComposer::HwcVSync(hwc2_callback_data_t /*data*/,
+ hwc2_display_t /*display*/,
+ int64_t /*timestamp*/) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+ // Intentionally empty. HWC may require a callback to be set to enable vsync
+ // signals. We bypass this callback thread by monitoring the vsync event
+ // directly, but signals still need to be enabled.
+}
+
+// HWC2 hotplug callback; currently unimplemented.
+void HardwareComposer::HwcHotplug(hwc2_callback_data_t /*callbackData*/,
+ hwc2_display_t /*display*/,
+ hwc2_connection_t /*connected*/) {
+ // TODO(eieio): implement display hotplug callbacks.
+}
+
+// Writes |brightness| as decimal text to the backlight sysfs node, if open.
+// NOTE(review): the write() result is ignored — best-effort by design?
+void HardwareComposer::SetBacklightBrightness(int brightness) {
+ if (backlight_brightness_fd_) {
+ std::array<char, 32> text;
+ const int length = snprintf(text.data(), text.size(), "%d", brightness);
+ write(backlight_brightness_fd_.Get(), text.data(), length);
+ }
+}
+
+// Constructs an unused layer; Initialize() must be called before use.
+Layer::Layer()
+ : hwc2_hidl_(nullptr),
+ surface_index_(-1),
+ hardware_composer_layer_(0),
+ display_metrics_(nullptr),
+ blending_(HWC2_BLEND_MODE_NONE),
+ transform_(HWC_TRANSFORM_NONE),
+ composition_type_(HWC2_COMPOSITION_DEVICE),
+ surface_rect_functions_applied_(false) {}
+
+// Stores the (non-owned) composer HAL proxy and display metrics pointers.
+// Both must outlive this Layer.
+void Layer::Initialize(Hwc2::Composer* hwc2_hidl, HWCDisplayMetrics* metrics) {
+ hwc2_hidl_ = hwc2_hidl;
+ display_metrics_ = metrics;
+}
+
+// Returns the acquired buffer (with its release fence), destroys the HWC
+// layer, and restores all per-layer state to the constructed defaults.
+void Layer::Reset() {
+ const int ret = acquired_buffer_.Release(std::move(release_fence_));
+ ALOGE_IF(ret < 0, "Layer::Reset: failed to release buffer: %s",
+ strerror(-ret));
+
+ if (hwc2_hidl_ != nullptr && hardware_composer_layer_) {
+ hwc2_hidl_->destroyLayer(HWC_DISPLAY_PRIMARY, hardware_composer_layer_);
+ hardware_composer_layer_ = 0;
+ }
+
+ surface_index_ = static_cast<size_t>(-1);
+ blending_ = HWC2_BLEND_MODE_NONE;
+ transform_ = HWC_TRANSFORM_NONE;
+ composition_type_ = HWC2_COMPOSITION_DEVICE;
+ direct_buffer_ = nullptr;
+ surface_ = nullptr;
+ acquire_fence_fd_.Close();
+ surface_rect_functions_applied_ = false;
+}
+
+// Binds this layer to a display surface as its content source and creates
+// the underlying HWC layer. |index| is the surface's z-order position.
+void Layer::Setup(const std::shared_ptr<DisplaySurface>& surface,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t index) {
+ Reset();
+ surface_index_ = index;
+ surface_ = surface;
+ blending_ = blending;
+ transform_ = transform;
+ composition_type_ = composition_type;
+ CommonLayerSetup();
+}
+
+// Binds this layer to a direct IonBuffer (GPU compositor output) as its
+// content source and creates the underlying HWC layer.
+void Layer::Setup(const std::shared_ptr<IonBuffer>& buffer,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t z_order) {
+ Reset();
+ surface_index_ = z_order;
+ direct_buffer_ = buffer;
+ blending_ = blending;
+ transform_ = transform;
+ composition_type_ = composition_type;
+ CommonLayerSetup();
+}
+
+// Swaps in the buffer that will be posted on the next PostLayers.
+void Layer::UpdateDirectBuffer(const std::shared_ptr<IonBuffer>& buffer) {
+ direct_buffer_ = buffer;
+}
+
+// Records the blend mode; applied to HWC via UpdateLayerSettings().
+void Layer::SetBlending(hwc2_blend_mode_t blending) { blending_ = blending; }
+
+// Records the z-order; applied to HWC via UpdateLayerSettings().
+void Layer::SetZOrderIndex(int z_index) { surface_index_ = z_index; }
+
+// Returns the layer's current content buffer: the direct buffer if set,
+// else the acquired surface buffer, else nullptr. Non-owning pointer.
+IonBuffer* Layer::GetBuffer() {
+ if (direct_buffer_)
+ return direct_buffer_.get();
+ else if (acquired_buffer_.IsAvailable())
+ return acquired_buffer_.buffer()->buffer();
+ else
+ return nullptr;
+}
+
+// Pushes all cached layer properties (composition type, blend mode, display
+// frame, visible region, plane alpha, z-order) to the HWC layer. Errors are
+// logged but not propagated.
+void Layer::UpdateLayerSettings() {
+ if (!IsLayerSetup()) {
+ ALOGE("HardwareComposer: Trying to update layers data on an unused layer.");
+ return;
+ }
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ hwc2_display_t display = HWC_DISPLAY_PRIMARY;
+
+ ret = (int32_t)hwc2_hidl_->setLayerCompositionType(
+ display, hardware_composer_layer_,
+ (Hwc2::IComposerClient::Composition)composition_type_);
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer composition type : %d",
+ ret);
+ // ret = (int32_t) hwc2_hidl_->setLayerTransform(display,
+ // hardware_composer_layer_,
+ // (Hwc2::IComposerClient::Transform)
+ // transform_);
+ // ALOGE_IF(ret, "HardwareComposer: Error setting layer transform : %d", ret);
+
+ // ret = hwc2_funcs_->set_layer_blend_mode_fn_(
+ // hardware_composer_device_, display, hardware_composer_layer_,
+ // blending_);
+ ret = (int32_t)hwc2_hidl_->setLayerBlendMode(
+ display, hardware_composer_layer_,
+ (Hwc2::IComposerClient::BlendMode)blending_);
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer blend mode : %d", ret);
+
+ // The layer always covers the full display.
+ Hwc2::IComposerClient::Rect display_frame;
+ display_frame.left = 0;
+ display_frame.top = 0;
+ display_frame.right = display_metrics_->width;
+ display_frame.bottom = display_metrics_->height;
+ ret = (int32_t)hwc2_hidl_->setLayerDisplayFrame(
+ display, hardware_composer_layer_, display_frame);
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer display frame : %d",
+ ret);
+
+ std::vector<Hwc2::IComposerClient::Rect> visible_region(1);
+ visible_region[0] = display_frame;
+ ret = (int32_t)hwc2_hidl_->setLayerVisibleRegion(
+ display, hardware_composer_layer_, visible_region);
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer visible region : %d",
+ ret);
+
+ ret = (int32_t)hwc2_hidl_->setLayerPlaneAlpha(display,
+ hardware_composer_layer_, 1.0f);
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer plane alpha : %d", ret);
+
+ ret = (int32_t)hwc2_hidl_->setLayerZOrder(display, hardware_composer_layer_,
+ surface_index_);
+ ALOGE_IF(ret, "HardwareComposer: Error, setting z order index : %d", ret);
+}
+
+// Shared tail of both Setup() overloads: creates the HWC layer on the
+// primary display and applies the cached settings to it.
+void Layer::CommonLayerSetup() {
+ int32_t ret = (int32_t)hwc2_hidl_->createLayer(HWC_DISPLAY_PRIMARY,
+ &hardware_composer_layer_);
+
+ ALOGE_IF(ret,
+ "HardwareComposer: Failed to create layer on primary display : %d",
+ ret);
+
+ UpdateLayerSettings();
+}
+
+// Per-frame layer preparation: acquires the latest surface buffer (or uses
+// the direct buffer), switches between DEVICE and SOLID_COLOR composition
+// depending on buffer availability, and pushes the buffer + source crop to
+// the HWC layer.
+void Layer::Prepare() {
+ int right, bottom;
+ buffer_handle_t handle;
+
+ if (surface_) {
+ // Only update the acquired buffer when one is either available or this is
+ // the first time through.
+ if (surface_->IsBufferAvailable()) {
+ // If we previously set this to a solid color layer to stall for time,
+ // revert it to a device layer.
+ if (acquired_buffer_.IsEmpty() &&
+ composition_type_ != HWC2_COMPOSITION_DEVICE) {
+ composition_type_ = HWC2_COMPOSITION_DEVICE;
+ hwc2_hidl_->setLayerCompositionType(
+ HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ (Hwc2::IComposerClient::Composition)HWC2_COMPOSITION_DEVICE);
+ }
+
+ DebugHudData::data.AddLayerFrame(surface_index_);
+ acquired_buffer_.Release(std::move(release_fence_));
+ acquired_buffer_ = surface_->AcquireCurrentBuffer();
+
+ // Basic latency stopgap for when the application misses a frame:
+ // If the application recovers on the 2nd or 3rd (etc) frame after
+ // missing, this code will skip a frame to catch up by checking if
+ // the next frame is also available.
+ if (surface_->IsBufferAvailable()) {
+ DebugHudData::data.SkipLayerFrame(surface_index_);
+ ATRACE_NAME("DropToCatchUp");
+ ATRACE_ASYNC_END("BufferPost", acquired_buffer_.buffer()->id());
+ acquired_buffer_ = surface_->AcquireCurrentBuffer();
+ }
+ ATRACE_ASYNC_END("BufferPost", acquired_buffer_.buffer()->id());
+ } else if (acquired_buffer_.IsEmpty()) {
+ // While we are waiting for a buffer, set this to be an empty layer
+ if (composition_type_ != HWC2_COMPOSITION_SOLID_COLOR) {
+ composition_type_ = HWC2_COMPOSITION_SOLID_COLOR;
+ hwc2_hidl_->setLayerCompositionType(
+ HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ (Hwc2::IComposerClient::Composition)HWC2_COMPOSITION_SOLID_COLOR);
+
+ // Fully transparent black while no content is available.
+ Hwc2::IComposerClient::Color layer_color = {
+ 0, 0, 0, 0,
+ };
+ hwc2_hidl_->setLayerColor(HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ layer_color);
+ }
+ return;
+ }
+ // right/bottom double as the buffer's width/height for the source crop.
+ right = acquired_buffer_.buffer()->width();
+ bottom = acquired_buffer_.buffer()->height();
+ handle = acquired_buffer_.buffer()->native_handle();
+ acquire_fence_fd_.Reset(acquired_buffer_.ClaimAcquireFence().Release());
+ } else {
+ right = direct_buffer_->width();
+ bottom = direct_buffer_->height();
+ handle = direct_buffer_->handle();
+ acquire_fence_fd_.Close();
+ }
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ if (composition_type_ == HWC2_COMPOSITION_DEVICE) {
+ ret = (int32_t)hwc2_hidl_->setLayerBuffer(HWC_DISPLAY_PRIMARY,
+ hardware_composer_layer_, handle,
+ acquire_fence_fd_.Get());
+
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer buffer : %d", ret);
+ }
+
+ // Source crop only needs to be applied once per layer setup.
+ if (!surface_rect_functions_applied_) {
+ Hwc2::IComposerClient::FRect crop_rect = {
+ 0, 0, static_cast<float>(right), static_cast<float>(bottom),
+ };
+ hwc2_hidl_->setLayerSourceCrop(HWC_DISPLAY_PRIMARY,
+ hardware_composer_layer_, crop_rect);
+
+ // NOTE(review): ret is not updated with setLayerSourceCrop's result, so
+ // this ALOGE_IF checks a stale value — confirm whether the return should
+ // be captured here.
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer source crop : %d",
+ ret);
+
+// TODO(skiazyk): why is this ifdef'd out. Is if a driver-specific issue where
+// it must/cannot be called?
+#ifdef QCOM_BSP
+ hwc_rect_t damage_rect = {
+ 0, 0, right, bottom,
+ };
+ hwc_region_t damage = {
+ 1, &damage_rect,
+ };
+ // ret = hwc2_funcs_->set_layer_surface_damage(
+ // hardware_composer_device_, HWC_DISPLAY_PRIMARY,
+ // hardware_composer_layer_, damage);
+ // uses a std::vector as the listing
+ // hwc2_hidl_->setLayerSurfaceDamage(HWC_DISPLAY_PRIMARY,
+ // hardware_composer_layer_, vector here);
+
+ ALOGE_IF(ret, "HardwareComposer: Error settings layer surface damage : %d",
+ ret);
+#endif
+
+ surface_rect_functions_applied_ = true;
+ }
+}
+
+// Takes ownership of the release fence returned by HWC after a post.
+void Layer::Finish(int release_fence_fd) {
+ release_fence_.Reset(release_fence_fd);
+}
+
+// Closes the acquire fence when the prepared frame is dropped before posting.
+void Layer::Drop() { acquire_fence_fd_.Close(); }
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/hardware_composer.h b/libs/vr/libvrflinger/hardware_composer.h
new file mode 100644
index 0000000..cfe8c84
--- /dev/null
+++ b/libs/vr/libvrflinger/hardware_composer.h
@@ -0,0 +1,406 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
+
+#include <log/log.h>
+#include <hardware/gralloc.h>
+#include <hardware/hardware.h>
+#include <hardware/hwcomposer2.h>
+
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/sync_util.h>
+
+#include <array>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <tuple>
+#include <vector>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/frame_time_history.h>
+#include <private/dvr/sync_util.h>
+
+#include "acquired_buffer.h"
+#include "compositor.h"
+#include "display_surface.h"
+
+#include "DisplayHardware/ComposerHal.h"
+
+// Hardware composer HAL doesn't define HWC_TRANSFORM_NONE as of this writing.
+#ifndef HWC_TRANSFORM_NONE
+#define HWC_TRANSFORM_NONE static_cast<hwc_transform_t>(0)
+#endif
+
+namespace android {
+namespace dvr {
+
+// Basic display metrics for physical displays. Dimensions and densities are
+// relative to the physical display orientation, which may be different from the
+// logical display orientation exposed to applications.
+struct HWCDisplayMetrics {
+ int width;   // Physical width in pixels.
+ int height;  // Physical height in pixels.
+ struct {
+ int x;  // Horizontal density (dots per inch).
+ int y;  // Vertical density (dots per inch).
+ } dpi;
+ int vsync_period_ns;  // Vsync period in nanoseconds.
+};
+
+// Layer represents the connection between a hardware composer layer and the
+// source supplying buffers for the layer's contents.
+class Layer {
+ public:
+ Layer();
+
+ // Sets the hardware composer layer and display metrics that this Layer should
+ // use each Prepare cycle. This class does not own either of these pointers,
+ // which MUST remain valid for its lifetime. This method MUST be called once
+ // in the life of the instance before any other method is valid to call.
+ void Initialize(Hwc2::Composer* hwc2_hidl, HWCDisplayMetrics* metrics);
+
+ // Releases any shared pointers and fence handles held by this instance.
+ void Reset();
+
+ // Sets up the layer to use a display surface as its content source. The Layer
+ // will automatically handle ACQUIRE/RELEASE phases for the surface's buffer
+ // train every frame.
+ //
+ // |blending| receives HWC_BLENDING_* values.
+ // |transform| receives HWC_TRANSFORM_* values.
+ // |composition_type| receives either HWC_FRAMEBUFFER for most layers or
+ // HWC_FRAMEBUFFER_TARGET (unless you know what you are doing).
+ // |index| is the index of this surface in the DisplaySurface array.
+ void Setup(const std::shared_ptr<DisplaySurface>& surface,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t index);
+
+ // Sets up the layer to use a direct buffer as its content source. No special
+ // handling of the buffer is performed; responsibility for updating or
+ // changing the buffer each frame is on the caller.
+ //
+ // |blending| receives HWC_BLENDING_* values.
+ // |transform| receives HWC_TRANSFORM_* values.
+ // |composition_type| receives either HWC_FRAMEBUFFER for most layers or
+ // HWC_FRAMEBUFFER_TARGET (unless you know what you are doing).
+ void Setup(const std::shared_ptr<IonBuffer>& buffer,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t z_order);
+
+ // Layers that use a direct IonBuffer should call this each frame to update
+ // which buffer will be used for the next PostLayers.
+ void UpdateDirectBuffer(const std::shared_ptr<IonBuffer>& buffer);
+
+ // Sets up the hardware composer layer for the next frame. When the layer is
+ // associated with a display surface, this method automatically ACQUIRES a new
+ // buffer if one is available.
+ void Prepare();
+
+ // After calling prepare, if this frame is to be dropped instead of passing
+ // along to the HWC, call Drop to close the contained fence(s).
+ void Drop();
+
+ // Performs fence bookkeeping after the frame has been posted to hardware
+ // composer.
+ void Finish(int release_fence_fd);
+
+ // Sets the blending for the layer. |blending| receives HWC_BLENDING_* values.
+ void SetBlending(hwc2_blend_mode_t blending);
+
+ // Sets the Z-order of this layer
+ void SetZOrderIndex(int surface_index);
+
+ // Gets the current IonBuffer associated with this layer. Ownership of the
+ // buffer DOES NOT pass to the caller and the pointer is not guaranteed to
+ // remain valid across calls to Layer::Setup(), Layer::Prepare(), or
+ // Layer::Reset(). YOU HAVE BEEN WARNED.
+ IonBuffer* GetBuffer();
+
+ // Current HWC2 composition type (DEVICE or SOLID_COLOR while stalling).
+ hwc2_composition_t GetCompositionType() const { return composition_type_; }
+
+ // Handle of the underlying hardware composer layer (0 if none created).
+ hwc2_layer_t GetLayerHandle() const { return hardware_composer_layer_; }
+
+ // True when this layer sources from a direct IonBuffer.
+ bool UsesDirectBuffer() const { return direct_buffer_ != nullptr; }
+
+ // True when either content source (buffer or surface) has been set up.
+ bool IsLayerSetup() const {
+ return direct_buffer_ != nullptr || surface_ != nullptr;
+ }
+
+ // Applies all of the settings to this layer using the hwc functions
+ void UpdateLayerSettings();
+
+ // Id of the bound display surface, or -1 for direct-buffer layers.
+ int GetSurfaceId() const {
+ if (surface_ != nullptr) {
+ return surface_->surface_id();
+ } else {
+ return -1;
+ }
+ }
+
+ private:
+ void CommonLayerSetup();
+
+ // Non-owning composer HAL proxy; set by Initialize().
+ Hwc2::Composer* hwc2_hidl_;
+
+ // Original display surface array index for tracking purposes.
+ size_t surface_index_;
+
+ // The hardware composer layer and metrics to use during the prepare cycle.
+ hwc2_layer_t hardware_composer_layer_;
+ HWCDisplayMetrics* display_metrics_;
+
+ // Layer properties used to setup the hardware composer layer during the
+ // Prepare phase.
+ hwc2_blend_mode_t blending_;
+ hwc_transform_t transform_;
+ hwc2_composition_t composition_type_;
+
+ // These two members are mutually exclusive. When direct_buffer_ is set the
+ // Layer gets its contents directly from that buffer; when surface_ is set the
+ // Layer gets it contents from the surface's buffer train.
+ std::shared_ptr<IonBuffer> direct_buffer_;
+ std::shared_ptr<DisplaySurface> surface_;
+
+ // State when associated with a display surface.
+ AcquiredBuffer acquired_buffer_;
+ pdx::LocalHandle release_fence_;
+
+ // Acquire fence for the currently prepared buffer.
+ pdx::LocalHandle acquire_fence_fd_;
+ // True once the source crop has been applied (done once per setup).
+ bool surface_rect_functions_applied_;
+
+ Layer(const Layer&) = delete;
+ void operator=(const Layer&) = delete;
+};
+
+// HardwareComposer encapsulates the hardware composer HAL, exposing a
+// simplified API to post buffers to the display.
+class HardwareComposer {
+ public:
+ // Type for vsync callback.
+ using VSyncCallback = std::function<void(int, int64_t, int64_t, uint32_t)>;
+
+ // Since there is no universal way to query the number of hardware layers,
+ // just set it to 4 for now.
+ static constexpr int kMaxHardwareLayers = 4;
+
+ HardwareComposer();
+ HardwareComposer(Hwc2::Composer* hidl);
+ ~HardwareComposer();
+
+ // Pause/resume the post thread. Returns true on success.
+ bool Suspend();
+ bool Resume();
+ bool IsSuspended() const { return pause_post_thread_; }
+
+ // Get the HMD display metrics for the current display.
+ DisplayMetrics GetHmdDisplayMetrics() const;
+
+ // Thin wrappers over the HWC2 display attribute/config queries.
+ int32_t GetDisplayAttribute(hwc2_display_t display, hwc2_config_t config,
+ hwc2_attribute_t attributes,
+ int32_t* out_value) const;
+ int32_t GetDisplayMetrics(hwc2_display_t display, hwc2_config_t config,
+ HWCDisplayMetrics* out_metrics) const;
+ void Dump(char* buffer, uint32_t* out_size);
+
+ // Installs the callback invoked on each vsync from the post thread.
+ void SetVSyncCallback(VSyncCallback callback);
+
+ // Metrics of the logical display, which is always landscape.
+ int DisplayWidth() const { return display_metrics_.width; }
+ int DisplayHeight() const { return display_metrics_.height; }
+ HWCDisplayMetrics display_metrics() const { return display_metrics_; }
+
+ // Metrics of the native display, which depends on the specific hardware
+ // implementation of the display.
+ HWCDisplayMetrics native_display_metrics() const {
+ return native_display_metrics_;
+ }
+
+ std::shared_ptr<IonBuffer> framebuffer_target() const {
+ return framebuffer_target_;
+ }
+
+ // Set the display surface stack to compose to the display each frame.
+ int SetDisplaySurfaces(std::vector<std::shared_ptr<DisplaySurface>> surfaces);
+
+ Compositor* GetCompositor() { return &compositor_; }
+
+ private:
+ int32_t EnableVsync(bool enabled);
+ int32_t SetPowerMode(hwc2_display_t display, hwc2_power_mode_t mode);
+
+ // Receives HWC2 HIDL callbacks; all handlers are currently no-ops.
+ class ComposerCallback : public Hwc2::IComposerCallback {
+ public:
+ ComposerCallback() {}
+
+ hardware::Return<void> onHotplug(Hwc2::Display /*display*/,
+ Connection /*connected*/) override {
+ // TODO(skiazyk): depending on how the server is implemented, we might
+ // have to set it up to synchronize with receiving this event, as it can
+ // potentially be a critical event for setting up state within the
+ // hwc2 module. That is, we (technically) should not call any other hwc
+ // methods until this method has been called after registering the
+ // callbacks.
+ return hardware::Void();
+ }
+
+ hardware::Return<void> onRefresh(Hwc2::Display /*display*/) override {
+ return hardware::Void();
+ }
+
+ hardware::Return<void> onVsync(Hwc2::Display /*display*/,
+ int64_t /*timestamp*/) override {
+ return hardware::Void();
+ }
+ };
+
+ // HWC2 validate/present wrappers for a display.
+ int32_t Validate(hwc2_display_t display);
+ int32_t Present(hwc2_display_t display);
+
+ // Writes |brightness| to the backlight sysfs node.
+ void SetBacklightBrightness(int brightness);
+
+ // Posts the prepared layers to HWC; body of the posting loop thread.
+ void PostLayers(bool is_geometry_changed);
+ void PostThread();
+
+ // Vsync-event plumbing used by the post thread (sysfs/timerfd based).
+ int ReadWaitPPState();
+ int BlockUntilVSync();
+ int ReadVSyncTimestamp(int64_t* timestamp);
+ int WaitForVSync(int64_t* timestamp);
+ int SleepUntil(int64_t wakeup_timestamp);
+
+ bool IsFramePendingInDriver() { return ReadWaitPPState() == 1; }
+
+ // Returns true if the layer config changed, false otherwise
+ bool UpdateLayerConfig(
+ std::vector<std::shared_ptr<DisplaySurface>>* compositor_surfaces);
+ void PostCompositorBuffers(
+ const std::vector<std::shared_ptr<DisplaySurface>>& compositor_surfaces);
+
+ void UpdateDisplayState();
+
+ // A frame's render start time paired with its completion fence; queued
+ // until the fence signals so the duration can be sampled into history.
+ struct FrameTimeMeasurementRecord {
+ int64_t start_time;
+ pdx::LocalHandle fence;
+
+ FrameTimeMeasurementRecord(FrameTimeMeasurementRecord&&) = default;
+ FrameTimeMeasurementRecord& operator=(FrameTimeMeasurementRecord&&) =
+ default;
+ FrameTimeMeasurementRecord(const FrameTimeMeasurementRecord&) = delete;
+ FrameTimeMeasurementRecord& operator=(const FrameTimeMeasurementRecord&) =
+ delete;
+ };
+
+ // Drains finished frames from |backlog| into |history|; force-retires the
+ // oldest entry if the backlog is still full.
+ void UpdateFrameTimeHistory(std::vector<FrameTimeMeasurementRecord>* backlog,
+ int backlog_max,
+ FenceInfoBuffer* fence_info_buffer,
+ FrameTimeHistory* history);
+
+ // Returns true if the frame finished rendering, false otherwise. If the frame
+ // finished the frame end time is stored in timestamp. Doesn't block.
+ bool CheckFrameFinished(int frame_fence_fd,
+ FenceInfoBuffer* fence_info_buffer,
+ int64_t* timestamp);
+
+ void HandlePendingScreenshots();
+
+ void PausePostThread();
+
+ // Hardware composer HAL device.
+ std::unique_ptr<Hwc2::Composer> hwc2_hidl_;
+ sp<ComposerCallback> callbacks_;
+
+ // Display metrics of the physical display.
+ HWCDisplayMetrics native_display_metrics_;
+ // Display metrics of the logical display, adjusted so that orientation is
+ // landscape.
+ HWCDisplayMetrics display_metrics_;
+ // Transform required to get from native to logical display orientation.
+ hwc_transform_t display_transform_;
+
+ // Buffer for the background layer required by hardware composer.
+ std::shared_ptr<IonBuffer> framebuffer_target_;
+
+ // Protects access to the display surfaces and logical layers.
+ std::mutex layer_mutex_;
+
+ // Active display surfaces configured by the display manager.
+ std::vector<std::shared_ptr<DisplaySurface>> display_surfaces_;
+ std::vector<std::shared_ptr<DisplaySurface>> added_display_surfaces_;
+ bool display_surfaces_updated_;
+ bool hardware_layers_need_update_;
+
+ // Cache whether the display was turned on by us
+ bool display_on_; // TODO(hendrikw): The display is always on. Revisit.
+
+ // Layer array for handling buffer flow into hardware composer layers.
+ // Note that the first array is the actual storage for the layer objects,
+ // and the latter is an array of pointers, which can be freely re-arranged
+ // without messing up the underlying objects.
+ std::array<Layer, kMaxHardwareLayers> layer_storage_;
+ std::array<Layer*, kMaxHardwareLayers> layers_;
+ size_t active_layer_count_;
+
+ // Set by the Post thread to the index of the GPU compositing output
+ // buffer in the layers_ array.
+ Layer* gpu_layer_;
+
+ // Handler to hook vsync events outside of this class.
+ VSyncCallback vsync_callback_;
+
+ // Thread and condition for managing the layer posting thread. This thread
+ // wakes up a short time before vsync to hand buffers to post processing and
+ // the results to hardware composer.
+ std::thread post_thread_;
+
+ // Control variables to control the state of the post thread
+ pdx::LocalHandle terminate_post_thread_event_fd_;
+ bool pause_post_thread_;
+ std::mutex thread_pause_mutex_;
+ std::condition_variable thread_pause_semaphore_;
+
+ // Backlight LED brightness sysfs node.
+ pdx::LocalHandle backlight_brightness_fd_;
+
+ // Primary display vsync event sysfs node.
+ pdx::LocalHandle primary_display_vsync_event_fd_;
+
+ // Primary display wait_pingpong state sysfs node.
+ pdx::LocalHandle primary_display_wait_pp_fd_;
+
+ // VSync sleep timerfd.
+ pdx::LocalHandle vsync_sleep_timer_fd_;
+
+ // The timestamp of the last vsync.
+ int64_t last_vsync_timestamp_;
+
+ // Vsync count since display on.
+ uint32_t vsync_count_;
+
+ // Counter tracking the number of skipped frames.
+ int frame_skip_count_;
+
+ // After construction, only accessed on post_thread_.
+ Compositor compositor_;
+
+ // Fd array for tracking retire fences that are returned by hwc. This allows
+ // us to detect when the display driver begins queuing frames.
+ std::vector<pdx::LocalHandle> retire_fence_fds_;
+
+ // Pose client for frame count notifications. Pose client predicts poses
+ // out to display frame boundaries, so we need to tell it about vsyncs.
+ DvrPose* pose_client_;
+
+ // Static trampolines for the HWC2 C callback interface.
+ static void HwcRefresh(hwc2_callback_data_t data, hwc2_display_t display);
+ static void HwcVSync(hwc2_callback_data_t data, hwc2_display_t display,
+ int64_t timestamp);
+ static void HwcHotplug(hwc2_callback_data_t callbackData,
+ hwc2_display_t display, hwc2_connection_t connected);
+
+ HardwareComposer(const HardwareComposer&) = delete;
+ void operator=(const HardwareComposer&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
diff --git a/libs/vr/libvrflinger/include/dvr/vr_flinger.h b/libs/vr/libvrflinger/include/dvr/vr_flinger.h
new file mode 100644
index 0000000..04c8363
--- /dev/null
+++ b/libs/vr/libvrflinger/include/dvr/vr_flinger.h
@@ -0,0 +1,33 @@
+#ifndef ANDROID_DVR_VR_FLINGER_H_
+#define ANDROID_DVR_VR_FLINGER_H_
+
+#include <thread>
+#include <memory>
+
+namespace android {
+
+namespace Hwc2 {
+class Composer;
+} // namespace Hwc2
+
+namespace dvr {
+
+class DisplayService;
+
+class VrFlinger {
+ public:
+ VrFlinger();
+ int Run(Hwc2::Composer* hidl);
+
+ void EnterVrMode();
+ void ExitVrMode();
+
+ private:
+ std::thread displayd_thread_;
+ std::shared_ptr<android::dvr::DisplayService> display_service_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_VR_FLINGER_H_
diff --git a/libs/vr/libvrflinger/screenshot_service.cpp b/libs/vr/libvrflinger/screenshot_service.cpp
new file mode 100644
index 0000000..e174943
--- /dev/null
+++ b/libs/vr/libvrflinger/screenshot_service.cpp
@@ -0,0 +1,181 @@
+#include "screenshot_service.h"
+
+#include <utils/Trace.h>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/display_types.h>
+
+using android::pdx::Message;
+using android::pdx::MessageInfo;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::RemoteMethodError;
+using android::pdx::rpc::RemoteMethodReturn;
+
+namespace android {
+namespace dvr {
+
+ScreenshotService::~ScreenshotService() { instance_ = nullptr; }
+
+int ScreenshotService::HandleMessage(pdx::Message& message) {
+ switch (message.GetOp()) {
+ case DisplayScreenshotRPC::GetFormat::Opcode:
+ DispatchRemoteMethod<DisplayScreenshotRPC::GetFormat>(
+ *this, &ScreenshotService::OnGetFormat, message);
+ return 0;
+
+ case DisplayScreenshotRPC::TakeScreenshot::Opcode:
+ DispatchRemoteMethod<DisplayScreenshotRPC::TakeScreenshot>(
+ *this, &ScreenshotService::OnTakeScreenshot, message);
+ return 0;
+
+ default:
+ return Service::HandleMessage(message);
+ }
+}
+
+int ScreenshotService::OnGetFormat(pdx::Message&) {
+ return HAL_PIXEL_FORMAT_RGB_888;
+}
+
+ScreenshotData ScreenshotService::OnTakeScreenshot(pdx::Message& message,
+ int layer_index) {
+ AddWaiter(std::move(message), layer_index);
+ return {};
+}
+
+void ScreenshotService::AddWaiter(pdx::Message&& message, int layer_index) {
+ std::lock_guard<std::mutex> lock(mutex_);
+ waiters_.emplace_back(std::move(message), layer_index);
+}
+
+void ScreenshotService::TakeIfNeeded(
+ std::array<Layer*, HardwareComposer::kMaxHardwareLayers>& hw_layers,
+ Compositor& compositor) {
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ // Send the buffer contents to all of our waiting clients.
+ for (auto& waiter : waiters_) {
+ if (waiter.IsDone())
+ continue;
+
+ if (waiter.layer_index() == 0) {
+ ALOGE(
+ "ScreenshotService::TakeIfNeeded: Capturing the composited display "
+ "output is not yet supported.");
+
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ if (waiter.layer_index() > 0) {
+ // Check for hardware layer screenshot requests.
+ // Hardware layers are requested with positive indices starting at 1.
+ const size_t layer_index = static_cast<size_t>(waiter.layer_index() - 1);
+
+ if (layer_index >= hw_layers.size()) {
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ auto buffer = hw_layers[layer_index]->GetBuffer();
+ if (!buffer) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ auto data = compositor.ReadBufferPixels(buffer);
+ if (data.empty()) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ Take(&waiter, data.data(), buffer->width(), buffer->height(),
+ buffer->width());
+ } else {
+ // Check for compositor input layer screenshot requests.
+ // Prewarp surfaces are requested with negative indices starting at -1.
+ const size_t layer_index = static_cast<size_t>(-waiter.layer_index() - 1);
+
+ if (layer_index >= compositor.GetLayerCount()) {
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ int width = 0;
+ int height = 0;
+ auto data = compositor.ReadLayerPixels(layer_index, &width, &height);
+ if (data.empty()) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ Take(&waiter, data.data(), width, height, width);
+ }
+ }
+
+ // Reply with error to requests that did not match up with a source layer.
+ for (auto& waiter : waiters_) {
+ if (!waiter.IsDone())
+ waiter.Error(EAGAIN);
+ }
+ waiters_.clear();
+}
+
+void ScreenshotWaiter::Reply(const ScreenshotData& screenshot) {
+ ALOGI("Returning screenshot: size=%zu recv_size=%zu",
+ screenshot.buffer.size(), message_.GetReceiveLength());
+ RemoteMethodReturn<DisplayScreenshotRPC::TakeScreenshot>(message_,
+ screenshot);
+}
+
+void ScreenshotWaiter::Error(int error) { RemoteMethodError(message_, error); }
+
+void ScreenshotService::Take(ScreenshotWaiter* waiter, const void* rgba_data,
+ int32_t width, int32_t height, int buffer_stride) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+
+ bool is_portrait = height > width;
+ if (is_portrait) {
+ std::swap(width, height);
+ }
+ int response_stride = width;
+
+  // Convert from RGBA to RGB and, if in portrait, rotate to landscape; store
+ // the result in the response buffer.
+ ScreenshotData screenshot{width, height,
+ std::vector<uint8_t>(width * height * 3)};
+
+ const auto rgba_bytes = static_cast<const uint8_t*>(rgba_data);
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ // If the screenshot is in portrait mode, rotate into landscape mode.
+ const int response_index = is_portrait
+ ? (height - j - 1) * response_stride + i
+ : j * response_stride + i;
+ const int buffer_index =
+ is_portrait ? i * buffer_stride + j : j * buffer_stride + i;
+ screenshot.buffer[response_index * 3 + 0] =
+ rgba_bytes[buffer_index * 4 + 0];
+ screenshot.buffer[response_index * 3 + 1] =
+ rgba_bytes[buffer_index * 4 + 1];
+ screenshot.buffer[response_index * 3 + 2] =
+ rgba_bytes[buffer_index * 4 + 2];
+ }
+ }
+
+ waiter->Reply(screenshot);
+}
+
+ScreenshotService::ScreenshotService()
+ : BASE("ScreenshotService",
+ Endpoint::Create(DisplayScreenshotRPC::kClientPath)) {
+ instance_ = this;
+}
+
+ScreenshotService* ScreenshotService::GetInstance() { return instance_; }
+
+ScreenshotService* ScreenshotService::instance_ = nullptr;
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/screenshot_service.h b/libs/vr/libvrflinger/screenshot_service.h
new file mode 100644
index 0000000..ec4c527
--- /dev/null
+++ b/libs/vr/libvrflinger/screenshot_service.h
@@ -0,0 +1,82 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
+
+#include <pdx/rpc/pointer_wrapper.h>
+#include <pdx/service.h>
+#include <private/dvr/ion_buffer.h>
+
+#include <mutex>
+#include <vector>
+
+#include "hardware_composer.h"
+
+namespace android {
+namespace dvr {
+
+class ScreenshotWaiter {
+ public:
+ explicit ScreenshotWaiter(pdx::Message&& message, int layer_index)
+ : message_(std::move(message)), layer_index_(layer_index) {}
+ ScreenshotWaiter(ScreenshotWaiter&&) = default;
+
+ void Reply(const ScreenshotData& screenshot);
+ void Error(int error);
+
+ bool IsDone() const { return message_.replied(); }
+ int layer_index() const { return layer_index_; }
+
+ private:
+ pdx::Message message_;
+ int layer_index_;
+
+ ScreenshotWaiter(const ScreenshotWaiter&) = delete;
+ void operator=(const ScreenshotWaiter&) = delete;
+};
+
+// The screenshot service allows clients to obtain screenshots from displayd.
+class ScreenshotService : public pdx::ServiceBase<ScreenshotService> {
+ public:
+ ~ScreenshotService();
+
+ int HandleMessage(pdx::Message& message) override;
+
+ // Returns true if there is a pending screenshot request.
+ bool IsScreenshotRequestPending() const {
+ std::lock_guard<std::mutex> lock(mutex_);
+ return !waiters_.empty();
+ }
+
+ // If any clients are currently waiting for a screenshot, read back the
+ // contents of requested layers and send the resulting
+ // image to the clients.
+ void TakeIfNeeded(
+ std::array<Layer*, HardwareComposer::kMaxHardwareLayers>& hw_layers,
+ Compositor& compositor);
+
+ static ScreenshotService* GetInstance();
+
+ private:
+ friend BASE;
+
+ ScreenshotService();
+
+ void AddWaiter(pdx::Message&& message, int layer_index);
+
+ ScreenshotData OnTakeScreenshot(pdx::Message& message, int index);
+ int OnGetFormat(pdx::Message& message);
+
+ // Copy the given screenshot data into the message reply.
+ void Take(ScreenshotWaiter* waiter, const void* rgba_data, int32_t width,
+ int32_t height, int buffer_stride);
+
+ static ScreenshotService* instance_;
+
+ // Protects access to subsequent member variables.
+ mutable std::mutex mutex_;
+ std::vector<ScreenshotWaiter> waiters_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
diff --git a/libs/vr/libvrflinger/surface_channel.cpp b/libs/vr/libvrflinger/surface_channel.cpp
new file mode 100644
index 0000000..8aa220b
--- /dev/null
+++ b/libs/vr/libvrflinger/surface_channel.cpp
@@ -0,0 +1,44 @@
+#include "surface_channel.h"
+
+using android::pdx::BorrowedChannelHandle;
+using android::pdx::Message;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+int SurfaceChannel::HandleMessage(Message& message) {
+ switch (message.GetOp()) {
+ case DisplayRPC::GetMetadataBuffer::Opcode:
+ DispatchRemoteMethod<DisplayRPC::GetMetadataBuffer>(
+ *this, &SurfaceChannel::OnGetMetadataBuffer, message);
+ break;
+ }
+
+ return 0;
+}
+
+BorrowedChannelHandle SurfaceChannel::OnGetMetadataBuffer(Message& message) {
+ if (EnsureMetadataBuffer()) {
+ return metadata_buffer_->GetChannelHandle().Borrow();
+ } else {
+ REPLY_ERROR_RETURN(message, -ENOMEM, {});
+ }
+}
+
+bool SurfaceChannel::EnsureMetadataBuffer() {
+ if (!metadata_buffer_) {
+ metadata_buffer_ =
+ BufferProducer::CreateUncachedBlob(metadata_size());
+ if (!metadata_buffer_) {
+ ALOGE(
+ "DisplaySurface::EnsureMetadataBuffer: could not allocate metadata "
+ "buffer");
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/surface_channel.h b/libs/vr/libvrflinger/surface_channel.h
new file mode 100644
index 0000000..870e1a4
--- /dev/null
+++ b/libs/vr/libvrflinger/surface_channel.h
@@ -0,0 +1,63 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
+
+#include <pdx/service.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+class SurfaceChannel : public pdx::Channel {
+ public:
+ SurfaceChannel(DisplayService* service, int channel_id, SurfaceType type,
+ size_t metadata_size)
+ : service_(service),
+ surface_id_(channel_id),
+ type_(type),
+ metadata_size_(metadata_size) {}
+
+ ~SurfaceChannel() override = default;
+
+ DisplayService* service() const { return service_; }
+ int surface_id() const { return surface_id_; }
+ SurfaceType type() const { return type_; }
+ size_t metadata_size() const { return metadata_size_; }
+
+ pdx::LocalHandle GetMetadataBufferFd() {
+ return EnsureMetadataBuffer() ? metadata_buffer_->GetBlobFd()
+ : pdx::LocalHandle{};
+ }
+
+ // Dispatches surface channel messages to the appropriate handlers. This
+ // handler runs on the displayd message dispatch thread.
+ virtual int HandleMessage(pdx::Message& message);
+
+ protected:
+ // Contains the surface metadata.
+ std::shared_ptr<BufferProducer> metadata_buffer_;
+
+ // Returns the metadata buffer for this surface. The first call allocates the
+ // buffer, while subsequent calls return the same buffer.
+ pdx::BorrowedChannelHandle OnGetMetadataBuffer(pdx::Message& message);
+
+ // Allocates the single metadata buffer for this surface unless it is already
+ // allocated. Idempotent when called multiple times.
+ bool EnsureMetadataBuffer();
+
+ private:
+ DisplayService* service_;
+ int surface_id_;
+ SurfaceType type_;
+ size_t metadata_size_;
+
+ SurfaceChannel(const SurfaceChannel&) = delete;
+ void operator=(const SurfaceChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
diff --git a/libs/vr/libvrflinger/video_compositor.cpp b/libs/vr/libvrflinger/video_compositor.cpp
new file mode 100644
index 0000000..6b39a3c
--- /dev/null
+++ b/libs/vr/libvrflinger/video_compositor.cpp
@@ -0,0 +1,129 @@
+#include "video_compositor.h"
+
+#include <EGL/eglext.h>
+#include <GLES2/gl2ext.h>
+
+#include <private/dvr/debug.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+VideoCompositor::Texture::Texture(
+ EGLDisplay display, const std::shared_ptr<BufferConsumer>& buffer_consumer)
+ : display_(display),
+ image_(EGL_NO_IMAGE_KHR),
+ texture_id_(0),
+ buffer_consumer_(buffer_consumer) {}
+
+VideoCompositor::Texture::~Texture() {
+ if (image_ != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(display_, image_);
+ if (texture_id_ != 0)
+ glDeleteTextures(1, &texture_id_);
+}
+
+GLuint VideoCompositor::Texture::EnsureTextureReady() {
+ if (!image_) {
+ native_buffer_ = new NativeBuffer(buffer_consumer_);
+ CHECK_GL();
+
+ image_ = eglCreateImageKHR(
+ display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+ if (!image_) {
+ ALOGE("Failed to create EGLImage.");
+ return 0;
+ }
+
+ glGenTextures(1, &texture_id_);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image_);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
+ CHECK_GL();
+ }
+
+ return texture_id_;
+}
+
+void VideoCompositor::Texture::Release() {
+ const int ret = buffer_consumer_->Release({});
+ if (ret < 0) {
+ ALOGE(
+ "VideoCompositor::Texture::Release: Failed to release buffer, error: "
+ "%s",
+ strerror(-ret));
+ }
+}
+
+VideoCompositor::VideoCompositor(
+ const std::shared_ptr<VideoMeshSurface>& surface,
+ const volatile DisplaySurfaceMetadata* display_surface_metadata)
+ : surface_(surface),
+ consumer_queue_(surface->GetConsumerQueue()),
+ transform_metadata_(display_surface_metadata),
+ active_texture_slot_(-1) {}
+
+GLuint VideoCompositor::GetActiveTextureId(EGLDisplay display) {
+ size_t slot;
+ VideoMeshSurfaceBufferMetadata metadata;
+
+ while (true) {
+    // A naive way to pick the active texture: always dequeue all buffers from
+ // the queue until it's empty. This works well as long as video frames are
+ // queued in order from the producer side.
+ // TODO(jwcai) Use |metadata.timestamp_ns| to schedule video frames
+ // accurately.
+ auto buffer_consumer = consumer_queue_->Dequeue(0, &slot, &metadata);
+
+ if (buffer_consumer) {
+ // Create a new texture if it hasn't been created yet, or the same slot
+ // has a new |buffer_consumer|.
+ if (textures_[slot] == nullptr ||
+ textures_[slot]->event_fd() != buffer_consumer->event_fd()) {
+ textures_[slot] =
+ std::unique_ptr<Texture>(new Texture(display, buffer_consumer));
+ }
+
+ if (active_texture_slot_ != static_cast<int>(slot)) {
+ if (active_texture_slot_ >= 0) {
+ // Release the last active texture and move on to use the new one.
+ textures_[active_texture_slot_]->Release();
+ }
+ active_texture_slot_ = slot;
+ }
+ } else {
+ break;
+ }
+ }
+
+ if (active_texture_slot_ < 0) {
+ // No texture is active yet.
+ return 0;
+ }
+
+ return textures_[active_texture_slot_]->EnsureTextureReady();
+}
+
+mat4 VideoCompositor::GetTransform(int eye, size_t render_buffer_index) {
+ volatile const VideoMeshSurfaceMetadata* transform_metadata =
+ surface_->GetMetadataBufferPtr();
+
+ mat4 screen_transform;
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ screen_transform(i, j) =
+ transform_metadata->transform[render_buffer_index][eye].val[i][j];
+ }
+ }
+
+ return screen_transform;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/video_compositor.h b/libs/vr/libvrflinger/video_compositor.h
new file mode 100644
index 0000000..d0e72e1
--- /dev/null
+++ b/libs/vr/libvrflinger/video_compositor.h
@@ -0,0 +1,84 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <private/dvr/buffer_hub_queue_core.h>
+#include <private/dvr/types.h>
+
+#include <vector>
+
+#include "display_surface.h"
+#include "video_mesh_surface.h"
+
+namespace android {
+namespace dvr {
+
+using pdx::LocalHandle;
+
+// Manages video buffer consumers, texture mapping, and playback timing.
+class VideoCompositor {
+ public:
+ VideoCompositor(
+ const std::shared_ptr<VideoMeshSurface>& surface,
+ const volatile DisplaySurfaceMetadata* display_surface_metadata);
+
+ int surface_id() const { return surface_ ? surface_->surface_id() : -1; }
+
+ // Returns a GL texture id that should be composited by displayd during the
+ // current rendering loop. Note that this function must be called in
+ // displayd's GL context.
+ GLuint GetActiveTextureId(EGLDisplay display);
+
+  // Returns a basic video mesh transform.
+ mat4 GetTransform(int eye, size_t render_buffer_index);
+
+ private:
+ class Texture {
+ public:
+ Texture(EGLDisplay display,
+ const std::shared_ptr<BufferConsumer>& buffer_consumer);
+ ~Texture();
+
+ // Returns the |event_fd| of the underlying buffer consumer. Caller can use
+    // this to decide whether the Texture needs to be recreated for a different
+ // buffer consumer.
+ int event_fd() const { return buffer_consumer_->event_fd(); }
+
+ // Method to map a dvr::BufferConsumer to a GL texture within the current GL
+ // context. If the current Texture object's |image_| hasn't been
+ // initialized, the method will do so based on the |buffer_consumer| and a
+ // new GL texture will be generated, cached, and returned. Otherwise, the
+ // cached |texture_id_| will be returned directly.
+ GLuint EnsureTextureReady();
+
+ // Signal bufferhub that the texture is done rendering so that the buffer
+ // can be re-gained by the producer for future use.
+ void Release();
+
+ private:
+ using NativeBuffer = BufferHubQueueCore::NativeBuffer;
+
+ EGLDisplay display_;
+ EGLImageKHR image_;
+ GLuint texture_id_;
+ sp<NativeBuffer> native_buffer_;
+ std::shared_ptr<BufferConsumer> buffer_consumer_;
+ };
+
+ std::shared_ptr<VideoMeshSurface> surface_;
+ std::shared_ptr<ConsumerQueue> consumer_queue_;
+ std::array<std::unique_ptr<Texture>, BufferHubQueue::kMaxQueueCapacity>
+ textures_;
+
+ const volatile DisplaySurfaceMetadata* transform_metadata_;
+ int active_texture_slot_;
+
+ VideoCompositor(const VideoCompositor&) = delete;
+ void operator=(const VideoCompositor&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
diff --git a/libs/vr/libvrflinger/video_mesh_surface.cpp b/libs/vr/libvrflinger/video_mesh_surface.cpp
new file mode 100644
index 0000000..a961a3d
--- /dev/null
+++ b/libs/vr/libvrflinger/video_mesh_surface.cpp
@@ -0,0 +1,59 @@
+#include "video_mesh_surface.h"
+
+#include <private/dvr/display_rpc.h>
+
+using android::pdx::LocalChannelHandle;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+VideoMeshSurface::VideoMeshSurface(DisplayService* service, int surface_id)
+ : SurfaceChannel(service, surface_id, SurfaceTypeEnum::VideoMesh,
+ sizeof(VideoMeshSurfaceMetadata)) {}
+
+VideoMeshSurface::~VideoMeshSurface() {}
+
+int VideoMeshSurface::HandleMessage(Message& message) {
+ ATRACE_NAME("VideoMeshSurface::HandleMessage");
+
+ switch (message.GetOp()) {
+ case DisplayRPC::VideoMeshSurfaceCreateProducerQueue::Opcode:
+ DispatchRemoteMethod<DisplayRPC::VideoMeshSurfaceCreateProducerQueue>(
+ *this, &VideoMeshSurface::OnCreateProducerQueue, message);
+ break;
+
+ default:
+ return SurfaceChannel::HandleMessage(message);
+ }
+
+ return 0;
+}
+
+std::shared_ptr<ConsumerQueue> VideoMeshSurface::GetConsumerQueue() {
+ if (!consumer_queue_) {
+ ALOGE(
+ "VideoMeshSurface::GetConsumerQueue: consumer_queue is uninitialized.");
+ }
+
+ return consumer_queue_;
+}
+
+LocalChannelHandle VideoMeshSurface::OnCreateProducerQueue(Message& message) {
+ ATRACE_NAME("VideoMeshSurface::OnCreateProducerQueue");
+
+ if (consumer_queue_ != nullptr) {
+ ALOGE(
+        "VideoMeshSurface::OnCreateProducerQueue: A ProducerQueue has already "
+ "been created and transported to VideoMeshSurfaceClient.");
+ REPLY_ERROR_RETURN(message, EALREADY, {});
+ }
+
+ auto producer = ProducerQueue::Create<VideoMeshSurfaceBufferMetadata>();
+ consumer_queue_ = producer->CreateConsumerQueue();
+
+ return std::move(producer->GetChannelHandle());
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/video_mesh_surface.h b/libs/vr/libvrflinger/video_mesh_surface.h
new file mode 100644
index 0000000..1370793
--- /dev/null
+++ b/libs/vr/libvrflinger/video_mesh_surface.h
@@ -0,0 +1,52 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
+
+#include <private/dvr/buffer_hub_queue_client.h>
+
+#include "surface_channel.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+// VideoMeshSurface takes three inputs: 1) buffers filled by Android system
+// components (e.g. MediaCodec or camera stack) other than applications' GL
+// context; 2) a 3D mesh chosen by the application to define the shape of the
+// surface; 3) a transformation matrix from application to define the rotation,
+// position, and scaling of the video surface.
+class VideoMeshSurface : public SurfaceChannel {
+ public:
+ using Message = pdx::Message;
+ using LocalChannelHandle = pdx::LocalChannelHandle;
+
+ VideoMeshSurface(DisplayService* service, int channel_id);
+ ~VideoMeshSurface() override;
+
+ volatile const VideoMeshSurfaceMetadata* GetMetadataBufferPtr() {
+ if (EnsureMetadataBuffer()) {
+ void* addr = nullptr;
+ metadata_buffer_->GetBlobReadWritePointer(metadata_size(), &addr);
+ return static_cast<const volatile VideoMeshSurfaceMetadata*>(addr);
+ } else {
+ return nullptr;
+ }
+ }
+
+ int HandleMessage(Message& message) override;
+
+ std::shared_ptr<ConsumerQueue> GetConsumerQueue();
+
+ private:
+ LocalChannelHandle OnCreateProducerQueue(Message& message);
+
+ std::shared_ptr<ConsumerQueue> consumer_queue_;
+
+ VideoMeshSurface(const VideoMeshSurface&) = delete;
+ void operator=(const VideoMeshSurface&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
diff --git a/libs/vr/libvrflinger/vr_flinger.cpp b/libs/vr/libvrflinger/vr_flinger.cpp
new file mode 100644
index 0000000..82ce067
--- /dev/null
+++ b/libs/vr/libvrflinger/vr_flinger.cpp
@@ -0,0 +1,110 @@
+#include <dvr/vr_flinger.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <memory>
+
+#include <binder/ProcessState.h>
+#include <log/log.h>
+#include <cutils/properties.h>
+#include <cutils/sched_policy.h>
+#include <private/dvr/display_client.h>
+#include <sys/resource.h>
+
+#include <pdx/default_transport/service_dispatcher.h>
+
+#include <functional>
+
+#include "DisplayHardware/ComposerHal.h"
+#include "display_manager_service.h"
+#include "display_service.h"
+#include "screenshot_service.h"
+#include "vsync_service.h"
+
+namespace android {
+namespace dvr {
+
+VrFlinger::VrFlinger() {}
+
+int VrFlinger::Run(Hwc2::Composer* hidl) {
+ if (!hidl)
+ return EINVAL;
+
+ std::shared_ptr<android::pdx::Service> service;
+
+ ALOGI("Starting up VrFlinger...");
+
+ setpriority(PRIO_PROCESS, 0, android::PRIORITY_URGENT_DISPLAY);
+ set_sched_policy(0, SP_FOREGROUND);
+
+ // We need to be able to create endpoints with full perms.
+ umask(0000);
+
+ android::ProcessState::self()->startThreadPool();
+
+ std::shared_ptr<android::pdx::ServiceDispatcher> dispatcher =
+ android::pdx::default_transport::ServiceDispatcher::Create();
+ CHECK_ERROR(!dispatcher, error, "Failed to create service dispatcher.");
+
+ display_service_ = android::dvr::DisplayService::Create(hidl);
+ CHECK_ERROR(!display_service_, error, "Failed to create display service.");
+ dispatcher->AddService(display_service_);
+
+ service = android::dvr::DisplayManagerService::Create(display_service_);
+ CHECK_ERROR(!service, error, "Failed to create display manager service.");
+ dispatcher->AddService(service);
+
+ service = android::dvr::ScreenshotService::Create();
+ CHECK_ERROR(!service, error, "Failed to create screenshot service.");
+ dispatcher->AddService(service);
+
+ service = android::dvr::VSyncService::Create();
+ CHECK_ERROR(!service, error, "Failed to create vsync service.");
+ dispatcher->AddService(service);
+
+ display_service_->SetVSyncCallback(
+ std::bind(&android::dvr::VSyncService::VSyncEvent,
+ std::static_pointer_cast<android::dvr::VSyncService>(service),
+ std::placeholders::_1, std::placeholders::_2,
+ std::placeholders::_3, std::placeholders::_4));
+
+ displayd_thread_ = std::thread([this, dispatcher]() {
+ ALOGI("Entering message loop.");
+
+ int ret = dispatcher->EnterDispatchLoop();
+ if (ret < 0) {
+ ALOGE("Dispatch loop exited because: %s\n", strerror(-ret));
+ }
+ });
+
+ return NO_ERROR;
+
+error:
+ display_service_.reset();
+
+ return -1;
+}
+
+void VrFlinger::EnterVrMode() {
+ if (display_service_) {
+ display_service_->SetActive(true);
+ } else {
+ ALOGE("Failed to enter VR mode : Display service is not started.");
+ }
+}
+
+void VrFlinger::ExitVrMode() {
+ if (display_service_) {
+ display_service_->SetActive(false);
+ } else {
+ ALOGE("Failed to exit VR mode : Display service is not started.");
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/vsync_service.cpp b/libs/vr/libvrflinger/vsync_service.cpp
new file mode 100644
index 0000000..48fa2c2
--- /dev/null
+++ b/libs/vr/libvrflinger/vsync_service.cpp
@@ -0,0 +1,208 @@
+#include "vsync_service.h"
+
+#include <log/log.h>
+#include <hardware/hwcomposer.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <utils/Trace.h>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/display_types.h>
+
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::MessageInfo;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+VSyncService::VSyncService()
+ : BASE("VSyncService", Endpoint::Create(DisplayVSyncRPC::kClientPath)),
+ last_vsync_(0),
+ current_vsync_(0),
+ compositor_time_ns_(0),
+ current_vsync_count_(0) {}
+
+VSyncService::~VSyncService() {}
+
+void VSyncService::VSyncEvent(int display, int64_t timestamp_ns,
+ int64_t compositor_time_ns,
+ uint32_t vsync_count) {
+ ATRACE_NAME("VSyncService::VSyncEvent");
+ std::lock_guard<std::mutex> autolock(mutex_);
+
+ if (display == HWC_DISPLAY_PRIMARY) {
+ last_vsync_ = current_vsync_;
+ current_vsync_ = timestamp_ns;
+ compositor_time_ns_ = compositor_time_ns;
+ current_vsync_count_ = vsync_count;
+
+ NotifyWaiters();
+ UpdateClients();
+ }
+}
+
+std::shared_ptr<Channel> VSyncService::OnChannelOpen(pdx::Message& message) {
+ const MessageInfo& info = message.GetInfo();
+
+ auto client = std::make_shared<VSyncChannel>(*this, info.pid, info.cid);
+ AddClient(client);
+
+ return client;
+}
+
+void VSyncService::OnChannelClose(pdx::Message& /*message*/,
+ const std::shared_ptr<Channel>& channel) {
+ auto client = std::static_pointer_cast<VSyncChannel>(channel);
+ if (!client) {
+ ALOGW("WARNING: VSyncChannel was NULL!!!\n");
+ return;
+ }
+
+ RemoveClient(client);
+}
+
+void VSyncService::AddWaiter(pdx::Message& message) {
+ std::lock_guard<std::mutex> autolock(mutex_);
+ std::unique_ptr<VSyncWaiter> waiter(new VSyncWaiter(message));
+ waiters_.push_back(std::move(waiter));
+}
+
+void VSyncService::AddClient(const std::shared_ptr<VSyncChannel>& client) {
+ std::lock_guard<std::mutex> autolock(mutex_);
+ clients_.push_back(client);
+}
+
+void VSyncService::RemoveClient(const std::shared_ptr<VSyncChannel>& client) {
+ std::lock_guard<std::mutex> autolock(mutex_);
+ clients_.remove(client);
+}
+
+// Private. Assumes mutex is held.
+void VSyncService::NotifyWaiters() {
+ ATRACE_NAME("VSyncService::NotifyWaiters");
+ auto first = waiters_.begin();
+ auto last = waiters_.end();
+
+ while (first != last) {
+ (*first)->Notify(current_vsync_);
+ waiters_.erase(first++);
+ }
+}
+
+// Private. Assumes mutex is held.
+void VSyncService::UpdateClients() {
+ ATRACE_NAME("VSyncService::UpdateClients");
+ auto first = clients_.begin();
+ auto last = clients_.end();
+
+ while (first != last) {
+ (*first)->Signal();
+ first++;
+ }
+}
+
+int VSyncService::HandleMessage(pdx::Message& message) {
+ switch (message.GetOp()) {
+ case DisplayVSyncRPC::Wait::Opcode:
+ AddWaiter(message);
+ return 0;
+
+ case DisplayVSyncRPC::GetLastTimestamp::Opcode:
+ DispatchRemoteMethod<DisplayVSyncRPC::GetLastTimestamp>(
+ *this, &VSyncService::OnGetLastTimestamp, message);
+ return 0;
+
+ case DisplayVSyncRPC::GetSchedInfo::Opcode:
+ DispatchRemoteMethod<DisplayVSyncRPC::GetSchedInfo>(
+ *this, &VSyncService::OnGetSchedInfo, message);
+ return 0;
+
+ case DisplayVSyncRPC::Acknowledge::Opcode:
+ DispatchRemoteMethod<DisplayVSyncRPC::Acknowledge>(
+ *this, &VSyncService::OnAcknowledge, message);
+ return 0;
+
+ default:
+ return Service::HandleMessage(message);
+ }
+}
+
+int64_t VSyncService::OnGetLastTimestamp(pdx::Message& message) {
+ auto client = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+ std::lock_guard<std::mutex> autolock(mutex_);
+
+ // Getting the timestamp has the side effect of ACKing.
+ client->Ack();
+ return current_vsync_;
+}
+
+VSyncSchedInfo VSyncService::OnGetSchedInfo(pdx::Message& message) {
+ auto client = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+ std::lock_guard<std::mutex> autolock(mutex_);
+
+ // Getting the timestamp has the side effect of ACKing.
+ client->Ack();
+
+ uint32_t next_vsync_count = current_vsync_count_ + 1;
+ int64_t current_time = GetSystemClockNs();
+ int64_t vsync_period_ns = 0;
+ int64_t next_warp;
+ if (current_vsync_ == 0 || last_vsync_ == 0) {
+ // Handle startup when current_vsync_ or last_vsync_ are 0.
+ // Normally should not happen because vsync_service is running before
+    // applications, but in case it does, a sane value prevents applications
+ // from malfunctioning.
+ vsync_period_ns = 20000000;
+ next_warp = current_time;
+ } else {
+ // TODO(jbates) When we have an accurate reading of the true vsync
+ // period, use that instead of this estimated value.
+ vsync_period_ns = current_vsync_ - last_vsync_;
+ // Clamp the period, because when there are no surfaces the last_vsync_
+ // value will get stale. Note this is temporary and goes away as soon
+ // as we have an accurate vsync period reported by the system.
+ vsync_period_ns = std::min(vsync_period_ns, INT64_C(20000000));
+ next_warp = current_vsync_ + vsync_period_ns - compositor_time_ns_;
+ // If the request missed the present window, move up to the next vsync.
+ if (current_time > next_warp) {
+ next_warp += vsync_period_ns;
+ ++next_vsync_count;
+ }
+ }
+
+ return {vsync_period_ns, next_warp, next_vsync_count};
+}
+
+int VSyncService::OnAcknowledge(pdx::Message& message) {
+ auto client = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+ std::lock_guard<std::mutex> autolock(mutex_);
+ client->Ack();
+ return 0;
+}
+
+void VSyncWaiter::Notify(int64_t timestamp) {
+ timestamp_ = timestamp;
+ DispatchRemoteMethod<DisplayVSyncRPC::Wait>(*this, &VSyncWaiter::OnWait,
+ message_);
+}
+
+int64_t VSyncWaiter::OnWait(pdx::Message& /*message*/) { return timestamp_; }
+
+void VSyncChannel::Ack() {
+ ALOGD_IF(TRACE, "VSyncChannel::Ack: pid=%d cid=%d\n", pid_, cid_);
+ service_.ModifyChannelEvents(cid_, POLLPRI, 0);
+}
+
+void VSyncChannel::Signal() {
+ ALOGD_IF(TRACE, "VSyncChannel::Signal: pid=%d cid=%d\n", pid_, cid_);
+ service_.ModifyChannelEvents(cid_, 0, POLLPRI);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/vsync_service.h b/libs/vr/libvrflinger/vsync_service.h
new file mode 100644
index 0000000..ba1d4df
--- /dev/null
+++ b/libs/vr/libvrflinger/vsync_service.h
@@ -0,0 +1,107 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_
+
+#include <pdx/service.h>
+
+#include <list>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include "display_service.h"
+
+namespace android {
+namespace dvr {
+
+// VSyncWaiter encapsulates a client blocked waiting for the next vsync.
+// It is used to enqueue the Message to reply to when the next vsync event
+// occurs.
+class VSyncWaiter {
+ public:
+ explicit VSyncWaiter(pdx::Message& message) : message_(std::move(message)) {}
+
+ void Notify(int64_t timestamp);
+
+ private:
+ int64_t OnWait(pdx::Message& message);
+
+ pdx::Message message_;
+ int64_t timestamp_ = 0;
+
+ VSyncWaiter(const VSyncWaiter&) = delete;
+ void operator=(const VSyncWaiter&) = delete;
+};
+
+// VSyncChannel manages the service-side per-client context for each client
+// using the service.
+class VSyncChannel : public pdx::Channel {
+ public:
+ VSyncChannel(pdx::Service& service, int pid, int cid)
+ : service_(service), pid_(pid), cid_(cid) {}
+
+ void Ack();
+ void Signal();
+
+ private:
+ pdx::Service& service_;
+ pid_t pid_;
+ int cid_;
+
+ VSyncChannel(const VSyncChannel&) = delete;
+ void operator=(const VSyncChannel&) = delete;
+};
+
+// VSyncService implements the displayd vsync service over ServiceFS.
+class VSyncService : public pdx::ServiceBase<VSyncService> {
+ public:
+ ~VSyncService() override;
+
+ int HandleMessage(pdx::Message& message) override;
+
+ std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& message) override;
+ void OnChannelClose(pdx::Message& message,
+ const std::shared_ptr<pdx::Channel>& channel) override;
+
+ // Called by the hardware composer HAL, or similar,
+ // whenever a vsync event occurs.
+ // |compositor_time_ns| is the number of ns before the next vsync when the
+ // compositor will preempt the GPU to do EDS and lens warp.
+ void VSyncEvent(int display, int64_t timestamp_ns, int64_t compositor_time_ns,
+ uint32_t vsync_count);
+
+ private:
+ friend BASE;
+
+ VSyncService();
+
+ int64_t OnGetLastTimestamp(pdx::Message& message);
+ VSyncSchedInfo OnGetSchedInfo(pdx::Message& message);
+ int OnAcknowledge(pdx::Message& message);
+
+ void NotifierThreadFunction();
+
+ void AddWaiter(pdx::Message& message);
+ void NotifyWaiters();
+ void UpdateClients();
+
+ void AddClient(const std::shared_ptr<VSyncChannel>& client);
+ void RemoveClient(const std::shared_ptr<VSyncChannel>& client);
+
+ int64_t last_vsync_;
+ int64_t current_vsync_;
+ int64_t compositor_time_ns_;
+ uint32_t current_vsync_count_;
+
+ std::mutex mutex_;
+
+ std::list<std::unique_ptr<VSyncWaiter>> waiters_;
+ std::list<std::shared_ptr<VSyncChannel>> clients_;
+
+ VSyncService(const VSyncService&) = delete;
+ void operator=(VSyncService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_
diff --git a/services/audiomanager/IPlayer.cpp b/services/audiomanager/IPlayer.cpp
index 3b0b4e9..47edc4b 100644
--- a/services/audiomanager/IPlayer.cpp
+++ b/services/audiomanager/IPlayer.cpp
@@ -33,6 +33,8 @@
PAUSE = IBinder::FIRST_CALL_TRANSACTION + 1,
STOP = IBinder::FIRST_CALL_TRANSACTION + 2,
SET_VOLUME = IBinder::FIRST_CALL_TRANSACTION + 3,
+ SET_PAN = IBinder::FIRST_CALL_TRANSACTION + 4,
+ SET_START_DELAY_MS = IBinder::FIRST_CALL_TRANSACTION + 5,
};
class BpPlayer : public BpInterface<IPlayer>
@@ -71,6 +73,21 @@
data.writeFloat(vol);
remote()->transact(SET_VOLUME, data, &reply);
}
+
+ virtual void setPan(float pan)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IPlayer::getInterfaceDescriptor());
+ data.writeFloat(pan);
+ remote()->transact(SET_PAN, data, &reply);
+ }
+
+ virtual void setStartDelayMs(int32_t delayMs) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IPlayer::getInterfaceDescriptor());
+ data.writeInt32(delayMs);
+ remote()->transact(SET_START_DELAY_MS, data, &reply);
+ }
};
IMPLEMENT_META_INTERFACE(Player, "android.media.IPlayer");
@@ -100,7 +117,17 @@
CHECK_INTERFACE(IPlayer, data, reply);
setVolume(data.readFloat());
return NO_ERROR;
- }
+ } break;
+ case SET_PAN: {
+ CHECK_INTERFACE(IPlayer, data, reply);
+ setPan(data.readFloat());
+ return NO_ERROR;
+ } break;
+ case SET_START_DELAY_MS: {
+ CHECK_INTERFACE(IPlayer, data, reply);
+ setStartDelayMs(data.readInt32());
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/services/batteryservice/Android.bp b/services/batteryservice/Android.bp
index 79db871..e441bda 100644
--- a/services/batteryservice/Android.bp
+++ b/services/batteryservice/Android.bp
@@ -1,4 +1,4 @@
-cc_library_static {
+cc_library {
name: "libbatteryservice",
srcs: [
@@ -8,7 +8,7 @@
"IBatteryPropertiesRegistrar.cpp",
],
- static_libs: [
+ shared_libs: [
"libutils",
"libbinder",
],
@@ -19,4 +19,4 @@
"-Wunused",
"-Wunreachable-code",
],
-}
+}
\ No newline at end of file
diff --git a/services/batteryservice/IBatteryPropertiesListener.cpp b/services/batteryservice/IBatteryPropertiesListener.cpp
index 7555f4b..6e5bcfe 100644
--- a/services/batteryservice/IBatteryPropertiesListener.cpp
+++ b/services/batteryservice/IBatteryPropertiesListener.cpp
@@ -43,4 +43,22 @@
// ----------------------------------------------------------------------------
+status_t BnBatteryPropertiesListener::onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case TRANSACT_BATTERYPROPERTIESCHANGED: {
+ CHECK_INTERFACE(IBatteryPropertiesListener, data, reply);
+ struct BatteryProperties props = {};
+ if (data.readInt32() != 0) {
+ props.readFromParcel((Parcel*)&data);
+ }
+ batteryPropertiesChanged(props);
+ return NO_ERROR;
+ }
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+};
+
}; // namespace android
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
new file mode 100644
index 0000000..cc93105
--- /dev/null
+++ b/services/surfaceflinger/Android.bp
@@ -0,0 +1,4 @@
+cc_library_static {
+ name: "libsurfaceflingerincludes",
+ export_include_dirs: ["."],
+}
diff --git a/services/surfaceflinger/Android.mk b/services/surfaceflinger/Android.mk
index 6f5947a..0e05d54 100644
--- a/services/surfaceflinger/Android.mk
+++ b/services/surfaceflinger/Android.mk
@@ -9,6 +9,7 @@
DisplayDevice.cpp \
DispSync.cpp \
EventControlThread.cpp \
+ StartBootAnimThread.cpp \
EventThread.cpp \
FrameTracker.cpp \
GpuService.cpp \
diff --git a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
index 37de7a2..2b603cc 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
@@ -1298,6 +1298,7 @@
auto& hwc1Layer = mHwc1RequestedContents->hwLayers[layer->getHwc1Id()];
hwc1Layer.releaseFenceFd = -1;
hwc1Layer.acquireFenceFd = -1;
+ ALOGV("Applying states for layer %" PRIu64 " ", layer->getId());
layer->applyState(hwc1Layer, applyAllState);
}
@@ -2009,7 +2010,6 @@
mZ(0),
mReleaseFence(),
mHwc1Id(0),
- mHasUnsupportedDataspace(false),
mHasUnsupportedPlaneAlpha(false) {}
bool HWC2On1Adapter::SortLayersByZ::operator()(
@@ -2070,9 +2070,8 @@
return Error::None;
}
-Error HWC2On1Adapter::Layer::setDataspace(android_dataspace_t dataspace)
+Error HWC2On1Adapter::Layer::setDataspace(android_dataspace_t)
{
- mHasUnsupportedDataspace = (dataspace != HAL_DATASPACE_UNKNOWN);
return Error::None;
}
@@ -2318,8 +2317,13 @@
// HWC1 never supports color transforms or dataspaces and only sometimes
// supports plane alpha (depending on the version). These require us to drop
// some or all layers to client composition.
- if (mHasUnsupportedDataspace || mHasUnsupportedPlaneAlpha ||
- mDisplay.hasColorTransform() || mHasUnsupportedBackgroundColor) {
+ ALOGV("applyCompositionType");
+ ALOGV("mHasUnsupportedPlaneAlpha = %d", mHasUnsupportedPlaneAlpha);
+ ALOGV("mDisplay.hasColorTransform() = %d", mDisplay.hasColorTransform());
+ ALOGV("mHasUnsupportedBackgroundColor = %d", mHasUnsupportedBackgroundColor);
+
+ if (mHasUnsupportedPlaneAlpha || mDisplay.hasColorTransform() ||
+ mHasUnsupportedBackgroundColor) {
hwc1Layer.compositionType = HWC_FRAMEBUFFER;
hwc1Layer.flags = HWC_SKIP_LAYER;
return;
diff --git a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
index 9abdc38..df33ec3 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
@@ -605,7 +605,6 @@
DeferredFence mReleaseFence;
size_t mHwc1Id;
- bool mHasUnsupportedDataspace;
bool mHasUnsupportedPlaneAlpha;
bool mHasUnsupportedBackgroundColor;
};
diff --git a/services/surfaceflinger/EventControlThread.h b/services/surfaceflinger/EventControlThread.h
index 9368db6..1b1ef75 100644
--- a/services/surfaceflinger/EventControlThread.h
+++ b/services/surfaceflinger/EventControlThread.h
@@ -45,4 +45,4 @@
}
-#endif // ANDROID_DISPSYNC_H
+#endif // ANDROID_EVENTCONTROLTHREAD_H
diff --git a/services/surfaceflinger/StartBootAnimThread.cpp b/services/surfaceflinger/StartBootAnimThread.cpp
new file mode 100644
index 0000000..c3f7296
--- /dev/null
+++ b/services/surfaceflinger/StartBootAnimThread.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/properties.h>
+#include "StartBootAnimThread.h"
+
+namespace android {
+
+StartBootAnimThread::StartBootAnimThread():
+ Thread(false) {
+}
+
+status_t StartBootAnimThread::Start() {
+ return run("SurfaceFlinger::StartBootAnimThread", PRIORITY_NORMAL);
+}
+
+bool StartBootAnimThread::threadLoop() {
+ property_set("service.bootanim.exit", "0");
+ property_set("ctl.start", "bootanim");
+ // Exit immediately
+ return false;
+}
+
+} // namespace android
diff --git a/services/surfaceflinger/StartBootAnimThread.h b/services/surfaceflinger/StartBootAnimThread.h
new file mode 100644
index 0000000..dba2bee
--- /dev/null
+++ b/services/surfaceflinger/StartBootAnimThread.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_STARTBOOTANIMTHREAD_H
+#define ANDROID_STARTBOOTANIMTHREAD_H
+
+#include <stddef.h>
+
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+
+namespace android {
+
+class StartBootAnimThread : public Thread {
+// Boot animation is triggered via calls to "property_set()" which can block
+// if init's executing slow operation such as 'mount_all --late' (currently
+// happening 1/10th with fsck) concurrently. Running in a separate thread
+// allows to pursue the SurfaceFlinger's init process without blocking.
+// see b/34499826.
+public:
+ StartBootAnimThread();
+ status_t Start();
+private:
+ virtual bool threadLoop();
+};
+
+}
+
+#endif // ANDROID_STARTBOOTANIMTHREAD_H
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 01655c6..61e336f 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -338,6 +338,9 @@
void SurfaceFlinger::bootFinished()
{
+ if (mStartBootAnimThread->join() != NO_ERROR) {
+ ALOGE("Join StartBootAnimThread failed!");
+ }
const nsecs_t now = systemTime();
const nsecs_t duration = now - mBootTime;
ALOGI("Boot is finished (%ld ms)", long(ns2ms(duration)) );
@@ -579,16 +582,22 @@
mRenderEngine->primeCache();
- // start boot animation
- startBootAnim();
+ mStartBootAnimThread = new StartBootAnimThread();
+ if (mStartBootAnimThread->Start() != NO_ERROR) {
+ ALOGE("Run StartBootAnimThread failed!");
+ }
ALOGV("Done initializing");
}
void SurfaceFlinger::startBootAnim() {
- // start boot animation
- property_set("service.bootanim.exit", "0");
- property_set("ctl.start", "bootanim");
+ // Start boot animation service by setting a property mailbox
+ // if property setting thread is already running, Start() will be just a NOP
+ mStartBootAnimThread->Start();
+ // Wait until property was set
+ if (mStartBootAnimThread->join() != NO_ERROR) {
+ ALOGE("Join StartBootAnimThread failed!");
+ }
}
size_t SurfaceFlinger::getMaxTextureSize() const {
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 75c1920..55735b1 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -58,6 +58,7 @@
#include "LayerVector.h"
#include "MessageQueue.h"
#include "SurfaceInterceptor.h"
+#include "StartBootAnimThread.h"
#include "DisplayHardware/HWComposer.h"
#include "Effects/Daltonizer.h"
@@ -345,6 +346,8 @@
bool useIdentityTransform, Transform::orientation_flags rotation,
bool isLocalScreenshot);
+ sp<StartBootAnimThread> mStartBootAnimThread = nullptr;
+
/* ------------------------------------------------------------------------
* EGL
*/
diff --git a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
index 7b83834..fe9ba96 100644
--- a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
+++ b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
@@ -317,6 +317,9 @@
void SurfaceFlinger::bootFinished()
{
+ if (mStartBootAnimThread->join() != NO_ERROR) {
+ ALOGE("Join StartBootAnimThread failed!");
+ }
const nsecs_t now = systemTime();
const nsecs_t duration = now - mBootTime;
ALOGI("Boot is finished (%ld ms)", long(ns2ms(duration)) );
@@ -589,8 +592,12 @@
mRenderEngine->primeCache();
- // start boot animation
- startBootAnim();
+ mStartBootAnimThread = new StartBootAnimThread();
+ if (mStartBootAnimThread->Start() != NO_ERROR) {
+ ALOGE("Run StartBootAnimThread failed!");
+ }
+
+ ALOGV("Done initializing");
}
int32_t SurfaceFlinger::allocateHwcDisplayId(DisplayDevice::DisplayType type) {
@@ -599,9 +606,13 @@
}
void SurfaceFlinger::startBootAnim() {
- // start boot animation
- property_set("service.bootanim.exit", "0");
- property_set("ctl.start", "bootanim");
+ // Start boot animation service by setting a property mailbox
+ // if property setting thread is already running, Start() will be just a NOP
+ mStartBootAnimThread->Start();
+ // Wait until property was set
+ if (mStartBootAnimThread->join() != NO_ERROR) {
+ ALOGE("Join StartBootAnimThread failed!");
+ }
}
size_t SurfaceFlinger::getMaxTextureSize() const {
diff --git a/services/vr/bufferhubd/Android.mk b/services/vr/bufferhubd/Android.mk
index 492acb2..4ba2373 100644
--- a/services/vr/bufferhubd/Android.mk
+++ b/services/vr/bufferhubd/Android.mk
@@ -23,7 +23,6 @@
producer_queue_channel.cpp \
staticLibraries := \
- libchrome \
libperformance \
libpdx_default_transport \
libbufferhub
diff --git a/services/vr/bufferhubd/buffer_hub.cpp b/services/vr/bufferhubd/buffer_hub.cpp
index a0c7439..0906476 100644
--- a/services/vr/bufferhubd/buffer_hub.cpp
+++ b/services/vr/bufferhubd/buffer_hub.cpp
@@ -1,6 +1,6 @@
#include "buffer_hub.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <poll.h>
#include <utils/Trace.h>
@@ -108,6 +108,7 @@
// consumer_count is tracked by producer. When it's zero, producer must
// have already hung up and the consumer is orphaned.
stream << std::setw(14) << "Orphaned.";
+ stream << (" channel_id=" + std::to_string(channel->channel_id()));
stream << std::endl;
continue;
}
@@ -433,6 +434,9 @@
void BufferHubChannel::SignalAvailable() {
ATRACE_NAME("BufferHubChannel::SignalAvailable");
+ ALOGD_IF(TRACE,
+ "BufferHubChannel::SignalAvailable: channel_id=%d buffer_id=%d",
+ channel_id(), buffer_id());
if (!IsDetached()) {
const int ret = service_->ModifyChannelEvents(channel_id_, 0, POLLIN);
ALOGE_IF(ret < 0,
@@ -446,6 +450,9 @@
void BufferHubChannel::ClearAvailable() {
ATRACE_NAME("BufferHubChannel::ClearAvailable");
+ ALOGD_IF(TRACE,
+ "BufferHubChannel::ClearAvailable: channel_id=%d buffer_id=%d",
+ channel_id(), buffer_id());
if (!IsDetached()) {
const int ret = service_->ModifyChannelEvents(channel_id_, POLLIN, 0);
ALOGE_IF(ret < 0,
@@ -459,6 +466,8 @@
void BufferHubChannel::Hangup() {
ATRACE_NAME("BufferHubChannel::Hangup");
+ ALOGD_IF(TRACE, "BufferHubChannel::Hangup: channel_id=%d buffer_id=%d",
+ channel_id(), buffer_id());
if (!IsDetached()) {
const int ret = service_->ModifyChannelEvents(channel_id_, 0, POLLHUP);
ALOGE_IF(
diff --git a/services/vr/bufferhubd/bufferhubd.cpp b/services/vr/bufferhubd/bufferhubd.cpp
index a8e2ddf..d4fc540 100644
--- a/services/vr/bufferhubd/bufferhubd.cpp
+++ b/services/vr/bufferhubd/bufferhubd.cpp
@@ -1,7 +1,7 @@
#include <sched.h>
#include <unistd.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include <dvr/performance_client_api.h>
#include <pdx/default_transport/service_dispatcher.h>
diff --git a/services/vr/bufferhubd/consumer_channel.cpp b/services/vr/bufferhubd/consumer_channel.cpp
index 8db92a3..2264cef 100644
--- a/services/vr/bufferhubd/consumer_channel.cpp
+++ b/services/vr/bufferhubd/consumer_channel.cpp
@@ -1,6 +1,6 @@
#include "consumer_channel.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <utils/Trace.h>
#include <thread>
@@ -27,8 +27,9 @@
}
ConsumerChannel::~ConsumerChannel() {
- ALOGD_IF(TRACE, "ConsumerChannel::~ConsumerChannel: channel_id=%d",
- channel_id());
+ ALOGD_IF(TRACE,
+ "ConsumerChannel::~ConsumerChannel: channel_id=%d buffer_id=%d",
+ channel_id(), buffer_id());
if (auto producer = GetProducer()) {
if (!handled_) // Producer is waiting for our Release.
diff --git a/services/vr/bufferhubd/producer_channel.cpp b/services/vr/bufferhubd/producer_channel.cpp
index b87b709..98a419f 100644
--- a/services/vr/bufferhubd/producer_channel.cpp
+++ b/services/vr/bufferhubd/producer_channel.cpp
@@ -1,6 +1,6 @@
#include "producer_channel.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <sync/sync.h>
#include <sys/poll.h>
#include <utils/Trace.h>
@@ -9,7 +9,6 @@
#include <atomic>
#include <thread>
-#include <base/logging.h>
#include <private/dvr/bufferhub_rpc.h>
#include "consumer_channel.h"
@@ -61,8 +60,9 @@
}
ProducerChannel::~ProducerChannel() {
- ALOGD_IF(TRACE, "ProducerChannel::~ProducerChannel: channel_id=%d",
- channel_id());
+ ALOGD_IF(TRACE,
+ "ProducerChannel::~ProducerChannel: channel_id=%d buffer_id=%d",
+ channel_id(), buffer_id());
for (auto consumer : consumer_channels_)
consumer->OnProducerClosed();
}
@@ -275,8 +275,9 @@
// Attempt to merge the fences if necessary.
if (release_fence) {
if (returned_fence_) {
- LocalFence merged_fence(sync_merge(
- "bufferhub_merged", returned_fence_.get_fd(), release_fence.get_fd()));
+ LocalFence merged_fence(sync_merge("bufferhub_merged",
+ returned_fence_.get_fd(),
+ release_fence.get_fd()));
const int error = errno;
if (!merged_fence) {
ALOGE("ProducerChannel::OnConsumerRelease: Failed to merge fences: %s",
@@ -367,10 +368,9 @@
bool ProducerChannel::CheckParameters(int width, int height, int format,
int usage, size_t meta_size_bytes,
size_t slice_count) {
- return slices_.size() == slice_count &&
- meta_size_bytes == meta_size_bytes_ && slices_[0].width() == width &&
- slices_[0].height() == height && slices_[0].format() == format &&
- slices_[0].usage() == usage;
+ return slices_.size() == slice_count && meta_size_bytes == meta_size_bytes_ &&
+ slices_[0].width() == width && slices_[0].height() == height &&
+ slices_[0].format() == format && slices_[0].usage() == usage;
}
} // namespace dvr
diff --git a/services/vr/performanced/cpu_set.cpp b/services/vr/performanced/cpu_set.cpp
index 916226e..1a3723f 100644
--- a/services/vr/performanced/cpu_set.cpp
+++ b/services/vr/performanced/cpu_set.cpp
@@ -1,6 +1,6 @@
#include "cpu_set.h"
-#include <cutils/log.h>
+#include <log/log.h>
#include <algorithm>
#include <iomanip>
diff --git a/services/vr/performanced/main.cpp b/services/vr/performanced/main.cpp
index 114413d..ca66c71 100644
--- a/services/vr/performanced/main.cpp
+++ b/services/vr/performanced/main.cpp
@@ -3,9 +3,9 @@
#include <sys/prctl.h>
#include <sys/stat.h>
-#include <cutils/log.h>
#include <cutils/properties.h>
#include <cutils/sched_policy.h>
+#include <log/log.h>
#include <sys/resource.h>
#include <utils/threads.h>
diff --git a/services/vr/performanced/task.cpp b/services/vr/performanced/task.cpp
index ad12858..1175a7b 100644
--- a/services/vr/performanced/task.cpp
+++ b/services/vr/performanced/task.cpp
@@ -1,8 +1,8 @@
#include "task.h"
-#include <cutils/log.h>
#include <errno.h>
#include <fcntl.h>
+#include <log/log.h>
#include <stdio.h>
#include <cctype>
diff --git a/services/vr/sensord/Android.mk b/services/vr/sensord/Android.mk
index 907c3d6..36d8400 100644
--- a/services/vr/sensord/Android.mk
+++ b/services/vr/sensord/Android.mk
@@ -32,7 +32,6 @@
libperformance \
libbufferhub \
libpdx_default_transport \
- libchrome \
libposepredictor \
sharedLibraries := \
diff --git a/services/vr/sensord/pose_service.cpp b/services/vr/sensord/pose_service.cpp
index 75919d8..8e4dbba 100644
--- a/services/vr/sensord/pose_service.cpp
+++ b/services/vr/sensord/pose_service.cpp
@@ -11,12 +11,12 @@
#include <sstream>
#include <type_traits>
-#include <cutils/log.h>
#include <cutils/properties.h>
#include <cutils/trace.h>
#include <dvr/performance_client_api.h>
#include <dvr/pose_client.h>
#include <hardware/sensors.h>
+#include <log/log.h>
#include <pdx/default_transport/service_endpoint.h>
#include <private/dvr/benchmark.h>
#include <private/dvr/clock_ns.h>
diff --git a/services/vr/sensord/sensor_hal_thread.cpp b/services/vr/sensord/sensor_hal_thread.cpp
index 59b433f..c321d4f 100644
--- a/services/vr/sensord/sensor_hal_thread.cpp
+++ b/services/vr/sensord/sensor_hal_thread.cpp
@@ -1,7 +1,7 @@
#include "sensor_hal_thread.h"
-#include <cutils/log.h>
#include <dvr/performance_client_api.h>
+#include <log/log.h>
namespace android {
namespace dvr {
diff --git a/services/vr/sensord/sensor_ndk_thread.cpp b/services/vr/sensord/sensor_ndk_thread.cpp
index b5e16e7..815453b 100644
--- a/services/vr/sensord/sensor_ndk_thread.cpp
+++ b/services/vr/sensord/sensor_ndk_thread.cpp
@@ -1,7 +1,7 @@
#include "sensor_ndk_thread.h"
-#include <cutils/log.h>
#include <dvr/performance_client_api.h>
+#include <log/log.h>
namespace android {
namespace dvr {
diff --git a/services/vr/sensord/sensor_service.cpp b/services/vr/sensord/sensor_service.cpp
index 4396851..1b809b0 100644
--- a/services/vr/sensord/sensor_service.cpp
+++ b/services/vr/sensord/sensor_service.cpp
@@ -1,9 +1,9 @@
#include "sensor_service.h"
-#include <cutils/log.h>
#include <hardware/sensors.h>
-#include <poll.h>
+#include <log/log.h>
#include <pdx/default_transport/service_endpoint.h>
+#include <poll.h>
#include <private/dvr/sensor-ipc.h>
#include <time.h>
diff --git a/services/vr/virtual_touchpad/EvdevInjector.cpp b/services/vr/virtual_touchpad/EvdevInjector.cpp
index be20c6c..d8a1dfa 100644
--- a/services/vr/virtual_touchpad/EvdevInjector.cpp
+++ b/services/vr/virtual_touchpad/EvdevInjector.cpp
@@ -1,9 +1,9 @@
#include "EvdevInjector.h"
-#include <cutils/log.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/input.h>
+#include <log/log.h>
#include <string.h>
#include <sys/fcntl.h>
#include <unistd.h>
diff --git a/services/vr/virtual_touchpad/VirtualTouchpad.cpp b/services/vr/virtual_touchpad/VirtualTouchpad.cpp
index f3936fc..4793058 100644
--- a/services/vr/virtual_touchpad/VirtualTouchpad.cpp
+++ b/services/vr/virtual_touchpad/VirtualTouchpad.cpp
@@ -1,9 +1,9 @@
#include "VirtualTouchpad.h"
#include <android/input.h>
-#include <cutils/log.h>
#include <inttypes.h>
#include <linux/input.h>
+#include <log/log.h>
// References:
// [0] Multi-touch (MT) Protocol,
diff --git a/services/vr/virtual_touchpad/VirtualTouchpadService.cpp b/services/vr/virtual_touchpad/VirtualTouchpadService.cpp
index 5e3321f..25c1a4f 100644
--- a/services/vr/virtual_touchpad/VirtualTouchpadService.cpp
+++ b/services/vr/virtual_touchpad/VirtualTouchpadService.cpp
@@ -1,8 +1,8 @@
#include "VirtualTouchpadService.h"
#include <binder/Status.h>
-#include <cutils/log.h>
#include <linux/input.h>
+#include <log/log.h>
#include <utils/Errors.h>
namespace android {
diff --git a/services/vr/virtual_touchpad/main.cpp b/services/vr/virtual_touchpad/main.cpp
index 57471c5..1debe9f 100644
--- a/services/vr/virtual_touchpad/main.cpp
+++ b/services/vr/virtual_touchpad/main.cpp
@@ -1,7 +1,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
-#include <cutils/log.h>
+#include <log/log.h>
#include "VirtualTouchpadService.h"
diff --git a/services/vr/vr_manager/vr_manager.cpp b/services/vr/vr_manager/vr_manager.cpp
index a31fcb7..d24cbb5 100644
--- a/services/vr/vr_manager/vr_manager.cpp
+++ b/services/vr/vr_manager/vr_manager.cpp
@@ -88,33 +88,4 @@
IMPLEMENT_META_INTERFACE(VrManager, "android.service.vr.IVrManager");
-class BpVrDisplayStateService : public BpInterface<IVrDisplayStateService> {
- public:
- explicit BpVrDisplayStateService(const sp<IBinder>& impl)
- : BpInterface<IVrDisplayStateService>(impl) {}
-
- void displayAvailable(bool available) {
- Parcel data, reply;
- data.writeInterfaceToken(IVrDisplayStateService::getInterfaceDescriptor());
- data.writeBool(available);
- remote()->transact(static_cast<uint32_t>(
- VrDisplayStateTransaction::ON_DISPLAY_STATE_CHANGED),
- data, &reply);
- }
-};
-
-status_t BnVrDisplayStateService::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- switch (static_cast<VrDisplayStateTransaction>(code)) {
- case VrDisplayStateTransaction::ON_DISPLAY_STATE_CHANGED:
- CHECK_INTERFACE(IVrDisplayStateService, data, reply);
- displayAvailable(data.readBool());
- return OK;
- }
- return BBinder::onTransact(code, data, reply, flags);
-}
-
-IMPLEMENT_META_INTERFACE(VrDisplayStateService,
- "android.service.vr.IVrDisplayStateService");
-
} // namespace android
diff --git a/services/vr/vr_window_manager/Android.mk_disable b/services/vr/vr_window_manager/Android.mk_disable
index d7d98b3..9a6f752 100644
--- a/services/vr/vr_window_manager/Android.mk_disable
+++ b/services/vr/vr_window_manager/Android.mk_disable
@@ -51,7 +51,6 @@
libsensor \
libperformance \
libpdx_default_transport \
- libchrome \
libcutils \
shared_libs := \
@@ -79,7 +78,7 @@
LOCAL_SRC_FILES := $(src)
LOCAL_C_INCLUDES := hardware/qcom/display/msm8996/libgralloc
LOCAL_STATIC_LIBRARIES := $(static_libs)
-LOCAL_SHARED_LIBRARIES := $(shared_libs) libevent
+LOCAL_SHARED_LIBRARIES := $(shared_libs)
LOCAL_SHARED_LIBRARIES += libgvr
LOCAL_STATIC_LIBRARIES += libgvr_ext
LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES
diff --git a/services/vr/vr_window_manager/application.cpp b/services/vr/vr_window_manager/application.cpp
index 081de74..62db639 100644
--- a/services/vr/vr_window_manager/application.cpp
+++ b/services/vr/vr_window_manager/application.cpp
@@ -1,14 +1,14 @@
#include "application.h"
+#include <EGL/egl.h>
+#include <GLES3/gl3.h>
#include <binder/IServiceManager.h>
-#include <cutils/log.h>
#include <dvr/graphics.h>
#include <dvr/performance_client_api.h>
#include <dvr/pose_client.h>
-#include <EGL/egl.h>
-#include <GLES3/gl3.h>
#include <gui/ISurfaceComposer.h>
#include <hardware/hwcomposer_defs.h>
+#include <log/log.h>
#include <private/dvr/graphics/vr_gl_extensions.h>
#include <vector>
@@ -161,8 +161,10 @@
}
break;
case MainThreadTask::EnteringVrMode:
- if (!initialized_)
- AllocateResources();
+ if (!initialized_) {
+ if (AllocateResources())
+ ALOGE("Failed to allocate resources");
+ }
break;
case MainThreadTask::ExitingVrMode:
if (initialized_)
diff --git a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
index 53c7d8e..f83fa86 100644
--- a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
+++ b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
@@ -142,36 +142,48 @@
}
}
-std::vector<ComposerView::ComposerLayer> HwcDisplay::GetFrame() {
- // Increment the time the fence is signalled every time we get the
- // presentation frame. This ensures that calling ReleaseFrame() only affects
- // the current frame.
- fence_time_++;
-
+Error HwcDisplay::GetFrame(
+ std::vector<ComposerView::ComposerLayer>* out_frames) {
bool queued_client_target = false;
std::vector<ComposerView::ComposerLayer> frame;
for (const auto& layer : layers_) {
if (layer.composition_type == IComposerClient::Composition::CLIENT) {
- if (!queued_client_target) {
- ComposerView::ComposerLayer client_target_layer = {
- .buffer = buffer_,
- .fence = fence_.get() ? fence_ : new Fence(-1),
- .display_frame = {0, 0, static_cast<int32_t>(buffer_->getWidth()),
- static_cast<int32_t>(buffer_->getHeight())},
- .crop = {0.0f, 0.0f, static_cast<float>(buffer_->getWidth()),
- static_cast<float>(buffer_->getHeight())},
- .blend_mode = IComposerClient::BlendMode::NONE,
- };
+ if (queued_client_target)
+ continue;
- frame.push_back(client_target_layer);
- queued_client_target = true;
+ if (!buffer_.get()) {
+ ALOGE("Client composition requested but no client target buffer");
+ return Error::BAD_LAYER;
}
+
+ ComposerView::ComposerLayer client_target_layer = {
+ .buffer = buffer_,
+ .fence = fence_.get() ? fence_ : new Fence(-1),
+ .display_frame = {0, 0, static_cast<int32_t>(buffer_->getWidth()),
+ static_cast<int32_t>(buffer_->getHeight())},
+ .crop = {0.0f, 0.0f, static_cast<float>(buffer_->getWidth()),
+ static_cast<float>(buffer_->getHeight())},
+ .blend_mode = IComposerClient::BlendMode::NONE,
+ };
+
+ frame.push_back(client_target_layer);
+ queued_client_target = true;
} else {
+ if (!layer.info.buffer.get() || !layer.info.fence.get()) {
+ ALOGE("Layer requested without valid buffer");
+ return Error::BAD_LAYER;
+ }
+
frame.push_back(layer.info);
}
}
- return frame;
+ // Increment the time the fence is signalled every time we get the
+ // presentation frame. This ensures that calling ReleaseFrame() only affects
+ // the current frame.
+ fence_time_++;
+ out_frames->swap(frame);
+ return Error::NONE;
}
void HwcDisplay::GetReleaseFences(int* present_fence,
@@ -392,7 +404,8 @@
base::unique_fd fence(releaseFence);
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
- return Error::NONE;
+ ALOGE("Virtual display support not implemented");
+ return Error::UNSUPPORTED;
}
Error VrHwc::validateDisplay(
@@ -423,7 +436,10 @@
std::vector<ComposerView::ComposerLayer> frame;
{
std::lock_guard<std::mutex> guard(mutex_);
- frame = display_.GetFrame();
+ Error status = display_.GetFrame(&frame);
+ if (status != Error::NONE)
+ return status;
+
display_.GetReleaseFences(outPresentFence, outLayers, outReleaseFences);
}
diff --git a/services/vr/vr_window_manager/composer/impl/vr_hwc.h b/services/vr/vr_window_manager/composer/impl/vr_hwc.h
index 1de056a..6b9487b 100644
--- a/services/vr/vr_window_manager/composer/impl/vr_hwc.h
+++ b/services/vr/vr_window_manager/composer/impl/vr_hwc.h
@@ -115,7 +115,7 @@
std::vector<Layer>* layer_ids,
std::vector<IComposerClient::Composition>* composition);
- std::vector<ComposerView::ComposerLayer> GetFrame();
+ Error GetFrame(std::vector<ComposerView::ComposerLayer>* out_frame);
void GetReleaseFences(int* present_fence, std::vector<Layer>* layer_ids,
std::vector<int>* fences);
diff --git a/services/vr/vr_window_manager/elbow_model.cpp b/services/vr/vr_window_manager/elbow_model.cpp
index 54d1eb4..9543f17 100644
--- a/services/vr/vr_window_manager/elbow_model.cpp
+++ b/services/vr/vr_window_manager/elbow_model.cpp
@@ -1,6 +1,6 @@
#include "elbow_model.h"
-#include <cutils/log.h>
+#include <log/log.h>
namespace android {
namespace dvr {
diff --git a/services/vr/vr_window_manager/hwc_callback.cpp b/services/vr/vr_window_manager/hwc_callback.cpp
index b2edc20..5045790 100644
--- a/services/vr/vr_window_manager/hwc_callback.cpp
+++ b/services/vr/vr_window_manager/hwc_callback.cpp
@@ -79,8 +79,7 @@
}
std::lock_guard<std::mutex> guard(mutex_);
- if (client_)
- client_->OnFrame(std::make_unique<Frame>(std::move(hwc_frame)));
+ client_->OnFrame(std::make_unique<Frame>(std::move(hwc_frame)));
return Void();
}
diff --git a/services/vr/vr_window_manager/render_thread.cpp b/services/vr/vr_window_manager/render_thread.cpp
index 00e3161..b67a051 100644
--- a/services/vr/vr_window_manager/render_thread.cpp
+++ b/services/vr/vr_window_manager/render_thread.cpp
@@ -1,6 +1,6 @@
-#include <cutils/log.h>
-#include <future>
#include <jni.h>
+#include <log/log.h>
+#include <future>
#include "render_thread.h"
#include "shell_view.h"
@@ -75,7 +75,6 @@
jobject android_context = env->NewLocalRef(android_context_global_ref_);
int init_result = shell_view_.Initialize(env, android_context, class_loader);
- init_result += shell_view_.AllocateResources();
init_result_promise->set_value(init_result);
if (init_result == 0) {
while (!quit_)
diff --git a/services/vr/vr_window_manager/shell_view.cpp b/services/vr/vr_window_manager/shell_view.cpp
index 3b18f74..11680af 100644
--- a/services/vr/vr_window_manager/shell_view.cpp
+++ b/services/vr/vr_window_manager/shell_view.cpp
@@ -1,11 +1,11 @@
#include "shell_view.h"
-#include <android/input.h>
-#include <binder/IServiceManager.h>
-#include <cutils/log.h>
#include <EGL/eglext.h>
#include <GLES3/gl3.h>
+#include <android/input.h>
+#include <binder/IServiceManager.h>
#include <hardware/hwcomposer2.h>
+#include <log/log.h>
#include "controller_mesh.h"
#include "texture.h"
@@ -15,6 +15,8 @@
namespace {
+constexpr float kLayerScaleFactor = 4.0f;
+
constexpr unsigned int kVRAppLayerCount = 2;
constexpr unsigned int kMaximumPendingFrames = 8;
@@ -105,6 +107,9 @@
else
xscale = ar;
+ xscale *= kLayerScaleFactor;
+ yscale *= kLayerScaleFactor;
+
return mat4(Eigen::Scaling<float>(xscale, yscale, 1.0));
}
@@ -126,7 +131,7 @@
m(3, 0) = 0.0f; m(3, 1) = 0.0f; m(3, 2) = 0.0f; m(3, 3) = 1.0f;
// clang-format on
- return m;
+ return m * Eigen::AngleAxisf(M_PI * 0.5f, vec3::UnitZ());
}
// Helper function that applies the crop transform to the texture layer and
@@ -194,16 +199,22 @@
}
// Determine if ths frame should be shown or hidden.
-bool CalculateVisibilityFromLayerConfig(const HwcCallback::Frame& frame,
- uint32_t vr_app) {
+ViewMode CalculateVisibilityFromLayerConfig(const HwcCallback::Frame& frame,
+ uint32_t vr_app) {
auto& layers = frame.layers();
// We assume the first two layers are the VR app.
if (layers.size() < kVRAppLayerCount)
- return false;
+ return ViewMode::Hidden;
- if (vr_app != layers[0].appid || layers[0].appid == 0)
- return false;
+ if (vr_app != layers[0].appid || layers[0].appid == 0 ||
+ layers[1].appid != layers[0].appid) {
+ if (layers[1].appid != layers[0].appid && layers[0].appid) {
+ // This might be a 2D app.
+ return ViewMode::App;
+ }
+ return ViewMode::Hidden;
+ }
// If a non-VR-app, non-skipped layer appears, show.
size_t index = kVRAppLayerCount;
@@ -219,11 +230,12 @@
// If any non-skipped layers exist now then we show, otherwise hide.
for (size_t i = index; i < layers.size(); i++) {
if (!layers[i].should_skip_layer())
- return true;
+ return ViewMode::VR;
}
- return false;
+ return ViewMode::Hidden;
}
+
} // namespace
ShellView::ShellView() {
@@ -245,6 +257,10 @@
if (!InitializeTouch())
ALOGE("Failed to initialize virtual touchpad");
+ surface_flinger_view_.reset(new SurfaceFlingerView);
+ if (!surface_flinger_view_->Initialize(this))
+ return 1;
+
return 0;
}
@@ -262,10 +278,6 @@
if (!program_ || !overlay_program_ || !controller_program_)
return 1;
- surface_flinger_view_.reset(new SurfaceFlingerView);
- if (!surface_flinger_view_->Initialize(this))
- return 1;
-
reticle_.reset(new Reticle());
if (!reticle_->Initialize())
return 1;
@@ -299,34 +311,45 @@
: MainThreadTask::ExitingVrMode);
}
+void ShellView::AdvanceFrame() {
+ if (!pending_frames_.empty()) {
+ // Check if we should advance the frame.
+ auto& frame = pending_frames_.front();
+ if (frame.visibility == ViewMode::Hidden ||
+ frame.frame->Finish() == HwcCallback::FrameStatus::kFinished) {
+ current_frame_ = std::move(frame);
+ pending_frames_.pop_front();
+
+ for(int i = 0; i < skipped_frame_count_ + 1; i++)
+ surface_flinger_view_->ReleaseFrame();
+ skipped_frame_count_ = 0;
+ }
+ }
+}
+
void ShellView::OnDrawFrame() {
textures_.clear();
has_ime_ = false;
{
std::unique_lock<std::mutex> l(pending_frame_mutex_);
- if (!pending_frames_.empty()) {
- // Check if we should advance the frame.
- auto& frame = pending_frames_.front();
- if (!frame.visibility ||
- frame.frame->Finish() == HwcCallback::FrameStatus::kFinished) {
- current_frame_ = std::move(frame);
- pending_frames_.pop_front();
- }
- }
+ AdvanceFrame();
}
- if (!debug_mode_ && current_frame_.visibility != is_visible_) {
- SetVisibility(current_frame_.visibility);
+ bool visible = current_frame_.visibility != ViewMode::Hidden;
+
+ if (!debug_mode_ && visible != is_visible_) {
+ SetVisibility(current_frame_.visibility != ViewMode::Hidden);
}
- if (!current_frame_.visibility)
+ if (!debug_mode_ && !visible)
return;
ime_texture_ = TextureLayer();
surface_flinger_view_->GetTextures(*current_frame_.frame.get(), &textures_,
- &ime_texture_, debug_mode_);
+ &ime_texture_, debug_mode_,
+ current_frame_.visibility == ViewMode::VR);
has_ime_ = ime_texture_.texture != nullptr;
}
@@ -370,33 +393,35 @@
}
void ShellView::OnFrame(std::unique_ptr<HwcCallback::Frame> frame) {
- if (!frame || frame->layers().empty())
- return;
+ ViewMode visibility =
+ CalculateVisibilityFromLayerConfig(*frame.get(), current_vr_app_);
- bool visibility = debug_mode_ || CalculateVisibilityFromLayerConfig(
- *frame.get(), current_vr_app_);
+ if (visibility == ViewMode::Hidden && debug_mode_)
+ visibility = ViewMode::VR;
current_vr_app_ = frame->layers().front().appid;
- // If we are not showing the frame there's no need to keep anything around.
- if (!visibility) {
- // Hidden, no change so drop it completely
- if (!current_frame_.visibility)
- return;
-
- frame.reset(nullptr);
- }
-
std::unique_lock<std::mutex> l(pending_frame_mutex_);
pending_frames_.emplace_back(std::move(frame), visibility);
- if (pending_frames_.size() > kMaximumPendingFrames)
+ if (pending_frames_.size() > kMaximumPendingFrames) {
+ skipped_frame_count_++;
pending_frames_.pop_front();
+ }
+
+ if (visibility == ViewMode::Hidden &&
+ current_frame_.visibility == ViewMode::Hidden) {
+ // Consume all frames while hidden.
+ while (!pending_frames_.empty())
+ AdvanceFrame();
+ }
// If we are showing ourselves the main thread is not processing anything,
// so give it a kick.
- if (visibility && !current_frame_.visibility)
+ if (visibility != ViewMode::Hidden && current_frame_.visibility == ViewMode::Hidden) {
+ QueueTask(MainThreadTask::EnteringVrMode);
QueueTask(MainThreadTask::Show);
+ }
}
bool ShellView::IsHit(const vec3& view_location, const vec3& view_direction,
diff --git a/services/vr/vr_window_manager/shell_view.h b/services/vr/vr_window_manager/shell_view.h
index 589902e..ba46e6d 100644
--- a/services/vr/vr_window_manager/shell_view.h
+++ b/services/vr/vr_window_manager/shell_view.h
@@ -14,6 +14,12 @@
namespace android {
namespace dvr {
+enum class ViewMode {
+ Hidden,
+ VR,
+ App,
+};
+
class ShellView : public Application, public HwcCallback::Client {
public:
ShellView();
@@ -57,6 +63,8 @@
bool OnClick(bool down);
+ void AdvanceFrame();
+
// HwcCallback::Client:
void OnFrame(std::unique_ptr<HwcCallback::Frame> frame) override;
@@ -64,6 +72,9 @@
std::unique_ptr<ShaderProgram> overlay_program_;
std::unique_ptr<ShaderProgram> controller_program_;
+ // This starts at -1 so we don't call ReleaseFrame for the first frame.
+ int skipped_frame_count_ = -1;
+
uint32_t current_vr_app_;
// Used to center the scene when the shell becomes visible.
@@ -92,7 +103,7 @@
struct PendingFrame {
PendingFrame() = default;
- PendingFrame(std::unique_ptr<HwcCallback::Frame>&& frame, bool visibility)
+ PendingFrame(std::unique_ptr<HwcCallback::Frame>&& frame, ViewMode visibility)
: frame(std::move(frame)), visibility(visibility) {}
PendingFrame(PendingFrame&& r)
: frame(std::move(r.frame)), visibility(r.visibility) {}
@@ -103,7 +114,7 @@
}
std::unique_ptr<HwcCallback::Frame> frame;
- bool visibility = false;
+ ViewMode visibility = ViewMode::Hidden;
};
std::deque<PendingFrame> pending_frames_;
std::mutex pending_frame_mutex_;
diff --git a/services/vr/vr_window_manager/surface_flinger_view.cpp b/services/vr/vr_window_manager/surface_flinger_view.cpp
index d38fcc0..b15d262 100644
--- a/services/vr/vr_window_manager/surface_flinger_view.cpp
+++ b/services/vr/vr_window_manager/surface_flinger_view.cpp
@@ -38,13 +38,13 @@
bool SurfaceFlingerView::GetTextures(const HwcCallback::Frame& frame,
std::vector<TextureLayer>* texture_layers,
TextureLayer* ime_layer,
- bool debug) const {
+ bool debug, bool skip_first_layer) const {
auto& layers = frame.layers();
texture_layers->clear();
size_t start = 0;
// Skip the second layer if it is from the VR app.
- if (!debug) {
+ if (!debug && skip_first_layer) {
start = 1;
if (layers[0].appid && layers[0].appid == layers[1].appid)
start = 2;
@@ -75,5 +75,9 @@
return true;
}
+void SurfaceFlingerView::ReleaseFrame() {
+ composer_service_->releaseFrame();
+}
+
} // namespace dvr
} // namespace android
diff --git a/services/vr/vr_window_manager/surface_flinger_view.h b/services/vr/vr_window_manager/surface_flinger_view.h
index e079cdb..2e36ec1 100644
--- a/services/vr/vr_window_manager/surface_flinger_view.h
+++ b/services/vr/vr_window_manager/surface_flinger_view.h
@@ -33,7 +33,10 @@
bool GetTextures(const HwcCallback::Frame& layers,
std::vector<TextureLayer>* texture_layers,
- TextureLayer* ime_layer, bool debug) const;
+ TextureLayer* ime_layer, bool debug,
+ bool skip_first_layer) const;
+
+ void ReleaseFrame();
private:
sp<IVrComposerView> composer_service_;
diff --git a/services/vr/vr_window_manager/texture.cpp b/services/vr/vr_window_manager/texture.cpp
index dbd91b7..2229efa 100644
--- a/services/vr/vr_window_manager/texture.cpp
+++ b/services/vr/vr_window_manager/texture.cpp
@@ -1,7 +1,7 @@
#include "texture.h"
-#include <cutils/log.h>
#include <GLES/glext.h>
+#include <log/log.h>
#include <system/window.h>
namespace android {
diff --git a/services/vr/vr_window_manager/vr_window_manager.cpp b/services/vr/vr_window_manager/vr_window_manager.cpp
index 736a14f..8d9ad79 100644
--- a/services/vr/vr_window_manager/vr_window_manager.cpp
+++ b/services/vr/vr_window_manager/vr_window_manager.cpp
@@ -11,11 +11,6 @@
return 1;
}
- if (app.AllocateResources()) {
- ALOGE("Failed to allocate resources");
- return 1;
- }
-
while (true)
app.DrawFrame();
diff --git a/services/vr/vr_window_manager/vr_window_manager_jni.cpp b/services/vr/vr_window_manager/vr_window_manager_jni.cpp
index f52658a..49eaba1 100644
--- a/services/vr/vr_window_manager/vr_window_manager_jni.cpp
+++ b/services/vr/vr_window_manager/vr_window_manager_jni.cpp
@@ -1,5 +1,5 @@
-#include <cutils/log.h>
#include <jni.h>
+#include <log/log.h>
#include <memory>
diff --git a/vulkan/libvulkan/driver.cpp b/vulkan/libvulkan/driver.cpp
index e4e242a..b34e9be 100644
--- a/vulkan/libvulkan/driver.cpp
+++ b/vulkan/libvulkan/driver.cpp
@@ -467,6 +467,7 @@
name = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
ext_bit = ProcHook::ANDROID_native_buffer;
break;
+ case ProcHook::KHR_incremental_present:
case ProcHook::GOOGLE_display_timing:
hook_extensions_.set(ext_bit);
// return now as these extensions do not require HAL support