Add DaydreamVR native libraries and services
Upstreaming the main VR system components from master-dreamos-dev
into goog/master.
Bug: None
Test: `m -j32` succeeds. Sailfish boots and basic_vr sample app works
Change-Id: I853015872afc443aecee10411ef2d6b79184d051
diff --git a/services/vr/.clang-format b/services/vr/.clang-format
new file mode 100644
index 0000000..04d7970
--- /dev/null
+++ b/services/vr/.clang-format
@@ -0,0 +1,5 @@
+BasedOnStyle: Google
+DerivePointerAlignment: false
+PointerAlignment: Left
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
diff --git a/services/vr/Android.bp b/services/vr/Android.bp
index 80df479..af8212a 100644
--- a/services/vr/Android.bp
+++ b/services/vr/Android.bp
@@ -1,3 +1,3 @@
subdirs = [
- "*",
+ "*/*",
]
diff --git a/services/vr/bufferhubd/Android.mk b/services/vr/bufferhubd/Android.mk
new file mode 100644
index 0000000..492acb2
--- /dev/null
+++ b/services/vr/bufferhubd/Android.mk
@@ -0,0 +1,51 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+ buffer_hub.cpp \
+ bufferhubd.cpp \
+ consumer_channel.cpp \
+ producer_channel.cpp \
+ consumer_queue_channel.cpp \
+ producer_queue_channel.cpp \
+
+staticLibraries := \
+ libchrome \
+ libperformance \
+ libpdx_default_transport \
+ libbufferhub
+
+sharedLibraries := \
+ libbase \
+ libcutils \
+ libhardware \
+ liblog \
+ libsync \
+ libutils
+
+include $(CLEAR_VARS)
+# Don't strip symbols so we see stack traces in logcat.
+LOCAL_STRIP_MODULE := false
+LOCAL_SRC_FILES := $(sourceFiles)
+LOCAL_CFLAGS := -DLOG_TAG=\"bufferhubd\"
+LOCAL_CFLAGS += -DTRACE=0
+LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_GRAPHICS
+LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_MODULE := bufferhubd
+LOCAL_INIT_RC := bufferhubd.rc
+include $(BUILD_EXECUTABLE)
+
diff --git a/services/vr/bufferhubd/buffer_hub.cpp b/services/vr/bufferhubd/buffer_hub.cpp
new file mode 100644
index 0000000..a0c7439
--- /dev/null
+++ b/services/vr/bufferhubd/buffer_hub.cpp
@@ -0,0 +1,474 @@
+#include "buffer_hub.h"
+
+#include <cutils/log.h>
+#include <poll.h>
+#include <utils/Trace.h>
+
+#include <iomanip>
+#include <sstream>
+#include <string>
+#include <thread>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/bufferhub_rpc.h>
+#include "consumer_channel.h"
+#include "producer_channel.h"
+#include "producer_queue_channel.h"
+
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::default_transport::Endpoint;
+
+namespace android {
+namespace dvr {
+
+BufferHubService::BufferHubService()
+ : BASE("BufferHub", Endpoint::Create(BufferHubRPC::kClientPath)) {}
+
+BufferHubService::~BufferHubService() {}
+
+bool BufferHubService::IsInitialized() const {
+ return BASE::IsInitialized() && IonBuffer::GetGrallocModule();
+}
+
+std::string BufferHubService::DumpState(size_t /*max_length*/) {
+ std::ostringstream stream;
+ auto channels = GetChannels<BufferHubChannel>();
+
+ std::sort(channels.begin(), channels.end(),
+ [](const std::shared_ptr<BufferHubChannel>& a,
+ const std::shared_ptr<BufferHubChannel>& b) {
+ return a->buffer_id() < b->buffer_id();
+ });
+
+ stream << "Active Producer Buffers:\n";
+ stream << std::right;
+ stream << std::setw(6) << "Id";
+ stream << " ";
+ stream << std::setw(9) << "Consumers";
+ stream << " ";
+ stream << std::setw(14) << "Geometry";
+ stream << " ";
+ stream << std::setw(6) << "Format";
+ stream << " ";
+ stream << std::setw(10) << "Usage";
+ stream << " ";
+ stream << "Name";
+ stream << std::endl;
+
+ for (const auto& channel : channels) {
+ if (channel->channel_type() == BufferHubChannel::kProducerType) {
+ BufferHubChannel::BufferInfo info = channel->GetBufferInfo();
+
+ stream << std::right;
+ stream << std::setw(6) << info.id;
+ stream << " ";
+ stream << std::setw(9) << info.consumer_count;
+ stream << " ";
+ if (info.format == HAL_PIXEL_FORMAT_BLOB) {
+ std::string size = std::to_string(info.width) + " B";
+ stream << std::setw(14) << size;
+ } else {
+ std::string dimensions = std::to_string(info.width) + "x" +
+ std::to_string(info.height) + "x" +
+ std::to_string(info.slice_count);
+ stream << std::setw(14) << dimensions;
+ }
+ stream << " ";
+ stream << std::setw(6) << info.format;
+ stream << " ";
+ stream << "0x" << std::hex << std::setfill('0');
+ stream << std::setw(8) << info.usage;
+ stream << std::dec << std::setfill(' ');
+ stream << " ";
+ stream << info.name;
+ stream << std::endl;
+ }
+ }
+
+ stream << "Active Consumer Buffers:\n";
+ stream << std::right;
+ stream << std::setw(6) << "Id";
+ stream << " ";
+ stream << std::setw(14) << "Geometry";
+ stream << " ";
+ stream << "Name";
+ stream << std::endl;
+
+ for (const auto& channel : channels) {
+ if (channel->channel_type() == BufferHubChannel::kConsumerType) {
+ BufferHubChannel::BufferInfo info = channel->GetBufferInfo();
+
+ stream << std::right;
+ stream << std::setw(6) << info.id;
+ stream << " ";
+
+ if (info.consumer_count == 0) {
+        // consumer_count is tracked by the producer. When it is zero, the
+        // producer must have already hung up and the consumer is orphaned.
+ stream << std::setw(14) << "Orphaned.";
+ stream << std::endl;
+ continue;
+ }
+
+ if (info.format == HAL_PIXEL_FORMAT_BLOB) {
+ std::string size = std::to_string(info.width) + " B";
+ stream << std::setw(14) << size;
+ } else {
+ std::string dimensions = std::to_string(info.width) + "x" +
+ std::to_string(info.height) + "x" +
+ std::to_string(info.slice_count);
+ stream << std::setw(14) << dimensions;
+ }
+ stream << " ";
+ stream << info.name;
+ stream << std::endl;
+ }
+ }
+
+ stream << std::endl;
+ stream << "Active Producer Queues:\n";
+ stream << std::right << std::setw(6) << "Id";
+ stream << std::right << std::setw(12) << " Allocated";
+ stream << std::right << std::setw(12) << " Consumers";
+ stream << " UsageSetMask";
+ stream << " UsageClearMask";
+ stream << " UsageDenySetMask";
+ stream << " UsageDenyClearMask";
+ stream << " ";
+ stream << std::endl;
+
+ for (const auto& channel : channels) {
+ if (channel->channel_type() == BufferHubChannel::kProducerQueueType) {
+ BufferHubChannel::BufferInfo info = channel->GetBufferInfo();
+
+ stream << std::dec << std::setfill(' ');
+ stream << std::right << std::setw(6) << info.id;
+ stream << std::right << std::setw(12) << info.capacity;
+ stream << std::right << std::setw(12) << info.consumer_count;
+ stream << std::setw(5) << std::setfill(' ') << "0x";
+ stream << std::hex << std::setfill('0');
+ stream << std::setw(8) << info.usage_set_mask;
+ stream << std::setw(7) << std::setfill(' ') << "0x";
+ stream << std::hex << std::setfill('0');
+ stream << std::setw(8) << info.usage_clear_mask;
+ stream << std::setw(9) << std::setfill(' ') << "0x";
+ stream << std::hex << std::setfill('0');
+ stream << std::setw(8) << info.usage_deny_set_mask;
+ stream << std::setw(11) << std::setfill(' ') << "0x";
+ stream << std::hex << std::setfill('0');
+      stream << std::setw(8) << info.usage_deny_clear_mask;
+      stream << std::endl;
+    }
+ }
+
+ stream << std::endl;
+ stream << "Active Consumer Queues:\n";
+ stream << std::dec << std::setfill(' ');
+ stream << std::right << std::setw(6) << "Id";
+ stream << std::right << std::setw(12) << " Imported";
+ stream << " ";
+ stream << std::endl;
+
+ for (const auto& channel : channels) {
+ if (channel->channel_type() == BufferHubChannel::kConsumerQueueType) {
+ BufferHubChannel::BufferInfo info = channel->GetBufferInfo();
+
+ stream << std::right << std::setw(6) << info.id;
+      stream << std::right << std::setw(12) << info.capacity;
+      stream << std::endl;
+    }
+ }
+
+ return stream.str();
+}
+
+void BufferHubService::HandleImpulse(Message& message) {
+ ATRACE_NAME("BufferHubService::HandleImpulse");
+ if (auto channel = message.GetChannel<BufferHubChannel>())
+ channel->HandleImpulse(message);
+}
+
+int BufferHubService::HandleMessage(Message& message) {
+ ATRACE_NAME("BufferHubService::HandleMessage");
+ auto channel = message.GetChannel<BufferHubChannel>();
+
+ ALOGD_IF(
+ TRACE,
+ "BufferHubService::HandleMessage: channel=%p channel_id=%d opcode=%d",
+ channel.get(), message.GetChannelId(), message.GetOp());
+
+ // If the channel is already set up, let it handle the message.
+ if (channel && !channel->HandleMessage(message))
+ return DefaultHandleMessage(message);
+
+ // This channel has not been set up yet, the following are valid operations.
+ switch (message.GetOp()) {
+ case BufferHubRPC::CreateBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::CreateBuffer>(
+ *this, &BufferHubService::OnCreateBuffer, message);
+ return 0;
+
+ case BufferHubRPC::CreatePersistentBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::CreatePersistentBuffer>(
+ *this, &BufferHubService::OnCreatePersistentBuffer, message);
+ return 0;
+
+ case BufferHubRPC::GetPersistentBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::GetPersistentBuffer>(
+ *this, &BufferHubService::OnGetPersistentBuffer, message);
+ return 0;
+
+ case BufferHubRPC::CreateProducerQueue::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::CreateProducerQueue>(
+ *this, &BufferHubService::OnCreateProducerQueue, message);
+ return 0;
+
+ default:
+ return DefaultHandleMessage(message);
+ }
+}
+
+void BufferHubService::OnChannelClose(Message&,
+ const std::shared_ptr<Channel>& channel) {
+ if (auto buffer = std::static_pointer_cast<BufferHubChannel>(channel))
+ buffer->Detach();
+}
+
+int BufferHubService::OnCreateBuffer(Message& message, int width, int height,
+ int format, int usage,
+ size_t meta_size_bytes,
+ size_t slice_count) {
+ // Use the producer channel id as the global buffer id.
+ const int buffer_id = message.GetChannelId();
+ ALOGD_IF(TRACE,
+ "BufferHubService::OnCreateBuffer: buffer_id=%d width=%d height=%d "
+ "format=%d usage=%d meta_size_bytes=%zu slice_count=%zu",
+ buffer_id, width, height, format, usage, meta_size_bytes,
+ slice_count);
+
+ // See if this channel is already attached to a buffer.
+ if (const auto channel = message.GetChannel<BufferHubChannel>()) {
+ ALOGE("BufferHubService::OnCreateBuffer: Buffer already created: buffer=%d",
+ buffer_id);
+ return -EALREADY;
+ }
+
+ int error;
+ if (const auto producer_channel =
+ ProducerChannel::Create(this, buffer_id, width, height, format, usage,
+ meta_size_bytes, slice_count, &error)) {
+ message.SetChannel(producer_channel);
+ return 0;
+ } else {
+ ALOGE("BufferHubService::OnCreateBuffer: Failed to create producer!!");
+ return error;
+ }
+}
+
+int BufferHubService::OnCreatePersistentBuffer(
+ Message& message, const std::string& name, int user_id, int group_id,
+ int width, int height, int format, int usage, size_t meta_size_bytes,
+ size_t slice_count) {
+ const int channel_id = message.GetChannelId();
+ ALOGD_IF(TRACE,
+ "BufferHubService::OnCreatePersistentBuffer: channel_id=%d name=%s "
+ "user_id=%d group_id=%d width=%d height=%d format=%d usage=%d "
+ "meta_size_bytes=%zu slice_count=%zu",
+ channel_id, name.c_str(), user_id, group_id, width, height, format,
+ usage, meta_size_bytes, slice_count);
+
+ // See if this channel is already attached to a buffer.
+ if (const auto channel = message.GetChannel<BufferHubChannel>()) {
+ ALOGE(
+ "BufferHubService::OnCreatePersistentBuffer: Channel already attached "
+ "to buffer: channel_id=%d buffer_id=%d",
+ channel_id, channel->buffer_id());
+ return -EALREADY;
+ }
+
+ const int euid = message.GetEffectiveUserId();
+ const int egid = message.GetEffectiveGroupId();
+ int error;
+
+ if (auto buffer = GetNamedBuffer(name)) {
+ if (!buffer->CheckAccess(euid, egid)) {
+ ALOGE(
+ "BufferHubService::OnCreatePersistentBuffer: Requesting process does "
+ "not have permission to access named buffer: name=%s euid=%d egid=%d",
+          name.c_str(), euid, egid);
+ return -EPERM;
+ } else if (!buffer->CheckParameters(width, height, format, usage,
+ meta_size_bytes, slice_count)) {
+ ALOGE(
+ "BufferHubService::OnCreatePersistentBuffer: Requested an existing "
+ "buffer with different parameters: name=%s",
+ name.c_str());
+ return -EINVAL;
+ } else if (!buffer->IsDetached()) {
+ ALOGE(
+ "BufferHubService::OnCreatePersistentBuffer: Requesting a persistent "
+ "buffer that is already attached to a channel: name=%s",
+ name.c_str());
+ return -EINVAL;
+ } else {
+ buffer->Attach(channel_id);
+ message.SetChannel(buffer);
+ return 0;
+ }
+ } else if (auto buffer = ProducerChannel::Create(
+ this, channel_id, width, height, format, usage,
+ meta_size_bytes, slice_count, &error)) {
+ const int ret =
+ buffer->OnProducerMakePersistent(message, name, user_id, group_id);
+ if (!ret)
+ message.SetChannel(buffer);
+ return ret;
+ } else {
+ ALOGE("BufferHubService::OnCreateBuffer: Failed to create producer!!");
+ return error;
+ }
+}
+
+int BufferHubService::OnGetPersistentBuffer(Message& message,
+ const std::string& name) {
+ const int channel_id = message.GetChannelId();
+ ALOGD_IF(TRACE,
+ "BufferHubService::OnGetPersistentBuffer: channel_id=%d name=%s",
+ channel_id, name.c_str());
+
+ // See if this channel is already attached to a buffer.
+ if (const auto channel = message.GetChannel<BufferHubChannel>()) {
+ ALOGE(
+ "BufferHubService::OnGetPersistentBuffer: Channel already attached to "
+ "buffer: channel_id=%d buffer_id=%d",
+ channel_id, channel->buffer_id());
+ return -EALREADY;
+ }
+
+ const int euid = message.GetEffectiveUserId();
+ const int egid = message.GetEffectiveGroupId();
+
+ if (auto buffer = GetNamedBuffer(name)) {
+ if (!buffer->CheckAccess(euid, egid)) {
+ ALOGE(
+ "BufferHubService::OnGetPersistentBuffer: Requesting process does "
+ "not have permission to access named buffer: name=%s euid=%d egid=%d",
+ name.c_str(), euid, egid);
+ return -EPERM;
+ } else if (!buffer->IsDetached()) {
+ ALOGE(
+ "BufferHubService::OnGetPersistentBuffer: Requesting a persistent "
+ "buffer that is already attached to a channel: name=%s",
+ name.c_str());
+ return -EINVAL;
+ } else {
+ buffer->Attach(channel_id);
+ message.SetChannel(buffer);
+ return 0;
+ }
+ } else {
+ ALOGE("BufferHubService::OnGetPersistentBuffer: Buffer \"%s\" not found!",
+ name.c_str());
+ return -ENOENT;
+ }
+}
+
+int BufferHubService::OnCreateProducerQueue(
+ pdx::Message& message, size_t meta_size_bytes, int usage_set_mask,
+ int usage_clear_mask, int usage_deny_set_mask, int usage_deny_clear_mask) {
+ // Use the producer channel id as the global queue id.
+ const int queue_id = message.GetChannelId();
+ ALOGD_IF(TRACE, "BufferHubService::OnCreateProducerQueue: queue_id=%d",
+ queue_id);
+
+ // See if this channel is already attached to another object.
+ if (const auto channel = message.GetChannel<BufferHubChannel>()) {
+ ALOGE("BufferHubService::OnCreateProducerQueue: already created: queue=%d",
+ queue_id);
+ return -EALREADY;
+ }
+
+ int error;
+ if (const auto producer_channel = ProducerQueueChannel::Create(
+ this, queue_id, meta_size_bytes, usage_set_mask, usage_clear_mask,
+ usage_deny_set_mask, usage_deny_clear_mask, &error)) {
+ message.SetChannel(producer_channel);
+ return 0;
+ } else {
+ ALOGE("BufferHubService::OnCreateBuffer: Failed to create producer!!");
+ return error;
+ }
+}
+
+bool BufferHubService::AddNamedBuffer(
+ const std::string& name, const std::shared_ptr<ProducerChannel>& buffer) {
+ auto search = named_buffers_.find(name);
+ if (search == named_buffers_.end()) {
+ named_buffers_.emplace(name, buffer);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+std::shared_ptr<ProducerChannel> BufferHubService::GetNamedBuffer(
+ const std::string& name) {
+ auto search = named_buffers_.find(name);
+ if (search != named_buffers_.end())
+ return search->second;
+ else
+ return nullptr;
+}
+
+bool BufferHubService::RemoveNamedBuffer(const ProducerChannel& buffer) {
+ for (auto it = named_buffers_.begin(); it != named_buffers_.end();) {
+ if (it->second.get() == &buffer) {
+ named_buffers_.erase(it);
+ return true;
+ }
+ ++it;
+ }
+ return false;
+}
+
+void BufferHubChannel::SignalAvailable() {
+ ATRACE_NAME("BufferHubChannel::SignalAvailable");
+ if (!IsDetached()) {
+ const int ret = service_->ModifyChannelEvents(channel_id_, 0, POLLIN);
+ ALOGE_IF(ret < 0,
+ "BufferHubChannel::SignalAvailable: failed to signal availability "
+ "channel_id=%d: %s",
+ channel_id_, strerror(-ret));
+ } else {
+ ALOGD_IF(TRACE, "BufferHubChannel::SignalAvailable: detached buffer.");
+ }
+}
+
+void BufferHubChannel::ClearAvailable() {
+ ATRACE_NAME("BufferHubChannel::ClearAvailable");
+ if (!IsDetached()) {
+ const int ret = service_->ModifyChannelEvents(channel_id_, POLLIN, 0);
+ ALOGE_IF(ret < 0,
+ "BufferHubChannel::ClearAvailable: failed to clear availability "
+ "channel_id=%d: %s",
+ channel_id_, strerror(-ret));
+ } else {
+ ALOGD_IF(TRACE, "BufferHubChannel::ClearAvailable: detached buffer.");
+ }
+}
+
+void BufferHubChannel::Hangup() {
+ ATRACE_NAME("BufferHubChannel::Hangup");
+ if (!IsDetached()) {
+ const int ret = service_->ModifyChannelEvents(channel_id_, 0, POLLHUP);
+ ALOGE_IF(
+ ret < 0,
+ "BufferHubChannel::Hangup: failed to signal hangup channel_id=%d: %s",
+ channel_id_, strerror(-ret));
+ } else {
+ ALOGD_IF(TRACE, "BufferHubChannel::Hangup: detached buffer.");
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/bufferhubd/buffer_hub.h b/services/vr/bufferhubd/buffer_hub.h
new file mode 100644
index 0000000..28cb468
--- /dev/null
+++ b/services/vr/bufferhubd/buffer_hub.h
@@ -0,0 +1,182 @@
+#ifndef ANDROID_DVR_BUFFERHUBD_BUFFER_HUB_H_
+#define ANDROID_DVR_BUFFERHUBD_BUFFER_HUB_H_
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include <hardware/gralloc.h>
+#include <pdx/service.h>
+
+namespace android {
+namespace dvr {
+
+class BufferHubService;
+class ConsumerChannel;
+class ProducerChannel;
+class ConsumerQueueChannel;
+class ProducerQueueChannel;
+
+class BufferHubChannel : public pdx::Channel {
+ public:
+ enum ChannelType {
+ kProducerType,
+ kConsumerType,
+ kProducerQueueType,
+ kConsumerQueueType,
+ };
+
+ enum : int { kDetachedId = -1 };
+
+ BufferHubChannel(BufferHubService* service, int buffer_id, int channel_id,
+ ChannelType channel_type)
+ : service_(service),
+ buffer_id_(buffer_id),
+ channel_id_(channel_id),
+ channel_type_(channel_type) {}
+ virtual ~BufferHubChannel() {}
+
+ virtual bool HandleMessage(pdx::Message& message) = 0;
+ virtual void HandleImpulse(pdx::Message& message) = 0;
+
+ // Captures buffer info for use by BufferHubService::DumpState().
+ struct BufferInfo {
+    // Common data fields shared by BufferProducer and ProducerQueue.
+ int id = -1;
+ int type = -1;
+ size_t consumer_count = 0;
+
+    // Data fields for a buffer producer.
+ int width = 0;
+ int height = 0;
+ int format = 0;
+ int usage = 0;
+ size_t slice_count = 0;
+ std::string name;
+
+    // Data fields for a producer queue.
+ size_t capacity = 0;
+ int usage_set_mask = 0;
+ int usage_clear_mask = 0;
+ int usage_deny_set_mask = 0;
+ int usage_deny_clear_mask = 0;
+
+ BufferInfo(int id, size_t consumer_count, int width, int height, int format,
+ int usage, size_t slice_count, const std::string& name)
+ : id(id),
+ type(kProducerType),
+ consumer_count(consumer_count),
+ width(width),
+ height(height),
+ format(format),
+ usage(usage),
+ slice_count(slice_count),
+ name(name) {}
+
+ BufferInfo(int id, size_t consumer_count, size_t capacity, int usage_set_mask,
+ int usage_clear_mask, int usage_deny_set_mask,
+ int usage_deny_clear_mask)
+ : id(id),
+ type(kProducerQueueType),
+ consumer_count(consumer_count),
+ capacity(capacity),
+ usage_set_mask(usage_set_mask),
+ usage_clear_mask(usage_clear_mask),
+ usage_deny_set_mask(usage_deny_set_mask),
+ usage_deny_clear_mask(usage_deny_clear_mask) {}
+
+ BufferInfo() {}
+ };
+
+ // Returns the buffer info for this buffer.
+ virtual BufferInfo GetBufferInfo() const = 0;
+
+ // Signal the client fd that an ownership change occurred using POLLIN.
+ void SignalAvailable();
+
+ // Clear the ownership change event.
+ void ClearAvailable();
+
+ // Signal hangup event.
+ void Hangup();
+
+ BufferHubService* service() const { return service_; }
+ ChannelType channel_type() const { return channel_type_; }
+ int buffer_id() const { return buffer_id_; }
+
+ int channel_id() const { return channel_id_; }
+ bool IsDetached() const { return channel_id_ == kDetachedId; }
+
+ void Detach() {
+ if (channel_type_ == kProducerType)
+ channel_id_ = kDetachedId;
+ }
+ void Attach(int channel_id) {
+ if (channel_type_ == kProducerType && channel_id_ == kDetachedId)
+ channel_id_ = channel_id;
+ }
+
+ private:
+ BufferHubService* service_;
+
+ // Static id of the buffer for logging and informational purposes. This id
+ // does not change for the life of the buffer.
+ // TODO(eieio): Consider using an id allocator instead of the originating
+ // channel id; channel ids wrap after 2^31 ids, but this is not a problem in
+ // general because channel ids are not used for any lookup in this service.
+ int buffer_id_;
+
+ // The channel id of the buffer. This may change for a persistent producer
+ // buffer if it is detached and re-attached to another channel.
+ int channel_id_;
+
+ ChannelType channel_type_;
+
+ BufferHubChannel(const BufferHubChannel&) = delete;
+ void operator=(const BufferHubChannel&) = delete;
+};
+
+class BufferHubService : public pdx::ServiceBase<BufferHubService> {
+ public:
+ BufferHubService();
+ ~BufferHubService() override;
+
+ int HandleMessage(pdx::Message& message) override;
+ void HandleImpulse(pdx::Message& message) override;
+
+ void OnChannelClose(pdx::Message& message,
+ const std::shared_ptr<pdx::Channel>& channel) override;
+
+ bool IsInitialized() const override;
+ std::string DumpState(size_t max_length) override;
+
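+  // Registry of named ("persistent") buffers. A buffer made persistent via
+  // ProducerChannel::OnProducerMakePersistent stays registered here even
+  // after its channel closes and is detached, so a later GetPersistentBuffer
+  // or CreatePersistentBuffer request can re-attach to it; RemoveNamedBuffer
+  // drops the persistence again.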
+ bool AddNamedBuffer(const std::string& name,
+ const std::shared_ptr<ProducerChannel>& buffer);
+ std::shared_ptr<ProducerChannel> GetNamedBuffer(const std::string& name);
+ bool RemoveNamedBuffer(const ProducerChannel& buffer);
+
+ private:
+ friend BASE;
+
+ std::unordered_map<std::string, std::shared_ptr<ProducerChannel>>
+ named_buffers_;
+
+ int OnCreateBuffer(pdx::Message& message, int width, int height, int format,
+ int usage, size_t meta_size_bytes, size_t slice_count);
+ int OnCreatePersistentBuffer(pdx::Message& message, const std::string& name,
+ int user_id, int group_id, int width, int height,
+ int format, int usage, size_t meta_size_bytes,
+ size_t slice_count);
+ int OnGetPersistentBuffer(pdx::Message& message, const std::string& name);
+ int OnCreateProducerQueue(pdx::Message& message, size_t meta_size_bytes,
+ int usage_set_mask, int usage_clear_mask,
+ int usage_deny_set_mask, int usage_deny_clear_mask);
+
+ BufferHubService(const BufferHubService&) = delete;
+ void operator=(const BufferHubService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_BUFFERHUBD_BUFFER_HUB_H_
diff --git a/services/vr/bufferhubd/bufferhubd.cpp b/services/vr/bufferhubd/bufferhubd.cpp
new file mode 100644
index 0000000..a8e2ddf
--- /dev/null
+++ b/services/vr/bufferhubd/bufferhubd.cpp
@@ -0,0 +1,37 @@
+#include <sched.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <cutils/log.h>
+
+#include <dvr/performance_client_api.h>
+#include <pdx/default_transport/service_dispatcher.h>
+
+#include "buffer_hub.h"
+
+int main(int, char**) {
+ int ret = -1;
+ std::shared_ptr<android::pdx::Service> service;
+ std::unique_ptr<android::pdx::ServiceDispatcher> dispatcher;
+
+ // We need to be able to create endpoints with full perms.
+ umask(0000);
+
+ dispatcher = android::pdx::default_transport::ServiceDispatcher::Create();
+ CHECK_ERROR(!dispatcher, error, "Failed to create service dispatcher\n");
+
+ service = android::dvr::BufferHubService::Create();
+ CHECK_ERROR(!service, error, "Failed to create buffer hub service\n");
+ dispatcher->AddService(service);
+
+ ret = dvrSetSchedulerClass(0, "graphics");
+ CHECK_ERROR(ret < 0, error, "Failed to set thread priority");
+
+ ALOGI("Entering message loop.");
+
+ ret = dispatcher->EnterDispatchLoop();
+ CHECK_ERROR(ret < 0, error, "Dispatch loop exited because: %s\n",
+ strerror(-ret));
+
+error:
+ return -ret;
+}
diff --git a/services/vr/bufferhubd/bufferhubd.rc b/services/vr/bufferhubd/bufferhubd.rc
new file mode 100644
index 0000000..ceedf1a
--- /dev/null
+++ b/services/vr/bufferhubd/bufferhubd.rc
@@ -0,0 +1,6 @@
+service bufferhubd /system/bin/bufferhubd
+ class core
+ user system
+ group system
+ cpuset /
+
diff --git a/services/vr/bufferhubd/consumer_channel.cpp b/services/vr/bufferhubd/consumer_channel.cpp
new file mode 100644
index 0000000..8db92a3
--- /dev/null
+++ b/services/vr/bufferhubd/consumer_channel.cpp
@@ -0,0 +1,182 @@
+#include "consumer_channel.h"
+
+#include <cutils/log.h>
+#include <utils/Trace.h>
+
+#include <thread>
+
+#include <private/dvr/bufferhub_rpc.h>
+#include "producer_channel.h"
+
+using android::pdx::BorrowedHandle;
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+ConsumerChannel::ConsumerChannel(BufferHubService* service, int buffer_id,
+ int channel_id,
+ const std::shared_ptr<Channel> producer)
+ : BufferHubChannel(service, buffer_id, channel_id, kConsumerType),
+ handled_(true),
+ ignored_(false),
+ producer_(producer) {
+ GetProducer()->AddConsumer(this);
+}
+
+ConsumerChannel::~ConsumerChannel() {
+ ALOGD_IF(TRACE, "ConsumerChannel::~ConsumerChannel: channel_id=%d",
+ channel_id());
+
+ if (auto producer = GetProducer()) {
+ if (!handled_) // Producer is waiting for our Release.
+ producer->OnConsumerIgnored();
+ producer->RemoveConsumer(this);
+ }
+}
+
+BufferHubChannel::BufferInfo ConsumerChannel::GetBufferInfo() const {
+ BufferHubChannel::BufferInfo info;
+ if (auto producer = GetProducer()) {
+ // If producer has not hung up, copy most buffer info from the producer.
+ info = producer->GetBufferInfo();
+ }
+ info.id = buffer_id();
+ return info;
+}
+
+std::shared_ptr<ProducerChannel> ConsumerChannel::GetProducer() const {
+ return std::static_pointer_cast<ProducerChannel>(producer_.lock());
+}
+
+void ConsumerChannel::HandleImpulse(Message& message) {
+ ATRACE_NAME("ConsumerChannel::HandleImpulse");
+ switch (message.GetOp()) {
+ case BufferHubRPC::ConsumerRelease::Opcode:
+ OnConsumerRelease(message, {});
+ break;
+ }
+}
+
+bool ConsumerChannel::HandleMessage(Message& message) {
+ ATRACE_NAME("ConsumerChannel::HandleMessage");
+ auto producer = GetProducer();
+ if (!producer)
+ REPLY_ERROR_RETURN(message, EPIPE, true);
+
+ switch (message.GetOp()) {
+ case BufferHubRPC::GetBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::GetBuffer>(
+ *producer, &ProducerChannel::OnGetBuffer, message);
+ return true;
+
+ case BufferHubRPC::GetBuffers::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::GetBuffers>(
+ *producer, &ProducerChannel::OnGetBuffers, message);
+ return true;
+
+ case BufferHubRPC::NewConsumer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::NewConsumer>(
+ *producer, &ProducerChannel::OnNewConsumer, message);
+ return true;
+
+ case BufferHubRPC::ConsumerAcquire::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ConsumerAcquire>(
+ *this, &ConsumerChannel::OnConsumerAcquire, message);
+ return true;
+
+ case BufferHubRPC::ConsumerRelease::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ConsumerRelease>(
+ *this, &ConsumerChannel::OnConsumerRelease, message);
+ return true;
+
+ case BufferHubRPC::ConsumerSetIgnore::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ConsumerSetIgnore>(
+ *this, &ConsumerChannel::OnConsumerSetIgnore, message);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+std::pair<BorrowedFence, ConsumerChannel::MetaData>
+ConsumerChannel::OnConsumerAcquire(Message& message,
+ std::size_t metadata_size) {
+ ATRACE_NAME("ConsumerChannel::OnConsumerAcquire");
+ auto producer = GetProducer();
+ if (!producer)
+ REPLY_ERROR_RETURN(message, EPIPE, {});
+
+ if (ignored_ || handled_) {
+ ALOGE(
+ "ConsumerChannel::OnConsumerAcquire: Acquire when not posted: "
+ "ignored=%d handled=%d channel_id=%d buffer_id=%d",
+ ignored_, handled_, message.GetChannelId(), producer->buffer_id());
+ REPLY_ERROR_RETURN(message, EBUSY, {});
+ } else {
+ ClearAvailable();
+ return producer->OnConsumerAcquire(message, metadata_size);
+ }
+}
+
+int ConsumerChannel::OnConsumerRelease(Message& message,
+ LocalFence release_fence) {
+ ATRACE_NAME("ConsumerChannel::OnConsumerRelease");
+ auto producer = GetProducer();
+ if (!producer)
+ return -EPIPE;
+
+ if (ignored_ || handled_) {
+ ALOGE(
+ "ConsumerChannel::OnConsumerRelease: Release when not acquired: "
+ "ignored=%d handled=%d channel_id=%d buffer_id=%d",
+ ignored_, handled_, message.GetChannelId(), producer->buffer_id());
+ return -EBUSY;
+ } else {
+ ClearAvailable();
+ const int ret =
+ producer->OnConsumerRelease(message, std::move(release_fence));
+ handled_ = ret == 0;
+ return ret;
+ }
+}
+
+int ConsumerChannel::OnConsumerSetIgnore(Message&, bool ignored) {
+ ATRACE_NAME("ConsumerChannel::OnConsumerSetIgnore");
+ auto producer = GetProducer();
+ if (!producer)
+ return -EPIPE;
+
+ ignored_ = ignored;
+ if (ignored_ && !handled_) {
+ // Update the producer if ignore is set after the consumer acquires the
+ // buffer.
+ ClearAvailable();
+ producer->OnConsumerIgnored();
+    handled_ = true;
+ }
+
+ return 0;
+}
+
+bool ConsumerChannel::OnProducerPosted() {
+ if (ignored_) {
+ handled_ = true;
+ return false;
+ } else {
+ handled_ = false;
+ SignalAvailable();
+ return true;
+ }
+}
+
+void ConsumerChannel::OnProducerClosed() {
+ producer_.reset();
+ Hangup();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/bufferhubd/consumer_channel.h b/services/vr/bufferhubd/consumer_channel.h
new file mode 100644
index 0000000..d2a078f
--- /dev/null
+++ b/services/vr/bufferhubd/consumer_channel.h
@@ -0,0 +1,51 @@
+#ifndef ANDROID_DVR_BUFFERHUBD_CONSUMER_CHANNEL_H_
+#define ANDROID_DVR_BUFFERHUBD_CONSUMER_CHANNEL_H_
+
+#include "buffer_hub.h"
+
+#include <pdx/rpc/buffer_wrapper.h>
+#include <private/dvr/bufferhub_rpc.h>
+
+namespace android {
+namespace dvr {
+
+// Consumer channels are attached to a producer channel.
+class ConsumerChannel : public BufferHubChannel {
+ public:
+ using Channel = pdx::Channel;
+ using Message = pdx::Message;
+
+ ConsumerChannel(BufferHubService* service, int buffer_id, int channel_id,
+ const std::shared_ptr<Channel> producer);
+ ~ConsumerChannel() override;
+
+ bool HandleMessage(Message& message) override;
+ void HandleImpulse(Message& message) override;
+
+ BufferInfo GetBufferInfo() const override;
+
+ bool OnProducerPosted();
+ void OnProducerClosed();
+
+ private:
+ using MetaData = pdx::rpc::BufferWrapper<std::uint8_t*>;
+
+ std::shared_ptr<ProducerChannel> GetProducer() const;
+
+ std::pair<BorrowedFence, MetaData> OnConsumerAcquire(
+ Message& message, std::size_t metadata_size);
+ int OnConsumerRelease(Message& message, LocalFence release_fence);
+ int OnConsumerSetIgnore(Message& message, bool ignore);
+
+ bool handled_; // True if we have processed RELEASE.
+ bool ignored_; // True if we are ignoring events.
+ std::weak_ptr<Channel> producer_;
+
+ ConsumerChannel(const ConsumerChannel&) = delete;
+ void operator=(const ConsumerChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_BUFFERHUBD_CONSUMER_CHANNEL_H_
diff --git a/services/vr/bufferhubd/consumer_queue_channel.cpp b/services/vr/bufferhubd/consumer_queue_channel.cpp
new file mode 100644
index 0000000..39d6bc8
--- /dev/null
+++ b/services/vr/bufferhubd/consumer_queue_channel.cpp
@@ -0,0 +1,122 @@
+#include "consumer_queue_channel.h"
+
+#include <pdx/channel_handle.h>
+
+#include "producer_channel.h"
+
+using android::pdx::RemoteChannelHandle;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+ConsumerQueueChannel::ConsumerQueueChannel(
+ BufferHubService* service, int buffer_id, int channel_id,
+ const std::shared_ptr<Channel>& producer)
+ : BufferHubChannel(service, buffer_id, channel_id, kConsumerQueueType),
+ producer_(producer),
+ capacity_(0) {
+ GetProducer()->AddConsumer(this);
+}
+
+ConsumerQueueChannel::~ConsumerQueueChannel() {
+ ALOGD_IF(TRACE, "ConsumerQueueChannel::~ConsumerQueueChannel: channel_id=%d",
+ channel_id());
+
+ if (auto producer = GetProducer()) {
+ producer->RemoveConsumer(this);
+ }
+}
+
+bool ConsumerQueueChannel::HandleMessage(Message& message) {
+ ATRACE_NAME("ConsumerQueueChannel::HandleMessage");
+ auto producer = GetProducer();
+ if (!producer)
+ REPLY_ERROR_RETURN(message, EPIPE, true);
+
+ switch (message.GetOp()) {
+ case BufferHubRPC::CreateConsumerQueue::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::CreateConsumerQueue>(
+ *producer, &ProducerQueueChannel::OnCreateConsumerQueue, message);
+ return true;
+
+ case BufferHubRPC::ConsumerQueueImportBuffers::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ConsumerQueueImportBuffers>(
+ *this, &ConsumerQueueChannel::OnConsumerQueueImportBuffers, message);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+std::shared_ptr<ProducerQueueChannel> ConsumerQueueChannel::GetProducer()
+ const {
+ return std::static_pointer_cast<ProducerQueueChannel>(producer_.lock());
+}
+
+void ConsumerQueueChannel::HandleImpulse(Message& /* message */) {
+ ATRACE_NAME("ConsumerQueueChannel::HandleImpulse");
+}
+
+BufferHubChannel::BufferInfo ConsumerQueueChannel::GetBufferInfo() const {
+ BufferHubChannel::BufferInfo info;
+ if (auto producer = GetProducer()) {
+ // If producer has not hung up, copy most buffer info from the producer.
+ info = producer->GetBufferInfo();
+ }
+ info.id = buffer_id();
+ info.capacity = capacity_;
+ return info;
+}
+
+void ConsumerQueueChannel::RegisterNewBuffer(
+ const std::shared_ptr<ProducerChannel>& producer_channel, size_t slot) {
+ pending_buffer_slots_.emplace(producer_channel, slot);
+
+  // Signal the client that a new buffer is available through POLLIN.
+ SignalAvailable();
+}
+
+std::vector<std::pair<RemoteChannelHandle, size_t>>
+ConsumerQueueChannel::OnConsumerQueueImportBuffers(Message& message) {
+ std::vector<std::pair<RemoteChannelHandle, size_t>> buffer_handles;
+ ATRACE_NAME("ConsumerQueueChannel::OnConsumerQueueImportBuffers");
+ ALOGD(
+ "ConsumerQueueChannel::OnConsumerQueueImportBuffers number of buffers to "
+ "import: %zu",
+ pending_buffer_slots_.size());
+
+ while (!pending_buffer_slots_.empty()) {
+ auto producer_channel = pending_buffer_slots_.front().first.lock();
+ size_t producer_slot = pending_buffer_slots_.front().second;
+ pending_buffer_slots_.pop();
+
+ // It's possible that the producer channel has expired.
+ if (producer_channel == nullptr) {
+ ALOGE(
+ "ConsumerQueueChannel::OnConsumerQueueImportBuffers: producer "
+ "channel has already been expired.");
+ REPLY_ERROR_RETURN(message, ENOENT, {});
+ }
+
+ RemoteChannelHandle consumer_handle(
+ producer_channel->CreateConsumer(message));
+
+ // All buffer imports should succeed together.
+ if (!consumer_handle.valid()) {
+ ALOGE(
+ "ConsumerQueueChannel::OnConsumerQueueImportBuffers: imported "
+ "consumer handle is invalid.");
+ REPLY_ERROR_RETURN(message, EIO, {});
+ }
+
+ // Move consumer_handle into buffer_handles.
+ buffer_handles.emplace_back(std::move(consumer_handle), producer_slot);
+ }
+
+ return buffer_handles;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/bufferhubd/consumer_queue_channel.h b/services/vr/bufferhubd/consumer_queue_channel.h
new file mode 100644
index 0000000..b345595
--- /dev/null
+++ b/services/vr/bufferhubd/consumer_queue_channel.h
@@ -0,0 +1,62 @@
+#ifndef ANDROID_DVR_BUFFERHUBD_CONSUMER_QUEUE_CHANNEL_H_
+#define ANDROID_DVR_BUFFERHUBD_CONSUMER_QUEUE_CHANNEL_H_
+
+#include "buffer_hub.h"
+
+#include <private/dvr/bufferhub_rpc.h>
+
+#include <queue>
+
+#include "consumer_channel.h"
+#include "producer_queue_channel.h"
+
+namespace android {
+namespace dvr {
+
+class ConsumerQueueChannel : public BufferHubChannel {
+ public:
+ using Message = pdx::Message;
+ using RemoteChannelHandle = pdx::RemoteChannelHandle;
+
+ ConsumerQueueChannel(BufferHubService* service, int buffer_id, int channel_id,
+ const std::shared_ptr<Channel>& producer);
+ ~ConsumerQueueChannel() override;
+
+ bool HandleMessage(Message& message) override;
+ void HandleImpulse(Message& message) override;
+
+ BufferInfo GetBufferInfo() const override;
+
+  // Called by ProducerQueueChannel to notify the consumer queue that a new
+  // buffer has been allocated.
+ void RegisterNewBuffer(
+ const std::shared_ptr<ProducerChannel>& producer_channel, size_t slot);
+
+  // Called after the client has been signaled by the service that new
+  // buffers have been allocated. The client issues ConsumerQueueImportBuffers
+  // to import the new buffers; this handler returns a vector of channel
+  // handles representing the BufferConsumers that the client can import.
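+  // A rough sketch of the flow implemented here:
+  //   1. ProducerQueueChannel allocates a new buffer and calls
+  //      RegisterNewBuffer() on this channel.
+  //   2. This channel queues the (producer channel, slot) pair and signals
+  //      the client with POLLIN.
+  //   3. The client sends ConsumerQueueImportBuffers; a consumer channel is
+  //      created for each pending buffer and its (handle, slot) pair returned.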
+ std::vector<std::pair<RemoteChannelHandle, size_t>>
+ OnConsumerQueueImportBuffers(Message& message);
+
+ private:
+ std::shared_ptr<ProducerQueueChannel> GetProducer() const;
+
+  // Pointer to the producer channel.
+ std::weak_ptr<Channel> producer_;
+
+  // Tracks newly allocated buffer producers along with their slot numbers.
+ std::queue<std::pair<std::weak_ptr<ProducerChannel>, size_t>>
+ pending_buffer_slots_;
+
+  // Tracks how many buffers this queue has imported.
+ size_t capacity_;
+
+ ConsumerQueueChannel(const ConsumerQueueChannel&) = delete;
+ void operator=(const ConsumerQueueChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_BUFFERHUBD_CONSUMER_QUEUE_CHANNEL_H_
diff --git a/services/vr/bufferhubd/producer_channel.cpp b/services/vr/bufferhubd/producer_channel.cpp
new file mode 100644
index 0000000..b87b709
--- /dev/null
+++ b/services/vr/bufferhubd/producer_channel.cpp
@@ -0,0 +1,377 @@
+#include "producer_channel.h"
+
+#include <cutils/log.h>
+#include <sync/sync.h>
+#include <sys/poll.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <atomic>
+#include <thread>
+
+#include <base/logging.h>
+#include <private/dvr/bufferhub_rpc.h>
+#include "consumer_channel.h"
+
+using android::pdx::BorrowedHandle;
+using android::pdx::Message;
+using android::pdx::RemoteChannelHandle;
+using android::pdx::rpc::BufferWrapper;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::WrapBuffer;
+
+namespace android {
+namespace dvr {
+
+ProducerChannel::ProducerChannel(BufferHubService* service, int channel_id,
+ int width, int height, int format, int usage,
+ size_t meta_size_bytes, size_t slice_count,
+ int* error)
+ : BufferHubChannel(service, channel_id, channel_id, kProducerType),
+ pending_consumers_(0),
+ slices_(std::max(static_cast<size_t>(1), slice_count)),
+ producer_owns_(true),
+ meta_size_bytes_(meta_size_bytes),
+ meta_(meta_size_bytes ? new uint8_t[meta_size_bytes] : nullptr) {
+ for (auto& ion_buffer : slices_) {
+ const int ret = ion_buffer.Alloc(width, height, format, usage);
+ if (ret < 0) {
+ ALOGE("ProducerChannel::ProducerChannel: Failed to allocate buffer: %s",
+ strerror(-ret));
+ *error = ret;
+ return;
+ }
+ }
+
+ // Success.
+ *error = 0;
+}
+
+std::shared_ptr<ProducerChannel> ProducerChannel::Create(
+ BufferHubService* service, int channel_id, int width, int height,
+ int format, int usage, size_t meta_size_bytes, size_t slice_count,
+ int* error) {
+ std::shared_ptr<ProducerChannel> producer(
+ new ProducerChannel(service, channel_id, width, height, format, usage,
+ meta_size_bytes, slice_count, error));
+ if (*error < 0)
+ return nullptr;
+ else
+ return producer;
+}
+
+ProducerChannel::~ProducerChannel() {
+ ALOGD_IF(TRACE, "ProducerChannel::~ProducerChannel: channel_id=%d",
+ channel_id());
+ for (auto consumer : consumer_channels_)
+ consumer->OnProducerClosed();
+}
+
+BufferHubChannel::BufferInfo ProducerChannel::GetBufferInfo() const {
+ return BufferInfo(buffer_id(), consumer_channels_.size(), slices_[0].width(),
+ slices_[0].height(), slices_[0].format(),
+ slices_[0].usage(), slices_.size(), name_);
+}
+
+void ProducerChannel::HandleImpulse(Message& message) {
+ ATRACE_NAME("ProducerChannel::HandleImpulse");
+ switch (message.GetOp()) {
+ case BufferHubRPC::ProducerGain::Opcode:
+ OnProducerGain(message);
+ break;
+ }
+}
+
+bool ProducerChannel::HandleMessage(Message& message) {
+ ATRACE_NAME("ProducerChannel::HandleMessage");
+ switch (message.GetOp()) {
+ case BufferHubRPC::GetBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::GetBuffer>(
+ *this, &ProducerChannel::OnGetBuffer, message);
+ return true;
+
+ case BufferHubRPC::GetBuffers::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::GetBuffers>(
+ *this, &ProducerChannel::OnGetBuffers, message);
+ return true;
+
+ case BufferHubRPC::NewConsumer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::NewConsumer>(
+ *this, &ProducerChannel::OnNewConsumer, message);
+ return true;
+
+ case BufferHubRPC::ProducerPost::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerPost>(
+ *this, &ProducerChannel::OnProducerPost, message);
+ return true;
+
+ case BufferHubRPC::ProducerGain::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerGain>(
+ *this, &ProducerChannel::OnProducerGain, message);
+ return true;
+
+ case BufferHubRPC::ProducerMakePersistent::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerMakePersistent>(
+ *this, &ProducerChannel::OnProducerMakePersistent, message);
+ return true;
+
+ case BufferHubRPC::ProducerRemovePersistence::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerRemovePersistence>(
+ *this, &ProducerChannel::OnRemovePersistence, message);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+NativeBufferHandle<BorrowedHandle> ProducerChannel::OnGetBuffer(
+ Message& message, unsigned index) {
+ ATRACE_NAME("ProducerChannel::OnGetBuffer");
+ ALOGD_IF(TRACE, "ProducerChannel::OnGetBuffer: buffer=%d", buffer_id());
+ if (index < slices_.size()) {
+ return NativeBufferHandle<BorrowedHandle>(slices_[index], buffer_id());
+ } else {
+ REPLY_ERROR_RETURN(message, EINVAL, NativeBufferHandle<BorrowedHandle>());
+ }
+}
+
+std::vector<NativeBufferHandle<BorrowedHandle>> ProducerChannel::OnGetBuffers(
+ Message&) {
+ ATRACE_NAME("ProducerChannel::OnGetBuffers");
+ ALOGD_IF(TRACE, "ProducerChannel::OnGetBuffers: buffer_id=%d", buffer_id());
+ std::vector<NativeBufferHandle<BorrowedHandle>> buffer_handles;
+ for (const auto& buffer : slices_)
+ buffer_handles.emplace_back(buffer, buffer_id());
+ return buffer_handles;
+}
+
+RemoteChannelHandle ProducerChannel::CreateConsumer(Message& message) {
+ ATRACE_NAME("ProducerChannel::CreateConsumer");
+ ALOGD_IF(TRACE, "ProducerChannel::CreateConsumer: buffer_id=%d", buffer_id());
+
+ int channel_id;
+ auto status = message.PushChannel(0, nullptr, &channel_id);
+ if (!status) {
+ ALOGE(
+ "ProducerChannel::CreateConsumer: failed to push consumer channel: %s",
+ status.GetErrorMessage().c_str());
+ return RemoteChannelHandle();
+ }
+
+ auto consumer = std::make_shared<ConsumerChannel>(
+ service(), buffer_id(), channel_id, shared_from_this());
+ const int ret = service()->SetChannel(channel_id, consumer);
+ if (ret < 0) {
+ ALOGE(
+ "ProducerChannel::CreateConsumer: failed to set new consumer channel: "
+ "%s",
+ strerror(-ret));
+ return RemoteChannelHandle();
+ }
+
+ if (!producer_owns_) {
+ // Signal the new consumer when adding it to a posted producer.
+ if (consumer->OnProducerPosted())
+ pending_consumers_++;
+ }
+
+ return status.take();
+}
+
+RemoteChannelHandle ProducerChannel::OnNewConsumer(Message& message) {
+ ATRACE_NAME("ProducerChannel::OnNewConsumer");
+ ALOGD_IF(TRACE, "ProducerChannel::OnNewConsumer: buffer_id=%d", buffer_id());
+
+ RemoteChannelHandle consumer_handle(CreateConsumer(message));
+
+ if (consumer_handle.valid())
+ return consumer_handle;
+ else
+ REPLY_ERROR_RETURN(message, ENOMEM, RemoteChannelHandle());
+}
+
+int ProducerChannel::OnProducerPost(
+ Message&, LocalFence acquire_fence,
+ BufferWrapper<std::vector<std::uint8_t>> metadata) {
+ ATRACE_NAME("ProducerChannel::OnProducerPost");
+ ALOGD_IF(TRACE, "ProducerChannel::OnProducerPost: buffer_id=%d", buffer_id());
+ if (!producer_owns_) {
+ ALOGE("ProducerChannel::OnProducerPost: Not in gained state!");
+ return -EBUSY;
+ }
+
+ if (meta_size_bytes_ != metadata.size())
+ return -EINVAL;
+ std::copy(metadata.begin(), metadata.end(), meta_.get());
+
+ post_fence_ = std::move(acquire_fence);
+ producer_owns_ = false;
+
+ // Signal any interested consumers. If there are none, automatically release
+ // the buffer.
+ pending_consumers_ = 0;
+ for (auto consumer : consumer_channels_) {
+ if (consumer->OnProducerPosted())
+ pending_consumers_++;
+ }
+ if (pending_consumers_ == 0)
+ SignalAvailable();
+ ALOGD_IF(TRACE, "ProducerChannel::OnProducerPost: %d pending consumers",
+ pending_consumers_);
+
+ return 0;
+}
+
+LocalFence ProducerChannel::OnProducerGain(Message& message) {
+ ATRACE_NAME("ProducerChannel::OnGain");
+ ALOGD_IF(TRACE, "ProducerChannel::OnGain: buffer_id=%d", buffer_id());
+ if (producer_owns_) {
+ ALOGE("ProducerChanneL::OnGain: Already in gained state: channel=%d",
+ channel_id());
+ REPLY_ERROR_RETURN(message, EALREADY, {});
+ }
+
+ // There are still pending consumers, return busy.
+ if (pending_consumers_ > 0)
+ REPLY_ERROR_RETURN(message, EBUSY, {});
+
+ ClearAvailable();
+ producer_owns_ = true;
+ post_fence_.get_fd();
+ return std::move(returned_fence_);
+}
+
+std::pair<BorrowedFence, BufferWrapper<std::uint8_t*>>
+ProducerChannel::OnConsumerAcquire(Message& message,
+ std::size_t metadata_size) {
+ ATRACE_NAME("ProducerChannel::OnConsumerAcquire");
+ ALOGD_IF(TRACE, "ProducerChannel::OnConsumerAcquire: buffer_id=%d",
+ buffer_id());
+ if (producer_owns_) {
+ ALOGE("ProducerChannel::OnConsumerAcquire: Not in posted state!");
+ REPLY_ERROR_RETURN(message, EBUSY, {});
+ }
+
+ // Return a borrowed fd to avoid unnecessary duplication of the underlying fd.
+ // Serialization just needs to read the handle.
+ if (metadata_size == 0)
+ return std::make_pair(post_fence_.borrow(),
+ WrapBuffer<std::uint8_t>(nullptr, 0));
+ else
+ return std::make_pair(post_fence_.borrow(),
+ WrapBuffer(meta_.get(), meta_size_bytes_));
+}
+
+int ProducerChannel::OnConsumerRelease(Message&, LocalFence release_fence) {
+ ATRACE_NAME("ProducerChannel::OnConsumerRelease");
+ ALOGD_IF(TRACE, "ProducerChannel::OnConsumerRelease: buffer_id=%d",
+ buffer_id());
+ if (producer_owns_) {
+ ALOGE("ProducerChannel::OnConsumerRelease: Not in acquired state!");
+ return -EBUSY;
+ }
+
+ // Attempt to merge the fences if necessary.
+ if (release_fence) {
+ if (returned_fence_) {
+ LocalFence merged_fence(sync_merge(
+ "bufferhub_merged", returned_fence_.get_fd(), release_fence.get_fd()));
+ const int error = errno;
+ if (!merged_fence) {
+ ALOGE("ProducerChannel::OnConsumerRelease: Failed to merge fences: %s",
+ strerror(error));
+ return -error;
+ }
+ returned_fence_ = std::move(merged_fence);
+ } else {
+ returned_fence_ = std::move(release_fence);
+ }
+ }
+
+ OnConsumerIgnored();
+ return 0;
+}
+
+void ProducerChannel::OnConsumerIgnored() {
+ if (!--pending_consumers_)
+ SignalAvailable();
+ ALOGD_IF(TRACE,
+ "ProducerChannel::OnConsumerIgnored: buffer_id=%d %d consumers left",
+ buffer_id(), pending_consumers_);
+}
+
+int ProducerChannel::OnProducerMakePersistent(Message& message,
+ const std::string& name,
+ int user_id, int group_id) {
+ ATRACE_NAME("ProducerChannel::OnProducerMakePersistent");
+ ALOGD_IF(TRACE,
+ "ProducerChannel::OnProducerMakePersistent: buffer_id=%d name=%s "
+ "user_id=%d group_id=%d",
+ buffer_id(), name.c_str(), user_id, group_id);
+
+ if (name.empty() || (user_id < 0 && user_id != kNoCheckId) ||
+ (group_id < 0 && group_id != kNoCheckId)) {
+ return -EINVAL;
+ }
+
+ // Try to add this buffer with the requested name.
+ if (service()->AddNamedBuffer(name, std::static_pointer_cast<ProducerChannel>(
+ shared_from_this()))) {
+ // If successful, set the requested permissions.
+
+ // A value of zero indicates that the ids from the sending process should be
+ // used.
+ if (user_id == kUseCallerId)
+ user_id = message.GetEffectiveUserId();
+ if (group_id == kUseCallerId)
+ group_id = message.GetEffectiveGroupId();
+
+ owner_user_id_ = user_id;
+ owner_group_id_ = group_id;
+ name_ = name;
+ return 0;
+ } else {
+ // Otherwise a buffer with that name already exists.
+ return -EALREADY;
+ }
+}
+
+int ProducerChannel::OnRemovePersistence(Message&) {
+ if (service()->RemoveNamedBuffer(*this))
+ return 0;
+ else
+ return -ENOENT;
+}
+
+void ProducerChannel::AddConsumer(ConsumerChannel* channel) {
+ consumer_channels_.push_back(channel);
+}
+
+void ProducerChannel::RemoveConsumer(ConsumerChannel* channel) {
+ consumer_channels_.erase(
+ std::find(consumer_channels_.begin(), consumer_channels_.end(), channel));
+}
+
+// Returns true if either the user or group ids match the owning ids or both
+// owning ids are not set, in which case access control does not apply.
+bool ProducerChannel::CheckAccess(int euid, int egid) {
+ const bool no_check =
+ owner_user_id_ == kNoCheckId && owner_group_id_ == kNoCheckId;
+ const bool euid_check = euid == owner_user_id_ || euid == kRootId;
+ const bool egid_check = egid == owner_group_id_ || egid == kRootId;
+ return no_check || euid_check || egid_check;
+}
+
+// Returns true if the given parameters match the underlying buffer parameters.
+bool ProducerChannel::CheckParameters(int width, int height, int format,
+ int usage, size_t meta_size_bytes,
+ size_t slice_count) {
+ return slices_.size() == slice_count &&
+ meta_size_bytes == meta_size_bytes_ && slices_[0].width() == width &&
+ slices_[0].height() == height && slices_[0].format() == format &&
+ slices_[0].usage() == usage;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/bufferhubd/producer_channel.h b/services/vr/bufferhubd/producer_channel.h
new file mode 100644
index 0000000..e7ca459
--- /dev/null
+++ b/services/vr/bufferhubd/producer_channel.h
@@ -0,0 +1,109 @@
+#ifndef ANDROID_DVR_BUFFERHUBD_PRODUCER_CHANNEL_H_
+#define ANDROID_DVR_BUFFERHUBD_PRODUCER_CHANNEL_H_
+
+#include "buffer_hub.h"
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include <pdx/channel_handle.h>
+#include <pdx/file_handle.h>
+#include <pdx/rpc/buffer_wrapper.h>
+#include <private/dvr/bufferhub_rpc.h>
+#include <private/dvr/ion_buffer.h>
+
+namespace android {
+namespace dvr {
+
+// The buffer changes ownership according to the following sequence:
+// POST -> ACQUIRE/RELEASE (all consumers) -> GAIN (producer acquires) -> POST
+
+// The producer channel is owned by a single app that writes into buffers and
+// calls POST when drawing is complete. This channel has a set of consumer
+// channels associated with it that are waiting for notifications.
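+//
+// In terms of the BufferHubRPC opcodes handled here, one cycle looks roughly
+// like this (informal sketch, not client code):
+//
+//   producer: ProducerPost(acquire_fence, metadata)  // ownership -> consumers
+//   consumer: ConsumerAcquire(metadata_size)         // gets fence + metadata
+//   consumer: ConsumerRelease(release_fence)         // last release frees it
+//   producer: ProducerGain()                         // ownership -> producer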
+class ProducerChannel : public BufferHubChannel {
+ public:
+ using Message = pdx::Message;
+ using BorrowedHandle = pdx::BorrowedHandle;
+ using RemoteChannelHandle = pdx::RemoteChannelHandle;
+ template <typename T>
+ using BufferWrapper = pdx::rpc::BufferWrapper<T>;
+
+ static std::shared_ptr<ProducerChannel> Create(
+ BufferHubService* service, int channel_id, int width, int height,
+ int format, int usage, size_t meta_size_bytes, size_t slice_count,
+ int* error);
+
+ ~ProducerChannel() override;
+
+ bool HandleMessage(Message& message) override;
+ void HandleImpulse(Message& message) override;
+
+ BufferInfo GetBufferInfo() const override;
+
+ NativeBufferHandle<BorrowedHandle> OnGetBuffer(Message& message,
+ unsigned index);
+ std::vector<NativeBufferHandle<BorrowedHandle>> OnGetBuffers(
+ Message& message);
+
+ RemoteChannelHandle CreateConsumer(Message& message);
+ RemoteChannelHandle OnNewConsumer(Message& message);
+
+ std::pair<BorrowedFence, BufferWrapper<std::uint8_t*>> OnConsumerAcquire(
+ Message& message, std::size_t metadata_size);
+ int OnConsumerRelease(Message& message, LocalFence release_fence);
+
+ void OnConsumerIgnored();
+
+ void AddConsumer(ConsumerChannel* channel);
+ void RemoveConsumer(ConsumerChannel* channel);
+
+ bool CheckAccess(int euid, int egid);
+ bool CheckParameters(int width, int height, int format, int usage,
+ size_t meta_size_bytes, size_t slice_count);
+
+ int OnProducerMakePersistent(Message& message, const std::string& name,
+ int user_id, int group_id);
+ int OnRemovePersistence(Message& message);
+
+ private:
+ std::vector<ConsumerChannel*> consumer_channels_;
+ // This counts the number of consumers left to process this buffer. If this is
+ // zero then the producer can re-acquire ownership.
+ int pending_consumers_;
+
+ std::vector<IonBuffer> slices_;
+
+ bool producer_owns_;
+ LocalFence post_fence_;
+ LocalFence returned_fence_;
+ size_t meta_size_bytes_;
+ std::unique_ptr<uint8_t[]> meta_;
+
+ static constexpr int kNoCheckId = -1;
+ static constexpr int kUseCallerId = 0;
+ static constexpr int kRootId = 0;
+
+ // User and group id to check when obtaining a persistent buffer.
+ int owner_user_id_ = kNoCheckId;
+ int owner_group_id_ = kNoCheckId;
+
+ std::string name_;
+
+ ProducerChannel(BufferHubService* service, int channel, int width, int height,
+ int format, int usage, size_t meta_size_bytes,
+ size_t slice_count, int* error);
+
+ int OnProducerPost(Message& message, LocalFence acquire_fence,
+ BufferWrapper<std::vector<std::uint8_t>> metadata);
+ LocalFence OnProducerGain(Message& message);
+
+ ProducerChannel(const ProducerChannel&) = delete;
+ void operator=(const ProducerChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_BUFFERHUBD_PRODUCER_CHANNEL_H_
diff --git a/services/vr/bufferhubd/producer_queue_channel.cpp b/services/vr/bufferhubd/producer_queue_channel.cpp
new file mode 100644
index 0000000..08f1e9d
--- /dev/null
+++ b/services/vr/bufferhubd/producer_queue_channel.cpp
@@ -0,0 +1,287 @@
+#include "producer_queue_channel.h"
+
+#include "consumer_queue_channel.h"
+#include "producer_channel.h"
+
+using android::pdx::RemoteChannelHandle;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+ProducerQueueChannel::ProducerQueueChannel(
+ BufferHubService* service, int channel_id, size_t meta_size_bytes,
+ int usage_set_mask, int usage_clear_mask, int usage_deny_set_mask,
+ int usage_deny_clear_mask, int* error)
+ : BufferHubChannel(service, channel_id, channel_id, kProducerQueueType),
+ meta_size_bytes_(meta_size_bytes),
+ usage_set_mask_(usage_set_mask),
+ usage_clear_mask_(usage_clear_mask),
+ usage_deny_set_mask_(usage_deny_set_mask),
+ usage_deny_clear_mask_(usage_deny_clear_mask),
+ capacity_(0) {
+ *error = 0;
+}
+
+ProducerQueueChannel::~ProducerQueueChannel() {}
+
+/* static */
+std::shared_ptr<ProducerQueueChannel> ProducerQueueChannel::Create(
+ BufferHubService* service, int channel_id, size_t meta_size_bytes,
+ int usage_set_mask, int usage_clear_mask, int usage_deny_set_mask,
+ int usage_deny_clear_mask, int* error) {
+ // Configuration between |usage_deny_set_mask| and |usage_deny_clear_mask|
+ // should be mutually exclusive.
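+  // For example (hypothetical values): deny_set = 0x2 and deny_clear = 0x2
+  // would both forbid and require bit 1, so such a request is rejected.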
+ if (usage_deny_set_mask & usage_deny_clear_mask) {
+ ALOGE(
+ "BufferHubService::OnCreateProducerQueue: illegal usage mask "
+ "configuration: usage_deny_set_mask=%d, usage_deny_clear_mask=%d",
+ usage_deny_set_mask, usage_deny_clear_mask);
+ *error = -EINVAL;
+ return nullptr;
+ }
+
+ std::shared_ptr<ProducerQueueChannel> producer(new ProducerQueueChannel(
+ service, channel_id, meta_size_bytes, usage_set_mask, usage_clear_mask,
+ usage_deny_set_mask, usage_deny_clear_mask, error));
+ if (*error < 0)
+ return nullptr;
+ else
+ return producer;
+}
+
+bool ProducerQueueChannel::HandleMessage(Message& message) {
+ ATRACE_NAME("ProducerQueueChannel::HandleMessage");
+ switch (message.GetOp()) {
+ case BufferHubRPC::CreateConsumerQueue::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::CreateConsumerQueue>(
+ *this, &ProducerQueueChannel::OnCreateConsumerQueue, message);
+ return true;
+
+ case BufferHubRPC::ProducerQueueAllocateBuffers::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerQueueAllocateBuffers>(
+ *this, &ProducerQueueChannel::OnProducerQueueAllocateBuffers,
+ message);
+ return true;
+
+ case BufferHubRPC::ProducerQueueDetachBuffer::Opcode:
+ DispatchRemoteMethod<BufferHubRPC::ProducerQueueDetachBuffer>(
+ *this, &ProducerQueueChannel::OnProducerQueueDetachBuffer, message);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+void ProducerQueueChannel::HandleImpulse(Message& /* message */) {
+ ATRACE_NAME("ProducerQueueChannel::HandleImpulse");
+}
+
+BufferHubChannel::BufferInfo ProducerQueueChannel::GetBufferInfo() const {
+ return BufferInfo(channel_id(), consumer_channels_.size(), capacity_,
+ usage_set_mask_, usage_clear_mask_, usage_deny_set_mask_,
+ usage_deny_clear_mask_);
+}
+
+std::pair<RemoteChannelHandle, size_t>
+ProducerQueueChannel::OnCreateConsumerQueue(Message& message) {
+ ATRACE_NAME("ProducerQueueChannel::OnCreateConsumerQueue");
+ ALOGD_IF(TRACE, "ProducerQueueChannel::OnCreateConsumerQueue: channel_id=%d",
+ channel_id());
+
+ int channel_id;
+ auto status = message.PushChannel(0, nullptr, &channel_id);
+ if (!status) {
+ ALOGE(
+ "ProducerQueueChannel::OnCreateConsumerQueue: failed to push consumer "
+ "channel: %s",
+ status.GetErrorMessage().c_str());
+ REPLY_ERROR_RETURN(message, ENOMEM, {});
+ }
+
+ const int ret = service()->SetChannel(
+ channel_id, std::make_shared<ConsumerQueueChannel>(
+ service(), buffer_id(), channel_id, shared_from_this()));
+ if (ret < 0) {
+ ALOGE(
+ "ProducerQueueChannel::OnCreateConsumerQueue: failed to set new "
+ "consumer channel: %s",
+ strerror(-ret));
+ REPLY_ERROR_RETURN(message, ENOMEM, {});
+ }
+
+ return std::make_pair(status.take(), meta_size_bytes_);
+}
+
+std::vector<std::pair<RemoteChannelHandle, size_t>>
+ProducerQueueChannel::OnProducerQueueAllocateBuffers(Message& message,
+ int width, int height,
+ int format, int usage,
+ size_t slice_count,
+ size_t buffer_count) {
+ ATRACE_NAME("ProducerQueueChannel::OnProducerQueueAllocateBuffers");
+ ALOGD_IF(TRACE,
+ "ProducerQueueChannel::OnProducerQueueAllocateBuffers: "
+ "producer_channel_id=%d",
+ channel_id());
+
+ std::vector<std::pair<RemoteChannelHandle, size_t>> buffer_handles;
+
+  // Deny buffer allocations that violate the preset usage rules.
+ if (usage & usage_deny_set_mask_) {
+ ALOGE(
+ "ProducerQueueChannel::OnProducerQueueAllocateBuffers: usage: %d is "
+ "not permitted. Violating usage_deny_set_mask, the following bits "
+ "shall not be set: %d.",
+ usage, usage_deny_set_mask_);
+ REPLY_ERROR_RETURN(message, EINVAL, buffer_handles);
+ }
+
+ if (~usage & usage_deny_clear_mask_) {
+ ALOGE(
+ "ProducerQueueChannel::OnProducerQueueAllocateBuffers: usage: %d is "
+ "not permitted. Violating usage_deny_clear_mask, the following bits "
+ "must be set: %d.",
+ usage, usage_deny_clear_mask_);
+ REPLY_ERROR_RETURN(message, EINVAL, buffer_handles);
+ }
+
+ // Force set mask and clear mask. Note that |usage_set_mask_| takes precedence
+ // and will overwrite |usage_clear_mask_|.
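+  // For example, with usage = 0b0110, usage_clear_mask_ = 0b0010 and
+  // usage_set_mask_ = 0b1000, the effective usage below becomes
+  // (0b0110 & ~0b0010) | 0b1000 = 0b1100.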
+ int effective_usage = (usage & ~usage_clear_mask_) | usage_set_mask_;
+
+ for (size_t i = 0; i < buffer_count; i++) {
+ auto buffer_handle_slot = AllocateBuffer(message, width, height, format,
+ effective_usage, slice_count);
+ if (!buffer_handle_slot.first) {
+ ALOGE(
+ "ProducerQueueChannel::OnProducerQueueAllocateBuffers: failed to "
+ "allocate new buffer.");
+ REPLY_ERROR_RETURN(message, ENOMEM, buffer_handles);
+ }
+ buffer_handles.emplace_back(std::move(buffer_handle_slot.first),
+ buffer_handle_slot.second);
+ }
+
+ return buffer_handles;
+}
+
+std::pair<RemoteChannelHandle, size_t> ProducerQueueChannel::AllocateBuffer(
+ Message& message, int width, int height, int format, int usage,
+ size_t slice_count) {
+ ATRACE_NAME("ProducerQueueChannel::AllocateBuffer");
+ ALOGD_IF(TRACE,
+ "ProducerQueueChannel::AllocateBuffer: producer_channel_id=%d",
+ channel_id());
+
+ if (capacity_ >= BufferHubRPC::kMaxQueueCapacity) {
+ ALOGE("ProducerQueueChannel::AllocateBuffer: reaches kMaxQueueCapacity.");
+ return {};
+ }
+
+  // Here we create a new BufferHubBuffer, initialize the producer channel, and
+  // return its file handle back to the client. |buffer_id| is the id of the
+  // producer channel of the new BufferHubBuffer.
+ int buffer_id;
+ auto status = message.PushChannel(0, nullptr, &buffer_id);
+
+ if (!status) {
+ ALOGE("ProducerQueueChannel::AllocateBuffer: failed to push channel: %s",
+ status.GetErrorMessage().c_str());
+ return {};
+ }
+
+ ALOGD_IF(TRACE,
+ "ProducerQueueChannel::AllocateBuffer: buffer_id=%d width=%d "
+ "height=%d format=%d usage=%d slice_count=%zu",
+ buffer_id, width, height, format, usage, slice_count);
+ auto buffer_handle = status.take();
+
+ int error;
+ const auto producer_channel = ProducerChannel::Create(
+ service(), buffer_id, width, height, format, usage,
+ meta_size_bytes_, slice_count, &error);
+ if (!producer_channel) {
+ ALOGE(
+ "ProducerQueueChannel::AllocateBuffer: Failed to create "
+ "BufferHubBuffer producer!!");
+ return {};
+ }
+
+ ALOGD_IF(
+ TRACE,
+ "ProducerQueueChannel::AllocateBuffer: buffer_id=%d, buffer_handle=%d",
+ buffer_id, buffer_handle.value());
+
+ const int ret = service()->SetChannel(buffer_id, producer_channel);
+ if (ret < 0) {
+ ALOGE(
+ "ProducerQueueChannel::AllocateBuffer: failed to set prodcuer channel "
+ "for new BufferHubBuffer: %s",
+ strerror(-ret));
+ return {};
+ }
+
+ // Register the newly allocated buffer's channel_id into the first empty
+ // buffer slot.
+ size_t slot = 0;
+ for (; slot < BufferHubRPC::kMaxQueueCapacity; slot++) {
+ if (buffers_[slot].expired())
+ break;
+ }
+ if (slot == BufferHubRPC::kMaxQueueCapacity) {
+ ALOGE(
+ "ProducerQueueChannel::AllocateBuffer: Cannot find empty slot for new "
+ "buffer allocation.");
+ return {};
+ }
+
+ buffers_[slot] = producer_channel;
+ capacity_++;
+
+ // Notify each consumer channel about the new buffer.
+ for (auto consumer_channel : consumer_channels_) {
+ ALOGD(
+ "ProducerQueueChannel::AllocateBuffer: Notified consumer with new "
+ "buffer, buffer_id=%d",
+ buffer_id);
+ consumer_channel->RegisterNewBuffer(producer_channel, slot);
+ }
+
+ return {std::move(buffer_handle), slot};
+}
+
+int ProducerQueueChannel::OnProducerQueueDetachBuffer(Message& message,
+ size_t slot) {
+ if (buffers_[slot].expired()) {
+ ALOGE(
+ "ProducerQueueChannel::OnProducerQueueDetachBuffer: trying to detach "
+ "an invalid buffer producer at slot %zu",
+ slot);
+ return -EINVAL;
+ }
+
+ if (capacity_ == 0) {
+ ALOGE(
+ "ProducerQueueChannel::OnProducerQueueDetachBuffer: trying to detach a "
+ "buffer producer while the queue's capacity is already zero.");
+ return -EINVAL;
+ }
+
+ buffers_[slot].reset();
+ capacity_--;
+ return 0;
+}
+
+void ProducerQueueChannel::AddConsumer(ConsumerQueueChannel* channel) {
+ consumer_channels_.push_back(channel);
+}
+
+void ProducerQueueChannel::RemoveConsumer(ConsumerQueueChannel* channel) {
+ consumer_channels_.erase(
+ std::find(consumer_channels_.begin(), consumer_channels_.end(), channel));
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/bufferhubd/producer_queue_channel.h b/services/vr/bufferhubd/producer_queue_channel.h
new file mode 100644
index 0000000..49611d4
--- /dev/null
+++ b/services/vr/bufferhubd/producer_queue_channel.h
@@ -0,0 +1,96 @@
+#ifndef ANDROID_DVR_BUFFERHUBD_PRODUCER_QUEUE_CHANNEL_H_
+#define ANDROID_DVR_BUFFERHUBD_PRODUCER_QUEUE_CHANNEL_H_
+
+#include "buffer_hub.h"
+
+#include <private/dvr/bufferhub_rpc.h>
+
+namespace android {
+namespace dvr {
+
+class ProducerQueueChannel : public BufferHubChannel {
+ public:
+ using Message = pdx::Message;
+ using RemoteChannelHandle = pdx::RemoteChannelHandle;
+
+ static std::shared_ptr<ProducerQueueChannel> Create(
+ BufferHubService* service, int channel_id, size_t meta_size_bytes,
+ int usage_set_mask, int usage_clear_mask, int usage_deny_set_mask,
+ int usage_deny_clear_mask, int* error);
+ ~ProducerQueueChannel() override;
+
+ bool HandleMessage(Message& message) override;
+ void HandleImpulse(Message& message) override;
+
+ BufferInfo GetBufferInfo() const override;
+
+  // Handles a client request to create a new consumer queue attached to the
+  // current producer queue. Returns a handle to the new consumer queue
+  // channel, as well as the size of the metadata associated with the queue.
+ std::pair<RemoteChannelHandle, size_t> OnCreateConsumerQueue(
+ Message& message);
+
+  // Allocates new BufferHubProducer buffers according to the input spec. The
+  // client may handle each of them as if a new producer had been created
+  // through kOpCreateBuffer.
+ std::vector<std::pair<RemoteChannelHandle, size_t>>
+ OnProducerQueueAllocateBuffers(Message& message, int width, int height,
+ int format, int usage, size_t slice_count,
+ size_t buffer_count);
+
+  // Detaches the BufferHubProducer at |slot|. Note that the buffer must be in
+  // the gained state for the producer queue to detach it.
+ int OnProducerQueueDetachBuffer(Message& message, size_t slot);
+
+ void AddConsumer(ConsumerQueueChannel* channel);
+ void RemoveConsumer(ConsumerQueueChannel* channel);
+
+ private:
+ ProducerQueueChannel(BufferHubService* service, int channel_id,
+ size_t meta_size_bytes, int usage_set_mask,
+ int usage_clear_mask, int usage_deny_set_mask,
+ int usage_deny_clear_mask, int* error);
+
+  // Allocates a single producer buffer on behalf of
+  // |OnProducerQueueAllocateBuffers|. Note that the newly created buffer's
+  // file handle is pushed to the client, which is why the return type is a
+  // RemoteChannelHandle. Returns the remote channel handle and the slot number
+  // for the newly allocated buffer.
+ std::pair<RemoteChannelHandle, size_t> AllocateBuffer(Message& message,
+ int width, int height,
+ int format, int usage,
+ size_t slice_count);
+
+  // Size of the metadata associated with all the buffers allocated from the
+  // queue. For now we assume the metadata size is immutable once the queue is
+  // created.
+ size_t meta_size_bytes_;
+
+  // A set of variables to control which |usage| bits this ProducerQueue is
+  // allowed to allocate buffers with.
+ int usage_set_mask_;
+ int usage_clear_mask_;
+ int usage_deny_set_mask_;
+ int usage_deny_clear_mask_;
+
+  // The consumer queue channels currently attached to this producer queue,
+  // used to notify consumers when new buffers are allocated.
+ std::vector<ConsumerQueueChannel*> consumer_channels_;
+
+  // Tracks how many buffers this queue has allocated.
+ size_t capacity_;
+
+  // Tracks all buffer producers allocated through this buffer queue. Once a
+  // buffer gets allocated, it takes a logical slot in the |buffers_| array and
+  // the slot number stays unchanged during the entire life cycle of the queue.
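+  // A slot becomes available again once the corresponding producer channel is
+  // destroyed (its weak_ptr expires) or the buffer is explicitly detached via
+  // OnProducerQueueDetachBuffer().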
+ std::weak_ptr<ProducerChannel> buffers_[BufferHubRPC::kMaxQueueCapacity];
+
+ ProducerQueueChannel(const ProducerQueueChannel&) = delete;
+ void operator=(const ProducerQueueChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_BUFFERHUBD_PRODUCER_QUEUE_CHANNEL_H_
diff --git a/services/vr/performanced/Android.mk b/services/vr/performanced/Android.mk
new file mode 100644
index 0000000..6256e90
--- /dev/null
+++ b/services/vr/performanced/Android.mk
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+ cpu_set.cpp \
+ main.cpp \
+ performance_service.cpp \
+ task.cpp
+
+staticLibraries := \
+ libperformance \
+ libpdx_default_transport \
+
+sharedLibraries := \
+ libbase \
+ libcutils \
+ liblog \
+ libutils
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(sourceFiles)
+LOCAL_CFLAGS := -DLOG_TAG=\"performanced\"
+LOCAL_CFLAGS += -DTRACE=0
+LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_MODULE := performanced
+LOCAL_INIT_RC := performanced.rc
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := performance_service_tests.cpp
+LOCAL_STATIC_LIBRARIES := $(staticLibraries) libgtest_main
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_MODULE := performance_service_tests
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_NATIVE_TEST)
diff --git a/services/vr/performanced/CPPLINT.cfg b/services/vr/performanced/CPPLINT.cfg
new file mode 100644
index 0000000..fd379da
--- /dev/null
+++ b/services/vr/performanced/CPPLINT.cfg
@@ -0,0 +1 @@
+filter=-runtime/int
diff --git a/services/vr/performanced/cpu_set.cpp b/services/vr/performanced/cpu_set.cpp
new file mode 100644
index 0000000..916226e
--- /dev/null
+++ b/services/vr/performanced/cpu_set.cpp
@@ -0,0 +1,287 @@
+#include "cpu_set.h"
+
+#include <cutils/log.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include <android-base/file.h>
+
+#include "directory_reader.h"
+#include "stdio_filebuf.h"
+#include "task.h"
+#include "unique_file.h"
+
+namespace {
+
+constexpr int kDirectoryFlags = O_RDONLY | O_DIRECTORY | O_CLOEXEC;
+constexpr pid_t kKernelThreadDaemonPid = 2;
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+bool CpuSet::prefix_enabled_ = false;
+
+void CpuSetManager::Load(const std::string& cpuset_root) {
+ if (!root_set_)
+ root_set_ = Create(cpuset_root);
+}
+
+std::unique_ptr<CpuSet> CpuSetManager::Create(const std::string& path) {
+ base::unique_fd root_cpuset_fd(open(path.c_str(), kDirectoryFlags));
+ if (root_cpuset_fd.get() < 0) {
+ ALOGE("CpuSet::Create: Failed to open \"%s\": %s", path.c_str(),
+ strerror(errno));
+ return nullptr;
+ }
+
+ return Create(std::move(root_cpuset_fd), "/", nullptr);
+}
+
+std::unique_ptr<CpuSet> CpuSetManager::Create(base::unique_fd base_fd,
+ const std::string& name,
+ CpuSet* parent) {
+ DirectoryReader directory(base::unique_fd(dup(base_fd)));
+ if (!directory) {
+ ALOGE("CpuSet::Create: Failed to opendir %s cpuset: %s", name.c_str(),
+ strerror(directory.GetError()));
+ return nullptr;
+ }
+
+ std::unique_ptr<CpuSet> group(
+ new CpuSet(parent, name, base::unique_fd(dup(base_fd))));
+ path_map_.insert(std::make_pair(group->path(), group.get()));
+
+ while (dirent* entry = directory.Next()) {
+ if (entry->d_type == DT_DIR) {
+ std::string directory_name(entry->d_name);
+
+ if (directory_name == "." || directory_name == "..")
+ continue;
+
+ base::unique_fd entry_fd(
+ openat(base_fd.get(), directory_name.c_str(), kDirectoryFlags));
+ if (entry_fd.get() >= 0) {
+ auto child =
+ Create(std::move(entry_fd), directory_name.c_str(), group.get());
+
+ if (child)
+ group->AddChild(std::move(child));
+ else
+ return nullptr;
+ } else {
+ ALOGE("CpuSet::Create: Failed to openat \"%s\": %s", entry->d_name,
+ strerror(errno));
+ return nullptr;
+ }
+ }
+ }
+
+ return group;
+}
+
+CpuSet* CpuSetManager::Lookup(const std::string& path) {
+ auto search = path_map_.find(path);
+ if (search != path_map_.end())
+ return search->second;
+ else
+ return nullptr;
+}
+
+std::vector<CpuSet*> CpuSetManager::GetCpuSets() {
+  std::vector<CpuSet*> sets;
+  sets.reserve(path_map_.size());
+
+ for (const auto& pair : path_map_) {
+ sets.push_back(pair.second);
+ }
+
+ return sets;
+}
+
+std::string CpuSetManager::DumpState() const {
+ size_t max_path = 0;
+ std::vector<CpuSet*> sets;
+
+ for (const auto& pair : path_map_) {
+ max_path = std::max(max_path, pair.second->path().length());
+ sets.push_back(pair.second);
+ }
+
+ std::sort(sets.begin(), sets.end(), [](const CpuSet* a, const CpuSet* b) {
+ return a->path() < b->path();
+ });
+
+ std::ostringstream stream;
+
+ stream << std::left;
+ stream << std::setw(max_path) << "Path";
+ stream << " ";
+ stream << std::setw(6) << "CPUs";
+ stream << " ";
+ stream << std::setw(6) << "Tasks";
+ stream << std::endl;
+
+ stream << std::string(max_path, '_');
+ stream << " ";
+ stream << std::string(6, '_');
+ stream << " ";
+ stream << std::string(6, '_');
+ stream << std::endl;
+
+ for (const auto set : sets) {
+ stream << std::left;
+ stream << std::setw(max_path) << set->path();
+ stream << " ";
+ stream << std::right;
+ stream << std::setw(6) << set->GetCpuList();
+ stream << " ";
+ stream << std::setw(6) << set->GetTasks().size();
+ stream << std::endl;
+ }
+
+ return stream.str();
+}
+
+void CpuSetManager::MoveUnboundTasks(const std::string& target_set) {
+ auto root = Lookup("/");
+ if (!root) {
+ ALOGE("CpuSetManager::MoveUnboundTasks: Failed to find root cpuset!");
+ return;
+ }
+
+ auto target = Lookup(target_set);
+ if (!target) {
+ ALOGE(
+ "CpuSetManager::MoveUnboundTasks: Failed to find target cpuset \"%s\"!",
+ target_set.c_str());
+ return;
+ }
+
+ auto cpu_list = root->GetCpuList();
+
+ for (auto task_id : root->GetTasks()) {
+ Task task(task_id);
+
+ // Move only unbound kernel threads to the target cpuset.
+ if (task.cpus_allowed_list() == cpu_list &&
+ task.parent_process_id() == kKernelThreadDaemonPid) {
+ ALOGD_IF(TRACE,
+ "CpuSetManager::MoveUnboundTasks: Moving task_id=%d name=%s to "
+ "target_set=%s tgid=%d ppid=%d.",
+ task_id, task.name().c_str(), target_set.c_str(),
+ task.thread_group_id(), task.parent_process_id());
+
+ const int ret = target->AttachTask(task_id);
+ ALOGW_IF(ret < 0 && ret != -EINVAL,
+ "CpuSetManager::MoveUnboundTasks: Failed to attach task_id=%d "
+ "to cpuset=%s: %s",
+ task_id, target_set.c_str(), strerror(-ret));
+ } else {
+ ALOGD_IF(TRACE,
+ "CpuSet::MoveUnboundTasks: Skipping task_id=%d name=%s cpus=%s.",
+ task_id, task.name().c_str(), task.cpus_allowed_list().c_str());
+ }
+ }
+}
+
+CpuSet::CpuSet(CpuSet* parent, const std::string& name,
+ base::unique_fd&& cpuset_fd)
+ : parent_(parent), name_(name), cpuset_fd_(std::move(cpuset_fd)) {
+ if (parent_ == nullptr)
+ path_ = name_;
+ else if (parent_->IsRoot())
+ path_ = parent_->name() + name_;
+ else
+ path_ = parent_->path() + "/" + name_;
+
+ ALOGI("CpuSet::CpuSet: path=%s", path().c_str());
+}
+
+base::unique_fd CpuSet::OpenPropertyFile(const std::string& name) const {
+ return OpenFile(prefix_enabled_ ? "cpuset." + name : name);
+}
+
+UniqueFile CpuSet::OpenPropertyFilePointer(const std::string& name) const {
+ return OpenFilePointer(prefix_enabled_ ? "cpuset." + name : name);
+}
+
+base::unique_fd CpuSet::OpenFile(const std::string& name, int flags) const {
+ const std::string relative_path = "./" + name;
+ return base::unique_fd(
+ openat(cpuset_fd_.get(), relative_path.c_str(), flags));
+}
+
+UniqueFile CpuSet::OpenFilePointer(const std::string& name, int flags) const {
+ const std::string relative_path = "./" + name;
+ base::unique_fd fd(openat(cpuset_fd_.get(), relative_path.c_str(), flags));
+ if (fd.get() < 0) {
+ ALOGE("CpuSet::OpenPropertyFilePointer: Failed to open %s/%s: %s",
+ path_.c_str(), name.c_str(), strerror(errno));
+ return nullptr;
+ }
+
+ UniqueFile fp(fdopen(fd.release(), "r"));
+ if (!fp)
+ ALOGE("CpuSet::OpenPropertyFilePointer: Failed to fdopen %s/%s: %s",
+ path_.c_str(), name.c_str(), strerror(errno));
+
+ return fp;
+}
+
+int CpuSet::AttachTask(pid_t task_id) const {
+ auto file = OpenFile("tasks", O_RDWR);
+ if (file.get() >= 0) {
+ std::ostringstream stream;
+ stream << task_id;
+ std::string value = stream.str();
+
+ const bool ret = base::WriteStringToFd(value, file.get());
+ return !ret ? -errno : 0;
+ } else {
+ ALOGE("CpuSet::AttachTask: Failed to open %s/tasks: %s", path_.c_str(),
+ strerror(errno));
+ return -errno;
+ }
+}
+
+std::vector<pid_t> CpuSet::GetTasks() const {
+ std::vector<pid_t> tasks;
+
+ if (auto file = OpenFilePointer("tasks")) {
+ stdio_filebuf<char> filebuf(file.get());
+ std::istream file_stream(&filebuf);
+
+ for (std::string line; std::getline(file_stream, line);) {
+ pid_t task_id = std::strtol(line.c_str(), nullptr, 10);
+ tasks.push_back(task_id);
+ }
+ }
+
+ return tasks;
+}
+
+std::string CpuSet::GetCpuList() const {
+ if (auto file = OpenPropertyFilePointer("cpus")) {
+ stdio_filebuf<char> filebuf(file.get());
+ std::istream file_stream(&filebuf);
+
+ std::string line;
+ if (std::getline(file_stream, line))
+ return line;
+ }
+
+ ALOGE("CpuSet::GetCpuList: Failed to read cpu list!!!");
+ return "";
+}
+
+void CpuSet::AddChild(std::unique_ptr<CpuSet> child) {
+ children_.push_back(std::move(child));
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/performanced/cpu_set.h b/services/vr/performanced/cpu_set.h
new file mode 100644
index 0000000..fcf280a
--- /dev/null
+++ b/services/vr/performanced/cpu_set.h
@@ -0,0 +1,105 @@
+#ifndef ANDROID_DVR_PERFORMANCED_CPU_SET_H_
+#define ANDROID_DVR_PERFORMANCED_CPU_SET_H_
+
+#include <fcntl.h>
+
+#include <memory>
+#include <mutex>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <android-base/unique_fd.h>
+
+#include "unique_file.h"
+
+namespace android {
+namespace dvr {
+
+class CpuSet {
+ public:
+ // Returns the parent group for this group, if any. This pointer is owned by
+ // the group hierarchy and is only valid as long as the hierarchy is valid.
+ CpuSet* parent() const { return parent_; }
+ std::string name() const { return name_; }
+ std::string path() const { return path_; }
+
+ bool IsRoot() const { return parent_ == nullptr; }
+
+ std::string GetCpuList() const;
+
+ int AttachTask(pid_t task_id) const;
+ std::vector<pid_t> GetTasks() const;
+
+ private:
+ friend class CpuSetManager;
+
+ CpuSet(CpuSet* parent, const std::string& name, base::unique_fd&& cpuset_fd);
+
+ void AddChild(std::unique_ptr<CpuSet> child);
+
+ base::unique_fd OpenPropertyFile(const std::string& name) const;
+ UniqueFile OpenPropertyFilePointer(const std::string& name) const;
+
+ base::unique_fd OpenFile(const std::string& name, int flags = O_RDONLY) const;
+ UniqueFile OpenFilePointer(const std::string& name,
+ int flags = O_RDONLY) const;
+
+ CpuSet* parent_;
+ std::string name_;
+ std::string path_;
+ base::unique_fd cpuset_fd_;
+ std::vector<std::unique_ptr<CpuSet>> children_;
+
+ static void SetPrefixEnabled(bool enabled) { prefix_enabled_ = enabled; }
+ static bool prefix_enabled_;
+
+ CpuSet(const CpuSet&) = delete;
+ void operator=(const CpuSet&) = delete;
+};
+
+class CpuSetManager {
+ public:
+ CpuSetManager() {}
+
+  // Creates a CpuSet hierarchy by walking the directory tree starting at
+ // |cpuset_root|. This argument must be the path to the root cpuset for the
+ // system, which is usually /dev/cpuset.
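+  //
+  // Typical usage (mirroring performance_service.cpp):
+  //   CpuSetManager cpuset;
+  //   cpuset.Load("/dev/cpuset");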
+ void Load(const std::string& cpuset_root);
+
+ // Lookup and return a CpuSet from a cpuset path. Ownership of the pointer
+ // DOES NOT pass to the caller; the pointer remains valid as long as the
+ // CpuSet hierarchy is valid.
+ CpuSet* Lookup(const std::string& path);
+
+  // Returns a vector of all the cpusets found at initialization. Ownership of
+ // the pointers to CpuSets DOES NOT pass to the caller; the pointers remain
+ // valid as long as the CpuSet hierarchy is valid.
+ std::vector<CpuSet*> GetCpuSets();
+
+  // Moves all unbound kernel threads from the root set into the target set.
+  // This is used to shield the system from interference from unbound kernel
+  // threads.
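+  // For example, performance_service.cpp calls MoveUnboundTasks("/kernel") at
+  // startup to move unbound kernel threads into the /kernel cpuset.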
+ void MoveUnboundTasks(const std::string& target_set);
+
+ std::string DumpState() const;
+
+ operator bool() const { return root_set_ != nullptr; }
+
+ private:
+ // Creates a CpuSet from a path to a cpuset cgroup directory. Recursively
+ // creates child groups for each directory found under |path|.
+ std::unique_ptr<CpuSet> Create(const std::string& path);
+ std::unique_ptr<CpuSet> Create(base::unique_fd base_fd,
+ const std::string& name, CpuSet* parent);
+
+ std::unique_ptr<CpuSet> root_set_;
+ std::unordered_map<std::string, CpuSet*> path_map_;
+
+ CpuSetManager(const CpuSetManager&) = delete;
+ void operator=(const CpuSetManager&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_CPU_SET_H_
diff --git a/services/vr/performanced/directory_reader.h b/services/vr/performanced/directory_reader.h
new file mode 100644
index 0000000..7d7ecc5
--- /dev/null
+++ b/services/vr/performanced/directory_reader.h
@@ -0,0 +1,55 @@
+#ifndef ANDROID_DVR_PERFORMANCED_DIRECTORY_READER_H_
+#define ANDROID_DVR_PERFORMANCED_DIRECTORY_READER_H_
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <android-base/unique_fd.h>
+
+namespace android {
+namespace dvr {
+
+// Utility class around readdir() that handles automatic cleanup.
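+//
+// Example (as used by CpuSetManager::Create):
+//   DirectoryReader directory(base::unique_fd(dup(base_fd)));
+//   if (directory) {
+//     while (dirent* entry = directory.Next()) {
+//       // Inspect entry->d_name / entry->d_type.
+//     }
+//   }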
+class DirectoryReader {
+ public:
+ explicit DirectoryReader(base::unique_fd directory_fd) {
+ directory_ = fdopendir(directory_fd.get());
+ error_ = errno;
+ if (directory_ != nullptr)
+ directory_fd.release();
+ }
+
+ ~DirectoryReader() {
+ if (directory_)
+ closedir(directory_);
+ }
+
+ bool IsValid() const { return directory_ != nullptr; }
+ explicit operator bool() const { return IsValid(); }
+ int GetError() const { return error_; }
+
+ // Returns a pointer to a dirent describing the next directory entry. The
+  // pointer is only valid until the next call to Next() or the DirectoryReader
+ // is destroyed. Returns nullptr when the end of the directory is reached.
+ dirent* Next() {
+ if (directory_)
+ return readdir(directory_);
+ else
+ return nullptr;
+ }
+
+ private:
+ DIR* directory_;
+ int error_;
+
+ DirectoryReader(const DirectoryReader&) = delete;
+ void operator=(const DirectoryReader&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_DIRECTORY_READER_H_
diff --git a/services/vr/performanced/main.cpp b/services/vr/performanced/main.cpp
new file mode 100644
index 0000000..114413d
--- /dev/null
+++ b/services/vr/performanced/main.cpp
@@ -0,0 +1,76 @@
+#include <errno.h>
+#include <sys/capability.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <cutils/sched_policy.h>
+#include <sys/resource.h>
+#include <utils/threads.h>
+
+#include <pdx/default_transport/service_dispatcher.h>
+#include <private/android_filesystem_config.h>
+
+#include "performance_service.h"
+
+namespace {
+
+// Annoying that sys/capability.h doesn't define this directly.
+constexpr int kMaxCapNumber = (CAP_TO_INDEX(CAP_LAST_CAP) + 1);
+
+} // anonymous namespace
+
+int main(int /*argc*/, char** /*argv*/) {
+ int ret = -1;
+
+ struct __user_cap_header_struct capheader;
+ struct __user_cap_data_struct capdata[kMaxCapNumber];
+
+ std::shared_ptr<android::pdx::Service> service;
+ std::unique_ptr<android::pdx::ServiceDispatcher> dispatcher;
+
+ ALOGI("Starting up...");
+
+ // We need to be able to create endpoints with full perms.
+ umask(0000);
+
+ // Keep capabilities when switching UID to AID_SYSTEM.
+ ret = prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);
+ CHECK_ERROR(ret < 0, error, "Failed to set KEEPCAPS: %s", strerror(errno));
+
+ // Set UID and GID to system.
+ ret = setresgid(AID_SYSTEM, AID_SYSTEM, AID_SYSTEM);
+ CHECK_ERROR(ret < 0, error, "Failed to set GID: %s", strerror(errno));
+ ret = setresuid(AID_SYSTEM, AID_SYSTEM, AID_SYSTEM);
+ CHECK_ERROR(ret < 0, error, "Failed to set UID: %s", strerror(errno));
+
+ // Keep CAP_SYS_NICE, allowing control of scheduler class, priority, and
+ // cpuset for other tasks in the system.
+ memset(&capheader, 0, sizeof(capheader));
+ memset(&capdata, 0, sizeof(capdata));
+ capheader.version = _LINUX_CAPABILITY_VERSION_3;
+ capdata[CAP_TO_INDEX(CAP_SYS_NICE)].effective |= CAP_TO_MASK(CAP_SYS_NICE);
+ capdata[CAP_TO_INDEX(CAP_SYS_NICE)].permitted |= CAP_TO_MASK(CAP_SYS_NICE);
+
+ // Drop all caps but the ones configured above.
+ ret = capset(&capheader, capdata);
+ CHECK_ERROR(ret < 0, error, "Could not set capabilities: %s",
+ strerror(errno));
+
+ dispatcher = android::pdx::default_transport::ServiceDispatcher::Create();
+ CHECK_ERROR(!dispatcher, error, "Failed to create service dispatcher.");
+
+ service = android::dvr::PerformanceService::Create();
+ CHECK_ERROR(!service, error, "Failed to create performance service service.");
+ dispatcher->AddService(service);
+
+ ALOGI("Entering message loop.");
+
+ ret = dispatcher->EnterDispatchLoop();
+ CHECK_ERROR(ret < 0, error, "Dispatch loop exited because: %s\n",
+ strerror(-ret));
+
+error:
+ return ret;
+}
diff --git a/services/vr/performanced/performance_service.cpp b/services/vr/performanced/performance_service.cpp
new file mode 100644
index 0000000..c99c8d4
--- /dev/null
+++ b/services/vr/performanced/performance_service.cpp
@@ -0,0 +1,196 @@
+#include "performance_service.h"
+
+#include <sched.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <pdx/rpc/argument_encoder.h>
+#include <pdx/rpc/message_buffer.h>
+#include <pdx/rpc/remote_method.h>
+#include <private/dvr/performance_rpc.h>
+
+#include "task.h"
+
+// This prctl is only available in Android kernels.
+#define PR_SET_TIMERSLACK_PID 41
+
+using android::pdx::Message;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::default_transport::Endpoint;
+
+namespace {
+
+const char kCpuSetBasePath[] = "/dev/cpuset";
+
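+// Timer slack applied via PR_SET_TIMERSLACK_PID: 50us for foreground scheduler
+// classes and 40ms for background classes.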
+constexpr unsigned long kTimerSlackForegroundNs = 50000;
+constexpr unsigned long kTimerSlackBackgroundNs = 40000000;
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+PerformanceService::PerformanceService()
+ : BASE("PerformanceService",
+ Endpoint::Create(PerformanceRPC::kClientPath)) {
+ cpuset_.Load(kCpuSetBasePath);
+
+ Task task(getpid());
+ ALOGI("Running in cpuset=%s uid=%d gid=%d", task.GetCpuSetPath().c_str(),
+ task.user_id()[Task::kUidReal], task.group_id()[Task::kUidReal]);
+
+ // Errors here are checked in IsInitialized().
+ sched_fifo_min_priority_ = sched_get_priority_min(SCHED_FIFO);
+ sched_fifo_max_priority_ = sched_get_priority_max(SCHED_FIFO);
+
+ const int fifo_range = sched_fifo_max_priority_ - sched_fifo_min_priority_;
+ const int fifo_low = sched_fifo_min_priority_;
+ const int fifo_medium = sched_fifo_min_priority_ + fifo_range / 5;
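+  // On typical Linux kernels SCHED_FIFO priorities span 1..99, so fifo_low is
+  // 1 and fifo_medium is 1 + 98 / 5 = 20.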
+
+ // TODO(eieio): Make this configurable on the command line.
+ cpuset_.MoveUnboundTasks("/kernel");
+
+ // Setup the scheduler classes.
+ scheduler_classes_ = {
+ {"audio:low",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_medium}},
+ {"audio:high",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_medium + 3}},
+ {"graphics",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_medium}},
+ {"graphics:low",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_medium}},
+ {"graphics:high",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_medium + 2}},
+ {"sensors",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_low}},
+ {"sensors:low",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_low}},
+ {"sensors:high",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_FIFO | SCHED_RESET_ON_FORK,
+ .priority = fifo_low + 1}},
+ {"normal",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_NORMAL,
+ .priority = 0}},
+ {"foreground",
+ {.timer_slack = kTimerSlackForegroundNs,
+ .scheduler_policy = SCHED_NORMAL,
+ .priority = 0}},
+ {"background",
+ {.timer_slack = kTimerSlackBackgroundNs,
+ .scheduler_policy = SCHED_BATCH,
+ .priority = 0}},
+ {"batch",
+ {.timer_slack = kTimerSlackBackgroundNs,
+ .scheduler_policy = SCHED_BATCH,
+ .priority = 0}},
+ };
+}
+
+bool PerformanceService::IsInitialized() const {
+ return BASE::IsInitialized() && cpuset_ && sched_fifo_min_priority_ >= 0 &&
+ sched_fifo_max_priority_ >= 0;
+}
+
+std::string PerformanceService::DumpState(size_t /*max_length*/) {
+ return cpuset_.DumpState();
+}
+
+int PerformanceService::OnSetCpuPartition(Message& message, pid_t task_id,
+ const std::string& partition) {
+ Task task(task_id);
+ if (!task || task.thread_group_id() != message.GetProcessId())
+ return -EINVAL;
+
+ auto target_set = cpuset_.Lookup(partition);
+ if (!target_set)
+ return -ENOENT;
+
+ const auto attach_error = target_set->AttachTask(task_id);
+ if (attach_error)
+ return attach_error;
+
+ return 0;
+}
+
+int PerformanceService::OnSetSchedulerClass(
+ Message& message, pid_t task_id, const std::string& scheduler_class) {
+ // Make sure the task id is valid and belongs to the sending process.
+ Task task(task_id);
+ if (!task || task.thread_group_id() != message.GetProcessId())
+ return -EINVAL;
+
+ struct sched_param param;
+
+ // TODO(eieio): Apply rules based on the requesting process. Applications are
+ // only allowed one audio thread that runs at SCHED_FIFO. System services can
+ // have more than one.
+ auto search = scheduler_classes_.find(scheduler_class);
+ if (search != scheduler_classes_.end()) {
+ auto config = search->second;
+ param.sched_priority = config.priority;
+    sched_setscheduler(task_id, config.scheduler_policy, &param);
+ prctl(PR_SET_TIMERSLACK_PID, config.timer_slack, task_id);
+ ALOGI("PerformanceService::OnSetSchedulerClass: Set task=%d to class=%s.",
+ task_id, scheduler_class.c_str());
+ return 0;
+ } else {
+ ALOGE(
+ "PerformanceService::OnSetSchedulerClass: Invalid class=%s requested "
+ "by task=%d.",
+ scheduler_class.c_str(), task_id);
+ return -EINVAL;
+ }
+}
+
+std::string PerformanceService::OnGetCpuPartition(Message& message,
+ pid_t task_id) {
+ // Make sure the task id is valid and belongs to the sending process.
+ Task task(task_id);
+ if (!task || task.thread_group_id() != message.GetProcessId())
+ REPLY_ERROR_RETURN(message, EINVAL, "");
+
+ return task.GetCpuSetPath();
+}
+
+int PerformanceService::HandleMessage(Message& message) {
+ switch (message.GetOp()) {
+ case PerformanceRPC::SetCpuPartition::Opcode:
+      DispatchRemoteMethod<PerformanceRPC::SetCpuPartition>(
+ *this, &PerformanceService::OnSetCpuPartition, message);
+ return 0;
+
+ case PerformanceRPC::SetSchedulerClass::Opcode:
+ DispatchRemoteMethod<PerformanceRPC::SetSchedulerClass>(
+ *this, &PerformanceService::OnSetSchedulerClass, message);
+ return 0;
+
+ case PerformanceRPC::GetCpuPartition::Opcode:
+ DispatchRemoteMethod<PerformanceRPC::GetCpuPartition>(
+ *this, &PerformanceService::OnGetCpuPartition, message);
+ return 0;
+
+ default:
+ return Service::HandleMessage(message);
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/performanced/performance_service.h b/services/vr/performanced/performance_service.h
new file mode 100644
index 0000000..e32d834
--- /dev/null
+++ b/services/vr/performanced/performance_service.h
@@ -0,0 +1,57 @@
+#ifndef ANDROID_DVR_PERFORMANCED_PERFORMANCE_SERVICE_H_
+#define ANDROID_DVR_PERFORMANCED_PERFORMANCE_SERVICE_H_
+
+#include <string>
+#include <unordered_map>
+
+#include <pdx/service.h>
+
+#include "cpu_set.h"
+
+namespace android {
+namespace dvr {
+
+// PerformanceService manages compute partitions using cpusets. Different
+// cpusets are assigned specific purposes and performance characteristics;
+// clients may request that threads be moved into these cpusets to help
+// achieve system performance goals.
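+//
+// Clients reach this service through the functions declared in
+// <dvr/performance_client_api.h> (exercised in performance_service_tests.cpp),
+// for example:
+//   dvrSetCpuPartition(0, "/application");
+//   dvrSetSchedulerClass(0, "graphics");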
+class PerformanceService : public pdx::ServiceBase<PerformanceService> {
+ public:
+ int HandleMessage(pdx::Message& message) override;
+ bool IsInitialized() const override;
+
+ std::string DumpState(size_t max_length) override;
+
+ private:
+ friend BASE;
+
+ PerformanceService();
+
+ int OnSetCpuPartition(pdx::Message& message, pid_t task_id,
+ const std::string& partition);
+ int OnSetSchedulerClass(pdx::Message& message, pid_t task_id,
+ const std::string& scheduler_class);
+ std::string OnGetCpuPartition(pdx::Message& message, pid_t task_id);
+
+ CpuSetManager cpuset_;
+
+ int sched_fifo_min_priority_;
+ int sched_fifo_max_priority_;
+
+ // Scheduler class config type.
+ struct SchedulerClassConfig {
+ unsigned long timer_slack;
+ int scheduler_policy;
+ int priority;
+ };
+
+ std::unordered_map<std::string, SchedulerClassConfig> scheduler_classes_;
+
+ PerformanceService(const PerformanceService&) = delete;
+ void operator=(const PerformanceService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_PERFORMANCE_SERVICE_H_
diff --git a/services/vr/performanced/performance_service_tests.cpp b/services/vr/performanced/performance_service_tests.cpp
new file mode 100644
index 0000000..b526082
--- /dev/null
+++ b/services/vr/performanced/performance_service_tests.cpp
@@ -0,0 +1,137 @@
+#include <errno.h>
+#include <sched.h>
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+#include <dvr/performance_client_api.h>
+#include <gtest/gtest.h>
+
+TEST(DISABLED_PerformanceTest, SetCpuPartition) {
+ int error;
+
+  // Test setting the partition for the current task.
+ error = dvrSetCpuPartition(0, "/application/background");
+ EXPECT_EQ(0, error);
+
+ error = dvrSetCpuPartition(0, "/application/performance");
+ EXPECT_EQ(0, error);
+
+ // Test setting the partition for one of our tasks.
+ bool done = false;
+ pid_t task_id = 0;
+ std::mutex mutex;
+ std::condition_variable done_condition, id_condition;
+
+ std::thread thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+
+ task_id = gettid();
+ id_condition.notify_one();
+
+ done_condition.wait(lock, [&done] { return done; });
+ });
+
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ id_condition.wait(lock, [&task_id] { return task_id != 0; });
+ }
+ EXPECT_NE(0, task_id);
+
+ error = dvrSetCpuPartition(task_id, "/application");
+ EXPECT_EQ(0, error);
+
+ {
+ std::lock_guard<std::mutex> lock(mutex);
+ done = true;
+ done_condition.notify_one();
+ }
+ thread.join();
+
+ // Test setting the partition for a task that isn't valid using
+ // the task id of the thread that we just joined. Technically the
+ // id could wrap around by the time we get here, but this is
+ // extremely unlikely.
+ error = dvrSetCpuPartition(task_id, "/application");
+ EXPECT_EQ(-EINVAL, error);
+
+ // Test setting the partition for a task that doesn't belong to us.
+ error = dvrSetCpuPartition(1, "/application");
+ EXPECT_EQ(-EINVAL, error);
+
+ // Test setting the partition to one that doesn't exist.
+ error = dvrSetCpuPartition(0, "/foobar");
+ EXPECT_EQ(-ENOENT, error);
+}
+
+TEST(PerformanceTest, SetSchedulerClass) {
+ int error;
+
+ // TODO(eieio): Test all supported scheduler classes and priority levels.
+
+ error = dvrSetSchedulerClass(0, "background");
+ EXPECT_EQ(0, error);
+ EXPECT_EQ(SCHED_BATCH, sched_getscheduler(0));
+
+ error = dvrSetSchedulerClass(0, "audio:low");
+ EXPECT_EQ(0, error);
+ EXPECT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, sched_getscheduler(0));
+
+ error = dvrSetSchedulerClass(0, "normal");
+ EXPECT_EQ(0, error);
+ EXPECT_EQ(SCHED_NORMAL, sched_getscheduler(0));
+
+ error = dvrSetSchedulerClass(0, "foobar");
+ EXPECT_EQ(-EINVAL, error);
+}
+
+TEST(PerformanceTest, SchedulerClassResetOnFork) {
+ int error;
+
+ error = dvrSetSchedulerClass(0, "graphics:high");
+ EXPECT_EQ(0, error);
+ EXPECT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, sched_getscheduler(0));
+
+ int scheduler = -1;
+ std::thread thread([&]() { scheduler = sched_getscheduler(0); });
+ thread.join();
+
+ EXPECT_EQ(SCHED_NORMAL, scheduler);
+
+ // Return to SCHED_NORMAL.
+ error = dvrSetSchedulerClass(0, "normal");
+ EXPECT_EQ(0, error);
+ EXPECT_EQ(SCHED_NORMAL, sched_getscheduler(0));
+}
+
+TEST(PerformanceTest, GetCpuPartition) {
+ int error;
+ char partition[PATH_MAX + 1];
+
+ error = dvrSetCpuPartition(0, "/");
+ ASSERT_EQ(0, error);
+
+ error = dvrGetCpuPartition(0, partition, sizeof(partition));
+ EXPECT_EQ(0, error);
+ EXPECT_EQ("/", std::string(partition));
+
+ error = dvrSetCpuPartition(0, "/application");
+ EXPECT_EQ(0, error);
+
+ error = dvrGetCpuPartition(0, partition, sizeof(partition));
+ EXPECT_EQ(0, error);
+ EXPECT_EQ("/application", std::string(partition));
+
+ // Test passing a buffer that is too short.
+ error = dvrGetCpuPartition(0, partition, 5);
+ EXPECT_EQ(-ENOBUFS, error);
+
+ // Test getting the partition for a task that doesn't belong to us.
+ error = dvrGetCpuPartition(1, partition, sizeof(partition));
+ EXPECT_EQ(-EINVAL, error);
+
+ // Test passing a nullptr value for partition buffer.
+ error = dvrGetCpuPartition(0, nullptr, sizeof(partition));
+ EXPECT_EQ(-EINVAL, error);
+}
diff --git a/services/vr/performanced/performanced.rc b/services/vr/performanced/performanced.rc
new file mode 100644
index 0000000..754c97f
--- /dev/null
+++ b/services/vr/performanced/performanced.rc
@@ -0,0 +1,5 @@
+service performanced /system/bin/performanced
+ class core
+ user root
+ group system readproc
+ cpuset /
diff --git a/services/vr/performanced/stdio_filebuf.h b/services/vr/performanced/stdio_filebuf.h
new file mode 100644
index 0000000..5988aa8
--- /dev/null
+++ b/services/vr/performanced/stdio_filebuf.h
@@ -0,0 +1,219 @@
+// Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
+// Copyright (c) 2016 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+#ifndef ANDROID_DVR_PERFORMANCED_STDIO_FILEBUF_H_
+#define ANDROID_DVR_PERFORMANCED_STDIO_FILEBUF_H_
+
+#include <cstdio>
+#include <istream>
+#include <locale>
+#include <streambuf>
+
+namespace android {
+namespace dvr {
+
+// An implementation of std::basic_streambuf backed by a FILE pointer. This is
+// ported from the internal llvm-libc++ support for std::cin. It's really
+// unfortunate that we have to do this, but the C++11 standard is too pedantic
+// to support creating streams from file descriptors or FILE pointers. This
+// implementation uses all standard interfaces, except for the call to
+// std::__throw_runtime_error(), which is only needed to deal with exceeding
+// locale encoding limits. This class is meant to be used for reading system
+// files, which don't require exotic locale support, so this call could be
+// removed in the future, if necessary.
+//
+// Original source file: llvm-libcxx/llvm-libc++/include/__std_stream
+// Original class name: __stdinbuf
+//
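+// Typical usage within this service (see cpu_set.cpp and task.cpp):
+//   stdio_filebuf<char> filebuf(fp);
+//   std::istream stream(&filebuf);
+//   std::string line;
+//   std::getline(stream, line);
+//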
+template <class _CharT>
+class stdio_filebuf
+ : public std::basic_streambuf<_CharT, std::char_traits<_CharT> > {
+ public:
+ typedef _CharT char_type;
+ typedef std::char_traits<char_type> traits_type;
+ typedef typename traits_type::int_type int_type;
+ typedef typename traits_type::pos_type pos_type;
+ typedef typename traits_type::off_type off_type;
+ typedef typename traits_type::state_type state_type;
+
+ explicit stdio_filebuf(FILE* __fp);
+ ~stdio_filebuf() override;
+
+ protected:
+ virtual int_type underflow() override;
+ virtual int_type uflow() override;
+ virtual int_type pbackfail(int_type __c = traits_type::eof()) override;
+ virtual void imbue(const std::locale& __loc) override;
+
+ private:
+ FILE* __file_;
+ const std::codecvt<char_type, char, state_type>* __cv_;
+ state_type __st_;
+ int __encoding_;
+ int_type __last_consumed_;
+ bool __last_consumed_is_next_;
+ bool __always_noconv_;
+
+ stdio_filebuf(const stdio_filebuf&);
+ stdio_filebuf& operator=(const stdio_filebuf&);
+
+ int_type __getchar(bool __consume);
+
+ static const int __limit = 8;
+};
+
+template <class _CharT>
+stdio_filebuf<_CharT>::stdio_filebuf(FILE* __fp)
+ : __file_(__fp),
+ __last_consumed_(traits_type::eof()),
+ __last_consumed_is_next_(false) {
+ imbue(this->getloc());
+}
+
+template <class _CharT>
+stdio_filebuf<_CharT>::~stdio_filebuf() {
+ if (__file_)
+ fclose(__file_);
+}
+
+template <class _CharT>
+void stdio_filebuf<_CharT>::imbue(const std::locale& __loc) {
+ __cv_ = &std::use_facet<std::codecvt<char_type, char, state_type> >(__loc);
+ __encoding_ = __cv_->encoding();
+ __always_noconv_ = __cv_->always_noconv();
+ if (__encoding_ > __limit)
+ std::__throw_runtime_error("unsupported locale for standard io");
+}
+
+template <class _CharT>
+typename stdio_filebuf<_CharT>::int_type stdio_filebuf<_CharT>::underflow() {
+ return __getchar(false);
+}
+
+template <class _CharT>
+typename stdio_filebuf<_CharT>::int_type stdio_filebuf<_CharT>::uflow() {
+ return __getchar(true);
+}
+
+template <class _CharT>
+typename stdio_filebuf<_CharT>::int_type stdio_filebuf<_CharT>::__getchar(
+ bool __consume) {
+ if (__last_consumed_is_next_) {
+ int_type __result = __last_consumed_;
+ if (__consume) {
+ __last_consumed_ = traits_type::eof();
+ __last_consumed_is_next_ = false;
+ }
+ return __result;
+ }
+ char __extbuf[__limit];
+ int __nread = std::max(1, __encoding_);
+ for (int __i = 0; __i < __nread; ++__i) {
+ int __c = getc(__file_);
+ if (__c == EOF)
+ return traits_type::eof();
+ __extbuf[__i] = static_cast<char>(__c);
+ }
+ char_type __1buf;
+ if (__always_noconv_)
+ __1buf = static_cast<char_type>(__extbuf[0]);
+ else {
+ const char* __enxt;
+ char_type* __inxt;
+ std::codecvt_base::result __r;
+ do {
+ state_type __sv_st = __st_;
+ __r = __cv_->in(__st_, __extbuf, __extbuf + __nread, __enxt, &__1buf,
+ &__1buf + 1, __inxt);
+ switch (__r) {
+ case std::codecvt_base::ok:
+ break;
+ case std::codecvt_base::partial:
+ __st_ = __sv_st;
+ if (__nread == sizeof(__extbuf))
+ return traits_type::eof();
+ {
+ int __c = getc(__file_);
+ if (__c == EOF)
+ return traits_type::eof();
+ __extbuf[__nread] = static_cast<char>(__c);
+ }
+ ++__nread;
+ break;
+ case std::codecvt_base::error:
+ return traits_type::eof();
+ case std::codecvt_base::noconv:
+ __1buf = static_cast<char_type>(__extbuf[0]);
+ break;
+ }
+ } while (__r == std::codecvt_base::partial);
+ }
+ if (!__consume) {
+ for (int __i = __nread; __i > 0;) {
+ if (ungetc(traits_type::to_int_type(__extbuf[--__i]), __file_) == EOF)
+ return traits_type::eof();
+ }
+ } else
+ __last_consumed_ = traits_type::to_int_type(__1buf);
+ return traits_type::to_int_type(__1buf);
+}
+
+template <class _CharT>
+typename stdio_filebuf<_CharT>::int_type stdio_filebuf<_CharT>::pbackfail(
+ int_type __c) {
+ if (traits_type::eq_int_type(__c, traits_type::eof())) {
+ if (!__last_consumed_is_next_) {
+ __c = __last_consumed_;
+ __last_consumed_is_next_ =
+ !traits_type::eq_int_type(__last_consumed_, traits_type::eof());
+ }
+ return __c;
+ }
+ if (__last_consumed_is_next_) {
+ char __extbuf[__limit];
+ char* __enxt;
+ const char_type __ci = traits_type::to_char_type(__last_consumed_);
+ const char_type* __inxt;
+ switch (__cv_->out(__st_, &__ci, &__ci + 1, __inxt, __extbuf,
+ __extbuf + sizeof(__extbuf), __enxt)) {
+ case std::codecvt_base::ok:
+ break;
+ case std::codecvt_base::noconv:
+ __extbuf[0] = static_cast<char>(__last_consumed_);
+ __enxt = __extbuf + 1;
+ break;
+ case std::codecvt_base::partial:
+ case std::codecvt_base::error:
+ return traits_type::eof();
+ }
+ while (__enxt > __extbuf)
+ if (ungetc(*--__enxt, __file_) == EOF)
+ return traits_type::eof();
+ }
+ __last_consumed_ = __c;
+ __last_consumed_is_next_ = true;
+ return __c;
+}
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_STDIO_FILEBUF_H_
diff --git a/services/vr/performanced/string_trim.h b/services/vr/performanced/string_trim.h
new file mode 100644
index 0000000..7094e9f
--- /dev/null
+++ b/services/vr/performanced/string_trim.h
@@ -0,0 +1,46 @@
+#ifndef ANDROID_DVR_PERFORMANCED_STRING_TRIM_H_
+#define ANDROID_DVR_PERFORMANCED_STRING_TRIM_H_
+
+#include <algorithm>
+#include <cctype>
+#include <functional>
+#include <locale>
+#include <string>
+
+namespace android {
+namespace dvr {
+
+// Trims whitespace from the left side of |subject| and returns the result as a
+// new string.
+inline std::string LeftTrim(std::string subject) {
+ subject.erase(subject.begin(),
+ std::find_if(subject.begin(), subject.end(),
+ std::not1(std::ptr_fun<int, int>(std::isspace))));
+ return subject;
+}
+
+// Trims whitespace from the right side of |subject| and returns the result as a
+// new string.
+inline std::string RightTrim(std::string subject) {
+ subject.erase(std::find_if(subject.rbegin(), subject.rend(),
+ std::not1(std::ptr_fun<int, int>(std::isspace)))
+ .base(),
+ subject.end());
+ return subject;
+}
+
+// Trims whitespace from both sides of |subject| and returns the result as a
+// new string.
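+//
+// For example, Trim(" 0-3\n") returns "0-3".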
+inline std::string Trim(std::string subject) {
+ subject.erase(subject.begin(),
+ std::find_if(subject.begin(), subject.end(),
+ std::not1(std::ptr_fun<int, int>(std::isspace))));
+ subject.erase(std::find_if(subject.rbegin(), subject.rend(),
+ std::not1(std::ptr_fun<int, int>(std::isspace)))
+ .base(),
+ subject.end());
+ return subject;
+}
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_STRING_TRIM_H_
diff --git a/services/vr/performanced/task.cpp b/services/vr/performanced/task.cpp
new file mode 100644
index 0000000..ad12858
--- /dev/null
+++ b/services/vr/performanced/task.cpp
@@ -0,0 +1,163 @@
+#include "task.h"
+
+#include <cutils/log.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+
+#include <cctype>
+#include <cstdlib>
+#include <memory>
+#include <sstream>
+
+#include <android-base/unique_fd.h>
+
+#include "stdio_filebuf.h"
+#include "string_trim.h"
+
+namespace {
+
+const char kProcBase[] = "/proc";
+
+android::base::unique_fd OpenTaskDirectory(pid_t task_id) {
+ std::ostringstream stream;
+ stream << kProcBase << "/" << task_id;
+
+ return android::base::unique_fd(
+ open(stream.str().c_str(), O_RDONLY | O_DIRECTORY));
+}
+
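+// Parses the four whitespace-separated ids (real, effective, saved set, and
+// filesystem) from the value of a "Uid:" or "Gid:" line in
+// /proc/<pid>/status.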
+void ParseUidStatusField(const std::string& value, std::array<int, 4>& ids) {
+ const char* start = value.c_str();
+
+ ids[0] = std::strtol(start, const_cast<char**>(&start), 10);
+ ids[1] = std::strtol(start, const_cast<char**>(&start), 10);
+ ids[2] = std::strtol(start, const_cast<char**>(&start), 10);
+ ids[3] = std::strtol(start, const_cast<char**>(&start), 10);
+}
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+Task::Task(pid_t task_id)
+ : task_id_(task_id),
+ thread_group_id_(-1),
+ parent_process_id_(-1),
+ thread_count_(0),
+ cpus_allowed_mask_(0) {
+ task_fd_ = OpenTaskDirectory(task_id_);
+ ALOGE_IF(task_fd_.get() < 0,
+ "Task::Task: Failed to open task directory for task_id=%d: %s",
+ task_id, strerror(errno));
+
+ ReadStatusFields();
+
+ ALOGD_IF(TRACE, "Task::Task: task_id=%d name=%s tgid=%d ppid=%d cpu_mask=%x",
+ task_id_, name_.c_str(), thread_group_id_, parent_process_id_,
+ cpus_allowed_mask_);
+}
+
+base::unique_fd Task::OpenTaskFile(const std::string& name) const {
+ const std::string relative_path = "./" + name;
+ return base::unique_fd(
+ openat(task_fd_.get(), relative_path.c_str(), O_RDONLY));
+}
+
+UniqueFile Task::OpenTaskFilePointer(const std::string& name) const {
+ const std::string relative_path = "./" + name;
+ base::unique_fd fd(openat(task_fd_.get(), relative_path.c_str(), O_RDONLY));
+ if (fd.get() < 0) {
+ ALOGE("Task::OpenTaskFilePointer: Failed to open /proc/%d/%s: %s", task_id_,
+ name.c_str(), strerror(errno));
+ return nullptr;
+ }
+
+ UniqueFile fp(fdopen(fd.release(), "r"));
+ if (!fp)
+ ALOGE("Task::OpenTaskFilePointer: Failed to fdopen /proc/%d/%s: %s",
+ task_id_, name.c_str(), strerror(errno));
+
+ return fp;
+}
+
+std::string Task::GetStatusField(const std::string& field) const {
+ if (auto file = OpenTaskFilePointer("status")) {
+ stdio_filebuf<char> filebuf(file.get());
+ std::istream file_stream(&filebuf);
+
+ for (std::string line; std::getline(file_stream, line);) {
+ auto offset = line.find(field);
+
+ ALOGD_IF(TRACE,
+ "Task::GetStatusField: field=\"%s\" line=\"%s\" offset=%zd",
+ field.c_str(), line.c_str(), offset);
+
+ if (offset == std::string::npos)
+ continue;
+
+ // The status file has lines with the format <field>:<value>. Extract the
+ // value after the colon.
+ return Trim(line.substr(offset + field.size() + 1));
+ }
+ }
+
+ return "[unknown]";
+}
+
+void Task::ReadStatusFields() {
+ if (auto file = OpenTaskFilePointer("status")) {
+ stdio_filebuf<char> filebuf(file.get());
+ std::istream file_stream(&filebuf);
+
+ for (std::string line; std::getline(file_stream, line);) {
+ auto offset = line.find(":");
+ if (offset == std::string::npos) {
+ ALOGW("ReadStatusFields: Failed to find delimiter \":\" in line=\"%s\"",
+ line.c_str());
+ continue;
+ }
+
+ std::string key = line.substr(0, offset);
+ std::string value = Trim(line.substr(offset + 1));
+
+ ALOGD_IF(TRACE, "Task::ReadStatusFields: key=\"%s\" value=\"%s\"",
+ key.c_str(), value.c_str());
+
+ if (key == "Name")
+ name_ = value;
+ else if (key == "Tgid")
+ thread_group_id_ = std::strtol(value.c_str(), nullptr, 10);
+ else if (key == "PPid")
+ parent_process_id_ = std::strtol(value.c_str(), nullptr, 10);
+ else if (key == "Uid")
+ ParseUidStatusField(value, user_id_);
+ else if (key == "Gid")
+ ParseUidStatusField(value, group_id_);
+ else if (key == "Threads")
+ thread_count_ = std::strtoul(value.c_str(), nullptr, 10);
+ else if (key == "Cpus_allowed")
+ cpus_allowed_mask_ = std::strtoul(value.c_str(), nullptr, 16);
+ else if (key == "Cpus_allowed_list")
+ cpus_allowed_list_ = value;
+ }
+ }
+}
+
+std::string Task::GetCpuSetPath() const {
+ if (auto file = OpenTaskFilePointer("cpuset")) {
+ stdio_filebuf<char> filebuf(file.get());
+ std::istream file_stream(&filebuf);
+
+ std::string line = "";
+ std::getline(file_stream, line);
+
+ return Trim(line);
+ } else {
+ return "";
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/performanced/task.h b/services/vr/performanced/task.h
new file mode 100644
index 0000000..4a3b7f2
--- /dev/null
+++ b/services/vr/performanced/task.h
@@ -0,0 +1,83 @@
+#ifndef ANDROID_DVR_PERFORMANCED_TASK_H_
+#define ANDROID_DVR_PERFORMANCED_TASK_H_
+
+#include <sys/types.h>
+
+#include <array>
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <android-base/unique_fd.h>
+
+#include "unique_file.h"
+
+namespace android {
+namespace dvr {
+
+// Task provides access to task-related information from the procfs
+// pseudo-filesystem.
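+//
+// Example (mirroring its use in performance_service.cpp):
+//   Task task(task_id);
+//   if (!task || task.thread_group_id() != message.GetProcessId())
+//     return -EINVAL;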
+class Task {
+ public:
+ explicit Task(pid_t task_id);
+
+ bool IsValid() const { return task_fd_.get() >= 0; }
+ explicit operator bool() const { return IsValid(); }
+
+ pid_t task_id() const { return task_id_; }
+ std::string name() const { return name_; }
+ pid_t thread_group_id() const { return thread_group_id_; }
+ pid_t parent_process_id() const { return parent_process_id_; }
+ size_t thread_count() const { return thread_count_; }
+ uint32_t cpus_allowed_mask() const { return cpus_allowed_mask_; }
+ const std::string& cpus_allowed_list() const { return cpus_allowed_list_; }
+ const std::array<int, 4>& user_id() const { return user_id_; }
+ const std::array<int, 4>& group_id() const { return group_id_; }
+
+ // Indices into user and group id arrays.
+ enum {
+ kUidReal = 0,
+ kUidEffective,
+ kUidSavedSet,
+ kUidFilesystem,
+ };
+
+ std::string GetCpuSetPath() const;
+
+ private:
+ pid_t task_id_;
+ base::unique_fd task_fd_;
+
+ // Fields read from /proc/<task_id_>/status.
+ std::string name_;
+ pid_t thread_group_id_;
+ pid_t parent_process_id_;
+ std::array<int, 4> user_id_;
+ std::array<int, 4> group_id_;
+ size_t thread_count_;
+ uint32_t cpus_allowed_mask_;
+ std::string cpus_allowed_list_;
+
+ // Opens the file /proc/<task_id_>/|name| and returns the open file
+ // descriptor.
+ base::unique_fd OpenTaskFile(const std::string& name) const;
+
+ // Similar to OpenTaskFile() but returns a file pointer.
+ UniqueFile OpenTaskFilePointer(const std::string& name) const;
+
+ // Reads the field named |field| from /proc/<task_id_>/status.
+ std::string GetStatusField(const std::string& field) const;
+
+ // Reads a subset of the fields in /proc/<task_id_>/status.
+ void ReadStatusFields();
+
+ Task(const Task&) = delete;
+ void operator=(const Task&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_TASK_H_
diff --git a/services/vr/performanced/unique_file.h b/services/vr/performanced/unique_file.h
new file mode 100644
index 0000000..86e487a
--- /dev/null
+++ b/services/vr/performanced/unique_file.h
@@ -0,0 +1,20 @@
+#ifndef ANDROID_DVR_PERFORMANCED_UNIQUE_FILE_H_
+#define ANDROID_DVR_PERFORMANCED_UNIQUE_FILE_H_
+
+#include <stdio.h>
+
+#include <memory>
+
+namespace android {
+namespace dvr {
+
+// Utility to manage the lifetime of a file pointer.
+struct FileDeleter {
+ void operator()(FILE* fp) { fclose(fp); }
+};
+using UniqueFile = std::unique_ptr<FILE, FileDeleter>;
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_PERFORMANCED_UNIQUE_FILE_H_
diff --git a/services/vr/sensord/Android.mk b/services/vr/sensord/Android.mk
new file mode 100644
index 0000000..907c3d6
--- /dev/null
+++ b/services/vr/sensord/Android.mk
@@ -0,0 +1,81 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+ pose_service.cpp \
+ sensord.cpp \
+ sensor_fusion.cpp \
+ sensor_hal_thread.cpp \
+ sensor_ndk_thread.cpp \
+ sensor_service.cpp \
+ sensor_thread.cpp \
+
+includeFiles := \
+ $(LOCAL_PATH)/include
+
+staticLibraries := \
+ libdvrcommon \
+ libsensor \
+ libperformance \
+ libbufferhub \
+ libpdx_default_transport \
+ libchrome \
+ libposepredictor \
+
+sharedLibraries := \
+ libandroid \
+ libbase \
+ libbinder \
+ libcutils \
+ liblog \
+ libhardware \
+ libutils \
+
+cFlags := -DLOG_TAG=\"sensord\" \
+ -DTRACE=0
+
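+# Select the QCOM display platform variant and enable QCOM-specific code paths
+# when building against a QCOM BSP.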
+ifeq ($(TARGET_USES_QCOM_BSP), true)
+ifneq ($(TARGET_QCOM_DISPLAY_VARIANT),)
+ platform := .
+else
+ platform := $(TARGET_BOARD_PLATFORM)
+endif
+ cFlags += -DQCOM_B_FAMILY \
+ -DQCOM_BSP
+endif
+
+include $(CLEAR_VARS)
+# Don't strip symbols so we see stack traces in logcat.
+LOCAL_STRIP_MODULE := false
+LOCAL_SRC_FILES := $(sourceFiles)
+PLATFORM := $(platform)
+LOCAL_CFLAGS := $(cFlags)
+LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_MODULE_CLASS := EXECUTABLES
+LOCAL_MODULE := sensord
+LOCAL_C_INCLUDES := $(includeFiles)
+LOCAL_C_INCLUDES += \
+ $(call local-generated-sources-dir)/proto/frameworks/native/services/vr/sensord
+LOCAL_INIT_RC := sensord.rc
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_SRC_FILES := test/poselatencytest.cpp
+LOCAL_MODULE := poselatencytest
+include $(BUILD_EXECUTABLE)
diff --git a/services/vr/sensord/pose_service.cpp b/services/vr/sensord/pose_service.cpp
new file mode 100644
index 0000000..c2863ee
--- /dev/null
+++ b/services/vr/sensord/pose_service.cpp
@@ -0,0 +1,677 @@
+#define ATRACE_TAG ATRACE_TAG_INPUT
+#include "pose_service.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <time.h>
+
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <sstream>
+#include <type_traits>
+
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <cutils/trace.h>
+#include <dvr/performance_client_api.h>
+#include <dvr/pose_client.h>
+#include <hardware/sensors.h>
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/benchmark.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/platform_defines.h>
+#include <private/dvr/pose-ipc.h>
+#include <private/dvr/sensor_constants.h>
+#include <utils/Trace.h>
+
+#define arraysize(x) (static_cast<ssize_t>(std::extent<decltype(x)>::value))
+
+using android::pdx::LocalChannelHandle;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::Status;
+
+namespace android {
+namespace dvr {
+
+using Vector3d = vec3d;
+using Vector3f = vec3f;
+using Rotationd = quatd;
+using Rotationf = quatf;
+using AngleAxisd = Eigen::AngleAxis<double>;
+using AngleAxisf = Eigen::AngleAxis<float>;
+
+namespace {
+// Wait a few seconds before checking if we need to disable sensors.
+static constexpr int64_t kSensorTimeoutNs = 5000000000ll;
+
+static constexpr float kTwoPi = 2.0 * M_PI;
+static constexpr float kDegToRad = M_PI / 180.f;
+
+// Head model constants.
+static constexpr float kDefaultNeckHorizontalOffset = 0.080f; // meters
+static constexpr float kDefaultNeckVerticalOffset = 0.075f; // meters
+
+static constexpr char kDisablePosePredictionProp[] =
+ "persist.dreamos.disable_predict";
+
+// Device type property for controlling classes of behavior that differ
+// between devices. If unset, defaults to kOrientationTypePortrait.
+static constexpr char kOrientationTypeProp[] = "dvr.orientation_type";
+
+static constexpr char kEnableSensorRecordProp[] = "dvr.enable_6dof_recording";
+static constexpr char kEnableSensorPlayProp[] = "dvr.enable_6dof_playback";
+static constexpr char kEnableSensorPlayIdProp[] = "dvr.6dof_playback_id";
+static constexpr char kEnablePoseRecordProp[] = "dvr.enable_pose_recording";
+
+// Persistent buffer names.
+static constexpr char kPoseRingBufferName[] = "PoseService:RingBuffer";
+
+static constexpr int kDatasetIdLength = 36;
+static constexpr char kDatasetIdChars[] = "0123456789abcdef-";
+static constexpr char kDatasetLocation[] = "/data/sdcard/datasets/";
+
+// These are the flags used by BufferProducer::CreatePersistentUncachedBlob,
+// plus PRIVATE_ADSP_HEAP to allow access from the DSP.
+static constexpr int kPoseRingBufferFlags =
+ GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_RARELY |
+ GRALLOC_USAGE_PRIVATE_UNCACHED | GRALLOC_USAGE_PRIVATE_ADSP_HEAP;
+
+// Extract yaw angle from a given quaternion rotation.
+// Y-axis is considered to be vertical. Result is in rad.
+template <typename T>
+T ExtractYaw(Eigen::Quaternion<T> rotation) {
+ const Eigen::Vector3<T> yaw_axis = rotation * vec3::UnitZ();
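+  // Yaw is the angle of the rotated Z axis in the X-Z plane, i.e. the rotation
+  // about the vertical Y axis.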
+ return std::atan2(yaw_axis.z(), yaw_axis.x());
+}
+
+std::string GetPoseModeString(DvrPoseMode mode) {
+ switch (mode) {
+ case DVR_POSE_MODE_6DOF:
+ return "DVR_POSE_MODE_6DOF";
+ case DVR_POSE_MODE_3DOF:
+ return "DVR_POSE_MODE_3DOF";
+ case DVR_POSE_MODE_MOCK_FROZEN:
+ return "DVR_POSE_MODE_MOCK_FROZEN";
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
+ return "DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW";
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
+ return "DVR_POSE_MODE_MOCK_HEAD_TURN_FAST";
+ case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
+ return "DVR_POSE_MODE_MOCK_ROTATE_SLOW";
+ case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
+ return "DVR_POSE_MODE_MOCK_ROTATE_MEDIUM";
+ case DVR_POSE_MODE_MOCK_ROTATE_FAST:
+ return "DVR_POSE_MODE_MOCK_ROTATE_FAST";
+ case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE:
+ return "DVR_POSE_MODE_MOCK_CIRCLE_STRAFE";
+ default:
+ return "Unknown pose mode";
+ }
+}
+
+inline std::string GetVector3dString(const Vector3d& vector) {
+ std::ostringstream stream;
+ stream << "[" << vector[0] << "," << vector[1] << "," << vector[2] << "]";
+ return stream.str();
+}
+
+inline std::string GetRotationdString(const Rotationd& rotation) {
+ std::ostringstream stream;
+ stream << "[" << rotation.w() << ", " << GetVector3dString(rotation.vec())
+ << "]";
+ return stream.str();
+}
+
+} // namespace
+
+PoseService::PoseService(SensorThread* sensor_thread)
+ : BASE("PoseService", Endpoint::Create(DVR_POSE_SERVICE_CLIENT)),
+ sensor_thread_(sensor_thread),
+ last_sensor_usage_time_ns_(0),
+ watchdog_shutdown_(false),
+ sensors_on_(false),
+ accelerometer_index_(-1),
+ gyroscope_index_(-1),
+ pose_mode_(DVR_POSE_MODE_6DOF),
+ mapped_pose_buffer_(nullptr),
+ vsync_count_(0),
+ photon_timestamp_(0),
+ // Will be updated by external service, but start with a non-zero value:
+ display_period_ns_(16000000) {
+ last_known_pose_ = {
+ .orientation = {1.0f, 0.0f, 0.0f, 0.0f},
+ .translation = {0.0f, 0.0f, 0.0f, 0.0f},
+ .angular_velocity = {0.0f, 0.0f, 0.0f, 0.0f},
+ .velocity = {0.0f, 0.0f, 0.0f, 0.0f},
+ .timestamp_ns = 0,
+ .flags = DVR_POSE_FLAG_HEAD,
+ .pad = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ };
+
+ switch (property_get_int32(kOrientationTypeProp, kOrientationTypePortrait)) {
+ case kOrientationTypeLandscape:
+ device_orientation_type_ = kOrientationTypeLandscape;
+ break;
+ default:
+ device_orientation_type_ = kOrientationTypePortrait;
+ break;
+ }
+
+ ring_buffer_ =
+ BufferProducer::Create(kPoseRingBufferName, 0, 0, kPoseRingBufferFlags,
+ sizeof(DvrPoseRingBuffer));
+ if (!ring_buffer_) {
+ ALOGE("PoseService::PoseService: Failed to create/get pose ring buffer!");
+ return;
+ }
+
+ void* addr = nullptr;
+ int ret =
+ ring_buffer_->GetBlobReadWritePointer(sizeof(DvrPoseRingBuffer), &addr);
+ if (ret < 0) {
+ ALOGE("PoseService::PoseService: Failed to map pose ring buffer: %s",
+ strerror(-ret));
+ return;
+ }
+ memset(addr, 0, sizeof(DvrPoseRingBuffer));
+ mapped_pose_buffer_ = static_cast<DvrPoseRingBuffer*>(addr);
+ addr = nullptr;
+
+ for (int i = 0; i < sensor_thread->GetSensorCount(); ++i) {
+ if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_ACCELEROMETER)
+ accelerometer_index_ = i;
+ if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_GYROSCOPE_UNCALIBRATED)
+ gyroscope_index_ = i;
+ }
+ // If we failed to find the uncalibrated gyroscope, use the regular one.
+ if (gyroscope_index_ < 0) {
+ ALOGW("PoseService was unable to find uncalibrated gyroscope");
+ for (int i = 0; i < sensor_thread->GetSensorCount(); ++i) {
+ ALOGI("Type %d", sensor_thread->GetSensorType(i));
+ if (sensor_thread->GetSensorType(i) == SENSOR_TYPE_GYROSCOPE)
+ gyroscope_index_ = i;
+ }
+ }
+
+ if (accelerometer_index_ < 0) {
+ ALOGE("PoseService was unable to find accelerometer");
+ }
+ if (gyroscope_index_ < 0) {
+ ALOGE("PoseService was unable to find gyroscope");
+ }
+
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ KickSensorWatchDogThread();
+ }
+
+ // Read the persistent dreamos flags before using them in SetPoseMode.
+ enable_pose_prediction_ =
+ property_get_bool(kDisablePosePredictionProp, 0) == 0;
+
+ enable_sensor_recording_ = property_get_bool(kEnableSensorRecordProp, 0) == 1;
+
+ enable_sensor_playback_ = property_get_bool(kEnableSensorPlayProp, 0) == 1;
+
+ if (enable_sensor_playback_) {
+ char dataset_id[PROPERTY_VALUE_MAX];
+ property_get(kEnableSensorPlayIdProp, dataset_id, "");
+ sensor_playback_id_ = std::string(dataset_id);
+
+ if (sensor_playback_id_.length() != kDatasetIdLength ||
+ sensor_playback_id_.find_first_not_of(kDatasetIdChars) !=
+ std::string::npos) {
+ ALOGE("Error: invalid playback id %s", sensor_playback_id_.c_str());
+ sensor_playback_id_ = "";
+ enable_sensor_playback_ = false;
+ } else {
+ ALOGI("Playback id %s", sensor_playback_id_.c_str());
+ }
+ }
+
+ enable_pose_recording_ = property_get_bool(kEnablePoseRecordProp, 0) == 1;
+
+ SetPoseMode(DVR_POSE_MODE_6DOF);
+}
+
+PoseService::~PoseService() {
+ if (watchdog_thread_.get_id() != std::thread::id()) {
+ {
+ std::lock_guard<std::mutex> guard(mutex_);
+ watchdog_shutdown_ = true;
+ watchdog_condition_.notify_one();
+ }
+ watchdog_thread_.join();
+ }
+}
+
+void PoseService::KickSensorWatchDogThread() {
+  // This method is called every frame while rendering, so keep its
+  // synchronization as lightweight as possible.
+ // TODO(jbates) For better performance, we can consider a lock-free atomic
+ // solution instead of locking this mutex.
+
+ // Update the usage time. The watchdog thread will poll this value to know
+ // when to disable sensors.
+ last_sensor_usage_time_ns_ = GetSystemClockNs();
+
+ // If sensors are still on, there's nothing else to do.
+ if (sensors_on_)
+ return;
+
+ // Enable sensors.
+ ALOGI("Start using sensors.");
+ sensors_on_ = true;
+ if (accelerometer_index_ >= 0) {
+ sensor_thread_->StartUsingSensor(accelerometer_index_);
+ }
+ if (gyroscope_index_ >= 0) {
+ sensor_thread_->StartUsingSensor(gyroscope_index_);
+ }
+
+ // Tell the thread to wake up to disable the sensors when no longer needed.
+ watchdog_condition_.notify_one();
+
+ if (watchdog_thread_.get_id() == std::thread::id()) {
+ // The sensor watchdog thread runs while sensors are in use. When no APIs
+ // have requested sensors beyond a threshold (5 seconds), sensors are
+ // disabled.
+ watchdog_thread_ = std::thread([this] {
+ std::unique_lock<std::mutex> lock(mutex_);
+ while (!watchdog_shutdown_) {
+ int64_t remaining_sensor_time_ns =
+ last_sensor_usage_time_ns_ + kSensorTimeoutNs - GetSystemClockNs();
+
+ if (remaining_sensor_time_ns > 0) {
+ // Wait for the remaining usage time before checking again.
+ watchdog_condition_.wait_for(
+ lock, std::chrono::nanoseconds(remaining_sensor_time_ns));
+ continue;
+ }
+
+ if (sensors_on_) {
+ // Disable sensors.
+ ALOGI("Stop using sensors.");
+ sensors_on_ = false;
+ if (accelerometer_index_ >= 0) {
+ sensor_thread_->StopUsingSensor(accelerometer_index_);
+ }
+ if (gyroscope_index_ >= 0) {
+ sensor_thread_->StopUsingSensor(gyroscope_index_);
+ }
+ }
+
+ // Wait for sensors to be enabled again.
+ watchdog_condition_.wait(lock);
+ }
+ });
+ }
+}
+
+bool PoseService::IsInitialized() const {
+ return BASE::IsInitialized() && ring_buffer_ && mapped_pose_buffer_;
+}
+
+void PoseService::WriteAsyncPoses(const Vector3d& start_t_head,
+ const Rotationd& start_q_head,
+ int64_t pose_timestamp) {
+ if (enable_external_pose_) {
+ return;
+ }
+
+ // If playing back data, the timestamps are different enough from the
+ // current time that prediction doesn't work. This hack pretends that
+ // there was one nanosecond of latency between the sensors and here.
+ if (enable_sensor_playback_)
+ pose_timestamp = GetSystemClockNs() - 1;
+
+ // Feed the sample to the predictor
+ pose_predictor_.Add(PosePredictor::Sample{.position = start_t_head,
+ .orientation = start_q_head,
+ .time_ns = pose_timestamp},
+ &last_known_pose_);
+
+ // Store one extra value, because the application is working on the next
+ // frame and expects the minimum count from that frame on.
+ for (uint32_t i = 0; i < kPoseAsyncBufferMinFutureCount + 1; ++i) {
+ int64_t target_time = photon_timestamp_ + i * display_period_ns_;
+
+ // TODO(jbates, cwolfe) For the DSP code, we may still want poses even when
+ // the vsyncs are not ticking up. But it's important not to update the pose
+ // data that's in the past so that applications have the most accurate
+ // estimate of the last frame's *actual* pose, so that they can update
+ // simulations and calculate collisions, etc.
+ if (target_time < pose_timestamp) {
+ // Already in the past, do not update this head pose slot.
+ continue;
+ }
+
+ // Write to the actual shared memory ring buffer.
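+    // The index mask wraps writes within the ring; this assumes the ring size
+    // is a power of two.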
+ uint32_t index = ((vsync_count_ + i) & kPoseAsyncBufferIndexMask);
+
+ // Make a pose prediction
+ if (enable_pose_prediction_) {
+ pose_predictor_.Predict(target_time,
+ target_time + right_eye_photon_offset_ns_,
+ mapped_pose_buffer_->ring + index);
+ } else {
+ mapped_pose_buffer_->ring[index] = last_known_pose_;
+ }
+ }
+}
+
+void PoseService::UpdatePoseMode() {
+ ALOGI_IF(TRACE, "UpdatePoseMode: %f %f %f", last_known_pose_.translation[0],
+ last_known_pose_.translation[1], last_known_pose_.translation[2]);
+
+ const int64_t current_time_ns = GetSystemClockNs();
+
+ const PoseState pose_state = sensor_fusion_.GetLatestPoseState();
+
+ switch (pose_mode_) {
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
+ case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
+ case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
+ case DVR_POSE_MODE_MOCK_ROTATE_FAST:
+ case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE: {
+      // Calculate a pose based on monotonic system time.
+ const Vector3d y_axis(0., 1., 0.);
+ double time_s = current_time_ns / 1e9;
+
+ // Generate fake yaw data.
+ float yaw = 0.0f;
+ Vector3d head_trans(0.0, 0.0, 0.0);
+ switch (pose_mode_) {
+ default:
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_SLOW:
+ // Pan across 120 degrees in 15 seconds.
+ yaw = std::cos(kTwoPi * time_s / 15.0) * 60.0 * kDegToRad;
+ break;
+ case DVR_POSE_MODE_MOCK_HEAD_TURN_FAST:
+ // Pan across 120 degrees in 4 seconds.
+ yaw = std::cos(kTwoPi * time_s / 4.0) * 60.0 * kDegToRad;
+ break;
+ case DVR_POSE_MODE_MOCK_ROTATE_SLOW:
+ // Rotate 5 degrees per second.
+ yaw = std::fmod(time_s * 5.0 * kDegToRad, kTwoPi);
+ break;
+ case DVR_POSE_MODE_MOCK_ROTATE_MEDIUM:
+ // Rotate 30 degrees per second.
+ yaw = std::fmod(time_s * 30.0 * kDegToRad, kTwoPi);
+ break;
+ case DVR_POSE_MODE_MOCK_ROTATE_FAST:
+ // Rotate 90 degrees per second.
+ yaw = std::fmod(time_s * 90.0 * kDegToRad, kTwoPi);
+ break;
+ case DVR_POSE_MODE_MOCK_CIRCLE_STRAFE:
+ // Circle strafe around origin at distance of 3 meters.
+ yaw = std::fmod(time_s * 30.0 * kDegToRad, kTwoPi);
+ head_trans += 3.0 * Vector3d(sin(yaw), 0.0, cos(yaw));
+ break;
+ }
+
+ // Calculate the simulated head rotation in an absolute "head" space.
+ // This space is not related to start space and doesn't need a
+ // reference.
+ Rotationd head_rotation_in_head_space(AngleAxisd(yaw, y_axis));
+
+ WriteAsyncPoses(head_trans, head_rotation_in_head_space, current_time_ns);
+ break;
+ }
+ case DVR_POSE_MODE_MOCK_FROZEN: {
+ // Even when frozen, we still provide a current timestamp, because
+ // consumers may rely on it being monotonic.
+
+ Rotationd start_from_head_rotation(
+ frozen_state_.head_from_start_rotation.w,
+ frozen_state_.head_from_start_rotation.x,
+ frozen_state_.head_from_start_rotation.y,
+ frozen_state_.head_from_start_rotation.z);
+ Vector3d head_from_start_translation(
+ frozen_state_.head_from_start_translation.x,
+ frozen_state_.head_from_start_translation.y,
+ frozen_state_.head_from_start_translation.z);
+
+ WriteAsyncPoses(head_from_start_translation, start_from_head_rotation,
+ current_time_ns);
+ break;
+ }
+ case DVR_POSE_MODE_3DOF: {
+ // Sensor fusion provides IMU-space data, transform to world space.
+
+ // Constants to perform IMU orientation adjustments. Note that these
+ // calculations will be optimized out in a release build.
+ constexpr double k90DegInRad = 90.0 * M_PI / 180.0;
+ const Vector3d kVecAxisX(1.0, 0.0, 0.0);
+ const Vector3d kVecAxisY(0.0, 1.0, 0.0);
+ const Vector3d kVecAxisZ(0.0, 0.0, 1.0);
+ const Rotationd kRotX90(AngleAxisd(k90DegInRad, kVecAxisX));
+
+ Rotationd start_from_head_rotation;
+ if (device_orientation_type_ == kOrientationTypeLandscape) {
+ const Rotationd kPostRotation =
+ kRotX90 * Rotationd(AngleAxisd(-k90DegInRad, kVecAxisY));
+ start_from_head_rotation =
+ (pose_state.sensor_from_start_rotation * kPostRotation).inverse();
+ } else {
+ const Rotationd kPreRotation =
+ Rotationd(AngleAxisd(k90DegInRad, kVecAxisZ));
+ const Rotationd kPostRotation = kRotX90;
+ start_from_head_rotation =
+ (kPreRotation * pose_state.sensor_from_start_rotation *
+ kPostRotation)
+ .inverse();
+ }
+ start_from_head_rotation.normalize();
+
+      // Neck/head model for when no 6DoF data is available.
+      // To apply the neck model, first translate the head pose to the new
+      // center of the eyes, then rotate around the origin (the original head
+      // position).
+ Vector3d position =
+ start_from_head_rotation * Vector3d(0.0, kDefaultNeckVerticalOffset,
+ -kDefaultNeckHorizontalOffset);
+
+      // The IMU driver gives timestamps on its own clock, but we need the
+      // monotonic clock. Offset by 5 ms to account for estimated IMU sample
+      // latency.
+ WriteAsyncPoses(position, start_from_head_rotation,
+ pose_state.timestamp_ns + 5000000);
+ break;
+ }
+ default:
+ case DVR_POSE_MODE_6DOF:
+ ALOGE("ERROR: invalid pose mode");
+ break;
+ }
+}
+
+int PoseService::HandleMessage(pdx::Message& msg) {
+ int ret = 0;
+ const pdx::MessageInfo& info = msg.GetInfo();
+ switch (info.op) {
+ case DVR_POSE_NOTIFY_VSYNC: {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ // Kick the sensor thread, because we are still rendering.
+ KickSensorWatchDogThread();
+
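+      // Scatter-read the vsync parameters; the field order here must match the
+      // order in which the client writes them.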
+ const struct iovec data[] = {
+ {.iov_base = &vsync_count_, .iov_len = sizeof(vsync_count_)},
+ {.iov_base = &photon_timestamp_,
+ .iov_len = sizeof(photon_timestamp_)},
+ {.iov_base = &display_period_ns_,
+ .iov_len = sizeof(display_period_ns_)},
+ {.iov_base = &right_eye_photon_offset_ns_,
+ .iov_len = sizeof(right_eye_photon_offset_ns_)},
+ };
+ constexpr int expected_size =
+ sizeof(vsync_count_) + sizeof(photon_timestamp_) +
+ sizeof(display_period_ns_) + sizeof(right_eye_photon_offset_ns_);
+ ret = msg.ReadVector(data, sizeof(data) / sizeof(data[0]));
+ if (ret < expected_size) {
+        ALOGI("error: msg.ReadVector read too little (%d < %d)", ret,
+              expected_size);
+ REPLY_ERROR(msg, EIO, error);
+ }
+
+ if (!enable_external_pose_) {
+ mapped_pose_buffer_->vsync_count = vsync_count_;
+ }
+
+ // TODO(jbates, eieio): make this async, no need to reply.
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_POSE_POLL: {
+ ATRACE_NAME("pose_poll");
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ DvrPoseState client_state;
+ client_state = {
+ .head_from_start_rotation = {last_known_pose_.orientation[0],
+ last_known_pose_.orientation[1],
+ last_known_pose_.orientation[2],
+ last_known_pose_.orientation[3]},
+ .head_from_start_translation = {last_known_pose_.translation[0],
+ last_known_pose_.translation[1],
+ last_known_pose_.translation[2]},
+ .timestamp_ns = static_cast<uint64_t>(last_known_pose_.timestamp_ns),
+ .sensor_from_start_rotation_velocity = {
+ last_known_pose_.angular_velocity[0],
+ last_known_pose_.angular_velocity[1],
+ last_known_pose_.angular_velocity[2]}};
+
+ Btrace("Sensor data received",
+ static_cast<int64_t>(client_state.timestamp_ns));
+
+ Btrace("Pose polled");
+
+ ret = msg.Write(&client_state, sizeof(client_state));
+ const int expected_size = sizeof(client_state);
+ if (ret < expected_size) {
+ ALOGI("error: msg.Write wrote too little (%d < %d)", ret,
+ expected_size);
+ REPLY_ERROR(msg, EIO, error);
+ }
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_POSE_FREEZE: {
+ {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ DvrPoseState frozen_state;
+ const int expected_size = sizeof(frozen_state);
+ ret = msg.Read(&frozen_state, expected_size);
+ if (ret < expected_size) {
+ ALOGI("error: msg.Read read too little (%d < %d)", ret,
+ expected_size);
+ REPLY_ERROR(msg, EIO, error);
+ }
+ frozen_state_ = frozen_state;
+ }
+ SetPoseMode(DVR_POSE_MODE_MOCK_FROZEN);
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_POSE_SET_MODE: {
+ int mode;
+ {
+ std::lock_guard<std::mutex> guard(mutex_);
+ const int expected_size = sizeof(mode);
+ ret = msg.Read(&mode, expected_size);
+ if (ret < expected_size) {
+ ALOGI("error: msg.Read read too little (%d < %d)", ret,
+ expected_size);
+ REPLY_ERROR(msg, EIO, error);
+ }
+ if (mode < 0 || mode >= DVR_POSE_MODE_COUNT) {
+ REPLY_ERROR(msg, EINVAL, error);
+ }
+ }
+ SetPoseMode(DvrPoseMode(mode));
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_POSE_GET_MODE: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ int mode = pose_mode_;
+ ret = msg.Write(&mode, sizeof(mode));
+ const int expected_size = sizeof(mode);
+ if (ret < expected_size) {
+ ALOGI("error: msg.Write wrote too little (%d < %d)", ret,
+ expected_size);
+ REPLY_ERROR(msg, EIO, error);
+ }
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_POSE_GET_RING_BUFFER: {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ // Kick the sensor thread, because we have a new consumer.
+ KickSensorWatchDogThread();
+
+ Status<LocalChannelHandle> consumer_channel =
+ ring_buffer_->CreateConsumer();
+ REPLY_MESSAGE(msg, consumer_channel, error);
+ }
+ case DVR_POSE_GET_CONTROLLER_RING_BUFFER: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ REPLY_ERROR(msg, EINVAL, error);
+ }
+ case DVR_POSE_LOG_CONTROLLER: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ REPLY_ERROR(msg, EINVAL, error);
+ }
+ default:
+ // Do not lock mutex_ here, because this may call the on*() handlers,
+ // which will lock the mutex themselves.
+ ret = Service::HandleMessage(msg);
+ break;
+ }
+error:
+ return ret;
+}
+
+std::string PoseService::DumpState(size_t /*max_length*/) {
+ DvrPoseMode pose_mode;
+ {
+ std::lock_guard<std::mutex> guard(mutex_);
+ pose_mode = pose_mode_;
+ }
+
+ std::ostringstream stream;
+ stream << "Pose mode: " << GetPoseModeString(pose_mode);
+ return stream.str();
+}
+
+void PoseService::HandleEvents(const sensors_event_t* begin_events,
+ const sensors_event_t* end_events) {
+ ATRACE_NAME("PoseService::HandleEvents");
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ for (const sensors_event_t* event = begin_events; event != end_events;
+ ++event) {
+ if (event->type == SENSOR_TYPE_ACCELEROMETER) {
+ sensor_fusion_.ProcessAccelerometerSample(
+ event->acceleration.x, event->acceleration.y, event->acceleration.z,
+ event->timestamp);
+ } else if (event->type == SENSOR_TYPE_GYROSCOPE_UNCALIBRATED) {
+ sensor_fusion_.ProcessGyroscopeSample(event->gyro.x, event->gyro.y,
+ event->gyro.z, event->timestamp);
+ }
+ }
+
+ UpdatePoseMode();
+}
+
+void PoseService::SetPoseMode(DvrPoseMode mode) {
+ if (mode == DVR_POSE_MODE_6DOF) {
+ // Only 3DoF is currently supported.
+ mode = DVR_POSE_MODE_3DOF;
+ }
+
+ pose_mode_ = mode;
+
+ sensor_thread_->SetPaused(false);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/pose_service.h b/services/vr/sensord/pose_service.h
new file mode 100644
index 0000000..300737c
--- /dev/null
+++ b/services/vr/sensord/pose_service.h
@@ -0,0 +1,143 @@
+#ifndef ANDROID_DVR_SENSORD_POSE_SERVICE_H_
+#define ANDROID_DVR_SENSORD_POSE_SERVICE_H_
+
+#include <condition_variable>
+#include <forward_list>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+#include <dvr/pose_client.h>
+#include <pdx/service.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/pose_client_internal.h>
+#include <private/dvr/ring_buffer.h>
+#include <private/dvr/linear_pose_predictor.h>
+
+#include "sensor_fusion.h"
+#include "sensor_thread.h"
+
+namespace android {
+namespace dvr {
+
+// PoseService implements the HMD pose service over ServiceFS.
+class PoseService : public pdx::ServiceBase<PoseService> {
+ public:
+ ~PoseService() override;
+
+ bool IsInitialized() const override;
+ int HandleMessage(pdx::Message& msg) override;
+ std::string DumpState(size_t max_length) override;
+
+ // Handle events from the sensor HAL.
+ // Safe to call concurrently with any other public member functions.
+ void HandleEvents(const sensors_event_t* begin_events,
+ const sensors_event_t* end_events);
+
+ private:
+ friend BASE;
+
+ enum OrientationType {
+ // Typical smartphone device (default).
+ kOrientationTypePortrait = 1,
+ // Landscape device.
+ kOrientationTypeLandscape = 2,
+ };
+
+ // Initializes the service. Keeps a reference to sensor_thread, which must be
+ // non-null.
+ explicit PoseService(SensorThread* sensor_thread);
+
+  // Kick the sensor watchdog thread, which robustly disables IMU usage when
+  // there are no sensor data consumers.
+ // The class mutex (mutex_) must be locked while calling this method.
+ void KickSensorWatchDogThread();
+
+ void UpdatePoseMode();
+
+ // Update the async pose ring buffer with new pose data.
+ // |start_t_head| Head position in start space.
+ // |start_q_head| Head orientation quaternion in start space.
+  // |pose_timestamp| System timestamp of the pose data in nanoseconds.
+ void WriteAsyncPoses(const Eigen::Vector3<double>& start_t_head,
+ const Eigen::Quaternion<double>& start_q_head,
+ int64_t pose_timestamp);
+
+ // Set the pose mode.
+ void SetPoseMode(DvrPoseMode mode);
+
+ // The abstraction around the sensor data.
+ SensorThread* sensor_thread_;
+
+ // Protects access to all member variables.
+ std::mutex mutex_;
+
+ // Watchdog thread data. The watchdog thread will ensure that sensor access
+ // is disabled when nothing has been consuming it for a while.
+ int64_t last_sensor_usage_time_ns_;
+ std::thread watchdog_thread_;
+ std::condition_variable watchdog_condition_;
+ bool watchdog_shutdown_;
+ bool sensors_on_;
+
+ // Indices for the accelerometer and gyroscope sensors, or -1 if the sensor
+ // wasn't present on construction.
+ int accelerometer_index_;
+ int gyroscope_index_;
+
+ // The sensor fusion algorithm and its state.
+ SensorFusion sensor_fusion_;
+
+ // Current pose mode.
+ DvrPoseMode pose_mode_;
+
+ // State which is sent if pose_mode_ is DVR_POSE_MODE_MOCK_FROZEN.
+ DvrPoseState frozen_state_;
+
+ // Last known pose.
+ DvrPoseAsync last_known_pose_;
+
+ // If this flag is true, the pose published includes a small prediction of
+ // where it'll be when it's consumed.
+ bool enable_pose_prediction_;
+
+  // Flag to turn on recording of raw sensor data.
+ bool enable_sensor_recording_;
+
+  // Flag to log pose to a file.
+ bool enable_pose_recording_;
+
+ // Flag to turn on playback from a saved dataset instead of using live data.
+ bool enable_sensor_playback_;
+
+ std::string sensor_playback_id_;
+
+ // External pose generation.
+ bool enable_external_pose_ = false;
+
+ // The predictor to extrapolate pose samples.
+ LinearPosePredictor pose_predictor_;
+
+ // Pose ring buffer.
+ std::shared_ptr<BufferProducer> ring_buffer_;
+ // Temporary mapped ring buffer.
+ DvrPoseRingBuffer* mapped_pose_buffer_;
+ // Current vsync info, updated by displayd.
+ uint32_t vsync_count_;
+ int64_t photon_timestamp_;
+ int64_t display_period_ns_;
+ int64_t right_eye_photon_offset_ns_ = 0;
+
+ // Type for controlling pose orientation calculation.
+ OrientationType device_orientation_type_;
+
+ PoseService(const PoseService&) = delete;
+ void operator=(const PoseService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_POSE_SERVICE_H_
diff --git a/services/vr/sensord/sensor_fusion.cpp b/services/vr/sensord/sensor_fusion.cpp
new file mode 100644
index 0000000..5663ae4
--- /dev/null
+++ b/services/vr/sensord/sensor_fusion.cpp
@@ -0,0 +1,348 @@
+#include "sensor_fusion.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <private/dvr/eigen.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// --- start of added bits for porting to eigen
+
+// In general, we prefer to add wrappers for things like Inverse() to minimize
+// the changes to the imported code, so that merging in upstream changes becomes
+// simpler.
+
+inline Matrix3d Inverse(const Matrix3d& matrix) { return matrix.inverse(); }
+inline Matrix3d Transpose(const Matrix3d& matrix) { return matrix.transpose(); }
+inline Matrix3d RotationMatrixNH(const Rotationd& rotation) {
+ return rotation.toRotationMatrix();
+}
+inline double Length(const Vector3d& vector) { return vector.norm(); }
+
+using uint64 = uint64_t;
+
+// --- end of added bits for porting to eigen
+
+static const double kFiniteDifferencingEpsilon = 1e-7;
+static const double kEpsilon = 1e-15;
+// Default gyroscope timestep in seconds; 0.005 s corresponds to 200 Hz.
+static const double kDefaultGyroscopeTimestep_s = 0.005f;
+// Maximum time between gyroscope samples before we start limiting the integration.
+static const double kMaximumGyroscopeSampleDelay_s = 0.04f;
+// Compute a first-order exponential moving average of changes in accel norm per
+// frame.
+static const double kSmoothingFactor = 0.5;
+// Minimum and maximum values used for accelerometer noise covariance matrix.
+// The smaller the sigma value, the more weight is given to the accelerometer
+// signal.
+static const double kMinAccelNoiseSigma = 0.75;
+static const double kMaxAccelNoiseSigma = 7.0;
+// Initial value for the diagonal elements of the different covariance matrices.
+static const double kInitialStateCovarianceValue = 25.0;
+static const double kInitialProcessCovarianceValue = 1.0;
+// Maximum accelerometer norm change allowed before capping its covariance to a
+// large value.
+static const double kMaxAccelNormChange = 0.15;
+// Timestep IIR filtering coefficient.
+static const double kTimestepFilterCoeff = 0.95;
+// Minimum number of samples for timestep filtering.
+static const uint32_t kTimestepFilterMinSamples = 10;
+
+// Z direction in start space.
+static const Vector3d kCanonicalZDirection(0.0, 0.0, 1.0);
+
+// Computes an axis-angle rotation from the input vector.
+// angle = norm(a)
+// axis = a.normalized()
+// If norm(a) == 0, it returns an identity rotation.
+static Rotationd RotationFromVector(const Vector3d& a) {
+ const double norm_a = Length(a);
+ if (norm_a < kEpsilon) {
+ return Rotationd::Identity();
+ }
+ return Rotationd(AngleAxisd(norm_a, a / norm_a));
+}
+
+// --- start of functions ported from pose_prediction.cc
+
+namespace pose_prediction {
+
+// Returns a rotation matrix based on the integration of the gyroscope_value
+// over the timestep_s in seconds.
+// TODO(pfg): Document the space better here.
+//
+// @param gyroscope_value gyroscope sensor values.
+// @param timestep_s integration period in seconds.
+// @return Integration of the gyroscope value; the resulting rotation is from
+//         Start to Sensor Space.
+Rotationd GetRotationFromGyroscope(const Vector3d& gyroscope_value,
+ double timestep_s) {
+ const double velocity = Length(gyroscope_value);
+
+ // When there is no rotation data return an identity rotation.
+ if (velocity < kEpsilon) {
+ return Rotationd::Identity();
+ }
+ // Since the gyroscope_value is a start from sensor transformation we need to
+ // invert it to have a sensor from start transformation, hence the minus sign.
+ // For more info:
+ // http://developer.android.com/guide/topics/sensors/sensors_motion.html#sensors-motion-gyro
+ return Rotationd(AngleAxisd(-timestep_s * velocity,
+ gyroscope_value / velocity));
+}
+
+} // namespace pose_prediction
+
+// --- end of functions ported from pose_prediction.cc
+
+} // namespace
+
+SensorFusion::SensorFusion()
+ : execute_reset_with_next_accelerometer_sample_(false) {
+ ResetState();
+}
+
+void SensorFusion::Reset() {
+ execute_reset_with_next_accelerometer_sample_ = true;
+}
+
+void SensorFusion::ResetState() {
+ current_state_.timestamp_ns = 0;
+ current_state_.sensor_from_start_rotation = Rotationd::Identity();
+ current_state_.sensor_from_start_rotation_velocity = Vector3d::Zero();
+
+ current_accelerometer_timestamp_ns_ = 0;
+
+ state_covariance_ = Matrix3d::Identity() * kInitialStateCovarianceValue;
+ process_covariance_ = Matrix3d::Identity() * kInitialProcessCovarianceValue;
+ accelerometer_measurement_covariance_ =
+ Matrix3d::Identity() * kMinAccelNoiseSigma * kMinAccelNoiseSigma;
+ innovation_covariance_.setIdentity();
+
+ accelerometer_measurement_jacobian_ = Matrix3d::Zero();
+ kalman_gain_ = Matrix3d::Zero();
+ innovation_ = Vector3d::Zero();
+ accelerometer_measurement_ = Vector3d::Zero();
+ prediction_ = Vector3d::Zero();
+ control_input_ = Vector3d::Zero();
+ state_update_ = Vector3d::Zero();
+
+ moving_average_accelerometer_norm_change_ = 0.0;
+
+ is_timestep_filter_initialized_ = false;
+ is_gyroscope_filter_valid_ = false;
+ is_aligned_with_gravity_ = false;
+}
+
+// Note: the timestamps are not handled perfectly here. The state timestamps
+// always correspond to the gyroscope timestamps, because doing otherwise would
+// require additional extrapolation.
+// TODO(pfg): investigate publishing an updated pose after accelerometer data
+// was used for filtering.
+PoseState SensorFusion::GetLatestPoseState() const {
+ std::unique_lock<std::mutex> lock(mutex_);
+ return current_state_;
+}
+
+void SensorFusion::ProcessGyroscopeSample(float v_x, float v_y, float v_z,
+ uint64 timestamp_ns) {
+ std::unique_lock<std::mutex> lock(mutex_);
+
+ // Don't accept gyroscope sample when waiting for a reset.
+ if (execute_reset_with_next_accelerometer_sample_) {
+ return;
+ }
+
+ // Discard outdated samples.
+ if (current_state_.timestamp_ns >= timestamp_ns) {
+ // TODO(pfg): Investigate why this happens.
+ return;
+ }
+
+ // Checks that we received at least one gyroscope sample in the past.
+ if (current_state_.timestamp_ns != 0) {
+ // TODO(pfg): roll this in filter gyroscope timestep function.
+ double current_timestep_s =
+ static_cast<double>(timestamp_ns - current_state_.timestamp_ns) * 1e-9;
+ if (current_timestep_s > kMaximumGyroscopeSampleDelay_s) {
+ if (is_gyroscope_filter_valid_) {
+        // Replace the delta timestamp with the filtered estimate of the delta
+        // time.
+ current_timestep_s = filtered_gyroscope_timestep_s_;
+ } else {
+ current_timestep_s = kDefaultGyroscopeTimestep_s;
+ }
+ } else {
+ FilterGyroscopeTimestep(current_timestep_s);
+ }
+
+    // Only integrate after receiving an accelerometer sample.
+ if (is_aligned_with_gravity_) {
+ const Rotationd rotation_from_gyroscope =
+ pose_prediction::GetRotationFromGyroscope(Vector3d(v_x, v_y, v_z),
+ current_timestep_s);
+ current_state_.sensor_from_start_rotation =
+ rotation_from_gyroscope * current_state_.sensor_from_start_rotation;
+ current_state_.sensor_from_start_rotation.normalize();
+ UpdateStateCovariance(RotationMatrixNH(rotation_from_gyroscope));
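+      // Add process noise scaled by the squared timestep: P += Q * dt^2.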
+ state_covariance_ =
+ state_covariance_ +
+ (process_covariance_ * (current_timestep_s * current_timestep_s));
+ }
+ }
+
+ // Saves gyroscope event for future prediction.
+ current_state_.timestamp_ns = timestamp_ns;
+ current_state_.sensor_from_start_rotation_velocity = Vector3d(v_x, v_y, v_z);
+}
+
+// TODO(pfg): move to rotation object for the input.
+Vector3d SensorFusion::ComputeInnovation(const Rotationd& pose) {
+ const Vector3d predicted_down_direction =
+ RotationMatrixNH(pose) * kCanonicalZDirection;
+
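+  // The innovation is the axis-angle vector of the rotation that takes the
+  // predicted gravity direction onto the measured accelerometer direction.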
+ const Rotationd rotation = Rotationd::FromTwoVectors(
+ predicted_down_direction, accelerometer_measurement_);
+ AngleAxisd angle_axis(rotation);
+ return angle_axis.axis() * angle_axis.angle();
+}
+
+void SensorFusion::ComputeMeasurementJacobian() {
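+  // Numerically differentiate the innovation with respect to a small rotation
+  // about each axis; each column approximates d(innovation)/d(rotation dof).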
+ for (int dof = 0; dof < 3; dof++) {
+    // TODO(pfg): Create this delta rotation in the constructor and use UnitX..
+ Vector3d delta = Vector3d::Zero();
+ delta[dof] = kFiniteDifferencingEpsilon;
+
+ const Rotationd epsilon_rotation = RotationFromVector(delta);
+ const Vector3d delta_rotation = ComputeInnovation(
+ epsilon_rotation * current_state_.sensor_from_start_rotation);
+
+ const Vector3d col =
+ (innovation_ - delta_rotation) / kFiniteDifferencingEpsilon;
+ accelerometer_measurement_jacobian_(0, dof) = col[0];
+ accelerometer_measurement_jacobian_(1, dof) = col[1];
+ accelerometer_measurement_jacobian_(2, dof) = col[2];
+ }
+}
+
+void SensorFusion::ProcessAccelerometerSample(float acc_x, float acc_y,
+ float acc_z,
+ uint64 timestamp_ns) {
+ std::unique_lock<std::mutex> lock(mutex_);
+
+ // Discard outdated samples.
+ if (current_accelerometer_timestamp_ns_ >= timestamp_ns) {
+ // TODO(pfg): Investigate why this happens.
+ return;
+ }
+
+ // Call reset state if required.
+ if (execute_reset_with_next_accelerometer_sample_.exchange(false)) {
+ ResetState();
+ }
+
+ accelerometer_measurement_ = Vector3d(acc_x, acc_y, acc_z);
+ current_accelerometer_timestamp_ns_ = timestamp_ns;
+
+ if (!is_aligned_with_gravity_) {
+ // This is the first accelerometer measurement so it initializes the
+ // orientation estimate.
+ current_state_.sensor_from_start_rotation = Rotationd::FromTwoVectors(
+ kCanonicalZDirection, accelerometer_measurement_);
+ is_aligned_with_gravity_ = true;
+
+ previous_accelerometer_norm_ = Length(accelerometer_measurement_);
+ return;
+ }
+
+ UpdateMeasurementCovariance();
+
+ innovation_ = ComputeInnovation(current_state_.sensor_from_start_rotation);
+ ComputeMeasurementJacobian();
+
+ // S = H * P * H' + R
+ innovation_covariance_ = accelerometer_measurement_jacobian_ *
+ state_covariance_ *
+ Transpose(accelerometer_measurement_jacobian_) +
+ accelerometer_measurement_covariance_;
+
+ // K = P * H' * S^-1
+ kalman_gain_ = state_covariance_ *
+ Transpose(accelerometer_measurement_jacobian_) *
+ Inverse(innovation_covariance_);
+
+ // x_update = K*nu
+ state_update_ = kalman_gain_ * innovation_;
+
+ // P = (I - K * H) * P;
+ state_covariance_ = (Matrix3d::Identity() -
+ kalman_gain_ * accelerometer_measurement_jacobian_) *
+ state_covariance_;
+
+ // Updates pose and associate covariance matrix.
+ const Rotationd rotation_from_state_update =
+ RotationFromVector(state_update_);
+
+ current_state_.sensor_from_start_rotation =
+ rotation_from_state_update * current_state_.sensor_from_start_rotation;
+ UpdateStateCovariance(RotationMatrixNH(rotation_from_state_update));
+}
+
+void SensorFusion::UpdateStateCovariance(const Matrix3d& motion_update) {
+ state_covariance_ =
+ motion_update * state_covariance_ * Transpose(motion_update);
+}
+
+void SensorFusion::FilterGyroscopeTimestep(double gyroscope_timestep_s) {
+ if (!is_timestep_filter_initialized_) {
+ // Initializes the filter.
+ filtered_gyroscope_timestep_s_ = gyroscope_timestep_s;
+ num_gyroscope_timestep_samples_ = 1;
+ is_timestep_filter_initialized_ = true;
+ return;
+ }
+
+ // Computes the IIR filter response.
+ filtered_gyroscope_timestep_s_ =
+ kTimestepFilterCoeff * filtered_gyroscope_timestep_s_ +
+ (1 - kTimestepFilterCoeff) * gyroscope_timestep_s;
+ ++num_gyroscope_timestep_samples_;
+
+ if (num_gyroscope_timestep_samples_ > kTimestepFilterMinSamples) {
+ is_gyroscope_filter_valid_ = true;
+ }
+}
+
+void SensorFusion::UpdateMeasurementCovariance() {
+ const double current_accelerometer_norm = Length(accelerometer_measurement_);
+ // Norm change between current and previous accel readings.
+ const double current_accelerometer_norm_change =
+ std::abs(current_accelerometer_norm - previous_accelerometer_norm_);
+ previous_accelerometer_norm_ = current_accelerometer_norm;
+
+ moving_average_accelerometer_norm_change_ =
+ kSmoothingFactor * current_accelerometer_norm_change +
+ (1. - kSmoothingFactor) * moving_average_accelerometer_norm_change_;
+
+ // If we hit the accel norm change threshold, we use the maximum noise sigma
+ // for the accel covariance. For anything below that, we use a linear
+ // combination between min and max sigma values.
+ const double norm_change_ratio =
+ moving_average_accelerometer_norm_change_ / kMaxAccelNormChange;
+ const double accelerometer_noise_sigma = std::min(
+ kMaxAccelNoiseSigma,
+ kMinAccelNoiseSigma +
+ norm_change_ratio * (kMaxAccelNoiseSigma - kMinAccelNoiseSigma));
+
+ // Updates the accel covariance matrix with the new sigma value.
+ accelerometer_measurement_covariance_ = Matrix3d::Identity() *
+ accelerometer_noise_sigma *
+ accelerometer_noise_sigma;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/sensor_fusion.h b/services/vr/sensord/sensor_fusion.h
new file mode 100644
index 0000000..0ceae21
--- /dev/null
+++ b/services/vr/sensord/sensor_fusion.h
@@ -0,0 +1,181 @@
+#ifndef ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
+#define ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
+
+#include <atomic>
+#include <cstdlib>
+#include <mutex>
+
+#include <private/dvr/types.h>
+
+namespace android {
+namespace dvr {
+
+using Matrix3d = Eigen::Matrix<double, 3, 3>;
+using Rotationd = quatd;
+using Vector3d = vec3d;
+using AngleAxisd = Eigen::AngleAxisd;
+
+// Ported from GVR's pose_state.h.
+// Stores a 3dof pose plus derivatives. This can be used for prediction.
+struct PoseState {
+ // Time in nanoseconds for the current pose.
+ uint64_t timestamp_ns;
+
+ // Rotation from Sensor Space to Start Space.
+ Rotationd sensor_from_start_rotation;
+
+ // First derivative of the rotation.
+ // TODO(pfg): currently storing gyro data, switch to first derivative instead.
+ Vector3d sensor_from_start_rotation_velocity;
+};
+
+// Sensor fusion class that implements an Extended Kalman Filter (EKF) to
+// estimate a 3D rotation from a gyroscope and an accelerometer.
+// This system only has one state, the pose. It does not estimate any velocity
+// or acceleration.
+//
+// To learn more about Kalman filtering one can read this article which is a
+// good introduction: http://en.wikipedia.org/wiki/Kalman_filter
+//
+// Start Space is :
+// z is up.
+// y is forward based on the first sensor data.
+// x = y \times z
+// Sensor Space follows the android specification {@link
+// http://developer.android.com/guide/topics/sensors/sensors_overview.html#sensors-coords}
+// See http://go/vr-coords for definitions of Start Space and Sensor Space.
+//
+// This is a port from GVR's SensorFusion code (See
+// https://cs/vr/gvr/sensors/sensor_fusion.h)
+// which in turn is a port from java of OrientationEKF (See
+// https://cs/java/com/google/vr/cardboard/vrtoolkit/vrtoolkit/src/main/java/com/google/vrtoolkit/cardboard/sensors/internal/OrientationEKF.java)
+class SensorFusion {
+ public:
+ SensorFusion();
+ SensorFusion(const SensorFusion&) = delete;
+ void operator=(const SensorFusion&) = delete;
+
+ // Resets the state of the sensor fusion. It sets the velocity for
+ // prediction to zero. The reset will happen with the next
+  // accelerometer sample. Gyroscope samples will be discarded until a new
+ // accelerometer sample arrives.
+ void Reset();
+
+ // Gets the PoseState representing the latest pose and derivatives at a
+ // particular timestamp as estimated by SensorFusion.
+ PoseState GetLatestPoseState() const;
+
+ // Processes one gyroscope sample event. This updates the pose of the system
+ // and the prediction model. The gyroscope data is assumed to be in axis angle
+ // form. Angle = ||v|| and Axis = v / ||v||, with v = [v_x, v_y, v_z]^T.
+ //
+ // @param v_x velocity in x.
+ // @param v_y velocity in y.
+ // @param v_z velocity in z.
+  // @param timestamp_ns gyroscope event timestamp in nanoseconds.
+ void ProcessGyroscopeSample(float v_x, float v_y, float v_z,
+ uint64_t timestamp_ns);
+
+ // Processes one accelerometer sample event. This updates the pose of the
+  // system. If the accelerometer norm changes too much between samples, it is
+  // trusted less.
+ //
+ // @param acc_x accelerometer data in x.
+ // @param acc_y accelerometer data in y.
+ // @param acc_z accelerometer data in z.
+  // @param timestamp_ns accelerometer event timestamp in nanoseconds.
+ void ProcessAccelerometerSample(float acc_x, float acc_y, float acc_z,
+ uint64_t timestamp_ns);
+
+ private:
+  // Estimates the average timestep between gyroscope events.
+ void FilterGyroscopeTimestep(double gyroscope_timestep);
+
+  // Updates the state covariance with an incremental motion. This re-expresses
+  // the covariance (a quadratic form) in the rotated frame.
+ void UpdateStateCovariance(const Matrix3d& motion_update);
+
+ // Computes the innovation vector of the Kalman based on the input pose.
+ // It uses the latest measurement vector (i.e. accelerometer data), which must
+ // be set prior to calling this function.
+ Vector3d ComputeInnovation(const Rotationd& pose);
+
+  // This computes accelerometer_measurement_jacobian_ via numerical
+  // differentiation based on the current value of
+  // current_state_.sensor_from_start_rotation.
+ void ComputeMeasurementJacobian();
+
+ // Updates the accelerometer covariance matrix.
+ //
+ // This looks at the norm of recent accelerometer readings. If it has changed
+  // significantly, it means the phone is undergoing acceleration beyond
+  // gravity, and so the gravity (down-vector) signal is noisier.
+ //
+ // TODO(dcoz,pfg): this function is very simple, we probably need something
+ // more elaborated here once we have proper regression testing.
+ void UpdateMeasurementCovariance();
+
+  // Resets all internal state. Not thread safe; the caller must hold the lock.
+  // This function is called from ProcessAccelerometerSample.
+ void ResetState();
+
+ // Current transformation from Sensor Space to Start Space.
+ // x_sensor = sensor_from_start_rotation_ * x_start;
+ PoseState current_state_;
+
+ // Filtering of the gyroscope timestep started?
+ bool is_timestep_filter_initialized_;
+ // Filtered gyroscope timestep valid?
+ bool is_gyroscope_filter_valid_;
+  // Sensor fusion currently aligned with gravity? After initialization it
+  // requires a few accelerometer samples for the system to become aligned.
+ bool is_aligned_with_gravity_;
+
+ // Covariance of Kalman filter state (P in common formulation).
+ Matrix3d state_covariance_;
+ // Covariance of the process noise (Q in common formulation).
+ Matrix3d process_covariance_;
+ // Covariance of the accelerometer measurement (R in common formulation).
+ Matrix3d accelerometer_measurement_covariance_;
+ // Covariance of innovation (S in common formulation).
+ Matrix3d innovation_covariance_;
+ // Jacobian of the measurements (H in common formulation).
+ Matrix3d accelerometer_measurement_jacobian_;
+ // Gain of the Kalman filter (K in common formulation).
+ Matrix3d kalman_gain_;
+ // Parameter update a.k.a. innovation vector. (\nu in common formulation).
+ Vector3d innovation_;
+ // Measurement vector (z in common formulation).
+ Vector3d accelerometer_measurement_;
+ // Current prediction vector (g in common formulation).
+ Vector3d prediction_;
+ // Control input, currently this is only the gyroscope data (\mu in common
+ // formulation).
+ Vector3d control_input_;
+ // Update of the state vector. (x in common formulation).
+ Vector3d state_update_;
+
+ // Time of the last accelerometer processed event.
+ uint64_t current_accelerometer_timestamp_ns_;
+
+  // Estimate of the timestep between gyroscope events in seconds.
+ double filtered_gyroscope_timestep_s_;
+ // Number of timestep samples processed so far by the filter.
+ uint32_t num_gyroscope_timestep_samples_;
+ // Norm of the accelerometer for the previous measurement.
+ double previous_accelerometer_norm_;
+ // Moving average of the accelerometer norm changes. It is computed for every
+ // sensor datum.
+ double moving_average_accelerometer_norm_change_;
+
+ // Flag indicating if a state reset should be executed with the next
+ // accelerometer sample.
+ std::atomic<bool> execute_reset_with_next_accelerometer_sample_;
+
+ mutable std::mutex mutex_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_SENSOR_FUSION_H_
diff --git a/services/vr/sensord/sensor_hal_thread.cpp b/services/vr/sensord/sensor_hal_thread.cpp
new file mode 100644
index 0000000..59b433f
--- /dev/null
+++ b/services/vr/sensord/sensor_hal_thread.cpp
@@ -0,0 +1,158 @@
+#include "sensor_hal_thread.h"
+
+#include <cutils/log.h>
+#include <dvr/performance_client_api.h>
+
+namespace android {
+namespace dvr {
+
+SensorHalThread::SensorHalThread(bool* out_success)
+ : shutting_down_(false),
+ paused_(false),
+ sensor_module_(nullptr),
+ sensor_device_(nullptr),
+ sensor_list_(nullptr) {
+ // Assume failure; we will change this to true on success.
+ *out_success = false;
+
+ // TODO(segal): module & device should be singletons.
+ int32_t err = hw_get_module_by_class(SENSORS_HARDWARE_MODULE_ID, "platform",
+ (hw_module_t const**)&sensor_module_);
+
+ if (err) {
+ ALOGE("couldn't load %s module (%s)", SENSORS_HARDWARE_MODULE_ID,
+ strerror(-err));
+ return;
+ }
+
+ err = sensors_open_1(&sensor_module_->common, &sensor_device_);
+ if (err) {
+ ALOGE("couldn't open device for module %s (%s)", SENSORS_HARDWARE_MODULE_ID,
+ strerror(-err));
+ return;
+ }
+
+ const int sensor_count =
+ sensor_module_->get_sensors_list(sensor_module_, &sensor_list_);
+
+ // Deactivate all of the sensors initially.
+ sensor_user_count_.resize(sensor_count, 0);
+ for (int i = 0; i < sensor_count; ++i) {
+ err = sensor_device_->activate(
+ reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
+ sensor_list_[i].handle, 0);
+
+ if (err) {
+ ALOGE("failed to deactivate sensor %d (%s)", i, strerror(-err));
+ return;
+ }
+ }
+
+ // At this point, we've successfully initialized everything.
+ *out_success = true;
+}
+
+SensorHalThread::~SensorHalThread() {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ shutting_down_ = true;
+ condition_.notify_one();
+ }
+
+ // Implicitly joins *thread_ if it's running.
+}
+
+void SensorHalThread::StartPolling(const EventConsumer& consumer) {
+ if (thread_) {
+    ALOGE("SensorHalThread::StartPolling() called but thread is already running!");
+ return;
+ }
+
+ thread_.reset(new std::thread([this, consumer] {
+ const int priority_error = dvrSetSchedulerClass(0, "sensors:high");
+ LOG_ALWAYS_FATAL_IF(
+ priority_error < 0,
+        "SensorHalThread::StartPolling: Failed to set scheduler class: %s",
+ strerror(-priority_error));
+
+ for (;;) {
+ for (;;) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (shutting_down_)
+ return;
+ if (!paused_)
+ break;
+ condition_.wait(lock);
+ }
+ const int kMaxEvents = 100;
+ sensors_event_t events[kMaxEvents];
+ ssize_t event_count = 0;
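+      // Retry the poll if it is interrupted by a signal (EINTR).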
+ do {
+ if (sensor_device_) {
+ event_count = sensor_device_->poll(
+ reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
+ events, kMaxEvents);
+ } else {
+ // When there is no sensor_device_, we still call the consumer at
+ // regular intervals in case mock poses are in use. Note that this
+ // will never be the case for production devices, but this helps
+ // during bringup.
+ usleep(5000);
+ }
+ } while (event_count == -EINTR);
+ if (event_count == kMaxEvents)
+ ALOGI("max events (%d) reached", kMaxEvents);
+
+ if (event_count >= 0) {
+ consumer(events, events + event_count);
+ } else {
+ ALOGE(
+ "SensorHalThread::StartPolling: Error while polling sensor: %s "
+ "(%zd)",
+ strerror(-event_count), -event_count);
+ }
+ }
+ }));
+}
+
+void SensorHalThread::SetPaused(bool is_paused) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ paused_ = is_paused;
+ condition_.notify_one();
+}
+
+void SensorHalThread::StartUsingSensor(const int sensor_index) {
+ if (sensor_index < 0 || sensor_index >= GetSensorCount()) {
+ ALOGE("StartUsingSensor(): sensor index %d out of range [0, %d)",
+ sensor_index, GetSensorCount());
+ return;
+ }
+
+ std::lock_guard<std::mutex> guard(user_count_mutex_);
+ if (sensor_user_count_[sensor_index]++ == 0) {
+ sensor_device_->activate(
+ reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
+ sensor_list_[sensor_index].handle, 1);
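+    // A delay of 0 requests the fastest supported rate (the HAL clamps it to
+    // the sensor's minimum delay).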
+ sensor_device_->setDelay(
+ reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
+ sensor_list_[sensor_index].handle, 0);
+ }
+}
+
+void SensorHalThread::StopUsingSensor(const int sensor_index) {
+ if (sensor_index < 0 || sensor_index >= GetSensorCount()) {
+ ALOGE("StopUsingSensor(): sensor index %d out of range [0, %d)",
+ sensor_index, GetSensorCount());
+ return;
+ }
+
+ std::lock_guard<std::mutex> guard(user_count_mutex_);
+ if (--sensor_user_count_[sensor_index] == 0) {
+ sensor_device_->activate(
+ reinterpret_cast<struct sensors_poll_device_t*>(sensor_device_),
+ sensor_list_[sensor_index].handle, 0);
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/sensor_hal_thread.h b/services/vr/sensord/sensor_hal_thread.h
new file mode 100644
index 0000000..9220757
--- /dev/null
+++ b/services/vr/sensord/sensor_hal_thread.h
@@ -0,0 +1,99 @@
+#ifndef ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
+#define ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
+
+#include <hardware/sensors.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "sensor_thread.h"
+
+namespace android {
+namespace dvr {
+
+// Manages initialization and polling of the sensor HAL. Polling is performed
+// continuously on a thread that passes events along to an arbitrary consumer.
+// All const member functions are thread-safe; otherwise, thread safety is noted
+// for each function.
+class SensorHalThread : public SensorThread {
+ public:
+  // Initializes the sensor HAL, but does not yet start polling (see StartPolling()
+ // below). Sets *out_success to true on success; otherwise, sets *out_success
+ // to false and logs an error.
+ explicit SensorHalThread(bool* out_success);
+
+ // Tells the polling thread to shut down if it's running, and waits for it to
+ // complete its polling loop.
+ ~SensorHalThread() override;
+
+ // Begins polling on the thread. The provided consumer will be notified of
+ // events. Event notification occurs on the polling thread.
+  // Calling StartPolling() more than once on an instance of SensorHalThread is
+ // invalid.
+ void StartPolling(const EventConsumer& consumer) override;
+
+ // Set whether the sensor polling thread is paused or not. This is useful
+  // while we need to support both 3DoF and 6DoF codepaths. The 3DoF codepath
+ // must be paused while the 6DoF codepath is using the IMU event stream.
+ void SetPaused(bool is_paused) override;
+
+ // Increase the number of users of the given sensor by one. Activates the
+ // sensor if it wasn't already active.
+ // Safe to call concurrently with any other functions in this class.
+ void StartUsingSensor(int sensor_index) override;
+
+ // Decrease the number of users of the given sensor by one. Deactivates the
+ // sensor if its usage count has dropped to zero.
+ // Safe to call concurrently with any other functions in this class.
+ void StopUsingSensor(int sensor_index) override;
+
+ // The number of sensors that are available. Returns 0 if initialization
+ // failed.
+ int GetSensorCount() const override {
+ return static_cast<int>(sensor_user_count_.size());
+ }
+
+ // The sensor type of the sensor at the given index.
+ int GetSensorType(int index) const override {
+ return sensor_list_[index].type;
+ }
+
+ private:
+ // The actual thread on which we consume events.
+ std::unique_ptr<std::thread> thread_;
+
+ // Mutex for access to shutting_down_ and paused_ members.
+ std::mutex mutex_;
+
+ // Condition for signaling pause/unpause to the thread.
+ std::condition_variable condition_;
+
+ // If this member is set to true, the thread will stop running at its next
+ // iteration. Set it only with mutex_ held, and signal condition_ when it changes.
+ bool shutting_down_;
+
+ // If this member is set to true, the thread will pause at its next
+ // iteration. Set it only with mutex_ held, and signal condition_ when it changes.
+ bool paused_;
+
+ // HAL access
+ struct sensors_module_t* sensor_module_;
+ sensors_poll_device_1_t* sensor_device_;
+
+ // Contiguous array of available sensors, owned by the sensor HAL.
+ const sensor_t* sensor_list_;
+
+ // Mutex that protects access to sensor_user_count_.data().
+ std::mutex user_count_mutex_;
+
+ // A count of how many users each sensor has. Protected by user_count_mutex_.
+ std::vector<int> sensor_user_count_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_SENSOR_HAL_THREAD_H_
diff --git a/services/vr/sensord/sensor_ndk_thread.cpp b/services/vr/sensord/sensor_ndk_thread.cpp
new file mode 100644
index 0000000..b5e16e7
--- /dev/null
+++ b/services/vr/sensord/sensor_ndk_thread.cpp
@@ -0,0 +1,257 @@
+#include "sensor_ndk_thread.h"
+
+#include <cutils/log.h>
+#include <dvr/performance_client_api.h>
+#include <string.h>
+#include <unistd.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+static constexpr int kLooperIdUser = 5;
+} // namespace
+
+SensorNdkThread::SensorNdkThread(bool* out_success)
+ : shutting_down_(false),
+ paused_(true),
+ thread_started_(false),
+ initialization_result_(false),
+ looper_(nullptr),
+ sensor_manager_(nullptr),
+ event_queue_(nullptr),
+ sensor_list_(nullptr),
+ sensor_count_(0) {
+ // Assume failure; we will change this to true on success.
+ *out_success = false;
+
+ // These structs are the same, but sanity check the sizes.
+ static_assert(sizeof(sensors_event_t) == sizeof(ASensorEvent),
+ "Error: sizeof(sensors_event_t) != sizeof(ASensorEvent)");
+
+ thread_.reset(new std::thread([this] {
+ const int priority_error = dvrSetSchedulerClass(0, "sensors:high");
+ LOG_ALWAYS_FATAL_IF(
+ priority_error < 0,
+ "SensorHalTread::StartPolling: Failed to set scheduler class: %s",
+ strerror(-priority_error));
+
+ // Start ALooper and initialize sensor access.
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ initialization_result_ = InitializeSensors();
+ thread_started_ = true;
+ init_condition_.notify_one();
+ if (!initialization_result_)
+ return;
+ }
+
+ EventConsumer consumer;
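+ // Polling loop: the inner loop (with mutex_ held) applies pending
+ // enable/disable requests, exits on shutdown, and waits on condition_ while
+ // paused; once unpaused, the outer loop polls the looper for sensor events
+ // and hands them to the consumer.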
+ for (;;) {
+ for (;;) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ UpdateSensorUse();
+ if (!consumer)
+ consumer = consumer_;
+ if (shutting_down_)
+ return;
+ if (!paused_)
+ break;
+ condition_.wait(lock);
+ }
+
+ constexpr int kMaxEvents = 100;
+ sensors_event_t events[kMaxEvents];
+ ssize_t event_count = 0;
+ if (looper_ && sensor_manager_) {
+ int poll_fd, poll_events;
+ void* poll_source;
+ // Poll for events.
+ int ident = ALooper_pollAll(-1, &poll_fd, &poll_events, &poll_source);
+
+ if (ident != kLooperIdUser)
+ continue;
+
+ ASensorEvent* event = reinterpret_cast<ASensorEvent*>(&events[0]);
+ event_count =
+ ASensorEventQueue_getEvents(event_queue_, event, kMaxEvents);
+
+ if (event_count == 0) {
+ ALOGE("Detected sensor service failure, restarting sensors");
+ // This happens when sensorservice has died and restarted. To avoid
+ // spinning we need to restart the sensor access.
+ DestroySensors();
+ InitializeSensors();
+ }
+ } else {
+ // When sensor access is unavailable, we still call the consumer at
+ // regular intervals in case mock poses are in use. Note that this
+ // will never be the case for production devices, but this helps
+ // during bringup.
+ usleep(5000);
+ }
+ if (event_count == kMaxEvents)
+ ALOGI("max events (%d) reached", kMaxEvents);
+
+ if (event_count >= 0) {
+ consumer(events, events + event_count);
+ } else {
+ ALOGE(
+ "SensorNdkThread::StartPolling: Error while polling sensor: %s "
+ "(%zd)",
+ strerror(-event_count), -event_count);
+ }
+ }
+
+ // About to exit sensor thread, destroy sensor objects.
+ DestroySensors();
+ }));
+
+ // Wait for thread to startup and initialize sensors so that we know whether
+ // it succeeded.
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ while (!thread_started_)
+ init_condition_.wait(lock);
+ }
+
+ // Report the initialization result (success or failure) to the caller.
+ *out_success = initialization_result_;
+}
+
+SensorNdkThread::~SensorNdkThread() {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ shutting_down_ = true;
+ condition_.notify_one();
+ }
+ // Wake the looper only after shutting_down_ is set, so the polling thread
+ // cannot block again in ALooper_pollAll() and miss the shutdown request.
+ if (looper_)
+ ALooper_wake(looper_);
+
+ thread_->join();
+}
+
+bool SensorNdkThread::InitializeSensors() {
+ looper_ = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+ if (!looper_) {
+ ALOGE("Failed to create ALooper.");
+ return false;
+ }
+
+ // Get the sensor manager instance.
+ sensor_manager_ = ASensorManager_getInstanceForPackage(nullptr);
+ if (!sensor_manager_) {
+ ALOGE("Failed to create ASensorManager.");
+ return false;
+ }
+
+ event_queue_ = ASensorManager_createEventQueue(
+ sensor_manager_, looper_, kLooperIdUser, nullptr, nullptr);
+ if (!event_queue_) {
+ ALOGE("Failed to create sensor EventQueue.");
+ return false;
+ }
+
+ sensor_count_ = ASensorManager_getSensorList(sensor_manager_, &sensor_list_);
+ ALOGI("Sensor count %d", sensor_count_);
+
+ sensor_user_count_.resize(sensor_count_, 0);
+
+ // To recover from sensorservice restart, enable the sensors that are already
+ // requested.
+ for (size_t sensor_index = 0; sensor_index < sensor_user_count_.size();
+ ++sensor_index) {
+ if (sensor_user_count_[sensor_index] > 0) {
+ int result = ASensorEventQueue_registerSensor(
+ event_queue_, sensor_list_[sensor_index], 0, 0);
+ ALOGE_IF(result < 0, "ASensorEventQueue_registerSensor failed: %d",
+ result);
+ }
+ }
+
+ return true;
+}
+
+void SensorNdkThread::DestroySensors() {
+ for (size_t sensor_index = 0; sensor_index < sensor_user_count_.size();
+ ++sensor_index) {
+ if (sensor_user_count_[sensor_index] > 0) {
+ ASensorEventQueue_disableSensor(event_queue_, sensor_list_[sensor_index]);
+ }
+ }
+ ASensorManager_destroyEventQueue(sensor_manager_, event_queue_);
+}
+
+void SensorNdkThread::UpdateSensorUse() {
+ if (!enable_sensors_.empty()) {
+ for (int sensor_index : enable_sensors_) {
+ if (sensor_user_count_[sensor_index]++ == 0) {
+ int result = ASensorEventQueue_registerSensor(
+ event_queue_, sensor_list_[sensor_index], 0, 0);
+ ALOGE_IF(result < 0, "ASensorEventQueue_registerSensor failed: %d",
+ result);
+ }
+ }
+ enable_sensors_.clear();
+ }
+
+ if (!disable_sensors_.empty()) {
+ for (int sensor_index : disable_sensors_) {
+ if (--sensor_user_count_[sensor_index] == 0) {
+ int result = ASensorEventQueue_disableSensor(
+ event_queue_, sensor_list_[sensor_index]);
+ ALOGE_IF(result < 0, "ASensorEventQueue_disableSensor failed: %d",
+ result);
+ }
+ }
+ disable_sensors_.clear();
+ }
+}
+
+void SensorNdkThread::StartPolling(const EventConsumer& consumer) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (consumer_) {
+ ALOGE("Already started sensor thread.");
+ return;
+ }
+ consumer_ = consumer;
+ }
+ SetPaused(false);
+}
+
+void SensorNdkThread::SetPaused(bool is_paused) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ // SetPaused() may be called before StartPolling(), so make sure we already
+ // have an event consumer; otherwise, defer until StartPolling() is called,
+ // which unpauses the thread itself.
+ if (!consumer_)
+ return;
+ paused_ = is_paused;
+ condition_.notify_one();
+ ALooper_wake(looper_);
+}
+
+void SensorNdkThread::StartUsingSensor(const int sensor_index) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (sensor_index < 0 || sensor_index >= sensor_count_) {
+ ALOGE("StartUsingSensor(): sensor index %d out of range [0, %d)",
+ sensor_index, sensor_count_);
+ return;
+ }
+
+ enable_sensors_.push_back(sensor_index);
+ ALooper_wake(looper_);
+}
+
+void SensorNdkThread::StopUsingSensor(const int sensor_index) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (sensor_index < 0 || sensor_index >= sensor_count_) {
+ ALOGE("StopUsingSensor(): sensor index %d out of range [0, %d)",
+ sensor_index, sensor_count_);
+ return;
+ }
+
+ disable_sensors_.push_back(sensor_index);
+ ALooper_wake(looper_);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/sensor_ndk_thread.h b/services/vr/sensord/sensor_ndk_thread.h
new file mode 100644
index 0000000..eb3cf9d
--- /dev/null
+++ b/services/vr/sensord/sensor_ndk_thread.h
@@ -0,0 +1,124 @@
+#ifndef ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
+#define ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
+
+#include <android/sensor.h>
+#include <hardware/sensors.h>
+
+#include <atomic>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "sensor_thread.h"
+
+namespace android {
+namespace dvr {
+
+// Manages initialization and polling of the sensor data. Polling is performed
+// continuously on a thread that passes events along to an arbitrary consumer.
+// All const member functions are thread-safe; otherwise, thread safety is noted
+// for each function.
+class SensorNdkThread : public SensorThread {
+ public:
+ // Initializes the sensor access, but does not yet start polling (see
+ // StartPolling() below). Sets *out_success to true on success; otherwise,
+ // sets *out_success to false and logs an error.
+ explicit SensorNdkThread(bool* out_success);
+
+ // Tells the polling thread to shut down if it's running, and waits for it to
+ // complete its polling loop.
+ ~SensorNdkThread() override;
+
+ // Begins polling on the thread. The provided consumer will be notified of
+ // events. Event notification occurs on the polling thread.
+ // Calling StartPolling() more than once on an instance of SensorNdkThread is
+ // invalid.
+ void StartPolling(const EventConsumer& consumer) override;
+
+ // Sets whether the sensor polling thread is paused. This is needed while we
+ // support both 3DoF and 6DoF codepaths: the 3DoF codepath must be paused
+ // while the 6DoF codepath is using the IMU event stream.
+ void SetPaused(bool is_paused) override;
+
+ // Increase the number of users of the given sensor by one. Activates the
+ // sensor if it wasn't already active.
+ // Safe to call concurrently with any other functions in this class.
+ void StartUsingSensor(int sensor_index) override;
+
+ // Decrease the number of users of the given sensor by one. Deactivates the
+ // sensor if its usage count has dropped to zero.
+ // Safe to call concurrently with any other functions in this class.
+ void StopUsingSensor(int sensor_index) override;
+
+ // The number of sensors that are available. Returns 0 if initialization
+ // failed.
+ int GetSensorCount() const override { return sensor_count_; }
+
+ // The sensor type of the sensor at the given index.
+ int GetSensorType(int index) const override {
+ return ASensor_getType(sensor_list_[index]);
+ }
+
+ private:
+ // Initialize ALooper and sensor access on the thread.
+ // Returns true on success, false on failure.
+ bool InitializeSensors();
+
+ // Destroy sensor access.
+ void DestroySensors();
+
+ // Start or stop requested sensors from the thread. Class mutex must already
+ // be locked.
+ void UpdateSensorUse();
+
+ // The actual thread on which we consume events.
+ std::unique_ptr<std::thread> thread_;
+
+ // Mutex for access to shutting_down_ and paused_ members.
+ std::mutex mutex_;
+
+ // Condition for signaling pause/unpause to the thread.
+ std::condition_variable condition_;
+
+ // Condition for signaling thread initialization.
+ std::condition_variable init_condition_;
+
+ // If this member is set to true, the thread will stop running at its next
+ // iteration. Set it only with mutex_ held, and signal condition_ when it changes.
+ bool shutting_down_;
+
+ // If this member is set to true, the thread will pause at its next
+ // iteration. Set it only with mutex_ held, and signal condition_ when it changes.
+ bool paused_;
+
+ // Thread start handshake to verify that sensor initialization succeeded.
+ bool thread_started_;
+
+ // Initialization result (true for success).
+ bool initialization_result_;
+
+ // The callback.
+ EventConsumer consumer_;
+
+ // Sensor access
+ ALooper* looper_;
+ ASensorManager* sensor_manager_;
+ ASensorEventQueue* event_queue_;
+
+ // Sensor list from NDK.
+ ASensorList sensor_list_;
+ int sensor_count_;
+
+ // Requests to the sensor thread to enable or disable given sensors.
+ std::vector<int> enable_sensors_;
+ std::vector<int> disable_sensors_;
+
+ // A count of how many users each sensor has. Protected by mutex_.
+ std::vector<int> sensor_user_count_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_SENSOR_NDK_THREAD_H_
diff --git a/services/vr/sensord/sensor_service.cpp b/services/vr/sensord/sensor_service.cpp
new file mode 100644
index 0000000..4396851
--- /dev/null
+++ b/services/vr/sensord/sensor_service.cpp
@@ -0,0 +1,187 @@
+#include "sensor_service.h"
+
+#include <cutils/log.h>
+#include <hardware/sensors.h>
+#include <poll.h>
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/sensor-ipc.h>
+#include <time.h>
+
+using android::pdx::default_transport::Endpoint;
+
+namespace android {
+namespace dvr {
+
+SensorService::SensorService(SensorThread* sensor_thread)
+ : BASE("SensorService", Endpoint::Create(DVR_SENSOR_SERVICE_CLIENT)),
+ sensor_thread_(sensor_thread) {
+ sensor_clients_.resize(sensor_thread_->GetSensorCount());
+
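+ // Build a reverse map from sensor type to sensor index; if several sensors
+ // report the same type, the highest index wins.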
+ for (int i = 0; i < sensor_thread_->GetSensorCount(); ++i)
+ type_to_sensor_[sensor_thread_->GetSensorType(i)] = i;
+}
+
+std::shared_ptr<pdx::Channel> SensorService::OnChannelOpen(pdx::Message& msg) {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ const pdx::MessageInfo& info = msg.GetInfo();
+
+ std::shared_ptr<SensorClient> client(
+ new SensorClient(*this, info.pid, info.cid));
+ AddClient(client);
+ return client;
+}
+
+void SensorService::OnChannelClose(pdx::Message& /*msg*/,
+ const std::shared_ptr<pdx::Channel>& chan) {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ auto client = std::static_pointer_cast<SensorClient>(chan);
+ if (!client) {
+ ALOGW("WARNING: SensorClient was NULL!\n");
+ return;
+ }
+ RemoveClient(client);
+}
+
+void SensorService::AddClient(const std::shared_ptr<SensorClient>& client) {
+ clients_.push_front(client);
+}
+
+void SensorService::RemoveClient(const std::shared_ptr<SensorClient>& client) {
+ // First remove it from the clients associated with its sensor, if any.
+ RemoveSensorClient(client.get());
+
+ // Finally, remove it from the list of clients we're aware of, and decrease
+ // its reference count.
+ clients_.remove(client);
+}
+
+void SensorService::RemoveSensorClient(SensorClient* client) {
+ if (!client->has_sensor())
+ return;
+
+ std::forward_list<SensorClient*>& sensor_clients =
+ sensor_clients_[client->sensor()];
+ sensor_clients.remove(client);
+ sensor_thread_->StopUsingSensor(client->sensor());
+
+ client->unset_sensor();
+}
+
+int SensorService::HandleMessage(pdx::Message& msg) {
+ int ret = 0;
+ const pdx::MessageInfo& info = msg.GetInfo();
+ switch (info.op) {
+ case DVR_SENSOR_START: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ // Associate this channel with the indicated sensor,
+ // unless it already has an association. In that case,
+ // fail.
+ auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
+ if (client->has_sensor())
+ REPLY_ERROR(msg, EINVAL, error);
+ int sensor_type;
+ if (msg.Read(&sensor_type, sizeof(sensor_type)) <
+ (ssize_t)sizeof(sensor_type))
+ REPLY_ERROR(msg, EIO, error);
+
+ // Find the sensor of the requested type.
+ if (type_to_sensor_.find(sensor_type) == type_to_sensor_.end())
+ REPLY_ERROR(msg, EINVAL, error);
+ const int sensor_index = type_to_sensor_[sensor_type];
+
+ sensor_clients_[sensor_index].push_front(client.get());
+ client->set_sensor(sensor_index);
+ sensor_thread_->StartUsingSensor(sensor_index);
+
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_SENSOR_STOP: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
+ if (!client->has_sensor())
+ REPLY_ERROR(msg, EINVAL, error);
+ RemoveSensorClient(client.get());
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ case DVR_SENSOR_POLL: {
+ std::lock_guard<std::mutex> guard(mutex_);
+ auto client = std::static_pointer_cast<SensorClient>(msg.GetChannel());
+
+ // Package up the events we've got for this client. Number of
+ // events, followed by 0 or more sensor events, popped from
+ // this client's queue until it's empty.
+ int num_events = client->EventCount();
+ sensors_event_t out_buffer[num_events];
+ client->WriteEvents(out_buffer);
+ struct iovec svec[] = {
+ {.iov_base = &num_events, .iov_len = sizeof(num_events)},
+ {.iov_base = out_buffer,
+ .iov_len = num_events * sizeof(sensors_event_t)},
+ };
+ ret = msg.WriteVector(svec, 2);
+ int expected_size = sizeof(int) + num_events * sizeof(sensors_event_t);
+ if (ret < expected_size) {
+ ALOGI("error: msg.WriteVector wrote too little.");
+ REPLY_ERROR(msg, EIO, error);
+ }
+ REPLY_SUCCESS(msg, 0, error);
+ }
+ default:
+ // Do not lock mutex_ here, because this may call the On*() handlers,
+ // which will lock the mutex themselves.
+ ret = Service::HandleMessage(msg);
+ break;
+ }
+error:
+ return ret;
+}
+
+void SensorService::EnqueueEvents(const sensors_event_t* begin_events,
+ const sensors_event_t* end_events) {
+ std::lock_guard<std::mutex> guard(mutex_);
+
+ // Put the sensor values we got in the circular queue for each client that
+ // cares about the given event.
+ for (const sensors_event_t* event = begin_events; event != end_events;
+ ++event) {
+ const auto it = type_to_sensor_.find(event->type);
+ if (it == type_to_sensor_.end())
+ continue; // Ignore events whose type does not map to a known sensor.
+ for (const auto& client : sensor_clients_[it->second]) {
+ client->EnqueueEvent(*event);
+ }
+ }
+}
+
+void SensorClient::WriteEvents(sensors_event_t* buffer) {
+ while (!event_queue_.Empty()) {
+ *buffer = *(event_queue_.Top());
+ event_queue_.Pop();
+ ++buffer;
+ }
+}
+
+void SensorClient::CircularQ::Push(const sensors_event_t& event) {
+ if (count_ != 0 && head_ == tail_) {
+ Pop(); // If we're full, throw away the oldest event.
+ }
+ events_[head_] = event;
+ head_ = (head_ + 1) % kCqSize;
+ ++count_;
+}
+
+const sensors_event_t* SensorClient::CircularQ::Top() const {
+ if (count_ == 0)
+ return nullptr;
+ return &events_[tail_];
+}
+
+void SensorClient::CircularQ::Pop() {
+ if (count_ == 0)
+ return;
+ tail_ = (tail_ + 1) % kCqSize;
+ --count_;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/sensor_service.h b/services/vr/sensord/sensor_service.h
new file mode 100644
index 0000000..c35fada
--- /dev/null
+++ b/services/vr/sensord/sensor_service.h
@@ -0,0 +1,132 @@
+#ifndef ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
+#define ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
+
+#include <forward_list>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+
+#include <pdx/service.h>
+#include <pthread.h>
+
+#include "sensor_thread.h"
+
+namespace android {
+namespace dvr {
+
+class SensorClient;
+
+/*
+ * SensorService implements the sensor service over ServiceFS.
+ * The sensor service provides an interface to one sensor over
+ * each channel.
+ */
+class SensorService : public pdx::ServiceBase<SensorService> {
+ public:
+ int HandleMessage(pdx::Message& msg) override;
+ std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& msg) override;
+ void OnChannelClose(pdx::Message& msg,
+ const std::shared_ptr<pdx::Channel>& chan) override;
+
+ // Enqueue the events in [begin_events, end_events) onto any clients that care
+ // about them.
+ // Safe to call concurrently with any other public member functions.
+ void EnqueueEvents(const sensors_event_t* begin_events,
+ const sensors_event_t* end_events);
+
+ private:
+ friend BASE;
+
+ // Initializes the service. Keeps a reference to sensor_thread, which must be
+ // non-null.
+ explicit SensorService(SensorThread* sensor_thread);
+
+ // The abstraction around the sensor HAL.
+ SensorThread* sensor_thread_;
+
+ // All of the clients we are connected to. This is the one place in this class
+ // where we keep the SensorClient instances alive using shared_ptr instances.
+ std::forward_list<std::shared_ptr<SensorClient>> clients_;
+
+ // Map types back to sensor indexes.
+ std::unordered_map<int, int> type_to_sensor_;
+ // For each sensor, the list of clients that are connected to it.
+ // Every entry in here must also be in clients_, so that its reference count
+ // remains positive.
+ std::vector<std::forward_list<SensorClient*>> sensor_clients_;
+
+ // Protects access to all member variables.
+ std::mutex mutex_;
+
+ // None of the following functions is thread-safe; callers must lock mutex_
+ // before calling one.
+ void AddClient(const std::shared_ptr<SensorClient>& client);
+ void RemoveClient(const std::shared_ptr<SensorClient>& client);
+ // Dissociate the indicated client from its sensor, if it has one; otherwise
+ // do nothing.
+ void RemoveSensorClient(SensorClient* client);
+
+ SensorService(const SensorService&) = delete;
+ void operator=(const SensorService&) = delete;
+};
+
+/*
+ * SensorClient manages the service-side per-client context for each client
+ * using the service.
+ */
+class SensorClient : public pdx::Channel {
+ public:
+ SensorClient(SensorService& /*service*/, int /*pid*/, int /*cid*/)
+ : sensor_index_(-1), has_sensor_index_(false) {}
+
+ bool has_sensor() const { return has_sensor_index_; }
+ int sensor() const { return sensor_index_; }
+ void set_sensor(int sensor) {
+ sensor_index_ = sensor;
+ has_sensor_index_ = true;
+ }
+ void unset_sensor() {
+ sensor_index_ = -1;
+ has_sensor_index_ = false;
+ }
+
+ int EventCount() const { return event_queue_.Count(); }
+
+ // Push an event onto our queue.
+ void EnqueueEvent(const sensors_event_t& event) { event_queue_.Push(event); }
+
+ // Write all the events in our queue (and clear it) to the supplied
+ // buffer. Buffer must be large enough.
+ void WriteEvents(sensors_event_t* buffer);
+
+ private:
+ SensorClient(const SensorClient&) = delete;
+ SensorClient& operator=(const SensorClient&) = delete;
+
+ int sensor_index_ = -1;
+ bool has_sensor_index_ = false;
+ // Circular queue holding events for this client's sensor that the client
+ // has not yet retrieved.
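+ // When the queue is full, Push() discards the oldest event to make room.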
+ class CircularQ {
+ public:
+ static const int kCqSize = 10;
+ CircularQ() : head_(0), tail_(0), count_(0) {}
+ ~CircularQ() {}
+ void Push(const sensors_event_t& event);
+ const sensors_event_t* Top() const;
+ void Pop();
+ bool Empty() const { return count_ == 0; }
+ int Count() const { return count_; }
+
+ private:
+ sensors_event_t events_[kCqSize];
+ int head_ = 0;
+ int tail_ = 0;
+ int count_ = 0;
+ };
+ CircularQ event_queue_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_SENSOR_SERVICE_H_
diff --git a/services/vr/sensord/sensor_thread.cpp b/services/vr/sensord/sensor_thread.cpp
new file mode 100644
index 0000000..01e4e7e
--- /dev/null
+++ b/services/vr/sensord/sensor_thread.cpp
@@ -0,0 +1,9 @@
+#include "sensor_thread.h"
+
+namespace android {
+namespace dvr {
+
+SensorThread::~SensorThread() {}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/sensord/sensor_thread.h b/services/vr/sensord/sensor_thread.h
new file mode 100644
index 0000000..46aba17
--- /dev/null
+++ b/services/vr/sensord/sensor_thread.h
@@ -0,0 +1,58 @@
+#ifndef ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
+#define ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
+
+#include <hardware/sensors.h>
+
+#include <functional>
+
+namespace android {
+namespace dvr {
+
+// Manages initialization and polling of the sensor data. Polling is performed
+// continuously on a thread that passes events along to an arbitrary consumer.
+// All const member functions are thread-safe; otherwise, thread safety is noted
+// for each function.
+class SensorThread {
+ public:
+ // The type of the callback used to deliver new events to the consumer.
+ // [events_begin, events_end) forms a contiguous array of events.
+ using EventConsumer = std::function<void(const sensors_event_t* events_begin,
+ const sensors_event_t* events_end)>;
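+ // For example (sketch), a consumer that only inspects the batch size:
+ //   SensorThread::EventConsumer consumer =
+ //       [](const sensors_event_t* begin, const sensors_event_t* end) {
+ //         const ptrdiff_t count = end - begin;
+ //         (void)count;  // Process the |count| events here.
+ //       };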
+
+ // Tells the polling thread to shut down if it's running, and waits for it to
+ // complete its polling loop.
+ virtual ~SensorThread();
+
+ // Begins polling on the thread. The provided consumer will be notified of
+ // events. Event notification occurs on the polling thread.
+ // Calling StartPolling() more than once on an instance of SensorThread is
+ // invalid.
+ virtual void StartPolling(const EventConsumer& consumer) = 0;
+
+ // Sets whether the sensor polling thread is paused. This is needed while we
+ // support both 3DoF and 6DoF codepaths: the 3DoF codepath must be paused
+ // while the 6DoF codepath is using the IMU event stream.
+ virtual void SetPaused(bool is_paused) = 0;
+
+ // Increase the number of users of the given sensor by one. Activates the
+ // sensor if it wasn't already active.
+ // Safe to call concurrently with any other functions in this class.
+ virtual void StartUsingSensor(int sensor_index) = 0;
+
+ // Decrease the number of users of the given sensor by one. Deactivates the
+ // sensor if its usage count has dropped to zero.
+ // Safe to call concurrently with any other functions in this class.
+ virtual void StopUsingSensor(int sensor_index) = 0;
+
+ // The number of sensors that are available. Returns 0 if initialization
+ // failed.
+ virtual int GetSensorCount() const = 0;
+
+ // Get the sensor type for the sensor at the given index.
+ virtual int GetSensorType(int index) const = 0;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SENSORD_SENSOR_THREAD_H_
diff --git a/services/vr/sensord/sensord.cpp b/services/vr/sensord/sensord.cpp
new file mode 100644
index 0000000..0a75318
--- /dev/null
+++ b/services/vr/sensord/sensord.cpp
@@ -0,0 +1,87 @@
+#define LOG_TAG "sensord"
+
+#include <string.h>
+#include <sys/stat.h>
+
+#include <binder/ProcessState.h>
+
+#include <dvr/performance_client_api.h>
+#include <pdx/default_transport/service_dispatcher.h>
+#include <private/dvr/pose-ipc.h>
+#include <private/dvr/sensor-ipc.h>
+
+#include "pose_service.h"
+#include "sensor_hal_thread.h"
+#include "sensor_ndk_thread.h"
+#include "sensor_service.h"
+#include "sensor_thread.h"
+
+using android::dvr::PoseService;
+using android::dvr::SensorHalThread;
+using android::dvr::SensorNdkThread;
+using android::dvr::SensorService;
+using android::dvr::SensorThread;
+using android::pdx::Service;
+using android::pdx::ServiceDispatcher;
+
+int main(int, char**) {
+ ALOGI("Starting up...");
+
+ // We need to be able to create endpoints with full perms.
+ umask(0000);
+
+ android::ProcessState::self()->startThreadPool();
+
+ bool sensor_thread_succeeded = false;
+#ifdef SENSORD_USES_HAL
+ std::unique_ptr<SensorThread> sensor_thread(
+ new SensorHalThread(&sensor_thread_succeeded));
+#else
+ std::unique_ptr<SensorThread> sensor_thread(
+ new SensorNdkThread(&sensor_thread_succeeded));
+#endif
+
+ if (!sensor_thread_succeeded) {
+ ALOGE("ERROR: Failed to initialize SensorThread! No 3DoF!\n");
+ }
+
+ if (sensor_thread->GetSensorCount() == 0)
+ ALOGW("No sensors found\n");
+
+ auto sensor_service = SensorService::Create(sensor_thread.get());
+ if (!sensor_service) {
+ ALOGE("TERMINATING: failed to create SensorService!!!\n");
+ return -1;
+ }
+
+ auto pose_service = PoseService::Create(sensor_thread.get());
+ if (!pose_service) {
+ ALOGE("TERMINATING: failed to create PoseService!!!\n");
+ return -1;
+ }
+
+ std::unique_ptr<ServiceDispatcher> dispatcher =
+ android::pdx::default_transport::ServiceDispatcher::Create();
+ if (!dispatcher) {
+ ALOGE("TERMINATING: failed to create ServiceDispatcher!!!\n");
+ return -1;
+ }
+
+ dispatcher->AddService(sensor_service);
+ dispatcher->AddService(pose_service);
+
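+ // Fan incoming sensor events out to both services. The consumer runs on the
+ // sensor polling thread.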
+ sensor_thread->StartPolling([sensor_service, pose_service](
+ const sensors_event_t* events_begin, const sensors_event_t* events_end) {
+ sensor_service->EnqueueEvents(events_begin, events_end);
+ pose_service->HandleEvents(events_begin, events_end);
+ });
+
+ const int priority_error = dvrSetSchedulerClass(0, "sensors:low");
+ LOG_ALWAYS_FATAL_IF(priority_error < 0,
+ "SensorService: Failed to set scheduler class: %s",
+ strerror(-priority_error));
+
+ int ret = dispatcher->EnterDispatchLoop();
+ ALOGI("Dispatch loop exited because: %s\n", strerror(-ret));
+
+ return ret;
+}
diff --git a/services/vr/sensord/sensord.rc b/services/vr/sensord/sensord.rc
new file mode 100644
index 0000000..0311474
--- /dev/null
+++ b/services/vr/sensord/sensord.rc
@@ -0,0 +1,5 @@
+service sensord /system/bin/sensord
+ class core
+ user system
+ group system camera sdcard_rw
+ cpuset /system
diff --git a/services/vr/sensord/test/poselatencytest.cpp b/services/vr/sensord/test/poselatencytest.cpp
new file mode 100644
index 0000000..615fc75
--- /dev/null
+++ b/services/vr/sensord/test/poselatencytest.cpp
@@ -0,0 +1,87 @@
+#include <dvr/pose_client.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <vector>
+
+// Creates a pose client and polls it 100 times for new data. Prints timestamp
+// and latency. Latency is calculated based on the difference between the
+// current clock and the timestamp from the Myriad, which has been synced
+// to QC time. Note that there is some clock drift and clocks are only synced
+// when the FW is loaded.
+int main(int /*argc*/, char** /*argv*/) {
+ DvrPose* pose_client = dvrPoseCreate();
+ if (pose_client == nullptr) {
+ printf("Unable to create pose client\n");
+ return -1;
+ }
+
+ DvrPoseAsync last_state;
+ DvrPoseAsync current_state;
+ last_state.timestamp_ns = 0;
+ current_state.timestamp_ns = 0;
+
+ double avg_latency = 0;
+ double min_latency = static_cast<double>(UINT64_MAX);
+ double max_latency = 0;
+ double std = 0;
+ std::vector<uint64_t> latency;
+
+ int num_samples = 100;
+ for (int i = 0; i < num_samples; ++i) {
+ while (last_state.timestamp_ns == current_state.timestamp_ns) {
+ uint32_t vsync_count = dvrPoseGetVsyncCount(pose_client);
+ int err = dvrPoseGet(pose_client, vsync_count, &current_state);
+ if (err) {
+ printf("Error polling pose: %d\n", err);
+ dvrPoseDestroy(pose_client);
+ return err;
+ }
+ }
+ struct timespec timespec;
+ uint64_t timestamp, diff;
+ clock_gettime(CLOCK_MONOTONIC, &timespec);
+ timestamp =
+ ((uint64_t)timespec.tv_sec * 1000000000) + (uint64_t)timespec.tv_nsec;
+ if (timestamp < current_state.timestamp_ns) {
+ printf("ERROR: excessive clock drift detected, reload FW to resync\n");
+ return -1;
+ }
+ diff = timestamp - current_state.timestamp_ns;
+ printf("%02d) ts = %" PRIu64 " time = %" PRIu64 "\n", i + 1,
+ current_state.timestamp_ns, timestamp);
+ printf("\tlatency: %" PRIu64 " ns (%" PRIu64 " us) (%" PRIu64 " ms)\n",
+ diff, diff / 1000, diff / 1000000);
+
+ avg_latency += diff;
+ if (diff < min_latency) {
+ min_latency = diff;
+ }
+ if (diff > max_latency) {
+ max_latency = diff;
+ }
+ latency.push_back(diff);
+
+ last_state = current_state;
+ }
+ avg_latency /= num_samples;
+ for (unsigned int i = 0; i < latency.size(); i++) {
+ std += pow(latency[i] - avg_latency, 2);
+ }
+ std /= latency.size();
+ std = sqrt(std);
+
+ printf("\n************************\n");
+ printf("Avg latency = %lf ns (%lf us) (%lf ms)\n", avg_latency,
+ avg_latency / 1000, avg_latency / 1000000);
+ printf("Max latency = %lf ns (%lf us) (%lf ms)\n", max_latency,
+ max_latency / 1000, max_latency / 1000000);
+ printf("Min latency = %lf ns (%lf us) (%lf ms)\n", min_latency,
+ min_latency / 1000, min_latency / 1000000);
+ printf("Standard dev = %lf ns (%lf us) (%lf ms)\n", std, std / 1000,
+ std / 1000000);
+ printf("\n************************\n");
+ return 0;
+}
diff --git a/services/vr/virtual_touchpad/Android.mk b/services/vr/virtual_touchpad/Android.mk
new file mode 100644
index 0000000..4224aaa
--- /dev/null
+++ b/services/vr/virtual_touchpad/Android.mk
@@ -0,0 +1,76 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+
+# Touchpad implementation.
+
+src := \
+ EvdevInjector.cpp \
+ VirtualTouchpad.cpp
+
+shared_libs := \
+ libbase
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(src)
+LOCAL_SHARED_LIBRARIES := $(shared_libs)
+LOCAL_CPPFLAGS += -std=c++11
+LOCAL_CFLAGS += -DLOG_TAG=\"VrVirtualTouchpad\"
+LOCAL_MODULE := libvirtualtouchpad
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_STATIC_LIBRARY)
+
+
+# Touchpad unit tests.
+
+test_src_files := \
+ tests/VirtualTouchpad_test.cpp
+
+static_libs := \
+ libbase \
+ libcutils \
+ libvirtualtouchpad
+
+$(foreach file,$(test_src_files), \
+ $(eval include $(CLEAR_VARS)) \
+ $(eval LOCAL_SRC_FILES := $(file)) \
+ $(eval LOCAL_STATIC_LIBRARIES := $(static_libs)) \
+ $(eval LOCAL_SHARED_LIBRARIES := $(shared_libs)) \
+ $(eval LOCAL_CPPFLAGS += -std=c++11) \
+ $(eval LOCAL_LDLIBS := -llog) \
+ $(eval LOCAL_MODULE := $(notdir $(file:%.cpp=%))) \
+ $(eval LOCAL_MODULE_TAGS := optional) \
+ $(eval LOCAL_CXX_STL := libc++_static) \
+ $(eval include $(BUILD_NATIVE_TEST)) \
+)
+
+
+# Service.
+
+src := \
+ main.cpp \
+ VirtualTouchpadService.cpp \
+ aidl/android/dvr/VirtualTouchpadService.aidl
+
+static_libs := \
+ libcutils \
+ libvirtualtouchpad
+
+shared_libs := \
+ libbase \
+ libbinder \
+ libutils
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(src)
+LOCAL_STATIC_LIBRARIES := $(static_libs)
+LOCAL_SHARED_LIBRARIES := $(shared_libs)
+LOCAL_CPPFLAGS += -std=c++11
+LOCAL_CFLAGS += -DLOG_TAG=\"VrVirtualTouchpad\"
+LOCAL_LDLIBS := -llog
+LOCAL_MODULE := virtual_touchpad
+LOCAL_MODULE_TAGS := optional
+LOCAL_INIT_RC := virtual_touchpad.rc
+LOCAL_MULTILIB := 64
+LOCAL_CXX_STL := libc++_static
+include $(BUILD_EXECUTABLE)
diff --git a/services/vr/virtual_touchpad/EvdevInjector.cpp b/services/vr/virtual_touchpad/EvdevInjector.cpp
new file mode 100644
index 0000000..be20c6c
--- /dev/null
+++ b/services/vr/virtual_touchpad/EvdevInjector.cpp
@@ -0,0 +1,311 @@
+#include "EvdevInjector.h"
+
+#include <cutils/log.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/input.h>
+#include <string.h>
+#include <sys/fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+namespace android {
+namespace dvr {
+
+int EvdevInjector::UInput::Open() {
+ errno = 0;
+ fd_.reset(open("/dev/uinput", O_WRONLY | O_NONBLOCK));
+ if (fd_.get() < 0) {
+ ALOGE("couldn't open uinput (r=%d errno=%d)", fd_.get(), errno);
+ }
+ return errno;
+}
+
+int EvdevInjector::UInput::Close() {
+ errno = 0;
+ fd_.reset();
+ return errno;
+}
+
+int EvdevInjector::UInput::Write(const void* buf, size_t count) {
+ ALOGV("UInput::Write(%zu, %02X...)", count, *static_cast<const char*>(buf));
+ errno = 0;
+ ssize_t r = write(fd_.get(), buf, count);
+ if (r != static_cast<ssize_t>(count)) {
+ ALOGE("write(%zu) failed (r=%zd errno=%d)", count, r, errno);
+ }
+ return errno;
+}
+
+int EvdevInjector::UInput::IoctlSetInt(int request, int value) {
+ ALOGV("UInput::IoctlSetInt(0x%X, 0x%X)", request, value);
+ errno = 0;
+ if (const int status = ioctl(fd_.get(), request, value)) {
+ ALOGE("ioctl(%d, 0x%X, 0x%X) failed (r=%d errno=%d)", fd_.get(), request,
+ value, status, errno);
+ }
+ return errno;
+}
+
+int EvdevInjector::UInput::IoctlVoid(int request) {
+ ALOGV("UInput::IoctlVoid(0x%X)", request);
+ errno = 0;
+ if (const int status = ioctl(fd_.get(), request)) {
+ ALOGE("ioctl(%d, 0x%X) failed (r=%d errno=%d)", fd_.get(), request, status,
+ errno);
+ }
+ return errno;
+}
+
+void EvdevInjector::Close() {
+ uinput_->Close();
+ state_ = State::CLOSED;
+}
+
+int EvdevInjector::ConfigureBegin(const char* device_name, int16_t bustype,
+ int16_t vendor, int16_t product,
+ int16_t version) {
+ ALOGV("ConfigureBegin %s 0x%04" PRIX16 " 0x%04" PRIX16 " 0x%04" PRIX16
+ " 0x%04" PRIX16 "",
+ device_name, bustype, vendor, product, version);
+ if (!device_name || strlen(device_name) >= UINPUT_MAX_NAME_SIZE) {
+ return Error(ERROR_DEVICE_NAME);
+ }
+ if (const int status = RequireState(State::NEW)) {
+ return status;
+ }
+ if (!uinput_) {
+ owned_uinput_.reset(new EvdevInjector::UInput());
+ uinput_ = owned_uinput_.get();
+ }
+ if (const int status = uinput_->Open()) {
+ // Without uinput we're dead in the water.
+ state_ = State::CLOSED;
+ return Error(status);
+ }
+ state_ = State::CONFIGURING;
+ // Initialize device setting structure.
+ memset(&uidev_, 0, sizeof(uidev_));
+ strncpy(uidev_.name, device_name, UINPUT_MAX_NAME_SIZE);
+ uidev_.id.bustype = bustype;
+ uidev_.id.vendor = vendor;
+ uidev_.id.product = product;
+ uidev_.id.version = version;
+ return 0;
+}
+
+int EvdevInjector::ConfigureInputProperty(int property) {
+ ALOGV("ConfigureInputProperty %d", property);
+ if (property < 0 || property >= INPUT_PROP_CNT) {
+ ALOGE("property 0x%X out of range [0,0x%X)", property, INPUT_PROP_CNT);
+ return Error(ERROR_PROPERTY_RANGE);
+ }
+ if (const int status = RequireState(State::CONFIGURING)) {
+ return status;
+ }
+ if (const int status = uinput_->IoctlSetInt(UI_SET_PROPBIT, property)) {
+ ALOGE("failed to set property %d", property);
+ return Error(status);
+ }
+ return 0;
+}
+
+int EvdevInjector::ConfigureKey(uint16_t key) {
+ ALOGV("ConfigureKey 0x%02" PRIX16 "", key);
+ if (key >= KEY_CNT) {
+ ALOGE("key 0x%X out of range [0,0x%X)", key, KEY_CNT);
+ return Error(ERROR_KEY_RANGE);
+ }
+ if (const int status = RequireState(State::CONFIGURING)) {
+ return status;
+ }
+ if (const int status = EnableEventType(EV_KEY)) {
+ return status;
+ }
+ if (const int status = uinput_->IoctlSetInt(UI_SET_KEYBIT, key)) {
+ ALOGE("failed to enable EV_KEY 0x%02" PRIX16 "", key);
+ return Error(status);
+ }
+ return 0;
+}
+
+int EvdevInjector::ConfigureAbs(uint16_t abs_type, int32_t min, int32_t max,
+ int32_t fuzz, int32_t flat) {
+ ALOGV("ConfigureAbs 0x%" PRIX16 " %" PRId32 " %" PRId32 " %" PRId32
+ " %" PRId32 "",
+ abs_type, min, max, fuzz, flat);
+ if (abs_type >= ABS_CNT) {
+ ALOGE("EV_ABS type 0x%" PRIX16 " out of range [0,0x%X)", abs_type, ABS_CNT);
+ return Error(ERROR_ABS_RANGE);
+ }
+ if (const int status = RequireState(State::CONFIGURING)) {
+ return status;
+ }
+ if (const int status = EnableEventType(EV_ABS)) {
+ return status;
+ }
+ if (const int status = uinput_->IoctlSetInt(UI_SET_ABSBIT, abs_type)) {
+ ALOGE("failed to enable EV_ABS 0x%" PRIX16 "", abs_type);
+ return Error(status);
+ }
+ uidev_.absmin[abs_type] = min;
+ uidev_.absmax[abs_type] = max;
+ uidev_.absfuzz[abs_type] = fuzz;
+ uidev_.absflat[abs_type] = flat;
+ return 0;
+}
+
+int EvdevInjector::ConfigureMultiTouchXY(int32_t x0, int32_t y0, int32_t x1,
+                                         int32_t y1) {
+ if (const int status = ConfigureAbs(ABS_MT_POSITION_X, x0, x1, 0, 0)) {
+ return status;
+ }
+ if (const int status = ConfigureAbs(ABS_MT_POSITION_Y, y0, y1, 0, 0)) {
+ return status;
+ }
+ return 0;
+}
+
+int EvdevInjector::ConfigureAbsSlots(int slots) {
+ return ConfigureAbs(ABS_MT_SLOT, 0, slots, 0, 0);
+}
+
+int EvdevInjector::ConfigureEnd() {
+ ALOGV("ConfigureEnd:");
+ ALOGV(" name=\"%s\"", uidev_.name);
+ ALOGV(" id.bustype=0x%04" PRIX16, uidev_.id.bustype);
+ ALOGV(" id.vendor=0x%04" PRIX16, uidev_.id.vendor);
+ ALOGV(" id.product=0x%04" PRIX16, uidev_.id.product);
+ ALOGV(" id.version=0x%04" PRIX16, uidev_.id.version);
+ ALOGV(" ff_effects_max=%" PRIu32, uidev_.ff_effects_max);
+ for (int i = 0; i < ABS_CNT; ++i) {
+ if (uidev_.absmin[i]) {
+ ALOGV(" absmin[%d]=%" PRId32, i, uidev_.absmin[i]);
+ }
+ if (uidev_.absmax[i]) {
+ ALOGV(" absmax[%d]=%" PRId32, i, uidev_.absmax[i]);
+ }
+ if (uidev_.absfuzz[i]) {
+ ALOGV(" absfuzz[%d]=%" PRId32, i, uidev_.absfuzz[i]);
+ }
+ if (uidev_.absflat[i]) {
+ ALOGV(" absflat[%d]=%" PRId32, i, uidev_.absflat[i]);
+ }
+ }
+
+ if (const int status = RequireState(State::CONFIGURING)) {
+ return status;
+ }
+ // Write out device settings.
+ if (const int status = uinput_->Write(&uidev_, sizeof uidev_)) {
+ ALOGE("failed to write device settings");
+ return Error(status);
+ }
+ // Create device node.
+ if (const int status = uinput_->IoctlVoid(UI_DEV_CREATE)) {
+ ALOGE("failed to create device node");
+ return Error(status);
+ }
+ state_ = State::READY;
+ return 0;
+}
+
+int EvdevInjector::Send(uint16_t type, uint16_t code, int32_t value) {
+ ALOGV("Send(0x%" PRIX16 ", 0x%" PRIX16 ", 0x%" PRIX32 ")", type, code, value);
+ if (const int status = RequireState(State::READY)) {
+ return status;
+ }
+ struct input_event event;
+ memset(&event, 0, sizeof(event));
+ event.type = type;
+ event.code = code;
+ event.value = value;
+ if (const int status = uinput_->Write(&event, sizeof(event))) {
+ ALOGE("failed to write event 0x%" PRIX16 ", 0x%" PRIX16 ", 0x%" PRIX32,
+ type, code, value);
+ return Error(status);
+ }
+ return 0;
+}
+
+int EvdevInjector::SendSynReport() { return Send(EV_SYN, SYN_REPORT, 0); }
+
+int EvdevInjector::SendKey(uint16_t code, int32_t value) {
+ return Send(EV_KEY, code, value);
+}
+
+int EvdevInjector::SendAbs(uint16_t code, int32_t value) {
+ return Send(EV_ABS, code, value);
+}
+
+int EvdevInjector::SendMultiTouchSlot(int32_t slot) {
+ if (latest_slot_ != slot) {
+ if (const int status = SendAbs(ABS_MT_SLOT, slot)) {
+ return status;
+ }
+ latest_slot_ = slot;
+ }
+ return 0;
+}
+
+int EvdevInjector::SendMultiTouchXY(int32_t slot, int32_t id, int32_t x,
+ int32_t y) {
+ if (const int status = SendMultiTouchSlot(slot)) {
+ return status;
+ }
+ if (const int status = SendAbs(ABS_MT_TRACKING_ID, id)) {
+ return status;
+ }
+ if (const int status = SendAbs(ABS_MT_POSITION_X, x)) {
+ return status;
+ }
+ if (const int status = SendAbs(ABS_MT_POSITION_Y, y)) {
+ return status;
+ }
+ return 0;
+}
+
+int EvdevInjector::SendMultiTouchLift(int32_t slot) {
+ if (const int status = SendMultiTouchSlot(slot)) {
+ return status;
+ }
+ if (const int status = SendAbs(ABS_MT_TRACKING_ID, -1)) {
+ return status;
+ }
+ return 0;
+}
+
+int EvdevInjector::Error(int code) {
+ if (!error_) {
+ error_ = code;
+ }
+ return code;
+}
+
+int EvdevInjector::RequireState(State required_state) {
+ if (error_) {
+ return error_;
+ }
+ if (state_ != required_state) {
+ ALOGE("in state %d but require state %d", static_cast<int>(state_),
+ static_cast<int>(required_state));
+ return Error(ERROR_SEQUENCING);
+ }
+ return 0;
+}
+
+int EvdevInjector::EnableEventType(uint16_t type) {
+ if (const int status = RequireState(State::CONFIGURING)) {
+ return status;
+ }
+ if (enabled_event_types_.count(type) > 0) {
+ return 0;
+ }
+ if (const int status = uinput_->IoctlSetInt(UI_SET_EVBIT, type)) {
+ ALOGE("failed to enable event type 0x%X", type);
+ return Error(status);
+ }
+ enabled_event_types_.insert(type);
+ return 0;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/virtual_touchpad/EvdevInjector.h b/services/vr/virtual_touchpad/EvdevInjector.h
new file mode 100644
index 0000000..1b1c4da
--- /dev/null
+++ b/services/vr/virtual_touchpad/EvdevInjector.h
@@ -0,0 +1,139 @@
+#ifndef ANDROID_DVR_EVDEV_INJECTOR_H
+#define ANDROID_DVR_EVDEV_INJECTOR_H
+
+#include <android-base/unique_fd.h>
+#include <linux/uinput.h>
+
+#include <cstdint>
+#include <memory>
+#include <unordered_set>
+
+namespace android {
+namespace dvr {
+
+// Simulated evdev input device.
+//
+class EvdevInjector {
+ public:
+ // EvdevInjector-specific error codes are negative integers; other non-zero
+ // values returned from public routines are |errno| codes from underlying I/O.
+ // EvdevInjector maintains a 'sticky' error state, similar to |errno|, so that
+ // a caller can perform a sequence of operations and check for errors at the
+ // end using |GetError()|. In general, the first such error will be recorded
+ // and will suppress effects of further device operations until |ResetError()|
+ // is called.
+ //
+ enum : int {
+ ERROR_DEVICE_NAME = -1, // Invalid device name.
+ ERROR_PROPERTY_RANGE = -2, // |INPUT_PROP_*| code out of range.
+ ERROR_KEY_RANGE = -3, // |KEY_*|/|BTN_*| code out of range.
+ ERROR_ABS_RANGE = -4, // |ABS_*| code out of range.
+ ERROR_SEQUENCING = -5, // Configure/Send out of order.
+ };
+
+ // Key event |value| is not defined in <linux/input.h>.
+ enum : int32_t { KEY_RELEASE = 0, KEY_PRESS = 1, KEY_REPEAT = 2 };
+
+ // UInput provides a shim to intercept /dev/uinput operations
+ // just above the system call level, for testing.
+ //
+ class UInput {
+ public:
+ UInput() {}
+ virtual ~UInput() {}
+ virtual int Open();
+ virtual int Close();
+ virtual int Write(const void* buf, size_t count);
+ virtual int IoctlVoid(int request);
+ virtual int IoctlSetInt(int request, int value);
+
+ private:
+ base::unique_fd fd_;
+ };
+
+ EvdevInjector() {}
+ ~EvdevInjector() { Close(); }
+ void Close();
+
+ int GetError() const { return error_; }
+ void ResetError() { error_ = 0; }
+
+ // Configuration must be performed before sending any events.
+ // |ConfigureBegin()| must be called first, and |ConfigureEnd()| last,
+ // with zero or more other |Configure...()| calls in between in any order.
+
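+ // For example (sketch, mirroring what VirtualTouchpad::Initialize() does;
+ // the device name and ranges here are illustrative only):
+ //   EvdevInjector injector;
+ //   injector.ConfigureBegin("example touchpad", BUS_VIRTUAL,
+ //                           0x18D1, 0x5652, 0x0001);
+ //   injector.ConfigureInputProperty(INPUT_PROP_DIRECT);
+ //   injector.ConfigureMultiTouchXY(0, 0, 0xFFFF, 0xFFFF);
+ //   injector.ConfigureAbsSlots(2);
+ //   injector.ConfigureKey(BTN_TOUCH);
+ //   injector.ConfigureEnd();
+ //   if (injector.GetError()) { /* handle configuration failure */ }
+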
+ // Configure the basic evdev device properties; must be called first.
+ int ConfigureBegin(const char* device_name, int16_t bustype, int16_t vendor,
+ int16_t product, int16_t version);
+
+ // Configure an optional input device property.
+ // @param property One of the |INPUT_PROP_*| constants from <linux/input.h>.
+ int ConfigureInputProperty(int property);
+
+ // Configure an input key.
+ // @param key One of the |KEY_*| or |BTN_*| constants from <linux/input.h>.
+ int ConfigureKey(uint16_t key);
+
+ // Configure an absolute axis.
+ // @param abs_type One of the |ABS_*| constants from <linux/input.h>.
+ int ConfigureAbs(uint16_t abs_type, int32_t min, int32_t max, int32_t fuzz,
+ int32_t flat);
+
+ // Configure the number of multitouch slots.
+ int ConfigureAbsSlots(int slots);
+
+ // Configure multitouch coordinate range.
+ int ConfigureMultiTouchXY(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+
+ // Complete configuration and create the input device.
+ int ConfigureEnd();
+
+ // Send various events.
+ //
+ int Send(uint16_t type, uint16_t code, int32_t value);
+ int SendSynReport();
+ int SendKey(uint16_t code, int32_t value);
+ int SendAbs(uint16_t code, int32_t value);
+ int SendMultiTouchSlot(int32_t slot);
+ int SendMultiTouchXY(int32_t slot, int32_t id, int32_t x, int32_t y);
+ int SendMultiTouchLift(int32_t slot);
+
+ protected:
+ // Must be called only between construction and ConfigureBegin().
+ inline void SetUInputForTesting(UInput* uinput) { uinput_ = uinput; }
+ // Caller must not retain pointer longer than EvdevInjector.
+ inline const uinput_user_dev* GetUiDevForTesting() const { return &uidev_; }
+
+ private:
+ // Phase to enforce that configuration is complete before events are sent.
+ enum class State { NEW, CONFIGURING, READY, CLOSED };
+
+ // Sets |error_| if it is not already set; returns |code|.
+ int Error(int code);
+
+ // Returns a nonzero error if the injector is not in the required |state|.
+ int RequireState(State state);
+
+ // Configures an event type if necessary.
+ // @param type One of the |EV_*| constants from <linux/input.h>.
+ int EnableEventType(uint16_t type);
+
+ // Active pointer to owned or testing UInput.
+ UInput* uinput_ = nullptr;
+ std::unique_ptr<UInput> owned_uinput_;
+
+ State state_ = State::NEW;
+ int error_ = 0;
+ uinput_user_dev uidev_;
+ std::unordered_set<uint16_t> enabled_event_types_;
+ int32_t latest_slot_ = -1;
+
+ EvdevInjector(const EvdevInjector&) = delete;
+ void operator=(const EvdevInjector&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_EVDEV_INJECTOR_H
diff --git a/services/vr/virtual_touchpad/VirtualTouchpad.cpp b/services/vr/virtual_touchpad/VirtualTouchpad.cpp
new file mode 100644
index 0000000..b137dd7
--- /dev/null
+++ b/services/vr/virtual_touchpad/VirtualTouchpad.cpp
@@ -0,0 +1,80 @@
+#include "VirtualTouchpad.h"
+
+#include <cutils/log.h>
+#include <inttypes.h>
+#include <linux/input.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// Virtual evdev device properties.
+static const char* const kDeviceName = "vr window manager virtual touchpad";
+static constexpr int16_t kDeviceBusType = BUS_VIRTUAL;
+static constexpr int16_t kDeviceVendor = 0x18D1; // Google USB vendor ID.
+static constexpr int16_t kDeviceProduct = 0x5652; // 'VR'
+static constexpr int16_t kDeviceVersion = 0x0001;
+static constexpr int32_t kWidth = 0x10000;
+static constexpr int32_t kHeight = 0x10000;
+static constexpr int32_t kSlots = 2;
+
+} // anonymous namespace
+
+int VirtualTouchpad::Initialize() {
+ if (!injector_) {
+ owned_injector_.reset(new EvdevInjector());
+ injector_ = owned_injector_.get();
+ }
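+ // EvdevInjector errors are sticky, so it is sufficient to check GetError()
+ // once after the whole configuration sequence below.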
+ injector_->ConfigureBegin(kDeviceName, kDeviceBusType, kDeviceVendor,
+ kDeviceProduct, kDeviceVersion);
+ injector_->ConfigureInputProperty(INPUT_PROP_DIRECT);
+ injector_->ConfigureMultiTouchXY(0, 0, kWidth - 1, kHeight - 1);
+ injector_->ConfigureAbsSlots(kSlots);
+ injector_->ConfigureKey(BTN_TOUCH);
+ injector_->ConfigureEnd();
+ return injector_->GetError();
+}
+
+int VirtualTouchpad::Touch(float x, float y, float pressure) {
+ int error = 0;
+ int32_t device_x = x * kWidth;
+ int32_t device_y = y * kHeight;
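+ // touches_ keeps the previous touch state in bit 1 and the new state in
+ // bit 0, so the switch below distinguishes four transitions: hover, press,
+ // lift, and continued touch.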
+ touches_ = ((touches_ & 1) << 1) | (pressure > 0);
+ ALOGV("(%f,%f) %f -> (%" PRId32 ",%" PRId32 ") %d",
+ x, y, pressure, device_x, device_y, touches_);
+
+ injector_->ResetError();
+ switch (touches_) {
+ case 0b00: // Hover continues.
+ if (device_x != last_device_x_ || device_y != last_device_y_) {
+ injector_->SendMultiTouchXY(0, 0, device_x, device_y);
+ injector_->SendSynReport();
+ }
+ break;
+ case 0b01: // Touch begins.
+ // Press.
+ injector_->SendMultiTouchXY(0, 0, device_x, device_y);
+ injector_->SendKey(BTN_TOUCH, EvdevInjector::KEY_PRESS);
+ injector_->SendSynReport();
+ break;
+ case 0b10: // Touch ends.
+ injector_->SendKey(BTN_TOUCH, EvdevInjector::KEY_RELEASE);
+ injector_->SendMultiTouchLift(0);
+ injector_->SendSynReport();
+ break;
+ case 0b11: // Touch continues.
+ if (device_x != last_device_x_ || device_y != last_device_y_) {
+ injector_->SendMultiTouchXY(0, 0, device_x, device_y);
+ injector_->SendSynReport();
+ }
+ break;
+ }
+ last_device_x_ = device_x;
+ last_device_y_ = device_y;
+
+ return injector_->GetError();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/virtual_touchpad/VirtualTouchpad.h b/services/vr/virtual_touchpad/VirtualTouchpad.h
new file mode 100644
index 0000000..7e7801e
--- /dev/null
+++ b/services/vr/virtual_touchpad/VirtualTouchpad.h
@@ -0,0 +1,44 @@
+#ifndef ANDROID_DVR_VIRTUAL_TOUCHPAD_H
+#define ANDROID_DVR_VIRTUAL_TOUCHPAD_H
+
+#include <memory>
+
+#include "EvdevInjector.h"
+
+namespace android {
+namespace dvr {
+
+class EvdevInjector;
+
+class VirtualTouchpad {
+ public:
+ VirtualTouchpad() {}
+ int Initialize();
+ int Touch(float x, float y, float pressure);
+
+ protected:
+ // Must be called only between construction and Initialize().
+ inline void SetEvdevInjectorForTesting(EvdevInjector* injector) {
+ injector_ = injector;
+ }
+
+ private:
+ // Active pointer to |owned_injector_| or to a testing injector.
+ EvdevInjector* injector_ = nullptr;
+ std::unique_ptr<EvdevInjector> owned_injector_;
+
+ // Previous (x,y) position to suppress redundant events.
+ int32_t last_device_x_ = INT32_MIN;
+ int32_t last_device_y_ = INT32_MIN;
+
+ // Records current touch state in bit 0 and previous state in bit 1.
+ int touches_ = 0;
+
+ VirtualTouchpad(const VirtualTouchpad&) = delete;
+ void operator=(const VirtualTouchpad&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_VIRTUAL_TOUCHPAD_H
diff --git a/services/vr/virtual_touchpad/VirtualTouchpadService.cpp b/services/vr/virtual_touchpad/VirtualTouchpadService.cpp
new file mode 100644
index 0000000..e5ead0e
--- /dev/null
+++ b/services/vr/virtual_touchpad/VirtualTouchpadService.cpp
@@ -0,0 +1,23 @@
+#include "VirtualTouchpadService.h"
+
+#include <binder/Status.h>
+#include <cutils/log.h>
+#include <linux/input.h>
+#include <utils/Errors.h>
+
+namespace android {
+namespace dvr {
+
+int VirtualTouchpadService::Initialize() {
+ return touchpad_.Initialize();
+}
+
+binder::Status VirtualTouchpadService::touch(float x, float y, float pressure) {
+ // Permissions check added and removed here :^)
+ const int error = touchpad_.Touch(x, y, pressure);
+ return error ? binder::Status::fromServiceSpecificError(error)
+ : binder::Status::ok();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/virtual_touchpad/VirtualTouchpadService.h b/services/vr/virtual_touchpad/VirtualTouchpadService.h
new file mode 100644
index 0000000..05a2a50
--- /dev/null
+++ b/services/vr/virtual_touchpad/VirtualTouchpadService.h
@@ -0,0 +1,36 @@
+#ifndef ANDROID_DVR_VIRTUAL_TOUCHPAD_SERVICE_H
+#define ANDROID_DVR_VIRTUAL_TOUCHPAD_SERVICE_H
+
+#include <android/dvr/BnVirtualTouchpadService.h>
+
+#include "VirtualTouchpad.h"
+
+namespace android {
+namespace dvr {
+
+class VirtualTouchpadService : public BnVirtualTouchpadService {
+ public:
+ explicit VirtualTouchpadService(VirtualTouchpad& touchpad)
+ : touchpad_(touchpad) {}
+
+ // Must be called before clients can connect.
+ // Returns 0 if initialization is successful.
+ int Initialize();
+
+ static char const* getServiceName() { return "virtual_touchpad"; }
+
+ protected:
+ // Implements IVirtualTouchpadService.
+ ::android::binder::Status touch(float x, float y, float pressure) override;
+
+ private:
+ VirtualTouchpad& touchpad_;
+
+ VirtualTouchpadService(const VirtualTouchpadService&) = delete;
+ void operator=(const VirtualTouchpadService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_VIRTUAL_TOUCHPAD_SERVICE_H
diff --git a/services/vr/virtual_touchpad/aidl/android/dvr/VirtualTouchpadService.aidl b/services/vr/virtual_touchpad/aidl/android/dvr/VirtualTouchpadService.aidl
new file mode 100644
index 0000000..da4de94
--- /dev/null
+++ b/services/vr/virtual_touchpad/aidl/android/dvr/VirtualTouchpadService.aidl
@@ -0,0 +1,16 @@
+package android.dvr;
+
+/** @hide */
+interface VirtualTouchpadService
+{
+ /**
+ * Generate a simulated touch event.
+ *
+ * @param x Horizontal touch position.
+ * @param y Vertical touch position.
+ * @param pressure Touch pressure; use 0.0 for no touch (lift or hover).
+ *
+ * Position values in the range [0.0, 1.0) map to the screen.
+ */
+ void touch(float x, float y, float pressure);
+}
diff --git a/services/vr/virtual_touchpad/main.cpp b/services/vr/virtual_touchpad/main.cpp
new file mode 100644
index 0000000..57471c5
--- /dev/null
+++ b/services/vr/virtual_touchpad/main.cpp
@@ -0,0 +1,36 @@
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+#include <cutils/log.h>
+#include <signal.h>
+#include <stdlib.h>
+
+#include "VirtualTouchpadService.h"
+
+int main() {
+ ALOGI("Starting");
+ android::dvr::VirtualTouchpad touchpad;
+ android::dvr::VirtualTouchpadService touchpad_service(touchpad);
+ const int touchpad_status = touchpad_service.Initialize();
+ if (touchpad_status) {
+ ALOGE("virtual touchpad initialization failed: %d", touchpad_status);
+ exit(1);
+ }
+
+ signal(SIGPIPE, SIG_IGN);
+ android::sp<android::ProcessState> ps(android::ProcessState::self());
+ ps->setThreadPoolMaxThreadCount(4);
+ ps->startThreadPool();
+ ps->giveThreadPoolName();
+
+ android::sp<android::IServiceManager> sm(android::defaultServiceManager());
+ const android::status_t service_status =
+ sm->addService(android::String16(touchpad_service.getServiceName()),
+ &touchpad_service, false /*allowIsolated*/);
+ if (service_status != android::OK) {
+ ALOGE("virtual touchpad service not added: %d",
+ static_cast<int>(service_status));
+ exit(2);
+ }
+
+ android::IPCThreadState::self()->joinThreadPool();
+ return 0;
+}
diff --git a/services/vr/virtual_touchpad/tests/VirtualTouchpad_test.cpp b/services/vr/virtual_touchpad/tests/VirtualTouchpad_test.cpp
new file mode 100644
index 0000000..874ef80
--- /dev/null
+++ b/services/vr/virtual_touchpad/tests/VirtualTouchpad_test.cpp
@@ -0,0 +1,233 @@
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <gtest/gtest.h>
+#include <linux/input.h>
+
+#include "EvdevInjector.h"
+#include "VirtualTouchpad.h"
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+class UInputForTesting : public EvdevInjector::UInput {
+ public:
+ void WriteInputEvent(uint16_t type, uint16_t code, int32_t value) {
+ struct input_event event;
+ memset(&event, 0, sizeof(event));
+ event.type = type;
+ event.code = code;
+ event.value = value;
+ Write(&event, sizeof (event));
+ }
+};
+
+// Recording test implementation of UInput.
+//
+class UInputRecorder : public UInputForTesting {
+ public:
+ UInputRecorder() {}
+ virtual ~UInputRecorder() {}
+
+ const std::string& GetString() const { return s_; }
+ void Reset() { s_.clear(); }
+
+ // UInput overrides:
+
+ int Open() override {
+ s_ += "o;";
+ return 0;
+ }
+
+ int Close() override {
+ s_ += "c;";
+ return 0;
+ }
+
+ int Write(const void* buf, size_t count) override {
+ s_ += "w(";
+ s_ += Encode(&count, sizeof(count));
+ s_ += ",";
+ s_ += Encode(buf, count);
+ s_ += ");";
+ return 0;
+ }
+
+ int IoctlVoid(int request) override {
+ s_ += "i(";
+ s_ += Encode(&request, sizeof(request));
+ s_ += ");";
+ return 0;
+ }
+
+ int IoctlSetInt(int request, int value) override {
+ s_ += "i(";
+ s_ += Encode(&request, sizeof(request));
+ s_ += ",";
+ s_ += Encode(&value, sizeof(value));
+ s_ += ");";
+ return 0;
+ }
+
+ private:
+ std::string s_;
+
+ std::string Encode(const void* buf, size_t count) {
+ const char* in = static_cast<const char*>(buf);
+ char out[2 * count + 1];
+ for (size_t i = 0; i < count; ++i) {
+ snprintf(&out[2 * i], 3, "%02X", static_cast<unsigned char>(in[i]));
+ }
+ return out;
+ }
+};
+
+class EvdevInjectorForTesting : public EvdevInjector {
+ public:
+ EvdevInjectorForTesting(UInput& uinput) {
+ SetUInputForTesting(&uinput);
+ }
+ const uinput_user_dev* GetUiDev() const { return GetUiDevForTesting(); }
+};
+
+class VirtualTouchpadForTesting : public VirtualTouchpad {
+ public:
+ VirtualTouchpadForTesting(EvdevInjector& injector) {
+ SetEvdevInjectorForTesting(&injector);
+ }
+};
+
+void DumpDifference(const char* expect, const char* actual) {
+ printf(" common: ");
+ while (*expect && *expect == *actual) {
+ putchar(*expect);
+ ++expect;
+ ++actual;
+ }
+ printf("\n expect: %s\n", expect);
+ printf(" actual: %s\n", actual);
+}
+
+} // anonymous namespace
+
+class VirtualTouchpadTest : public testing::Test {
+};
+
+TEST_F(VirtualTouchpadTest, Goodness) {
+ UInputRecorder expect;
+ UInputRecorder record;
+ EvdevInjectorForTesting injector(record);
+ VirtualTouchpadForTesting touchpad(injector);
+
+ const int initialization_status = touchpad.Initialize();
+ EXPECT_EQ(0, initialization_status);
+
+ // Check some aspects of uinput_user_dev.
+ const uinput_user_dev* uidev = injector.GetUiDev();
+ for (int i = 0; i < ABS_CNT; ++i) {
+ EXPECT_EQ(0, uidev->absmin[i]);
+ EXPECT_EQ(0, uidev->absfuzz[i]);
+ EXPECT_EQ(0, uidev->absflat[i]);
+ if (i != ABS_MT_POSITION_X && i != ABS_MT_POSITION_Y && i != ABS_MT_SLOT) {
+ EXPECT_EQ(0, uidev->absmax[i]);
+ }
+ }
+ const int32_t width = 1 + uidev->absmax[ABS_MT_POSITION_X];
+ const int32_t height = 1 + uidev->absmax[ABS_MT_POSITION_Y];
+ const int32_t slots = uidev->absmax[ABS_MT_SLOT];
+
+ // Check the system calls performed by initialization.
+ // From ConfigureBegin():
+ expect.Open();
+ // From ConfigureInputProperty(INPUT_PROP_DIRECT):
+ expect.IoctlSetInt(UI_SET_PROPBIT, INPUT_PROP_DIRECT);
+ // From ConfigureMultiTouchXY(0, 0, kWidth - 1, kHeight - 1):
+ expect.IoctlSetInt(UI_SET_EVBIT, EV_ABS);
+ expect.IoctlSetInt(UI_SET_ABSBIT, ABS_MT_POSITION_X);
+ expect.IoctlSetInt(UI_SET_ABSBIT, ABS_MT_POSITION_Y);
+ // From ConfigureAbsSlots(kSlots):
+ expect.IoctlSetInt(UI_SET_ABSBIT, ABS_MT_SLOT);
+ // From ConfigureKey(BTN_TOUCH):
+ expect.IoctlSetInt(UI_SET_EVBIT, EV_KEY);
+ expect.IoctlSetInt(UI_SET_KEYBIT, BTN_TOUCH);
+ // From ConfigureEnd():
+ expect.Write(uidev, sizeof (uinput_user_dev));
+ expect.IoctlVoid(UI_DEV_CREATE);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+ int touch_status = touchpad.Touch(0, 0, 0);
+ EXPECT_EQ(0, touch_status);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_SLOT, 0);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_TRACKING_ID, 0);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_X, 0);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_Y, 0);
+ expect.WriteInputEvent(EV_SYN, SYN_REPORT, 0);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+ touch_status = touchpad.Touch(0.25f, 0.75f, 0.5f);
+ EXPECT_EQ(0, touch_status);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_TRACKING_ID, 0);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_X, 0.25f * width);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_Y, 0.75f * height);
+ expect.WriteInputEvent(EV_KEY, BTN_TOUCH, EvdevInjector::KEY_PRESS);
+ expect.WriteInputEvent(EV_SYN, SYN_REPORT, 0);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+ touch_status = touchpad.Touch(1.0f, 1.0f, 1.0f);
+ EXPECT_EQ(0, touch_status);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_TRACKING_ID, 0);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_X, width);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_POSITION_Y, height);
+ expect.WriteInputEvent(EV_SYN, SYN_REPORT, 0);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+ touch_status = touchpad.Touch(0.25f, 0.75f, -0.01f);
+ EXPECT_EQ(0, touch_status);
+ expect.WriteInputEvent(EV_KEY, BTN_TOUCH, EvdevInjector::KEY_RELEASE);
+ expect.WriteInputEvent(EV_ABS, ABS_MT_TRACKING_ID, -1);
+ expect.WriteInputEvent(EV_SYN, SYN_REPORT, 0);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+}
+
+TEST_F(VirtualTouchpadTest, Badness) {
+ UInputRecorder expect;
+ UInputRecorder record;
+ EvdevInjectorForTesting injector(record);
+ VirtualTouchpadForTesting touchpad(injector);
+
+ // Touch before initialization should return an error,
+ // and should not result in any system calls.
+ expect.Reset();
+ record.Reset();
+ int touch_status = touchpad.Touch(0.25f, 0.75f, -0.01f);
+ EXPECT_NE(0, touch_status);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+
+ expect.Reset();
+ record.Reset();
+ touchpad.Initialize();
+
+ // Repeated initialization should return an error,
+ // and should not result in any system calls.
+ expect.Reset();
+ record.Reset();
+ const int initialization_status = touchpad.Initialize();
+ EXPECT_NE(0, initialization_status);
+ EXPECT_EQ(expect.GetString(), record.GetString());
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/virtual_touchpad/virtual_touchpad.rc b/services/vr/virtual_touchpad/virtual_touchpad.rc
new file mode 100644
index 0000000..b4f9f00
--- /dev/null
+++ b/services/vr/virtual_touchpad/virtual_touchpad.rc
@@ -0,0 +1,5 @@
+service virtual_touchpad /system/bin/virtual_touchpad
+ class core
+ user system
+ group system input
+ cpuset /system
diff --git a/services/vr/vr_manager/Android.mk b/services/vr/vr_manager/Android.mk
new file mode 100644
index 0000000..54b1c1a
--- /dev/null
+++ b/services/vr/vr_manager/Android.mk
@@ -0,0 +1,38 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+src_files := \
+ vr_manager.cpp \
+
+inc_files := \
+ frameworks/native/include/vr/vr_manager
+
+static_libs := \
+ libutils \
+ libbinder \
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(src_files)
+LOCAL_C_INCLUDES := $(inc_files)
+LOCAL_CFLAGS += -Wall
+LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Wunused
+LOCAL_CFLAGS += -Wunreachable-code
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(inc_files)
+#LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_STATIC_LIBRARIES := $(static_libs)
+LOCAL_MODULE := libvr_manager
+include $(BUILD_STATIC_LIBRARY)
diff --git a/services/vr/vr_manager/vr_manager.cpp b/services/vr/vr_manager/vr_manager.cpp
new file mode 100644
index 0000000..a31fcb7
--- /dev/null
+++ b/services/vr/vr_manager/vr_manager.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VrManager"
+#include <utils/Log.h>
+
+#include <vr/vr_manager/vr_manager.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+// Must be kept in sync with interface defined in IVrStateCallbacks.aidl.
+
+class BpVrStateCallbacks : public BpInterface<IVrStateCallbacks> {
+ public:
+ explicit BpVrStateCallbacks(const sp<IBinder>& impl)
+ : BpInterface<IVrStateCallbacks>(impl) {}
+
+ void onVrStateChanged(bool enabled) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IVrStateCallbacks::getInterfaceDescriptor());
+ data.writeBool(enabled);
+ remote()->transact(ON_VR_STATE_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+};
+
+IMPLEMENT_META_INTERFACE(VrStateCallbacks, "android.service.vr.IVrStateCallbacks");
+
+status_t BnVrStateCallbacks::onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags) {
+ switch(code) {
+ case ON_VR_STATE_CHANGED: {
+ CHECK_INTERFACE(IVrStateCallbacks, data, reply);
+ onVrStateChanged(data.readBool());
+ return OK;
+ }
+ }
+ return BBinder::onTransact(code, data, reply, flags);
+}
+
+// Must be kept in sync with interface defined in IVrManager.aidl.
+
+class BpVrManager : public BpInterface<IVrManager> {
+ public:
+ explicit BpVrManager(const sp<IBinder>& impl)
+ : BpInterface<IVrManager>(impl) {}
+
+ void registerListener(const sp<IVrStateCallbacks>& cb) override {
+ Parcel data;
+ data.writeInterfaceToken(IVrManager::getInterfaceDescriptor());
+ data.writeStrongBinder(IInterface::asBinder(cb));
+ remote()->transact(REGISTER_LISTENER, data, NULL);
+ }
+
+ void unregisterListener(const sp<IVrStateCallbacks>& cb) override {
+ Parcel data;
+ data.writeInterfaceToken(IVrManager::getInterfaceDescriptor());
+ data.writeStrongBinder(IInterface::asBinder(cb));
+ remote()->transact(UNREGISTER_LISTENER, data, NULL);
+ }
+
+ bool getVrModeState() override {
+ Parcel data, reply;
+ data.writeInterfaceToken(IVrManager::getInterfaceDescriptor());
+ remote()->transact(GET_VR_MODE_STATE, data, &reply);
+ int32_t ret = reply.readExceptionCode();
+ if (ret != 0) {
+ return false;
+ }
+ return reply.readBool();
+ }
+};
+
+IMPLEMENT_META_INTERFACE(VrManager, "android.service.vr.IVrManager");
+
+class BpVrDisplayStateService : public BpInterface<IVrDisplayStateService> {
+ public:
+ explicit BpVrDisplayStateService(const sp<IBinder>& impl)
+ : BpInterface<IVrDisplayStateService>(impl) {}
+
+ void displayAvailable(bool available) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IVrDisplayStateService::getInterfaceDescriptor());
+ data.writeBool(available);
+ remote()->transact(static_cast<uint32_t>(
+ VrDisplayStateTransaction::ON_DISPLAY_STATE_CHANGED),
+ data, &reply);
+ }
+};
+
+status_t BnVrDisplayStateService::onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags) {
+ switch (static_cast<VrDisplayStateTransaction>(code)) {
+ case VrDisplayStateTransaction::ON_DISPLAY_STATE_CHANGED:
+ CHECK_INTERFACE(IVrDisplayStateService, data, reply);
+ displayAvailable(data.readBool());
+ return OK;
+ }
+ return BBinder::onTransact(code, data, reply, flags);
+}
+
+IMPLEMENT_META_INTERFACE(VrDisplayStateService,
+ "android.service.vr.IVrDisplayStateService");
+
+} // namespace android
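As a usage sketch, a native client could register one of these callbacks with
the framework's VrManagerService roughly as follows. This is illustrative only:
the "vrmanager" service name and the virtual onVrStateChanged() declaration in
vr_manager.h are assumptions, not part of this patch.

#define LOG_TAG "VrStateLogger"
#include <utils/Log.h>

#include <binder/IServiceManager.h>
#include <vr/vr_manager/vr_manager.h>

namespace android {

// Logs VR mode transitions reported by VrManagerService.
class VrStateLogger : public BnVrStateCallbacks {
 public:
  void onVrStateChanged(bool enabled) override {
    ALOGI("VR mode %s", enabled ? "enabled" : "disabled");
  }
};

void RegisterVrStateLogger() {
  sp<IBinder> binder =
      defaultServiceManager()->getService(String16("vrmanager"));
  if (binder == nullptr)
    return;
  sp<IVrManager> manager = interface_cast<IVrManager>(binder);
  manager->registerListener(new VrStateLogger());
}

}  // namespace android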
diff --git a/services/vr/vr_window_manager/Android.bp b/services/vr/vr_window_manager/Android.bp
deleted file mode 100644
index c30219f..0000000
--- a/services/vr/vr_window_manager/Android.bp
+++ /dev/null
@@ -1,39 +0,0 @@
-subdirs = [
- "composer/1.0",
-]
-
-cc_library_shared {
- name: "libvrhwc",
-
- srcs: [
- "composer/impl/sync_timeline.cpp",
- "composer/impl/vr_hwc.cpp",
- "composer/impl/vr_composer_client.cpp",
- ],
-
- static_libs: [
- "libhwcomposer-client",
- ],
-
- shared_libs: [
- "android.dvr.composer@1.0",
- "android.hardware.graphics.composer@2.1",
- "libbase",
- "libcutils",
- "libfmq",
- "libhardware",
- "libhidlbase",
- "libhidltransport",
- "liblog",
- "libsync",
- "libui",
- "libutils",
- ],
-
- // Access to software sync timeline.
- include_dirs: [ "system/core/libsync" ],
-
- cflags: [
- "-DLOG_TAG=\"vrhwc\"",
- ],
-}
diff --git a/services/vr/vr_window_manager/Android.mk b/services/vr/vr_window_manager/Android.mk
new file mode 100644
index 0000000..adce4b9
--- /dev/null
+++ b/services/vr/vr_window_manager/Android.mk
@@ -0,0 +1,100 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+src := \
+ vr_window_manager_jni.cpp \
+ application.cpp \
+ controller_mesh.cpp \
+ elbow_model.cpp \
+ hwc_callback.cpp \
+ reticle.cpp \
+ render_thread.cpp \
+ shell_view.cpp \
+ surface_flinger_view.cpp \
+ texture.cpp \
+ ../virtual_touchpad/aidl/android/dvr/VirtualTouchpadService.aidl \
+
+static_libs := \
+ libdisplay \
+ libbufferhub \
+ libbufferhubqueue \
+ libeds \
+ libdvrgraphics \
+ libdvrcommon \
+ libhwcomposer-client \
+ libsensor \
+ libperformance \
+ libpdx_default_transport \
+ libchrome \
+ libcutils \
+
+shared_libs := \
+ android.dvr.composer@1.0 \
+ android.hardware.graphics.composer@2.1 \
+ libvrhwc \
+ libandroid \
+ libbase \
+ libbinder \
+ libinput \
+ libhardware \
+ libsync \
+ libutils \
+ libgui \
+ libEGL \
+ libGLESv2 \
+ libvulkan \
+ libui \
+ libhidlbase \
+ libhidltransport
+
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(src)
+LOCAL_C_INCLUDES := hardware/qcom/display/msm8996/libgralloc
+LOCAL_STATIC_LIBRARIES := $(static_libs)
+LOCAL_SHARED_LIBRARIES := $(shared_libs) libevent
+LOCAL_SHARED_LIBRARIES += libgvr
+LOCAL_STATIC_LIBRARIES += libgvr_ext
+LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES
+LOCAL_CFLAGS += -DEGL_EGLEXT_PROTOTYPES
+LOCAL_CFLAGS += -DLOG_TAG=\"VrWindowManager\"
+LOCAL_LDLIBS := -llog
+LOCAL_MODULE := libvr_window_manager_jni
+LOCAL_MODULE_TAGS := optional
+LOCAL_MULTILIB := 64
+LOCAL_CXX_STL := libc++_static
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_PACKAGE_NAME := VrWindowManager
+
+# We need to be privileged to run as the system user, which is necessary for
+# getting HMD input events and doing input injection.
+LOCAL_CERTIFICATE := platform
+LOCAL_PRIVILEGED_MODULE := true
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := $(call all-java-files-under, java)
+LOCAL_JNI_SHARED_LIBRARIES := libvr_window_manager_jni
+LOCAL_STATIC_JAVA_AAR_LIBRARIES := gvr_common_library_aar
+# gvr_common_library_aar depends on nano version of libprotobuf
+LOCAL_STATIC_JAVA_LIBRARIES := libprotobuf-java-nano
+# Make sure that libgvr's resources are loaded
+LOCAL_AAPT_FLAGS += --auto-add-overlay
+LOCAL_AAPT_FLAGS += --extra-packages com.google.vr.cardboard
+LOCAL_PROGUARD_FLAG_FILES := proguard.flags
+include $(BUILD_PACKAGE)
diff --git a/services/vr/vr_window_manager/AndroidManifest.xml b/services/vr/vr_window_manager/AndroidManifest.xml
new file mode 100644
index 0000000..5cc4b5c
--- /dev/null
+++ b/services/vr/vr_window_manager/AndroidManifest.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.google.vr.windowmanager"
+ coreApp="true"
+ android:sharedUserId="android.uid.system"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <!-- The GVR SDK requires API 19+ and OpenGL ES 2+. -->
+ <uses-sdk android:minSdkVersion="19" android:targetSdkVersion="24" />
+ <uses-feature android:glEsVersion="0x00020000" android:required="true" />
+
+ <!-- We need the DIAGNOSTIC permission to read HMD button events. DIAGNOSTIC
+ ensures our process runs with the "input" group, so we can access
+ /dev/input. See frameworks/base/data/etc/platform.xml for the permission
+ to group mappings.
+
+ TODO(steventhomas): We shouldn't use this DIAGNOSTIC permission. Figure
+ out the correct way to get access to the HMD buttons.
+ Bug: b/33253485. -->
+ <uses-permission android:name="android.permission.DIAGNOSTIC"/>
+ <uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED" />
+
+ <application
+ android:label="vr_window_manager"
+ android:theme="@style/AppStyle">
+ <service android:name=".VrWindowManagerService" />
+ <receiver android:name="com.google.vr.windowmanager.BootCompletedReceiver">
+ <intent-filter>
+ <action android:name="android.intent.action.BOOT_COMPLETED" />
+ </intent-filter>
+ </receiver>
+ </application>
+</manifest>
diff --git a/services/vr/vr_window_manager/application.cpp b/services/vr/vr_window_manager/application.cpp
new file mode 100644
index 0000000..f84a0d1
--- /dev/null
+++ b/services/vr/vr_window_manager/application.cpp
@@ -0,0 +1,312 @@
+#include "application.h"
+
+#include <binder/IServiceManager.h>
+#include <cutils/log.h>
+#include <dvr/graphics.h>
+#include <dvr/performance_client_api.h>
+#include <dvr/pose_client.h>
+#include <EGL/egl.h>
+#include <GLES3/gl3.h>
+#include <gui/ISurfaceComposer.h>
+#include <hardware/hwcomposer_defs.h>
+#include <private/dvr/graphics/vr_gl_extensions.h>
+
+#include <vector>
+
+namespace android {
+namespace dvr {
+
+Application::Application()
+ : controller_api_status_logged_(false),
+ controller_connection_state_logged_(false) {}
+
+Application::~Application() {
+}
+
+int Application::Initialize(JNIEnv* env, jobject app_context,
+ jobject class_loader) {
+ dvrSetCpuPartition(0, "/application/performance");
+
+ bool is_right_handed = true; // TODO: retrieve setting from system
+ elbow_model_.Enable(ElbowModel::kDefaultNeckPosition, is_right_handed);
+ last_frame_time_ = std::chrono::system_clock::now();
+
+ java_env_ = env;
+ app_context_ = app_context;
+ class_loader_ = class_loader;
+
+ return 0;
+}
+
+int Application::AllocateResources() {
+ int surface_width = 0, surface_height = 0;
+ DvrLensInfo lens_info = {};
+ GLuint texture_id = 0;
+ GLenum texture_target = 0;
+ std::vector<DvrSurfaceParameter> surface_params = {
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
+ DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &lens_info.inter_lens_meters),
+ DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, &lens_info.left_fov),
+ DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, &lens_info.right_fov),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_TEXTURE_TARGET_TYPE, &texture_target),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_TEXTURE_TARGET_ID, &texture_id),
+ DVR_SURFACE_PARAMETER_IN(VISIBLE, 0),
+ DVR_SURFACE_PARAMETER_IN(Z_ORDER, 1),
+ DVR_SURFACE_PARAMETER_IN(GEOMETRY, DVR_SURFACE_GEOMETRY_SINGLE),
+ DVR_SURFACE_PARAMETER_IN(ENABLE_LATE_LATCH, 0),
+ DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, 0),
+ DVR_SURFACE_PARAMETER_LIST_END,
+ };
+
+ int ret = dvrGraphicsContextCreate(surface_params.data(), &graphics_context_);
+ if (ret)
+ return ret;
+
+ GLuint fbo = 0;
+ GLuint depth_stencil_buffer = 0;
+ GLuint samples = 1;
+ glGenFramebuffers(1, &fbo);
+ glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ glFramebufferTexture2DMultisampleEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ texture_target, texture_id, 0, samples);
+
+ glGenRenderbuffers(1, &depth_stencil_buffer);
+ glBindRenderbuffer(GL_RENDERBUFFER, depth_stencil_buffer);
+ glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples,
+ GL_DEPTH_COMPONENT24, surface_width,
+ surface_height);
+
+ glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER, depth_stencil_buffer);
+
+ ALOGI("Surface size=%dx%d", surface_width, surface_height);
+ pose_client_ = dvrPoseCreate();
+ if (!pose_client_)
+ return 1;
+
+ vec2i eye_size(surface_width / 2, surface_height);
+
+ eye_viewport_[0] = Range2i::FromSize(vec2i(0, 0), eye_size);
+ eye_viewport_[1] = Range2i::FromSize(vec2i(surface_width / 2, 0), eye_size);
+
+ eye_from_head_[0] = Eigen::Translation3f(
+ vec3(lens_info.inter_lens_meters * 0.5f, 0.0f, 0.0f));
+ eye_from_head_[1] = Eigen::Translation3f(
+ vec3(-lens_info.inter_lens_meters * 0.5f, 0.0f, 0.0f));
+
+ fov_[0] = FieldOfView(lens_info.left_fov[0], lens_info.left_fov[1],
+ lens_info.left_fov[2], lens_info.left_fov[3]);
+ fov_[1] = FieldOfView(lens_info.right_fov[0], lens_info.right_fov[1],
+ lens_info.right_fov[2], lens_info.right_fov[3]);
+
+ gvr_context_ = gvr::GvrApi::Create(java_env_, app_context_, class_loader_);
+ if (gvr_context_ == nullptr) {
+ ALOGE("Gvr context creation failed");
+ return 1;
+ }
+
+ int32_t options = gvr_controller_get_default_options();
+ options |= GVR_CONTROLLER_ENABLE_GYRO | GVR_CONTROLLER_ENABLE_ACCEL;
+
+ controller_.reset(new gvr::ControllerApi);
+ if (!controller_->Init(java_env_, app_context_, class_loader_, options,
+ gvr_context_->cobj())) {
+ ALOGE("Gvr controller init failed");
+ return 1;
+ }
+
+ controller_state_.reset(new gvr::ControllerState);
+
+ return 0;
+}
+
+void Application::DeallocateResources() {
+ gvr_context_.reset();
+ controller_.reset();
+ controller_state_.reset();
+
+ if (graphics_context_)
+ dvrGraphicsContextDestroy(graphics_context_);
+
+ if (pose_client_)
+ dvrPoseDestroy(pose_client_);
+
+ initialized_ = false;
+}
+
+void Application::ProcessTasks(const std::vector<MainThreadTask>& tasks) {
+ for (auto task : tasks) {
+ switch (task) {
+ case MainThreadTask::EnableDebugMode:
+ if (!debug_mode_) {
+ debug_mode_ = true;
+ SetVisibility(debug_mode_);
+ }
+ break;
+ case MainThreadTask::DisableDebugMode:
+ if (debug_mode_) {
+ debug_mode_ = false;
+ SetVisibility(debug_mode_);
+ }
+ break;
+ case MainThreadTask::EnteringVrMode:
+ if (!initialized_)
+ AllocateResources();
+ break;
+ case MainThreadTask::ExitingVrMode:
+ if (initialized_)
+ DeallocateResources();
+ break;
+ case MainThreadTask::Show:
+ if (!is_visible_)
+ SetVisibility(true);
+ break;
+ }
+ }
+}
+
+void Application::DrawFrame() {
+ // Thread should block if we are invisible or not fully initialized.
+ std::unique_lock<std::mutex> lock(mutex_);
+ wake_up_init_and_render_.wait(lock, [this]() {
+ return (is_visible_ && initialized_) || !main_thread_tasks_.empty();
+ });
+
+ // Process main thread tasks if there are any.
+ std::vector<MainThreadTask> tasks;
+ tasks.swap(main_thread_tasks_);
+ lock.unlock();
+
+ if (!tasks.empty())
+ ProcessTasks(tasks);
+
+ if (!initialized_)
+ return;
+
+ // TODO(steventhomas): If we're not visible, block until we are. For now we
+ // throttle by calling dvrGraphicsWaitNextFrame.
+ DvrFrameSchedule schedule;
+ dvrGraphicsWaitNextFrame(graphics_context_, 0, &schedule);
+
+ OnDrawFrame();
+
+ if (is_visible_) {
+ ProcessControllerInput();
+
+ DvrPoseAsync pose;
+ dvrPoseGet(pose_client_, schedule.vsync_count, &pose);
+ last_pose_ = Posef(
+ quat(pose.orientation[3], pose.orientation[0], pose.orientation[1],
+ pose.orientation[2]),
+ vec3(pose.translation[0], pose.translation[1], pose.translation[2]));
+
+ std::chrono::time_point<std::chrono::system_clock> now =
+ std::chrono::system_clock::now();
+ double delta =
+ std::chrono::duration<double>(now - last_frame_time_).count();
+ last_frame_time_ = now;
+
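+ // If we have not rendered for a long time (e.g. we just became visible),
+ // substitute a nominal frame delta so the fade animation below doesn't jump.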
+ if (delta > 1.0f)
+ delta = 0.05f;
+
+ fade_value_ += delta / 0.25f;
+ if (fade_value_ > 1.0f)
+ fade_value_ = 1.0f;
+
+ quat controller_quat(controller_orientation_.qw, controller_orientation_.qx,
+ controller_orientation_.qy, controller_orientation_.qz);
+ controller_position_ = elbow_model_.Update(
+ delta, last_pose_.GetRotation(), controller_quat, false);
+
+ dvrBeginRenderFrameEds(graphics_context_, pose.orientation,
+ pose.translation);
+
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ mat4 head_matrix = last_pose_.GetObjectFromReferenceMatrix();
+ glViewport(eye_viewport_[kLeftEye].GetMinPoint()[0],
+ eye_viewport_[kLeftEye].GetMinPoint()[1],
+ eye_viewport_[kLeftEye].GetSize()[0],
+ eye_viewport_[kLeftEye].GetSize()[1]);
+ DrawEye(kLeftEye, fov_[kLeftEye].GetProjectionMatrix(0.1f, 500.0f),
+ eye_from_head_[kLeftEye], head_matrix);
+
+ glViewport(eye_viewport_[kRightEye].GetMinPoint()[0],
+ eye_viewport_[kRightEye].GetMinPoint()[1],
+ eye_viewport_[kRightEye].GetSize()[0],
+ eye_viewport_[kRightEye].GetSize()[1]);
+ DrawEye(kRightEye, fov_[kRightEye].GetProjectionMatrix(0.1f, 500.0f),
+ eye_from_head_[kRightEye], head_matrix);
+
+ dvrPresent(graphics_context_);
+ }
+}
+
+void Application::ProcessControllerInput() {
+ controller_state_->Update(*controller_);
+ gvr::ControllerApiStatus new_api_status = controller_state_->GetApiStatus();
+ gvr::ControllerConnectionState new_connection_state =
+ controller_state_->GetConnectionState();
+
+ if (!controller_api_status_logged_) {
+ controller_api_status_logged_ = true;
+ ALOGI("Controller api status: %s",
+ gvr::ControllerApi::ToString(new_api_status));
+ } else if (new_api_status != controller_api_status_) {
+ ALOGI("Controller api status changed: %s --> %s",
+ gvr::ControllerApi::ToString(controller_api_status_),
+ gvr::ControllerApi::ToString(new_api_status));
+ }
+
+ if (new_api_status == gvr::kControllerApiOk) {
+ if (!controller_connection_state_logged_) {
+ controller_connection_state_logged_ = true;
+ ALOGI("Controller connection state: %s",
+ gvr::ControllerApi::ToString(new_connection_state));
+ } else if (new_connection_state != controller_connection_state_) {
+ ALOGI("Controller connection state changed: %s --> %s",
+ gvr::ControllerApi::ToString(controller_connection_state_),
+ gvr::ControllerApi::ToString(new_connection_state));
+ }
+ } else {
+ controller_connection_state_logged_ = false;
+ }
+
+ if (new_api_status == gvr::kControllerApiOk)
+ controller_orientation_ = controller_state_->GetOrientation();
+
+ controller_api_status_ = new_api_status;
+ controller_connection_state_ = new_connection_state;
+}
+
+void Application::SetVisibility(bool visible) {
+ bool changed = is_visible_ != visible;
+ if (changed) {
+ is_visible_ = visible;
+ dvrGraphicsSurfaceSetVisible(graphics_context_, is_visible_);
+ if (is_visible_)
+ controller_->Resume();
+ else
+ controller_->Pause();
+ OnVisibilityChanged(is_visible_);
+ }
+}
+
+void Application::OnVisibilityChanged(bool visible) {
+ if (visible) {
+ fade_value_ = 0;
+ // We have been sleeping, so reset the time to keep the frame deltas correct.
+ last_frame_time_ = std::chrono::system_clock::now();
+ }
+}
+
+void Application::QueueTask(MainThreadTask task) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ main_thread_tasks_.push_back(task);
+ wake_up_init_and_render_.notify_one();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/application.h b/services/vr/vr_window_manager/application.h
new file mode 100644
index 0000000..3321682
--- /dev/null
+++ b/services/vr/vr_window_manager/application.h
@@ -0,0 +1,101 @@
+#ifndef VR_WINDOW_MANAGER_APPLICATION_H_
+#define VR_WINDOW_MANAGER_APPLICATION_H_
+
+#include <jni.h>
+#include <memory>
+#include <private/dvr/types.h>
+#include <stdint.h>
+#include <vr/gvr/capi/include/gvr.h>
+#include <vr/gvr/capi/include/gvr_controller.h>
+
+#include <chrono>
+#include <mutex>
+
+#include "elbow_model.h"
+
+struct DvrGraphicsContext;
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+class Application {
+ public:
+ Application();
+ virtual ~Application();
+
+ virtual int Initialize(JNIEnv* env, jobject app_context,
+ jobject class_loader);
+
+ virtual int AllocateResources();
+ virtual void DeallocateResources();
+
+ void DrawFrame();
+
+ protected:
+ enum class MainThreadTask {
+ EnteringVrMode,
+ ExitingVrMode,
+ EnableDebugMode,
+ DisableDebugMode,
+ Show,
+ };
+
+ virtual void OnDrawFrame() = 0;
+ virtual void DrawEye(EyeType eye, const mat4& perspective,
+ const mat4& eye_matrix, const mat4& head_matrix) = 0;
+
+ void SetVisibility(bool visible);
+ virtual void OnVisibilityChanged(bool visible);
+
+ void ProcessControllerInput();
+
+ void ProcessTasks(const std::vector<MainThreadTask>& tasks);
+
+ void QueueTask(MainThreadTask task);
+
+ DvrGraphicsContext* graphics_context_ = nullptr;
+ DvrPose* pose_client_ = nullptr;
+
+ Range2i eye_viewport_[2];
+ mat4 eye_from_head_[2];
+ FieldOfView fov_[2];
+ Posef last_pose_;
+
+ std::unique_ptr<gvr::GvrApi> gvr_context_;
+ std::unique_ptr<gvr::ControllerApi> controller_;
+ std::unique_ptr<gvr::ControllerState> controller_state_;
+ gvr::ControllerApiStatus controller_api_status_;
+ gvr::ControllerConnectionState controller_connection_state_;
+ gvr_quatf controller_orientation_;
+ bool controller_api_status_logged_;
+ bool controller_connection_state_logged_;
+
+ bool is_visible_ = false;
+ std::chrono::time_point<std::chrono::system_clock> visibility_button_press_;
+ bool debug_mode_ = false;
+
+ std::chrono::time_point<std::chrono::system_clock> last_frame_time_;
+ vec3 controller_position_;
+ ElbowModel elbow_model_;
+
+ float fade_value_ = 0;
+
+ std::mutex mutex_;
+ std::condition_variable wake_up_init_and_render_;
+ bool initialized_ = false;
+ std::vector<MainThreadTask> main_thread_tasks_;
+
+ // Java Resources.
+ JNIEnv* java_env_;
+ jobject app_context_;
+ jobject class_loader_;
+
+ Application(const Application&) = delete;
+ void operator=(const Application&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_APPLICATION_H_
diff --git a/services/vr/vr_window_manager/composer/1.0/Android.bp b/services/vr/vr_window_manager/composer/1.0/Android.bp
index f69481f..e3e47ff 100644
--- a/services/vr/vr_window_manager/composer/1.0/Android.bp
+++ b/services/vr/vr_window_manager/composer/1.0/Android.bp
@@ -6,9 +6,13 @@
cmd: "$(location hidl-gen) -o $(genDir) -Lc++ -randroid.hidl:system/libhidl/transport -randroid.hardware:hardware/interfaces/ -randroid.dvr:frameworks/native/services/vr/vr_window_manager android.dvr.composer@1.0",
srcs: [
"IVrComposerClient.hal",
+ "IVrComposerView.hal",
+ "IVrComposerCallback.hal",
],
out: [
"android/dvr/composer/1.0/VrComposerClientAll.cpp",
+ "android/dvr/composer/1.0/VrComposerViewAll.cpp",
+ "android/dvr/composer/1.0/VrComposerCallbackAll.cpp",
],
}
@@ -18,6 +22,8 @@
cmd: "$(location hidl-gen) -o $(genDir) -Lc++ -randroid.hidl:system/libhidl/transport -randroid.hardware:hardware/interfaces/ -randroid.dvr:frameworks/native/services/vr/vr_window_manager android.dvr.composer@1.0",
srcs: [
"IVrComposerClient.hal",
+ "IVrComposerView.hal",
+ "IVrComposerCallback.hal",
],
out: [
"android/dvr/composer/1.0/IVrComposerClient.h",
@@ -25,6 +31,18 @@
"android/dvr/composer/1.0/BnVrComposerClient.h",
"android/dvr/composer/1.0/BpVrComposerClient.h",
"android/dvr/composer/1.0/BsVrComposerClient.h",
+
+ "android/dvr/composer/1.0/IVrComposerView.h",
+ "android/dvr/composer/1.0/IHwVrComposerView.h",
+ "android/dvr/composer/1.0/BnVrComposerView.h",
+ "android/dvr/composer/1.0/BpVrComposerView.h",
+ "android/dvr/composer/1.0/BsVrComposerView.h",
+
+ "android/dvr/composer/1.0/IVrComposerCallback.h",
+ "android/dvr/composer/1.0/IHwVrComposerCallback.h",
+ "android/dvr/composer/1.0/BnVrComposerCallback.h",
+ "android/dvr/composer/1.0/BpVrComposerCallback.h",
+ "android/dvr/composer/1.0/BsVrComposerCallback.h",
],
}
diff --git a/services/vr/vr_window_manager/composer/1.0/IVrComposerCallback.hal b/services/vr/vr_window_manager/composer/1.0/IVrComposerCallback.hal
new file mode 100644
index 0000000..6e7255e
--- /dev/null
+++ b/services/vr/vr_window_manager/composer/1.0/IVrComposerCallback.hal
@@ -0,0 +1,18 @@
+package android.dvr.composer@1.0;
+
+import android.hardware.graphics.composer@2.1::IComposerClient;
+
+interface IVrComposerCallback {
+ struct Layer {
+ handle buffer;
+ handle fence;
+ android.hardware.graphics.composer@2.1::IComposerClient.Rect display_frame;
+ android.hardware.graphics.composer@2.1::IComposerClient.FRect crop;
+ android.hardware.graphics.composer@2.1::IComposerClient.BlendMode blend_mode;
+ float alpha;
+ uint32_t type;
+ uint32_t app_id;
+ };
+
+ onNewFrame(vec<Layer> frame);
+};
diff --git a/services/vr/vr_window_manager/composer/1.0/IVrComposerView.hal b/services/vr/vr_window_manager/composer/1.0/IVrComposerView.hal
new file mode 100644
index 0000000..e16131a
--- /dev/null
+++ b/services/vr/vr_window_manager/composer/1.0/IVrComposerView.hal
@@ -0,0 +1,9 @@
+package android.dvr.composer@1.0;
+
+import IVrComposerCallback;
+
+interface IVrComposerView {
+ registerCallback(IVrComposerCallback callback);
+
+ releaseFrame();
+};
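A minimal consumer sketch against these two interfaces. This is a hypothetical
snippet, not part of the patch; it assumes the HIDL-generated headers declared
in the Android.bp change above and the "DaydreamDisplay" service name used by
vr_composer_view.cpp.

#define LOG_TAG "FrameLogger"
#include <log/log.h>

#include <android/dvr/composer/1.0/IVrComposerCallback.h>
#include <android/dvr/composer/1.0/IVrComposerView.h>

using android::sp;
using android::dvr::composer::V1_0::IVrComposerCallback;
using android::dvr::composer::V1_0::IVrComposerView;
using android::hardware::hidl_vec;
using android::hardware::Return;
using android::hardware::Void;

// Receives frames from the composer view, logs them, and releases them
// immediately so composition is not blocked.
class FrameLogger : public IVrComposerCallback {
 public:
  explicit FrameLogger(const sp<IVrComposerView>& view) : view_(view) {}

  Return<void> onNewFrame(const hidl_vec<Layer>& frame) override {
    ALOGI("Received frame with %zu layers", frame.size());
    view_->releaseFrame();
    return Void();
  }

 private:
  sp<IVrComposerView> view_;
};

// Usage:
//   sp<IVrComposerView> view = IVrComposerView::getService("DaydreamDisplay");
//   view->registerCallback(new FrameLogger(view));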
diff --git a/services/vr/vr_window_manager/composer/Android.bp b/services/vr/vr_window_manager/composer/Android.bp
new file mode 100644
index 0000000..ad63c7f
--- /dev/null
+++ b/services/vr/vr_window_manager/composer/Android.bp
@@ -0,0 +1,47 @@
+subdirs = [
+ "1.0",
+]
+
+cc_library_shared {
+ name: "libvrhwc",
+
+ srcs: [
+ "impl/sync_timeline.cpp",
+ "impl/vr_composer_view.cpp",
+ "impl/vr_hwc.cpp",
+ "impl/vr_composer_client.cpp",
+ ],
+
+ static_libs: [
+ "libhwcomposer-client",
+ ],
+
+ shared_libs: [
+ "android.dvr.composer@1.0",
+ "android.hardware.graphics.composer@2.1",
+ "libbase",
+ "libcutils",
+ "libfmq",
+ "libhardware",
+ "libhidlbase",
+ "libhidltransport",
+ "liblog",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+
+ export_include_dirs: ["."],
+
+ include_dirs: [
+ // Access to software sync timeline.
+ "system/core/libsync",
+
+ // Access to internal gralloc implementation.
+ "hardware/qcom/display/msm8996/libgralloc",
+ ],
+
+ cflags: [
+ "-DLOG_TAG=\"vrhwc\"",
+ ],
+}
diff --git a/services/vr/vr_window_manager/composer/impl/sync_timeline.cpp b/services/vr/vr_window_manager/composer/impl/sync_timeline.cpp
index aa55aed..e63ed26 100644
--- a/services/vr/vr_window_manager/composer/impl/sync_timeline.cpp
+++ b/services/vr/vr_window_manager/composer/impl/sync_timeline.cpp
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "composer/impl/sync_timeline.h"
+#include "sync_timeline.h"
#include <sys/cdefs.h>
#include <sw_sync.h>
diff --git a/services/vr/vr_window_manager/composer/impl/vr_composer_view.cpp b/services/vr/vr_window_manager/composer/impl/vr_composer_view.cpp
new file mode 100644
index 0000000..5f8168d
--- /dev/null
+++ b/services/vr/vr_window_manager/composer/impl/vr_composer_view.cpp
@@ -0,0 +1,80 @@
+#include "vr_composer_view.h"
+
+namespace android {
+namespace dvr {
+
+VrComposerView::VrComposerView() : composer_view_(nullptr) {}
+
+VrComposerView::~VrComposerView() {
+ if (composer_view_)
+ composer_view_->UnregisterObserver(this);
+}
+
+void VrComposerView::Initialize(ComposerView* composer_view) {
+ composer_view_ = composer_view;
+ composer_view_->RegisterObserver(this);
+}
+
+Return<void> VrComposerView::registerCallback(
+ const sp<IVrComposerCallback>& callback) {
+ callback_ = callback;
+ return Void();
+}
+
+Return<void> VrComposerView::releaseFrame() {
+ composer_view_->ReleaseFrame();
+ return Void();
+}
+
+void VrComposerView::OnNewFrame(const ComposerView::Frame& frame) {
+ if (!callback_.get()) {
+ releaseFrame();
+ return;
+ }
+
+ std::vector<IVrComposerCallback::Layer> layers;
+ std::vector<native_handle_t*> fences;
+ for (size_t i = 0; i < frame.size(); ++i) {
+ native_handle_t* fence;
+ if (frame[i].fence->isValid()) {
+ fence = native_handle_create(1, 0);
+ fence->data[0] = frame[i].fence->dup();
+ } else {
+ fence = native_handle_create(0, 0);
+ }
+ fences.push_back(fence);
+
+ layers.push_back(IVrComposerCallback::Layer{
+ .buffer = hidl_handle(frame[i].buffer->getNativeBuffer()->handle),
+ .fence = hidl_handle(fence),
+ .display_frame = frame[i].display_frame,
+ .crop = frame[i].crop,
+ .blend_mode = frame[i].blend_mode,
+ .alpha = frame[i].alpha,
+ .type = frame[i].type,
+ .app_id = frame[i].app_id,
+ });
+ }
+
+ auto status =
+ callback_->onNewFrame(hidl_vec<IVrComposerCallback::Layer>(layers));
+ if (!status.isOk()) {
+ ALOGE("Failed to send onNewFrame: %s", status.description().c_str());
+ releaseFrame();
+ }
+
+ for (size_t i = 0; i < fences.size(); ++i) {
+ native_handle_close(fences[i]);
+ native_handle_delete(fences[i]);
+ }
+}
+
+VrComposerView* GetVrComposerViewFromIVrComposerView(IVrComposerView* view) {
+ return static_cast<VrComposerView*>(view);
+}
+
+IVrComposerView* HIDL_FETCH_IVrComposerView(const char* name) {
+ return new VrComposerView();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/composer/impl/vr_composer_view.h b/services/vr/vr_window_manager/composer/impl/vr_composer_view.h
new file mode 100644
index 0000000..133bbc8
--- /dev/null
+++ b/services/vr/vr_window_manager/composer/impl/vr_composer_view.h
@@ -0,0 +1,42 @@
+#ifndef VR_WINDOW_MANAGER_COMPOSER_IMPL_VR_COMPOSER_VIEW_H_
+#define VR_WINDOW_MANAGER_COMPOSER_IMPL_VR_COMPOSER_VIEW_H_
+
+#include <android/dvr/composer/1.0/IVrComposerCallback.h>
+#include <android/dvr/composer/1.0/IVrComposerView.h>
+
+#include "vr_hwc.h"
+
+namespace android {
+namespace dvr {
+
+using composer::V1_0::IVrComposerView;
+using composer::V1_0::IVrComposerCallback;
+
+class VrComposerView : public IVrComposerView, public ComposerView::Observer {
+ public:
+ VrComposerView();
+ ~VrComposerView() override;
+
+ void Initialize(ComposerView* composer_view);
+
+ // IVrComposerView
+ Return<void> registerCallback(const sp<IVrComposerCallback>& callback)
+ override;
+ Return<void> releaseFrame() override;
+
+ // ComposerView::Observer
+ void OnNewFrame(const ComposerView::Frame& frame) override;
+
+ private:
+ ComposerView* composer_view_;
+ sp<IVrComposerCallback> callback_;
+};
+
+VrComposerView* GetVrComposerViewFromIVrComposerView(IVrComposerView* view);
+
+IVrComposerView* HIDL_FETCH_IVrComposerView(const char* name);
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_COMPOSER_IMPL_VR_COMPOSER_VIEW_H_
diff --git a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
index d64a99a..53c7d8e 100644
--- a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
+++ b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
@@ -13,16 +13,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "composer/impl/vr_hwc.h"
+#include "vr_hwc.h"
+#include <gralloc_priv.h>
#include <ui/Fence.h>
#include <ui/GraphicBuffer.h>
#include <ui/GraphicBufferMapper.h>
#include <mutex>
-#include "composer/impl/sync_timeline.h"
-#include "composer/impl/vr_composer_client.h"
+#include "sync_timeline.h"
+#include "vr_composer_client.h"
using namespace android::hardware::graphics::common::V1_0;
using namespace android::hardware::graphics::composer::V2_1;
@@ -42,6 +43,21 @@
const Display kDefaultDisplayId = 1;
const Config kDefaultConfigId = 1;
+sp<GraphicBuffer> GetBufferFromHandle(const native_handle_t* handle) {
+ // TODO(dnicoara): Fix this once gralloc1 is available.
+ private_handle_t* private_handle = private_handle_t::dynamicCast(handle);
+ sp<GraphicBuffer> buffer = new GraphicBuffer(
+ private_handle->width, private_handle->height, private_handle->format, 1,
+ GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_TEXTURE,
+ private_handle->width, native_handle_clone(handle), true);
+ if (GraphicBufferMapper::get().registerBuffer(buffer.get()) != OK) {
+ ALOGE("Failed to register buffer");
+ return nullptr;
+ }
+
+ return buffer;
+}
+
} // namespace
HwcDisplay::HwcDisplay() {}
@@ -52,17 +68,8 @@
bool HwcDisplay::SetClientTarget(const native_handle_t* handle,
base::unique_fd fence) {
- // OK, so this is where we cheat a lot because we don't have direct access to
- // buffer information. Everything is hardcoded, but once gralloc1 is available
- // we should use it to read buffer properties from the handle.
- buffer_ = new GraphicBuffer(
- 1080, 1920, PIXEL_FORMAT_RGBA_8888, 1,
- GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_TEXTURE, 1088,
- native_handle_clone(handle), true);
- if (GraphicBufferMapper::get().registerBuffer(buffer_.get()) != OK) {
- ALOGE("Failed to set client target");
- return false;
- }
+ if (handle)
+ buffer_ = GetBufferFromHandle(handle);
fence_ = new Fence(fence.release());
return true;
@@ -95,9 +102,43 @@
void HwcDisplay::GetChangedCompositionTypes(
std::vector<Layer>* layer_ids,
std::vector<IComposerClient::Composition>* types) {
- for (const auto& layer : layers_) {
- layer_ids->push_back(layer.id);
- types->push_back(IComposerClient::Composition::CLIENT);
+ std::sort(layers_.begin(), layers_.end(),
+ [](const auto& lhs, const auto& rhs) {
+ return lhs.z_order < rhs.z_order;
+ });
+
+ int first_client_layer = -1, last_client_layer = -1;
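+ // Find the span of layers that can't be composed by this device. Everything
+ // between the first and last such layer is forced to CLIENT composition so
+ // SurfaceFlinger composites them into the client target in z-order; the
+ // remaining layers are requested as DEVICE (overlay) layers.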
+ for (size_t i = 0; i < layers_.size(); ++i) {
+ switch (layers_[i].composition_type) {
+ case IComposerClient::Composition::SOLID_COLOR:
+ case IComposerClient::Composition::CURSOR:
+ case IComposerClient::Composition::SIDEBAND:
+ if (first_client_layer < 0)
+ first_client_layer = i;
+
+ last_client_layer = i;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (size_t i = 0; i < layers_.size(); ++i) {
+ if (i >= first_client_layer && i <= last_client_layer) {
+ if (layers_[i].composition_type != IComposerClient::Composition::CLIENT) {
+ layer_ids->push_back(layers_[i].id);
+ types->push_back(IComposerClient::Composition::CLIENT);
+ layers_[i].composition_type = IComposerClient::Composition::CLIENT;
+ }
+
+ continue;
+ }
+
+ if (layers_[i].composition_type != IComposerClient::Composition::DEVICE) {
+ layer_ids->push_back(layers_[i].id);
+ types->push_back(IComposerClient::Composition::DEVICE);
+ layers_[i].composition_type = IComposerClient::Composition::DEVICE;
+ }
}
}
@@ -107,26 +148,48 @@
// the current frame.
fence_time_++;
- // TODO(dnicoara): Send the actual layers when we process layers as overlays.
- ComposerView::ComposerLayer layer = {
- .buffer = buffer_,
- .fence = fence_.get() ? fence_ : new Fence(-1),
- .display_frame = {0, 0, 1080, 1920},
- .crop = {0.0f, 0.0f, 1080.0f, 1920.0f},
- .blend_mode = IComposerClient::BlendMode::NONE,
- };
- return std::vector<ComposerView::ComposerLayer>(1, layer);
+ bool queued_client_target = false;
+ std::vector<ComposerView::ComposerLayer> frame;
+ for (const auto& layer : layers_) {
+ if (layer.composition_type == IComposerClient::Composition::CLIENT) {
+ if (!queued_client_target) {
+ ComposerView::ComposerLayer client_target_layer = {
+ .buffer = buffer_,
+ .fence = fence_.get() ? fence_ : new Fence(-1),
+ .display_frame = {0, 0, static_cast<int32_t>(buffer_->getWidth()),
+ static_cast<int32_t>(buffer_->getHeight())},
+ .crop = {0.0f, 0.0f, static_cast<float>(buffer_->getWidth()),
+ static_cast<float>(buffer_->getHeight())},
+ .blend_mode = IComposerClient::BlendMode::NONE,
+ };
+
+ frame.push_back(client_target_layer);
+ queued_client_target = true;
+ }
+ } else {
+ frame.push_back(layer.info);
+ }
+ }
+
+ return frame;
}
-void HwcDisplay::GetReleaseFences(std::vector<Layer>* layer_ids,
+void HwcDisplay::GetReleaseFences(int* present_fence,
+ std::vector<Layer>* layer_ids,
std::vector<int>* fences) {
+ *present_fence = hwc_timeline_.CreateFence(fence_time_);
for (const auto& layer : layers_) {
layer_ids->push_back(layer.id);
fences->push_back(hwc_timeline_.CreateFence(fence_time_));
}
}
-void HwcDisplay::ReleaseFrame() { hwc_timeline_.IncrementTimeline(); }
+void HwcDisplay::ReleaseFrame() {
+ hwc_timeline_.IncrementTimeline();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VrHwcClient
VrHwc::VrHwc() {}
@@ -212,10 +275,10 @@
switch (attribute) {
case IComposerClient::Attribute::WIDTH:
- *outValue = 1080;
+ *outValue = 1920;
break;
case IComposerClient::Attribute::HEIGHT:
- *outValue = 1920;
+ *outValue = 1080;
break;
case IComposerClient::Attribute::VSYNC_PERIOD:
*outValue = 1000 * 1000 * 1000 / 30; // 30fps
@@ -357,14 +420,18 @@
return Error::BAD_DISPLAY;
}
- std::lock_guard<std::mutex> guard(mutex_);
+ std::vector<ComposerView::ComposerLayer> frame;
+ {
+ std::lock_guard<std::mutex> guard(mutex_);
+ frame = display_.GetFrame();
+ display_.GetReleaseFences(outPresentFence, outLayers, outReleaseFences);
+ }
if (observer_)
- observer_->OnNewFrame(display_.GetFrame());
+ observer_->OnNewFrame(frame);
else
- display_.ReleaseFrame();
+ ReleaseFrame();
- display_.GetReleaseFences(outLayers, outReleaseFences);
return Error::NONE;
}
@@ -380,6 +447,12 @@
base::unique_fd fence(acquireFence);
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.buffer = GetBufferFromHandle(buffer);
+ hwc_layer->info.fence = new Fence(fence.release());
+
return Error::NONE;
}
@@ -393,6 +466,12 @@
Error VrHwc::setLayerBlendMode(Display display, Layer layer, int32_t mode) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.blend_mode =
+ static_cast<ComposerView::ComposerLayer::BlendMode>(mode);
+
return Error::NONE;
}
@@ -407,6 +486,11 @@
int32_t type) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->composition_type = static_cast<HwcLayer::Composition>(type);
+
return Error::NONE;
}
@@ -421,12 +505,23 @@
const hwc_rect_t& frame) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.display_frame =
+ {frame.left, frame.top, frame.right, frame.bottom};
+
return Error::NONE;
}
Error VrHwc::setLayerPlaneAlpha(Display display, Layer layer, float alpha) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.alpha = alpha;
+
return Error::NONE;
}
@@ -441,6 +536,11 @@
const hwc_frect_t& crop) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.crop = {crop.left, crop.top, crop.right, crop.bottom};
+
return Error::NONE;
}
@@ -461,6 +561,11 @@
Error VrHwc::setLayerZOrder(Display display, Layer layer, uint32_t z) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->z_order = z;
+
return Error::NONE;
}
@@ -468,6 +573,12 @@
uint32_t appId) {
if (display != kDefaultDisplayId) return Error::BAD_DISPLAY;
+ HwcLayer* hwc_layer = display_.GetLayer(layer);
+ if (!hwc_layer) return Error::BAD_LAYER;
+
+ hwc_layer->info.type = type;
+ hwc_layer->info.app_id = appId;
+
return Error::NONE;
}
diff --git a/services/vr/vr_window_manager/composer/impl/vr_hwc.h b/services/vr/vr_window_manager/composer/impl/vr_hwc.h
index fbbf028..1de056a 100644
--- a/services/vr/vr_window_manager/composer/impl/vr_hwc.h
+++ b/services/vr/vr_window_manager/composer/impl/vr_hwc.h
@@ -23,7 +23,7 @@
#include <mutex>
-#include "composer/impl/sync_timeline.h"
+#include "sync_timeline.h"
using namespace android::hardware::graphics::common::V1_0;
using namespace android::hardware::graphics::composer::V2_1;
@@ -58,9 +58,12 @@
// it going.
sp<GraphicBuffer> buffer;
sp<Fence> fence;
- Recti display_frame;
+ Recti display_frame;
Rectf crop;
BlendMode blend_mode;
+ float alpha;
+ uint32_t type;
+ uint32_t app_id;
};
using Frame = std::vector<ComposerLayer>;
@@ -84,9 +87,15 @@
};
struct HwcLayer {
+ using Composition =
+ hardware::graphics::composer::V2_1::IComposerClient::Composition;
+
HwcLayer(Layer new_id) : id(new_id) {}
Layer id;
+ Composition composition_type;
+ uint32_t z_order;
+ ComposerView::ComposerLayer info;
};
class HwcDisplay {
@@ -108,8 +117,8 @@
std::vector<ComposerView::ComposerLayer> GetFrame();
- void GetReleaseFences(
- std::vector<Layer>* layer_ids, std::vector<int>* fences);
+ void GetReleaseFences(int* present_fence, std::vector<Layer>* layer_ids,
+ std::vector<int>* fences);
void ReleaseFrame();
diff --git a/services/vr/vr_window_manager/composer_view/Android.bp b/services/vr/vr_window_manager/composer_view/Android.bp
new file mode 100644
index 0000000..7e25c85
--- /dev/null
+++ b/services/vr/vr_window_manager/composer_view/Android.bp
@@ -0,0 +1,28 @@
+cc_binary {
+ name: "vr_composer_view",
+
+ srcs: ["vr_composer_view.cpp"],
+
+ static_libs: [
+ "libhwcomposer-client",
+ ],
+
+ shared_libs: [
+ "android.dvr.composer@1.0",
+ "android.hardware.graphics.composer@2.1",
+ "libbase",
+ "libbinder",
+ "libhardware",
+ "libhwbinder",
+ "libutils",
+ "libvrhwc",
+ ],
+
+ cflags: [
+ "-DLOG_TAG=\"vr_composer_view\"",
+ ],
+
+ init_rc: [
+ "vr_composer_view.rc",
+ ],
+}
diff --git a/services/vr/vr_window_manager/composer_view/vr_composer_view.cpp b/services/vr/vr_window_manager/composer_view/vr_composer_view.cpp
new file mode 100644
index 0000000..b5030e3
--- /dev/null
+++ b/services/vr/vr_window_manager/composer_view/vr_composer_view.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <binder/ProcessState.h>
+#include <hwbinder/IPCThreadState.h>
+#include <impl/vr_composer_view.h>
+#include <impl/vr_hwc.h>
+
+using namespace android;
+using namespace android::dvr;
+
+int main(int, char**) {
+ android::ProcessState::self()->startThreadPool();
+
+ const char instance[] = "vr_hwcomposer";
+ sp<IComposer> service = HIDL_FETCH_IComposer(instance);
+ LOG_FATAL_IF(!service, "Failed to get service");
+ LOG_FATAL_IF(service->isRemote(), "Service is remote");
+
+ service->registerAsService(instance);
+
+ sp<IVrComposerView> composer_view = HIDL_FETCH_IVrComposerView(
+ "DaydreamDisplay");
+ LOG_FATAL_IF(!composer_view, "Failed to get vr_composer_view service");
+ LOG_FATAL_IF(composer_view->isRemote(), "vr_composer_view service is remote");
+
+ composer_view->registerAsService("DaydreamDisplay");
+
+ GetVrComposerViewFromIVrComposerView(composer_view.get())->Initialize(
+ GetComposerViewFromIComposer(service.get()));
+
+ android::hardware::ProcessState::self()->startThreadPool();
+ android::hardware::IPCThreadState::self()->joinThreadPool();
+
+ return 0;
+}
diff --git a/services/vr/vr_window_manager/composer_view/vr_composer_view.rc b/services/vr/vr_window_manager/composer_view/vr_composer_view.rc
new file mode 100644
index 0000000..abb5265
--- /dev/null
+++ b/services/vr/vr_window_manager/composer_view/vr_composer_view.rc
@@ -0,0 +1,5 @@
+service vr_composer_view /system/bin/vr_composer_view
+ class core
+ user system
+ group system graphics
+ cpuset /system
diff --git a/services/vr/vr_window_manager/controller_mesh.cpp b/services/vr/vr_window_manager/controller_mesh.cpp
new file mode 100644
index 0000000..c6095b1
--- /dev/null
+++ b/services/vr/vr_window_manager/controller_mesh.cpp
@@ -0,0 +1,75 @@
+#include "controller_mesh.h"
+
+namespace android {
+namespace dvr {
+
+const int kNumControllerMeshVertices = 60;
+
+// Vertices in position.xyz, normal.xyz, uv.xy order.
+// Generated from an .obj mesh.
+const float kControllerMeshVertices[] = {
+ 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 0, 0,
+ 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
+ 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
+ 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 0, 0,
+ 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
+ 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 0, 1,
+ 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
+ -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
+ -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
+ 0.000773, 0.002378, -0.5, 0.309004, 0.951061, 0, 0.1, 0,
+ -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
+ 0.000773, 0.002378, 0, 0.309004, 0.951061, 0, 0.1, 1,
+ -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
+ -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
+ -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
+ -0.000773, 0.002378, -0.5, -0.309004, 0.951061, 0, 0.2, 0,
+ -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
+ -0.000773, 0.002378, 0, -0.309004, 0.951061, 0, 0.2, 1,
+ -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
+ -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
+ -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
+ -0.002023, 0.001469, -0.5, -0.809016, 0.587787, 0, 0.3, 0,
+ -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
+ -0.002023, 0.001469, 0, -0.809016, 0.587787, 0, 0.3, 1,
+ -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
+ -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
+ -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
+ -0.0025, 0, -0.5, -1, -0, 0, 0.4, 0,
+ -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
+ -0.0025, 0, 0, -1, -0, 0, 0.4, 1,
+ -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
+ -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
+ -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
+ -0.002023, -0.001469, -0.5, -0.809016, -0.587787, 0, 0.5, 0,
+ -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
+ -0.002023, -0.001469, 0, -0.809016, -0.587787, 0, 0.5, 1,
+ -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
+ 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
+ 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
+ -0.000773, -0.002378, -0.5, -0.309004, -0.951061, 0, 0.6, 0,
+ 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
+ -0.000773, -0.002378, 0, -0.309004, -0.951061, 0, 0.6, 1,
+ 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
+ 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
+ 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
+ 0.000773, -0.002378, -0.5, 0.309004, -0.951061, 0, 0.7, 0,
+ 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
+ 0.000773, -0.002378, 0, 0.309004, -0.951061, 0, 0.7, 1,
+ 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
+ 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
+ 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
+ 0.002023, -0.001469, -0.5, 0.809016, -0.587787, 0, 0.8, 0,
+ 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
+ 0.002023, -0.001469, 0, 0.809016, -0.587787, 0, 0.8, 1,
+ 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
+ 0.002023, 0.001469, -0.5, 0.809016, 0.587787, 0, 1, 0,
+ 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 1, 1,
+ 0.0025, 0, -0.5, 1, 0, 0, 0.9, 0,
+ 0.002023, 0.001469, 0, 0.809016, 0.587787, 0, 1, 1,
+ 0.0025, 0, 0, 1, 0, 0, 0.9, 1,
+
+};
+
+} // namespace dvr
+} // namespace android
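To illustrate the interleaved layout above, here is a sketch of binding the
mesh as vertex attributes (not part of the patch; the attribute locations
0/1/2 for position/normal/uv are assumptions about the shader actually used):

#include <GLES3/gl3.h>

#include "controller_mesh.h"

// Upload the controller mesh and describe its 8-float-per-vertex layout:
// position.xyz, normal.xyz, uv.xy.
void BindControllerMesh(GLuint vertex_buffer) {
  constexpr GLsizei kStride = 8 * sizeof(float);
  glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
  glBufferData(GL_ARRAY_BUFFER,
               android::dvr::kNumControllerMeshVertices * kStride,
               android::dvr::kControllerMeshVertices, GL_STATIC_DRAW);
  for (int location = 0; location < 3; ++location)
    glEnableVertexAttribArray(location);
  glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, kStride, nullptr);
  glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, kStride,
                        reinterpret_cast<const void*>(3 * sizeof(float)));
  glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, kStride,
                        reinterpret_cast<const void*>(6 * sizeof(float)));
}

// Drawing then uses all 60 vertices as triangles:
//   glDrawArrays(GL_TRIANGLES, 0, android::dvr::kNumControllerMeshVertices);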
diff --git a/services/vr/vr_window_manager/controller_mesh.h b/services/vr/vr_window_manager/controller_mesh.h
new file mode 100644
index 0000000..88872c7
--- /dev/null
+++ b/services/vr/vr_window_manager/controller_mesh.h
@@ -0,0 +1,13 @@
+#ifndef VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
+#define VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
+
+namespace android {
+namespace dvr {
+
+extern const int kNumControllerMeshVertices;
+extern const float kControllerMeshVertices[];
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_CONTROLLER_MESH_H_
diff --git a/services/vr/vr_window_manager/elbow_model.cpp b/services/vr/vr_window_manager/elbow_model.cpp
new file mode 100644
index 0000000..54d1eb4
--- /dev/null
+++ b/services/vr/vr_window_manager/elbow_model.cpp
@@ -0,0 +1,134 @@
+#include "elbow_model.h"
+
+#include <cutils/log.h>
+
+namespace android {
+namespace dvr {
+namespace {
+
+const vec3 kControllerForearm(0.0f, 0.0f, -0.25f);
+const vec3 kControllerPosition(0.0f, 0.0f, -0.05f);
+const vec3 kLeftElbowPosition(-0.195f, -0.5f, 0.075f);
+const vec3 kLeftArmExtension(0.13f, 0.14f, -0.08f);
+const vec3 kRightElbowPosition(0.195f, -0.5f, 0.075f);
+const vec3 kRightArmExtension(-0.13f, 0.14f, -0.08f);
+constexpr float kElbowBendRatio = 0.4f;
+constexpr float kCosMaxExtensionAngle =
+ 0.87f; // Cos of 30 degrees (90-30 = 60)
+constexpr float kCosMinExtensionAngle = 0.12f; // Cos of 83 degrees (90-83 = 7)
+constexpr float kYAxisExtensionFraction = 0.4f;
+constexpr float kMinRotationSpeed = 0.61f; // 35 degrees in radians
+constexpr float kMinAngleDelta = 0.175f; // 10 degrees in radians
+
+float clamp(float v, float min, float max) {
+ if (v < min)
+ return min;
+ if (v > max)
+ return max;
+ return v;
+}
+
+float NormalizeAngle(float angle) {
+ if (angle > M_PI)
+ angle = 2.0f * M_PI - angle;
+ return angle;
+}
+
+} // namespace
+
+const vec3 ElbowModel::kDefaultNeckPosition = vec3(0, -0.075f, -0.080f);
+
+ElbowModel::ElbowModel() {}
+ElbowModel::~ElbowModel() {}
+
+void ElbowModel::Enable(const vec3& neck_position, bool right_handed) {
+ enabled_ = true;
+ neck_position_ = neck_position;
+
+ if (right_handed) {
+ elbow_position_ = kRightElbowPosition;
+ arm_extension_ = kRightArmExtension;
+ } else {
+ elbow_position_ = kLeftElbowPosition;
+ arm_extension_ = kLeftArmExtension;
+ }
+
+ ResetRoot();
+}
+
+void ElbowModel::Disable() { enabled_ = false; }
+
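+ // Estimates the controller position from the HMD and controller orientations
+ // using a simple arm model: a virtual elbow anchored relative to the neck,
+ // with forearm and wrist rotations derived from the controller orientation.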
+vec3 ElbowModel::Update(float delta_t, const quat& hmd_orientation,
+ const quat& controller_orientation, bool recenter) {
+ if (!enabled_)
+ return vec3::Zero();
+
+ float heading_rad = GetHeading(hmd_orientation);
+
+ quat y_rotation;
+ y_rotation = Eigen::AngleAxis<float>(heading_rad, vec3::UnitY());
+
+ // If the controller's angular velocity is above a certain amount, we can
+ // assume torso rotation and move the elbow joint relative to the
+ // camera orientation.
+ float angle_delta = last_controller_.angularDistance(controller_orientation);
+ float rot_speed = angle_delta / delta_t;
+
+ if (recenter) {
+ root_rot_ = y_rotation;
+ } else if (rot_speed > kMinRotationSpeed) {
+ // Eigen's slerp() is const and returns a new quaternion, so assign the
+ // result back instead of discarding it.
+ root_rot_ = root_rot_.slerp(angle_delta / kMinAngleDelta, y_rotation);
+ }
+
+ // Calculate angle (or really, cos thereof) between controller forward vector
+ // and Y axis to determine extension amount.
+ vec3 controller_forward_rotated = controller_orientation * -vec3::UnitZ();
+ float dot_y = controller_forward_rotated.y();
+ float amt_extension = clamp(dot_y - kCosMinExtensionAngle, 0, 1);
+
+ // Remove the root rotation from the orientation reading--we'll add it back in
+ // later.
+ quat controller_rot = root_rot_.inverse() * controller_orientation;
+ controller_forward_rotated = controller_rot * -vec3::UnitZ();
+ quat rot_xy;
+ rot_xy.setFromTwoVectors(-vec3::UnitZ(), controller_forward_rotated);
+
+ // Handle the polar singularity: as the total rotation approaches 180
+ // degrees, attribute less of it to the wrist.
+ float total_angle = NormalizeAngle(atan2f(rot_xy.norm(), rot_xy.w()) * 2.0f);
+ float lerp_amount = (1.0f - powf(total_angle / M_PI, 6.0f)) *
+ (1.0f - (kElbowBendRatio +
+ (1.0f - kElbowBendRatio) *
+ (amt_extension + kYAxisExtensionFraction)));
+
+ // Calculate the relative rotations of the elbow and wrist joints.
+ // slerp() returns the interpolated quaternion rather than modifying the
+ // receiver in place, so use the returned value.
+ quat wrist_rot = quat::Identity().slerp(lerp_amount, rot_xy);
+ quat elbow_rot = wrist_rot.inverse() * rot_xy;
+
+ last_controller_ = controller_orientation;
+
+ vec3 position =
+ root_rot_ *
+ ((controller_root_offset_ + arm_extension_ * amt_extension) +
+ elbow_rot * (kControllerForearm + wrist_rot * kControllerPosition));
+
+ return position;
+}
+
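+ // Returns the yaw of the given orientation. When the gaze is nearly vertical
+ // the forward vector degenerates, so an alternate reference vector is used.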
+float ElbowModel::GetHeading(const quat& orientation) {
+ vec3 gaze = orientation * -vec3::UnitZ();
+
+ if (gaze.y() > 0.99)
+ gaze = orientation * -vec3::UnitY();
+ else if (gaze.y() < -0.99)
+ gaze = orientation * vec3::UnitY();
+
+ return atan2f(-gaze.x(), -gaze.z());
+}
+
+void ElbowModel::ResetRoot() {
+ controller_root_offset_ = elbow_position_ + neck_position_;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/elbow_model.h b/services/vr/vr_window_manager/elbow_model.h
new file mode 100644
index 0000000..a6d5ca9
--- /dev/null
+++ b/services/vr/vr_window_manager/elbow_model.h
@@ -0,0 +1,45 @@
+#ifndef VR_WINDOW_MANAGER_ELBOW_MODEL_H_
+#define VR_WINDOW_MANAGER_ELBOW_MODEL_H_
+
+#include <private/dvr/types.h>
+
+namespace android {
+namespace dvr {
+
+class ElbowModel {
+ public:
+ ElbowModel();
+ ~ElbowModel();
+
+ void Enable(const vec3& neck_position, bool right_handed);
+ void Disable();
+
+ vec3 Update(float delta_t, const quat& hmd_orientation,
+ const quat& controller_orientation, bool recenter);
+
+ static const vec3 kDefaultNeckPosition;
+
+ private:
+ ElbowModel(const ElbowModel&) = delete;
+ void operator=(const ElbowModel&) = delete;
+
+ void ResetRoot();
+
+ float GetHeading(const quat& orientation);
+
+ bool enabled_ = false;
+
+ quat last_controller_ = quat::Identity();
+
+ quat root_rot_ = quat::Identity();
+
+ vec3 controller_root_offset_ = vec3::Zero();
+ vec3 elbow_position_ = vec3::Zero();
+ vec3 arm_extension_ = vec3::Zero();
+ vec3 neck_position_ = vec3::Zero();
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_ELBOW_MODEL_H_
diff --git a/services/vr/vr_window_manager/hwc_callback.cpp b/services/vr/vr_window_manager/hwc_callback.cpp
new file mode 100644
index 0000000..b2edc20
--- /dev/null
+++ b/services/vr/vr_window_manager/hwc_callback.cpp
@@ -0,0 +1,98 @@
+#include "hwc_callback.h"
+
+#include <gralloc_priv.h>
+#include <android-base/unique_fd.h>
+#include <log/log.h>
+#include <private/dvr/native_buffer.h>
+#include <sync/sync.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+sp<GraphicBuffer> GetBufferFromHandle(const native_handle_t* handle) {
+ // TODO(dnicoara): Fix this once gralloc1 is available.
+ private_handle_t* private_handle = private_handle_t::dynamicCast(handle);
+ sp<GraphicBuffer> buffer = new GraphicBuffer(
+ private_handle->width, private_handle->height, private_handle->format, 1,
+ GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_HW_2D | GraphicBuffer::USAGE_HW_RENDER,
+ private_handle->width, native_handle_clone(handle), true);
+ if (GraphicBufferMapper::get().registerBuffer(buffer.get()) != OK) {
+ ALOGE("Failed to register buffer");
+ return nullptr;
+ }
+
+ return buffer;
+}
+
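+// Polls every layer's acquire fence: returns kFinished once all fences have
+// signaled, kUnfinished if any is still pending, and kError if a wait fails.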
+HwcCallback::FrameStatus GetFrameStatus(const HwcCallback::Frame& frame) {
+ for (const auto& layer : frame.layers()) {
+ // If there is no fence it means the buffer is already finished.
+ if (layer.fence->isValid()) {
+ status_t result = layer.fence->wait(0);
+ if (result != OK) {
+ if (result != -ETIME) {
+ ALOGE("fence wait on buffer fence failed. status=%d (%s).",
+ result, strerror(-result));
+ return HwcCallback::FrameStatus::kError;
+ }
+ return HwcCallback::FrameStatus::kUnfinished;
+ }
+ }
+ }
+
+ return HwcCallback::FrameStatus::kFinished;
+}
+
+} // namespace
+
+HwcCallback::HwcCallback(IVrComposerView* composer_view, Client* client)
+ : composer_view_(composer_view),
+ client_(client) {
+ composer_view_->registerCallback(this);
+}
+
+HwcCallback::~HwcCallback() {
+ composer_view_->registerCallback(nullptr);
+}
+
+Return<void> HwcCallback::onNewFrame(
+ const hidl_vec<IVrComposerCallback::Layer>& frame) {
+
+ std::vector<HwcLayer> hwc_frame(frame.size());
+ for (size_t i = 0; i < frame.size(); ++i) {
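+ // The fence fd belongs to the HIDL native handle, so duplicate it before
+ // handing ownership to the Fence object.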
+ int fence = frame[i].fence.getNativeHandle()->numFds ?
+ dup(frame[i].fence.getNativeHandle()->data[0]) : -1;
+
+ hwc_frame[i] = HwcLayer{
+ .fence = new Fence(fence),
+ .buffer = GetBufferFromHandle(frame[i].buffer.getNativeHandle()),
+ .crop = frame[i].crop,
+ .display_frame = frame[i].display_frame,
+ .blending = static_cast<int32_t>(frame[i].blend_mode),
+ .appid = frame[i].app_id,
+ .type = static_cast<HwcLayer::LayerType>(frame[i].type),
+ .alpha = frame[i].alpha,
+ };
+ }
+
+ std::lock_guard<std::mutex> guard(mutex_);
+ if (client_)
+ client_->OnFrame(std::make_unique<Frame>(std::move(hwc_frame)));
+
+ return Void();
+}
+
+HwcCallback::Frame::Frame(std::vector<HwcLayer>&& layers)
+ : layers_(std::move(layers)) {}
+
+HwcCallback::FrameStatus HwcCallback::Frame::Finish() {
+ if (status_ == FrameStatus::kUnfinished)
+ status_ = GetFrameStatus(*this);
+ return status_;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/hwc_callback.h b/services/vr/vr_window_manager/hwc_callback.h
new file mode 100644
index 0000000..05a889b
--- /dev/null
+++ b/services/vr/vr_window_manager/hwc_callback.h
@@ -0,0 +1,110 @@
+#ifndef VR_WINDOW_MANAGER_HWC_CALLBACK_H_
+#define VR_WINDOW_MANAGER_HWC_CALLBACK_H_
+
+#include <deque>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include <android/dvr/composer/1.0/IVrComposerCallback.h>
+#include <android/dvr/composer/1.0/IVrComposerView.h>
+#include <impl/vr_hwc.h>
+
+namespace android {
+
+class Fence;
+class GraphicBuffer;
+
+namespace dvr {
+
+using Recti = ComposerView::ComposerLayer::Recti;
+using Rectf = ComposerView::ComposerLayer::Rectf;
+
+using composer::V1_0::IVrComposerCallback;
+using composer::V1_0::IVrComposerView;
+
+class HwcCallback : public IVrComposerCallback {
+ public:
+ struct HwcLayer {
+ enum LayerType : uint32_t {
+ // These are from frameworks/base/core/java/android/view/WindowManager.java
+ kUndefinedWindow = 0,
+ kFirstApplicationWindow = 1,
+ kLastApplicationWindow = 99,
+ kFirstSubWindow = 1000,
+ kLastSubWindow = 1999,
+ kFirstSystemWindow = 2000,
+ kStatusBar = kFirstSystemWindow,
+ kInputMethod = kFirstSystemWindow + 11,
+ kNavigationBar = kFirstSystemWindow + 19,
+ kLastSystemWindow = 2999,
+ };
+
+ bool should_skip_layer() const {
+ switch (type) {
+ // Always skip the following layer types
+ case kNavigationBar:
+ case kStatusBar:
+ case kUndefinedWindow:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ sp<Fence> fence;
+ sp<GraphicBuffer> buffer;
+ Rectf crop;
+ Recti display_frame;
+ int32_t blending;
+ uint32_t appid;
+ LayerType type;
+ float alpha;
+ };
+
+ enum class FrameStatus {
+ kUnfinished,
+ kFinished,
+ kError
+ };
+
+ class Frame {
+ public:
+ Frame(std::vector<HwcLayer>&& layers);
+
+ FrameStatus Finish();
+ const std::vector<HwcLayer>& layers() const { return layers_; }
+
+ private:
+ std::vector<HwcLayer> layers_;
+ FrameStatus status_ = FrameStatus::kUnfinished;
+ };
+
+ class Client {
+ public:
+ virtual ~Client() {}
+ virtual void OnFrame(std::unique_ptr<Frame>) = 0;
+ };
+
+ explicit HwcCallback(IVrComposerView* composer_view, Client* client);
+ ~HwcCallback() override;
+
+ private:
+ // This is the only method called on the binder thread. Everything else is
+ // called on the render thread.
+ Return<void> onNewFrame(const hidl_vec<IVrComposerCallback::Layer>& frame)
+ override;
+
+ IVrComposerView* composer_view_;
+ Client* client_;
+ std::mutex mutex_;
+
+ HwcCallback(const HwcCallback&) = delete;
+ void operator=(const HwcCallback&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_HWC_CALLBACK_H_
diff --git a/services/vr/vr_window_manager/java/com/google/vr/windowmanager/BootCompletedReceiver.java b/services/vr/vr_window_manager/java/com/google/vr/windowmanager/BootCompletedReceiver.java
new file mode 100644
index 0000000..01d1bdb
--- /dev/null
+++ b/services/vr/vr_window_manager/java/com/google/vr/windowmanager/BootCompletedReceiver.java
@@ -0,0 +1,17 @@
+package com.google.vr.windowmanager;
+
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.util.Log;
+
+public class BootCompletedReceiver extends BroadcastReceiver {
+ private static final String TAG = BootCompletedReceiver.class.getSimpleName();
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ Log.i(TAG, "Starting VRWindowManager");
+ Intent vrWindowManagerIntent = new Intent(context, VrWindowManagerService.class);
+ context.startService(vrWindowManagerIntent);
+ }
+}
diff --git a/services/vr/vr_window_manager/java/com/google/vr/windowmanager/VrWindowManagerService.java b/services/vr/vr_window_manager/java/com/google/vr/windowmanager/VrWindowManagerService.java
new file mode 100644
index 0000000..1d815ca
--- /dev/null
+++ b/services/vr/vr_window_manager/java/com/google/vr/windowmanager/VrWindowManagerService.java
@@ -0,0 +1,87 @@
+package com.google.vr.windowmanager;
+
+import android.app.Service;
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.os.Handler;
+import android.os.IBinder;
+import android.util.Log;
+
+public class VrWindowManagerService extends Service {
+ private static final String TAG = VrWindowManagerService.class.getSimpleName();
+ private long nativeVrWindowManager;
+
+ // This is a temporary debugging tool for development only.
+ // It allows us to show VrWindowManager in debug mode via command line.
+ private final BroadcastReceiver debugReceiver = new BroadcastReceiver() {
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ String action = intent.getAction();
+ if (action.equals("com.google.vr.windowmanager.intent.SHOW")) {
+ nativeEnableDebug(nativeVrWindowManager);
+ } else if (action.equals("com.google.vr.windowmanager.intent.HIDE")) {
+ nativeDisableDebug(nativeVrWindowManager);
+ } else if (action.equals("com.google.vr.windowmanager.intent.ENTER_VR")) {
+ nativeEnterVrMode(nativeVrWindowManager);
+ } else if (action.equals("com.google.vr.windowmanager.intent.EXIT_VR")) {
+ nativeExitVrMode(nativeVrWindowManager);
+ }
+ }
+ };
+
+ static {
+ System.loadLibrary("vr_window_manager_jni");
+ }
+
+ @Override
+ public void onCreate() {
+ super.onCreate();
+ destroyRenderer();
+ nativeVrWindowManager = nativeCreate(getClass().getClassLoader(), getApplicationContext());
+ if (nativeVrWindowManager == 0) {
+ Log.e(TAG, "Failed to create native renderer");
+ }
+
+ // For development, testing and debugging.
+ IntentFilter filter = new IntentFilter();
+ filter.addAction("com.google.vr.windowmanager.intent.SHOW");
+ filter.addAction("com.google.vr.windowmanager.intent.HIDE");
+ filter.addAction("com.google.vr.windowmanager.intent.ENTER_VR");
+ filter.addAction("com.google.vr.windowmanager.intent.EXIT_VR");
+ registerReceiver(debugReceiver, filter);
+ }
+
+ @Override
+ public int onStartCommand(Intent intent, int flags, int startId) {
+ return START_STICKY;
+ }
+
+ @Override
+ public IBinder onBind(Intent intent) {
+ Log.i(TAG, "Ignoring bind request");
+ return null;
+ }
+
+ @Override
+ public void onDestroy() {
+ super.onDestroy();
+ unregisterReceiver(debugReceiver);
+ destroyRenderer();
+ }
+
+ private void destroyRenderer() {
+ if (nativeVrWindowManager != 0) {
+ nativeDestroy(nativeVrWindowManager);
+ nativeVrWindowManager = 0;
+ }
+ }
+
+ private native long nativeCreate(ClassLoader appClassLoader, Context context);
+ private native void nativeDestroy(long nativeVrWindowManager);
+ private native void nativeEnableDebug(long nativeVrWindowManager);
+ private native void nativeDisableDebug(long nativeVrWindowManager);
+ private native void nativeEnterVrMode(long nativeVrWindowManager);
+ private native void nativeExitVrMode(long nativeVrWindowManager);
+}
diff --git a/services/vr/vr_window_manager/proguard.flags b/services/vr/vr_window_manager/proguard.flags
new file mode 100644
index 0000000..7683d6e
--- /dev/null
+++ b/services/vr/vr_window_manager/proguard.flags
@@ -0,0 +1,22 @@
+# Don't obfuscate any NDK/SDK code. This makes the debugging of stack traces
+# in release builds easier.
+-keepnames class com.google.vr.ndk.** { *; }
+-keepnames class com.google.vr.sdk.** { *; }
+
+# These are part of the SDK <-> VrCore interfaces for GVR.
+-keepnames class com.google.vr.vrcore.library.api.** { *; }
+
+# These are part of the Java <-> native interfaces for GVR.
+-keep class com.google.vr.** { native <methods>; }
+
+-keep class com.google.vr.cardboard.annotations.UsedByNative
+-keep @com.google.vr.cardboard.annotations.UsedByNative class *
+-keepclassmembers class * {
+ @com.google.vr.cardboard.annotations.UsedByNative *;
+}
+
+-keep class com.google.vr.cardboard.UsedByNative
+-keep @com.google.vr.cardboard.UsedByNative class *
+-keepclassmembers class * {
+ @com.google.vr.cardboard.UsedByNative *;
+}
diff --git a/services/vr/vr_window_manager/render_thread.cpp b/services/vr/vr_window_manager/render_thread.cpp
new file mode 100644
index 0000000..00e3161
--- /dev/null
+++ b/services/vr/vr_window_manager/render_thread.cpp
@@ -0,0 +1,92 @@
+#include <cutils/log.h>
+#include <future>
+#include <jni.h>
+
+#include "render_thread.h"
+#include "shell_view.h"
+
+namespace android {
+namespace dvr {
+
+RenderThread::RenderThread(JNIEnv* env, jobject class_loader,
+ jobject android_context)
+ : jvm_(nullptr),
+ class_loader_global_ref_(0),
+ android_context_global_ref_(0),
+ quit_(false) {
+ env->GetJavaVM(&jvm_);
+
+ // Create global references so we can access these objects on the render
+ // thread
+ class_loader_global_ref_ = env->NewGlobalRef(class_loader);
+ android_context_global_ref_ = env->NewGlobalRef(android_context);
+
+ std::promise<int> render_thread_init_result_promise;
+ thread_ = std::thread([this, &render_thread_init_result_promise] {
+ JNIEnv* render_thread_jni_env = nullptr;
+ jvm_->AttachCurrentThread(&render_thread_jni_env, nullptr);
+ RunRenderLoop(&render_thread_init_result_promise);
+ jvm_->DetachCurrentThread();
+ });
+
+ // Wait to see if the render thread started successfully. If not bail.
+ int render_thread_init_result =
+ render_thread_init_result_promise.get_future().get();
+ LOG_ALWAYS_FATAL_IF(render_thread_init_result != 0,
+ "Failed initializing render thread. result=%d",
+ render_thread_init_result);
+}
+
+RenderThread::~RenderThread() { Quit(); }
+
+void RenderThread::Quit() {
+ if (thread_.joinable()) {
+ quit_ = true;
+ thread_.join();
+ }
+
+ JNIEnv* env = GetJniEnv();
+ if (class_loader_global_ref_ != 0) {
+ env->DeleteGlobalRef(class_loader_global_ref_);
+ class_loader_global_ref_ = 0;
+ }
+ if (android_context_global_ref_ != 0) {
+ env->DeleteGlobalRef(android_context_global_ref_);
+ android_context_global_ref_ = 0;
+ }
+}
+
+void RenderThread::EnableDebug(bool debug) { shell_view_.EnableDebug(debug); }
+
+void RenderThread::VrMode(bool mode) { shell_view_.VrMode(mode); }
+
+JNIEnv* RenderThread::GetJniEnv() {
+ JNIEnv* env;
+ jvm_->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6);
+ return env;
+}
+
+void RenderThread::RunRenderLoop(
+ std::promise<int>* init_result_promise) {
+ // TODO(steventhomas): Create local refs to work around b/33251144. Remove
+ // once that bug is fixed.
+ JNIEnv* env = GetJniEnv();
+ jobject class_loader = env->NewLocalRef(class_loader_global_ref_);
+ jobject android_context = env->NewLocalRef(android_context_global_ref_);
+
+ int init_result = shell_view_.Initialize(env, android_context, class_loader);
+ init_result += shell_view_.AllocateResources();
+ init_result_promise->set_value(init_result);
+ if (init_result == 0) {
+ while (!quit_)
+ shell_view_.DrawFrame();
+ } else {
+ ALOGE("Failed to initialize ShellView");
+ }
+
+ env->DeleteLocalRef(class_loader);
+ env->DeleteLocalRef(android_context);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/render_thread.h b/services/vr/vr_window_manager/render_thread.h
new file mode 100644
index 0000000..e193643
--- /dev/null
+++ b/services/vr/vr_window_manager/render_thread.h
@@ -0,0 +1,47 @@
+#ifndef VR_WINDOW_MANAGER_RENDER_THREAD_H_
+#define VR_WINDOW_MANAGER_RENDER_THREAD_H_
+
+#include <atomic>
+#include <future>
+#include <jni.h>
+#include <thread>
+
+#include "shell_view.h"
+
+namespace android {
+namespace dvr {
+
+class RenderThread {
+ public:
+ RenderThread(JNIEnv* env, jobject class_loader, jobject android_context);
+ ~RenderThread();
+ void Quit();
+ void EnableDebug(bool debug);
+ void VrMode(bool mode);
+
+ RenderThread(const RenderThread&) = delete;
+ void operator=(const RenderThread&) = delete;
+
+ private:
+ // Called by both the main thread and render thread. Will return the correct
+ // JNIEnv for the current thread.
+ JNIEnv* GetJniEnv();
+
+ void RunRenderLoop(std::promise<int>* init_result_promise);
+
+ // Accessed only by the main thread.
+ std::thread thread_;
+
+ // The vars below are accessed by both the main thread and the render thread.
+ JavaVM* jvm_;
+ jobject class_loader_global_ref_;
+ jobject android_context_global_ref_;
+ std::atomic_bool quit_;
+
+ ShellView shell_view_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_RENDER_THREAD_H_
diff --git a/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon.png b/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon.png
new file mode 100644
index 0000000..06f896d
--- /dev/null
+++ b/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon.png
Binary files differ
diff --git a/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon_background.png b/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon_background.png
new file mode 100644
index 0000000..d336da3
--- /dev/null
+++ b/services/vr/vr_window_manager/res/drawable-nodpi/vr_icon_background.png
Binary files differ
diff --git a/services/vr/vr_window_manager/res/values/styles.xml b/services/vr/vr_window_manager/res/values/styles.xml
new file mode 100644
index 0000000..8a1a74b
--- /dev/null
+++ b/services/vr/vr_window_manager/res/values/styles.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+<add-resource type="style" name="AppStyle"></add-resource>
+<style name="AppStyle"
+ parent="@android:style/Theme.Holo.NoActionBar.Fullscreen">
+ <item name="android:windowDisablePreview">true</item>
+</style>
+</resources>
diff --git a/services/vr/vr_window_manager/reticle.cpp b/services/vr/vr_window_manager/reticle.cpp
new file mode 100644
index 0000000..cbd0caf
--- /dev/null
+++ b/services/vr/vr_window_manager/reticle.cpp
@@ -0,0 +1,100 @@
+#include "reticle.h"
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+const std::string kVertexShader = SHADER0([]() {
+ layout(location = 0) in vec4 aPosition;
+ layout(location = 1) in vec4 aTexCoord;
+ uniform mat4 uViewProjection;
+ uniform mat4 uTransform;
+
+ out vec2 vTexCoord;
+ void main() {
+ gl_Position = uViewProjection * uTransform * aPosition;
+ vTexCoord = aTexCoord.xy;
+ }
+});
+
+const std::string kFragmentShader = SHADER0([]() {
+ precision mediump float;
+
+ in vec2 vTexCoord;
+ uniform vec3 uColor;
+
+ out vec4 fragColor;
+ void main() {
+ float alpha = smoothstep(1.0, 0.0, length(vTexCoord));
+ fragColor = vec4(uColor, alpha);
+ }
+});
+
+} // namespace
+
+Reticle::Reticle() {}
+
+Reticle::~Reticle() {}
+
+bool Reticle::Initialize() {
+ program_.Link(kVertexShader, kFragmentShader);
+ if (!program_)
+ return false;
+
+ return true;
+}
+
+void Reticle::ShowAt(const mat4& hit_transform, const vec3& color) {
+ transform_ = hit_transform;
+ shown_ = true;
+
+ GLint color_location =
+ glGetUniformLocation(program_.GetProgram(), "uColor");
+ glProgramUniform3f(program_.GetProgram(), color_location, color.x(),
+ color.y(), color.z());
+}
+
+void Reticle::Draw(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix) {
+ if (!shown_)
+ return;
+
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+ program_.Use();
+
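+ // Draw a small quad at the stored hit transform; the fragment shader fades
+ // alpha with distance from the center to produce a soft dot.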
+ const float kRadius = 0.015f;
+ GLfloat vertices[] = {
+ -kRadius, -kRadius, 0, kRadius, -kRadius, 0,
+ -kRadius, kRadius, 0, kRadius, kRadius, 0,
+ };
+ GLfloat texture_vertices[] = {
+ -1, 1, 1, 1, -1, -1, 1, -1,
+ };
+
+ mat4 mvp = perspective * eye_matrix * head_matrix;
+ GLint view_projection_location =
+ glGetUniformLocation(program_.GetProgram(), "uViewProjection");
+ glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
+
+ GLint transform_location =
+ glGetUniformLocation(program_.GetProgram(), "uTransform");
+ glUniformMatrix4fv(transform_location, 1, 0, transform_.data());
+
+ glEnableVertexAttribArray(0);
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertices);
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texture_vertices);
+
+ glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+
+ glDisable(GL_BLEND);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/reticle.h b/services/vr/vr_window_manager/reticle.h
new file mode 100644
index 0000000..d8522aa
--- /dev/null
+++ b/services/vr/vr_window_manager/reticle.h
@@ -0,0 +1,35 @@
+#ifndef VR_WINDOW_MANAGER_SHELL_RETICLE_H_
+#define VR_WINDOW_MANAGER_SHELL_RETICLE_H_
+
+#include <private/dvr/graphics/shader_program.h>
+#include <private/dvr/types.h>
+
+namespace android {
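+ // Wrap the buffer in an EGLImage and use it as the backing store of a GL
+ // texture so it can be sampled without copying the pixels.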
+namespace dvr {
+
+class Reticle {
+ public:
+ Reticle();
+ ~Reticle();
+
+ bool Initialize();
+
+ void ShowAt(const mat4& hit_transform, const vec3& color);
+ void Hide() { shown_ = false; }
+
+ void Draw(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix);
+
+ private:
+ bool shown_ = false;
+ ShaderProgram program_;
+ mat4 transform_;
+
+ Reticle(const Reticle&) = delete;
+ void operator=(const Reticle&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_SHELL_RETICLE_H_
diff --git a/services/vr/vr_window_manager/shell_view.cpp b/services/vr/vr_window_manager/shell_view.cpp
new file mode 100644
index 0000000..cef111c
--- /dev/null
+++ b/services/vr/vr_window_manager/shell_view.cpp
@@ -0,0 +1,669 @@
+#include "shell_view.h"
+
+#include <binder/IServiceManager.h>
+#include <cutils/log.h>
+#include <EGL/eglext.h>
+#include <GLES3/gl3.h>
+#include <hardware/hwcomposer2.h>
+
+#include "controller_mesh.h"
+#include "texture.h"
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+constexpr unsigned int kVRAppLayerCount = 2;
+
+constexpr unsigned int kMaximumPendingFrames = 8;
+
+const std::string kVertexShader = SHADER0([]() {
+ layout(location = 0) in vec4 aPosition;
+ layout(location = 1) in vec4 aTexCoord;
+ uniform mat4 uViewProjection;
+ uniform mat4 uTransform;
+
+ out vec2 vTexCoord;
+ void main() {
+ gl_Position = uViewProjection * uTransform * aPosition;
+ vTexCoord = aTexCoord.xy;
+ }
+});
+
+const std::string kFragmentShader = SHADER0([]() {
+ precision mediump float;
+
+ in vec2 vTexCoord;
+ uniform sampler2D tex;
+ uniform float uAlpha;
+
+ out vec4 fragColor;
+ void main() {
+ fragColor = texture(tex, vTexCoord);
+ fragColor.a *= uAlpha;
+ }
+});
+
+// This shader provides a dim layer in a given rect. This is intended
+// to indicate the non-interactive region.
+// Texture coordinates between [uCoords.xy, uCoords.zw] are dim, otherwise
+// transparent.
+const std::string kOverlayFragmentShader = SHADER0([]() {
+ precision highp float;
+
+ in vec2 vTexCoord;
+ uniform sampler2D tex;
+ uniform vec4 uCoords;
+
+ out vec4 fragColor;
+ void main() {
+ vec4 color = vec4(0, 0, 0, 0);
+ if (all(greaterThan(vTexCoord, uCoords.xy)) &&
+ all(lessThan(vTexCoord, uCoords.zw))) {
+ color = vec4(0, 0, 0, 0.5);
+ }
+ fragColor = color;
+ }
+});
+
+const std::string kControllerFragmentShader = SHADER0([]() {
+ precision mediump float;
+
+ in vec2 vTexCoord;
+
+ out vec4 fragColor;
+ void main() { fragColor = vec4(0.8, 0.2, 0.2, 1.0); }
+});
+
+const GLfloat kVertices[] = {
+ -1, -1, 0,
+ 1, -1, 0,
+ -1, 1, 0,
+ 1, 1, 0,
+};
+
+const GLfloat kTextureVertices[] = {
+ 0, 1,
+ 1, 1,
+ 0, 0,
+ 1, 0,
+};
+
+// Returns true if the given point is inside the given rect.
+bool IsInside(const vec2& pt, const vec2& tl, const vec2& br) {
+ return pt.x() >= tl.x() && pt.x() <= br.x() &&
+ pt.y() >= tl.y() && pt.y() <= br.y();
+}
+
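+// Returns a scale that fits a unit quad to the given aspect ratio, keeping the
+// longer dimension at unit size.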
+mat4 GetScalingMatrix(float width, float height) {
+ float xscale = 1, yscale = 1;
+ float ar = width / height;
+ if (ar > 1)
+ yscale = 1.0 / ar;
+ else
+ xscale = ar;
+
+ return mat4(Eigen::Scaling<float>(xscale, yscale, 1.0));
+}
+
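+// Builds a pose matrix that keeps the head position and view direction but
+// constrains the x axis to be horizontal, removing head roll.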
+mat4 GetHorizontallyAlignedMatrixFromPose(const Posef& pose) {
+ vec3 position = pose.GetPosition();
+ quat view_quaternion = pose.GetRotation();
+
+ vec3 z = vec3(view_quaternion * vec3(0.0f, 0.0f, 1.0f));
+ vec3 y(0.0f, 1.0f, 0.0f);
+ vec3 x = y.cross(z);
+ x.normalize();
+ y = z.cross(x);
+
+ mat4 m;
+ // clang-format off
+ m(0, 0) = x[0]; m(0, 1) = y[0]; m(0, 2) = z[0]; m(0, 3) = position[0];
+ m(1, 0) = x[1]; m(1, 1) = y[1]; m(1, 2) = z[1]; m(1, 3) = position[1];
+ m(2, 0) = x[2]; m(2, 1) = y[2]; m(2, 2) = z[2]; m(2, 3) = position[2];
+ m(3, 0) = 0.0f; m(3, 1) = 0.0f; m(3, 2) = 0.0f; m(3, 3) = 1.0f;
+ // clang-format on
+
+ return m;
+}
+
+// Helper function that applies the crop transform to the texture layer and
+// positions (and scales) the texture layer in the appropriate location in the
+// display space.
+mat4 GetLayerTransform(const TextureLayer& texture_layer, float display_width,
+ float display_height) {
+ // Map from vertex coordinates to [0, 1] coordinates:
+ // 1) Flip y since in vertex coordinates (-1, -1) is at the bottom left and
+ // in texture coordinates (0, 0) is at the top left.
+ // 2) Translate by (1, 1) to map vertex coordinates to [0, 2] on x and y.
+ // 3) Scale by 1 / 2 to map coordinates to [0, 1] on x and y.
+ mat4 unit_space(
+ Eigen::AlignedScaling3f(0.5f, 0.5f, 1.0f) *
+ Eigen::Translation3f(1.0f, 1.0f, 0.0f) *
+ Eigen::AlignedScaling3f(1.0f, -1.0f, 1.0f));
+
+ mat4 texture_space(Eigen::AlignedScaling3f(
+ texture_layer.texture->width(), texture_layer.texture->height(), 1.0f));
+
+ // 1) Translate the layer to crop the left and top edge.
+ // 2) Scale the layer such that the cropped right and bottom edges map outside
+ // the texture region.
+ float crop_width = texture_layer.crop.right - texture_layer.crop.left;
+ float crop_height = texture_layer.crop.bottom - texture_layer.crop.top;
+ mat4 texture_crop(
+ Eigen::AlignedScaling3f(
+ texture_layer.texture->width() / crop_width,
+ texture_layer.texture->height() / crop_height,
+ 1.0f) *
+ Eigen::Translation3f(
+ -texture_layer.crop.left, -texture_layer.crop.top, 0.0f));
+
+ mat4 display_space(
+ Eigen::AlignedScaling3f(display_width, display_height, 1.0f));
+
+ // 1) Scale the texture to fit the display frame.
+ // 2) Translate the texture in the display frame location.
+ float display_frame_width = texture_layer.display_frame.right -
+ texture_layer.display_frame.left;
+ float display_frame_height = texture_layer.display_frame.bottom -
+ texture_layer.display_frame.top;
+ mat4 display_frame(
+ Eigen::Translation3f(
+ texture_layer.display_frame.left,
+ texture_layer.display_frame.top,
+ 0.0f) *
+ Eigen::AlignedScaling3f(
+ display_frame_width / display_width,
+ display_frame_height / display_height,
+ 1.0f));
+
+ mat4 layer_transform = unit_space.inverse() * display_space.inverse() *
+ display_frame * display_space * texture_space.inverse() * texture_crop *
+ texture_space * unit_space;
+ return layer_transform;
+}
+
+vec3 FromGvrVec3f(const gvr_vec3f& vec3f) {
+ return vec3(vec3f.x, vec3f.y, vec3f.z);
+}
+
+quat FromGvrQuatf(const gvr_quatf& quaternion) {
+ return quat(quaternion.qw, quaternion.qx, quaternion.qy, quaternion.qz);
+}
+
+// Determine if this frame should be shown or hidden.
+bool CalculateVisibilityFromLayerConfig(const HwcCallback::Frame& frame,
+ uint32_t vr_app) {
+ auto& layers = frame.layers();
+
+ // We assume the first two layers are the VR app.
+ if (layers.size() < kVRAppLayerCount)
+ return false;
+
+ if (vr_app != layers[0].appid || layers[0].appid == 0)
+ return false;
+
+ // If a non-VR-app, non-skipped layer appears, show.
+ size_t index = kVRAppLayerCount;
+ // Now, find a dim layer if it exists.
+ // If it does, ignore any layers behind it for visibility determination.
+ for (size_t i = index; i < layers.size(); i++) {
+ if (layers[i].appid == 0) {
+ index = i + 1;
+ break;
+ }
+ }
+
+ // If any non-skipped layers exist now then we show, otherwise hide.
+ for (size_t i = index; i < layers.size(); i++) {
+ if (!layers[i].should_skip_layer())
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+ShellView::ShellView() {
+ ime_translate_ = mat4(Eigen::Translation3f(0.0f, -0.5f, 0.25f));
+ ime_top_left_ = vec2(0, 0);
+ ime_size_ = vec2(0, 0);
+}
+
+ShellView::~ShellView() {}
+
+int ShellView::Initialize(JNIEnv* env, jobject app_context,
+ jobject class_loader) {
+ int ret = Application::Initialize(env, app_context, class_loader);
+ if (ret)
+ return ret;
+
+ translate_ = Eigen::Translation3f(0, 0, -2.5f);
+
+ if (!InitializeTouch())
+ ALOGE("Failed to initialize virtual touchpad");
+
+ return 0;
+}
+
+int ShellView::AllocateResources() {
+ int ret = Application::AllocateResources();
+ if (ret)
+ return ret;
+
+ program_.reset(new ShaderProgram);
+ program_->Link(kVertexShader, kFragmentShader);
+ overlay_program_.reset(new ShaderProgram);
+ overlay_program_->Link(kVertexShader, kOverlayFragmentShader);
+ controller_program_.reset(new ShaderProgram);
+ controller_program_->Link(kVertexShader, kControllerFragmentShader);
+ if (!program_ || !overlay_program_ || !controller_program_)
+ return 1;
+
+ surface_flinger_view_.reset(new SurfaceFlingerView);
+ if (!surface_flinger_view_->Initialize(this))
+ return 1;
+
+ reticle_.reset(new Reticle());
+ if (!reticle_->Initialize())
+ return 1;
+
+ controller_mesh_.reset(new Mesh<vec3, vec3, vec2>());
+ controller_mesh_->SetVertices(kNumControllerMeshVertices,
+ kControllerMeshVertices);
+
+ initialized_ = true;
+
+ return 0;
+}
+
+void ShellView::DeallocateResources() {
+ surface_flinger_view_.reset();
+ reticle_.reset();
+ controller_mesh_.reset();
+ program_.reset(new ShaderProgram);
+ overlay_program_.reset(new ShaderProgram);
+ controller_program_.reset(new ShaderProgram);
+ Application::DeallocateResources();
+}
+
+void ShellView::EnableDebug(bool debug) {
+ QueueTask(debug ? MainThreadTask::EnableDebugMode
+ : MainThreadTask::DisableDebugMode);
+}
+
+void ShellView::VrMode(bool mode) {
+ QueueTask(mode ? MainThreadTask::EnteringVrMode
+ : MainThreadTask::ExitingVrMode);
+}
+
+void ShellView::OnDrawFrame() {
+ textures_.clear();
+ has_ime_ = false;
+
+ {
+ std::unique_lock<std::mutex> l(pending_frame_mutex_);
+ if (!pending_frames_.empty()) {
+ // Check if we should advance the frame.
+ auto& frame = pending_frames_.front();
+ if (!frame.visibility ||
+ frame.frame->Finish() == HwcCallback::FrameStatus::kFinished) {
+ current_frame_ = std::move(frame);
+ pending_frames_.pop_front();
+ }
+ }
+ }
+
+ if (!debug_mode_ && current_frame_.visibility != is_visible_) {
+ SetVisibility(current_frame_.visibility);
+ }
+
+ if (!current_frame_.visibility)
+ return;
+
+ ime_texture_ = TextureLayer();
+
+ surface_flinger_view_->GetTextures(*current_frame_.frame.get(), &textures_,
+ &ime_texture_, debug_mode_);
+ has_ime_ = ime_texture_.texture != nullptr;
+}
+
+void ShellView::DrawEye(EyeType /* eye */, const mat4& perspective,
+ const mat4& eye_matrix, const mat4& head_matrix) {
+ if (should_recenter_) {
+ // Position the quad horizontally aligned in the direction the user
+ // is facing, effectively taking out head roll.
+ initial_head_matrix_ = GetHorizontallyAlignedMatrixFromPose(last_pose_);
+ should_recenter_ = false;
+ }
+
+ size_ = vec2(surface_flinger_view_->width(), surface_flinger_view_->height());
+ scale_ = GetScalingMatrix(size_.x(), size_.y());
+
+ DrawOverlays(perspective, eye_matrix, head_matrix);
+
+ // TODO(alexst): Replicate controller rendering from VR Home.
+ // Current approach in the function below is a quick visualization.
+ DrawController(perspective, eye_matrix, head_matrix);
+
+ // TODO: Make sure reticle is shown only over visible overlays.
+ DrawReticle(perspective, eye_matrix, head_matrix);
+}
+
+void ShellView::OnVisibilityChanged(bool visible) {
+ should_recenter_ = visible;
+ Application::OnVisibilityChanged(visible);
+}
+
+bool ShellView::OnClick(bool down) {
+ if (down) {
+ if (!is_touching_ && allow_input_) {
+ is_touching_ = true;
+ }
+ } else {
+ is_touching_ = false;
+ }
+ Touch();
+ return true;
+}
+
+void ShellView::OnFrame(std::unique_ptr<HwcCallback::Frame> frame) {
+ if (!frame || frame->layers().empty())
+ return;
+
+ bool visibility = debug_mode_ || CalculateVisibilityFromLayerConfig(
+ *frame.get(), current_vr_app_);
+ current_vr_app_ = frame->layers().front().appid;
+
+ // If we are not showing the frame there's no need to keep anything around.
+ if (!visibility) {
+ // Hidden, no change so drop it completely
+ if (!current_frame_.visibility)
+ return;
+
+ frame.reset(nullptr);
+ }
+
+ std::unique_lock<std::mutex> l(pending_frame_mutex_);
+
+ pending_frames_.emplace_back(std::move(frame), visibility);
+
+ if (pending_frames_.size() > kMaximumPendingFrames)
+ pending_frames_.pop_front();
+
+ // If we are showing ourselves the main thread is not processing anything,
+ // so give it a kick.
+ if (visibility && !current_frame_.visibility)
+ QueueTask(MainThreadTask::Show);
+}
+
+bool ShellView::IsHit(const vec3& view_location, const vec3& view_direction,
+ vec3* hit_location, vec2* hit_location_in_window_coord,
+ bool test_ime) {
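+ // Transform the view ray into the quad's local space and intersect it with
+ // the z = 0 plane; the quad spans [-1, 1] in x and y in that space.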
+ mat4 m = initial_head_matrix_ * translate_;
+ if (test_ime)
+ m = m * ime_translate_;
+ mat4 inverse = (m * scale_).inverse();
+ vec4 transformed_loc =
+ inverse * vec4(view_location[0], view_location[1], view_location[2], 1);
+ vec4 transformed_dir = inverse * vec4(view_direction[0], view_direction[1],
+ view_direction[2], 0);
+
+ if (transformed_dir.z() >= 0 || transformed_loc.z() <= 0)
+ return false;
+
+ float distance = -transformed_loc.z() / transformed_dir.z();
+ vec4 transformed_hit_loc = transformed_loc + transformed_dir * distance;
+ if (transformed_hit_loc.x() < -1 || transformed_hit_loc.x() > 1)
+ return false;
+ if (transformed_hit_loc.y() < -1 || transformed_hit_loc.y() > 1)
+ return false;
+
+ hit_location_in_window_coord->x() =
+ (1 + transformed_hit_loc.x()) / 2 * size_.x();
+ hit_location_in_window_coord->y() =
+ (1 - transformed_hit_loc.y()) / 2 * size_.y();
+
+ *hit_location = view_location + view_direction * distance;
+ return true;
+}
+
+void ShellView::DrawOverlays(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix) {
+ if (textures_.empty())
+ return;
+
+ program_->Use();
+ mat4 mvp = perspective * eye_matrix * head_matrix;
+ GLint view_projection_location =
+ glGetUniformLocation(program_->GetProgram(), "uViewProjection");
+ glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
+
+ GLint alpha_location =
+ glGetUniformLocation(program_->GetProgram(), "uAlpha");
+
+ GLint tex_location = glGetUniformLocation(program_->GetProgram(), "tex");
+ glUniform1i(tex_location, 0);
+ glActiveTexture(GL_TEXTURE0);
+
+ for (const auto& texture_layer : textures_) {
+ switch (texture_layer.blending) {
+ case HWC2_BLEND_MODE_PREMULTIPLIED:
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
+ break;
+ case HWC2_BLEND_MODE_COVERAGE:
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+ break;
+ default:
+ break;
+ }
+
+ glUniform1f(alpha_location, fade_value_ * texture_layer.alpha);
+
+ glBindTexture(GL_TEXTURE_2D, texture_layer.texture->id());
+
+ mat4 layer_transform = GetLayerTransform(texture_layer, size_.x(),
+ size_.y());
+
+ mat4 transform = initial_head_matrix_ * translate_ * scale_ *
+ layer_transform;
+ DrawWithTransform(transform, *program_);
+
+ glDisable(GL_BLEND);
+ }
+
+ if (has_ime_) {
+ ime_top_left_ = vec2(static_cast<float>(ime_texture_.display_frame.left),
+ static_cast<float>(ime_texture_.display_frame.top));
+ ime_size_ = vec2(static_cast<float>(ime_texture_.display_frame.right -
+ ime_texture_.display_frame.left),
+ static_cast<float>(ime_texture_.display_frame.bottom -
+ ime_texture_.display_frame.top));
+
+ DrawDimOverlay(mvp, textures_[0], ime_top_left_, ime_top_left_ + ime_size_);
+
+ DrawIme();
+ }
+}
+
+void ShellView::DrawIme() {
+ program_->Use();
+ glBindTexture(GL_TEXTURE_2D, ime_texture_.texture->id());
+
+ mat4 layer_transform = GetLayerTransform(ime_texture_, size_.x(), size_.y());
+
+ mat4 transform = initial_head_matrix_ * translate_ * ime_translate_ * scale_ *
+ layer_transform;
+
+ DrawWithTransform(transform, *program_);
+}
+
+void ShellView::DrawDimOverlay(const mat4& mvp, const TextureLayer& layer, const vec2& top_left,
+ const vec2& bottom_right) {
+ overlay_program_->Use();
+ glUniformMatrix4fv(
+ glGetUniformLocation(overlay_program_->GetProgram(), "uViewProjection"),
+ 1, 0, mvp.data());
+ glUniform4f(glGetUniformLocation(overlay_program_->GetProgram(), "uCoords"),
+ top_left.x() / size_.x(), top_left.y() / size_.y(),
+ bottom_right.x() / size_.x(), bottom_right.y() / size_.y());
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+ mat4 layer_transform =
+ GetLayerTransform(layer, size_.x(), size_.y());
+
+ mat4 transform =
+ initial_head_matrix_ * translate_ * scale_ * layer_transform;
+ DrawWithTransform(transform, *overlay_program_);
+ glDisable(GL_BLEND);
+}
+
+void ShellView::DrawWithTransform(const mat4& transform,
+ const ShaderProgram& program) {
+ GLint transform_location =
+ glGetUniformLocation(program.GetProgram(), "uTransform");
+ glUniformMatrix4fv(transform_location, 1, 0, transform.data());
+
+ glEnableVertexAttribArray(0);
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, kVertices);
+ glEnableVertexAttribArray(1);
+ glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, kTextureVertices);
+ glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+}
+
+bool ShellView::IsImeHit(const vec3& view_location, const vec3& view_direction,
+ vec3* hit_location) {
+ // First, check if the IME window is hit.
+ bool is_hit = IsHit(view_location, view_direction, hit_location,
+ &hit_location_in_window_coord_, true);
+ if (is_hit) {
+ // If it is, check if the window coordinate is in the IME region;
+ // if so then we are done.
+ if (IsInside(hit_location_in_window_coord_, ime_top_left_,
+ ime_top_left_ + ime_size_)) {
+ allow_input_ = true;
+ return true;
+ }
+ }
+
+ allow_input_ = false;
+ // Check if we have hit the main window.
+ is_hit = IsHit(view_location, view_direction, hit_location,
+ &hit_location_in_window_coord_, false);
+ if (is_hit) {
+ // Only allow input if we are not hitting the region hidden by the IME.
+ // Allowing input here would cause clicks on the main window to actually
+ // be clicks on the IME.
+ if (!IsInside(hit_location_in_window_coord_, ime_top_left_,
+ ime_top_left_ + ime_size_)) {
+ allow_input_ = true;
+ }
+ }
+ return is_hit;
+}
+
+void ShellView::DrawReticle(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix) {
+ reticle_->Hide();
+
+ vec3 pointer_location = last_pose_.GetPosition();
+ quat view_quaternion = last_pose_.GetRotation();
+
+ if (controller_api_status_ == gvr::kControllerApiOk) {
+ view_quaternion = FromGvrQuatf(controller_orientation_);
+ vec4 controller_location = controller_translate_ * vec4(0, 0, 0, 1);
+ pointer_location = vec3(controller_location.x(), controller_location.y(),
+ controller_location.z());
+
+ if (controller_state_->GetButtonDown(gvr::kControllerButtonClick))
+ OnClick(true);
+
+ if (controller_state_->GetButtonUp(gvr::kControllerButtonClick))
+ OnClick(false);
+ }
+
+ vec3 view_direction = vec3(view_quaternion * vec3(0, 0, -1));
+
+ vec3 hit_location;
+
+ bool is_hit;
+ if (has_ime_) {
+ // This will set allow_input_ and hit_location_in_window_coord_.
+ is_hit = IsImeHit(pointer_location, view_direction, &hit_location);
+ } else {
+ is_hit = IsHit(pointer_location, view_direction, &hit_location,
+ &hit_location_in_window_coord_, false);
+ allow_input_ = is_hit;
+ }
+
+ if (is_hit) {
+ reticle_->ShowAt(
+ Eigen::Translation3f(hit_location) * view_quaternion.matrix(),
+ allow_input_ ? vec3(1, 0, 0) : vec3(0, 0, 0));
+ Touch();
+ }
+
+ reticle_->Draw(perspective, eye_matrix, head_matrix);
+}
+
+void ShellView::DrawController(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix) {
+ controller_program_->Use();
+ mat4 mvp = perspective * eye_matrix * head_matrix;
+
+ GLint view_projection_location = glGetUniformLocation(
+ controller_program_->GetProgram(), "uViewProjection");
+ glUniformMatrix4fv(view_projection_location, 1, 0, mvp.data());
+
+ quat view_quaternion = FromGvrQuatf(controller_orientation_);
+
+ vec3 world_pos = last_pose_.GetPosition() + controller_position_;
+
+ controller_translate_ =
+ Eigen::Translation3f(world_pos.x(), world_pos.y(), world_pos.z());
+
+ mat4 transform = controller_translate_ * view_quaternion *
+ mat4(Eigen::Scaling<float>(1, 1, 3.0));
+ GLint transform_location =
+ glGetUniformLocation(controller_program_->GetProgram(), "uTransform");
+ glUniformMatrix4fv(transform_location, 1, 0, transform.data());
+
+ controller_mesh_->Draw();
+}
+
+bool ShellView::InitializeTouch() {
+ virtual_touchpad_ =
+ android::interface_cast<android::dvr::IVirtualTouchpadService>(
+ android::defaultServiceManager()->getService(
+ android::String16("virtual_touchpad")));
+ if (!virtual_touchpad_.get()) {
+ ALOGE("Failed to connect to virtual touchpad");
+ return false;
+ }
+ return true;
+}
+
+void ShellView::Touch() {
+ if (!virtual_touchpad_.get()) {
+ ALOGE("missing virtual touchpad");
+ // Try to reconnect; useful in development.
+ if (!InitializeTouch()) {
+ return;
+ }
+ }
+
+ const android::binder::Status status = virtual_touchpad_->touch(
+ hit_location_in_window_coord_.x() / size_.x(),
+ hit_location_in_window_coord_.y() / size_.y(),
+ is_touching_ ? 1.0f : 0.0f);
+ if (!status.isOk()) {
+ ALOGE("touch failed: %s", status.toString8().string());
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/shell_view.h b/services/vr/vr_window_manager/shell_view.h
new file mode 100644
index 0000000..0688c94
--- /dev/null
+++ b/services/vr/vr_window_manager/shell_view.h
@@ -0,0 +1,119 @@
+#ifndef VR_WINDOW_MANAGER_SHELL_VIEW_H_
+#define VR_WINDOW_MANAGER_SHELL_VIEW_H_
+
+#include <private/dvr/graphics/mesh.h>
+#include <private/dvr/graphics/shader_program.h>
+#include <android/dvr/IVirtualTouchpadService.h>
+
+#include <deque>
+
+#include "application.h"
+#include "reticle.h"
+#include "surface_flinger_view.h"
+
+namespace android {
+namespace dvr {
+
+class ShellView : public Application, public HwcCallback::Client {
+ public:
+ ShellView();
+ virtual ~ShellView();
+
+ int Initialize(JNIEnv* env, jobject app_context,
+ jobject class_loader) override;
+
+ int AllocateResources() override;
+ void DeallocateResources() override;
+
+ void EnableDebug(bool debug);
+ void VrMode(bool mode);
+
+ protected:
+ void DrawEye(EyeType eye, const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix) override;
+ void OnVisibilityChanged(bool visible) override;
+
+ void DrawOverlays(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix);
+ void DrawReticle(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix);
+ void DrawIme();
+ void DrawDimOverlay(const mat4& mvp, const TextureLayer& layer,
+ const vec2& top_left, const vec2& bottom_right);
+ void DrawController(const mat4& perspective, const mat4& eye_matrix,
+ const mat4& head_matrix);
+
+ bool IsHit(const vec3& view_location, const vec3& view_direction,
+ vec3* hit_location, vec2* hit_location_in_window_coord,
+ bool test_ime);
+ bool IsImeHit(const vec3& view_location, const vec3& view_direction,
+ vec3* hit_location);
+ bool InitializeTouch();
+ void Touch();
+
+ void OnDrawFrame() override;
+ void DrawWithTransform(const mat4& transform, const ShaderProgram& program);
+
+ bool OnClick(bool down);
+
+ // HwcCallback::Client:
+ void OnFrame(std::unique_ptr<HwcCallback::Frame> frame) override;
+
+ std::unique_ptr<ShaderProgram> program_;
+ std::unique_ptr<ShaderProgram> overlay_program_;
+ std::unique_ptr<ShaderProgram> controller_program_;
+
+ uint32_t current_vr_app_ = 0;
+
+ // Used to center the scene when the shell becomes visible.
+ bool should_recenter_ = true;
+ mat4 initial_head_matrix_;
+ mat4 scale_;
+ mat4 translate_;
+ mat4 ime_translate_;
+ vec2 size_;
+
+ std::unique_ptr<SurfaceFlingerView> surface_flinger_view_;
+ std::unique_ptr<Reticle> reticle_;
+ sp<IVirtualTouchpadService> virtual_touchpad_;
+ std::vector<TextureLayer> textures_;
+ TextureLayer ime_texture_;
+
+ bool is_touching_ = false;
+ bool allow_input_ = false;
+ vec2 hit_location_in_window_coord_;
+ vec2 ime_top_left_;
+ vec2 ime_size_;
+ bool has_ime_ = false;
+
+ std::unique_ptr<Mesh<vec3, vec3, vec2>> controller_mesh_;
+
+ struct PendingFrame {
+ PendingFrame() = default;
+ PendingFrame(std::unique_ptr<HwcCallback::Frame>&& frame, bool visibility)
+ : frame(std::move(frame)), visibility(visibility) {}
+ PendingFrame(PendingFrame&& r)
+ : frame(std::move(r.frame)), visibility(r.visibility) {}
+
+ void operator=(PendingFrame&& r) {
+ frame.reset(r.frame.release());
+ visibility = r.visibility;
+ }
+
+ std::unique_ptr<HwcCallback::Frame> frame;
+ bool visibility = false;
+ };
+ std::deque<PendingFrame> pending_frames_;
+ std::mutex pending_frame_mutex_;
+ PendingFrame current_frame_;
+
+ mat4 controller_translate_;
+
+ ShellView(const ShellView&) = delete;
+ void operator=(const ShellView&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_SHELL_VIEW_H_
diff --git a/services/vr/vr_window_manager/surface_flinger_view.cpp b/services/vr/vr_window_manager/surface_flinger_view.cpp
new file mode 100644
index 0000000..d38fcc0
--- /dev/null
+++ b/services/vr/vr_window_manager/surface_flinger_view.cpp
@@ -0,0 +1,79 @@
+#include "surface_flinger_view.h"
+
+#include <binder/IServiceManager.h>
+#include <impl/vr_composer_view.h>
+#include <private/dvr/native_buffer.h>
+
+#include "hwc_callback.h"
+#include "texture.h"
+
+namespace android {
+namespace dvr {
+
+SurfaceFlingerView::SurfaceFlingerView() {}
+
+SurfaceFlingerView::~SurfaceFlingerView() {}
+
+bool SurfaceFlingerView::Initialize(HwcCallback::Client* client) {
+ const char instance[] = "DaydreamDisplay";
+ composer_service_ = IVrComposerView::getService(instance);
+ if (composer_service_ == nullptr) {
+ ALOGE("Failed to initialize composer service");
+ return false;
+ }
+
+ if (!composer_service_->isRemote()) {
+ ALOGE("Composer service is not remote");
+ return false;
+ }
+
+ // TODO(dnicoara): Query this from the composer service.
+ width_ = 1920;
+ height_ = 1080;
+
+ composer_observer_.reset(new HwcCallback(composer_service_.get(), client));
+ return true;
+}
+
+bool SurfaceFlingerView::GetTextures(const HwcCallback::Frame& frame,
+ std::vector<TextureLayer>* texture_layers,
+ TextureLayer* ime_layer,
+ bool debug) const {
+ auto& layers = frame.layers();
+ texture_layers->clear();
+
+ size_t start = 0;
+ // Skip the VR app layers: always skip the first layer, and also the second
+ // when it belongs to the same app.
+ if (!debug) {
+ start = 1;
+ if (layers[0].appid && layers[0].appid == layers[1].appid)
+ start = 2;
+ }
+
+ for (size_t i = start; i < layers.size(); ++i) {
+ if (!debug && layers[i].should_skip_layer())
+ continue;
+
+ std::unique_ptr<Texture> texture(new Texture());
+ if (!texture->Initialize(layers[i].buffer->getNativeBuffer())) {
+ ALOGE("Failed to create texture");
+ texture_layers->clear();
+ return false;
+ }
+
+ TextureLayer texture_layer = {
+ std::move(texture), layers[i].crop, layers[i].display_frame,
+ layers[i].blending, layers[i].alpha,
+ };
+ if (debug && layers[i].type == HwcCallback::HwcLayer::kInputMethod) {
+ *ime_layer = std::move(texture_layer);
+ } else {
+ texture_layers->emplace_back(std::move(texture_layer));
+ }
+ }
+
+ return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/surface_flinger_view.h b/services/vr/vr_window_manager/surface_flinger_view.h
new file mode 100644
index 0000000..e079cdb
--- /dev/null
+++ b/services/vr/vr_window_manager/surface_flinger_view.h
@@ -0,0 +1,52 @@
+#ifndef APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
+#define APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
+
+#include <utils/StrongPointer.h>
+
+#include <memory>
+
+#include "hwc_callback.h"
+
+namespace android {
+namespace dvr {
+
+class IDisplay;
+class Texture;
+
+struct TextureLayer {
+ std::unique_ptr<Texture> texture;
+ Rectf crop;
+ Recti display_frame;
+ int32_t blending;
+ float alpha;
+};
+
+class SurfaceFlingerView {
+ public:
+ SurfaceFlingerView();
+ ~SurfaceFlingerView();
+
+ int width() const { return width_; }
+ int height() const { return height_; }
+
+ bool Initialize(HwcCallback::Client* client);
+
+ bool GetTextures(const HwcCallback::Frame& layers,
+ std::vector<TextureLayer>* texture_layers,
+ TextureLayer* ime_layer, bool debug) const;
+
+ private:
+ sp<IVrComposerView> composer_service_;
+ std::unique_ptr<HwcCallback> composer_observer_;
+
+ int width_ = 0;
+ int height_ = 0;
+
+ SurfaceFlingerView(const SurfaceFlingerView&) = delete;
+ void operator=(const SurfaceFlingerView&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // APPLICATIONS_EXPERIMENTS_SURFACE_FLINGER_DEMO_SURFACE_FLINGER_VIEW_H_
diff --git a/services/vr/vr_window_manager/texture.cpp b/services/vr/vr_window_manager/texture.cpp
new file mode 100644
index 0000000..dbd91b7
--- /dev/null
+++ b/services/vr/vr_window_manager/texture.cpp
@@ -0,0 +1,41 @@
+#include "texture.h"
+
+#include <cutils/log.h>
+#include <GLES/glext.h>
+#include <system/window.h>
+
+namespace android {
+namespace dvr {
+
+Texture::Texture() {}
+
+Texture::~Texture() {
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (id_)
+ glDeleteTextures(1, &id_);
+ if (image_)
+ eglDestroyImageKHR(display, image_);
+}
+
+bool Texture::Initialize(ANativeWindowBuffer* buffer) {
+ width_ = buffer->width;
+ height_ = buffer->height;
+
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ image_ = eglCreateImageKHR(display, EGL_NO_CONTEXT,
+ EGL_NATIVE_BUFFER_ANDROID, buffer, nullptr);
+ if (!image_) {
+ ALOGE("Failed to create eglImage");
+ return false;
+ }
+
+ glGenTextures(1, &id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image_);
+
+ return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/services/vr/vr_window_manager/texture.h b/services/vr/vr_window_manager/texture.h
new file mode 100644
index 0000000..9840f19
--- /dev/null
+++ b/services/vr/vr_window_manager/texture.h
@@ -0,0 +1,37 @@
+#ifndef VR_WINDOW_MANAGER_TEXTURE_H_
+#define VR_WINDOW_MANAGER_TEXTURE_H_
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace dvr {
+
+class Texture {
+ public:
+ Texture();
+ ~Texture();
+
+ bool Initialize(ANativeWindowBuffer* buffer);
+
+ GLuint id() const { return id_; }
+ int width() const { return width_; }
+ int height() const { return height_; }
+
+ private:
+ EGLImageKHR image_ = nullptr;
+ GLuint id_ = 0;
+ int width_ = 0;
+ int height_ = 0;
+
+ Texture(const Texture&) = delete;
+ void operator=(const Texture&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // VR_WINDOW_MANAGER_TEXTURE_H_
diff --git a/services/vr/vr_window_manager/vr_window_manager_jni.cpp b/services/vr/vr_window_manager/vr_window_manager_jni.cpp
new file mode 100644
index 0000000..f52658a
--- /dev/null
+++ b/services/vr/vr_window_manager/vr_window_manager_jni.cpp
@@ -0,0 +1,58 @@
+#include <cutils/log.h>
+#include <jni.h>
+
+#include <memory>
+
+#include "render_thread.h"
+
+#define JNI_METHOD(return_type, method_name) \
+ JNIEXPORT return_type JNICALL \
+ Java_com_google_vr_windowmanager_VrWindowManagerService_##method_name
+
+namespace {
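+// Helpers for passing the native RenderThread pointer through Java as a jlong.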
+
+inline jlong jptr(android::dvr::RenderThread* native_vr_window_manager) {
+ return reinterpret_cast<intptr_t>(native_vr_window_manager);
+}
+
+inline android::dvr::RenderThread* native(jlong ptr) {
+ return reinterpret_cast<android::dvr::RenderThread*>(ptr);
+}
+
+} // namespace
+
+extern "C" {
+
+JNI_METHOD(jlong, nativeCreate)(JNIEnv* env, jclass /*clazz*/,
+ jobject class_loader,
+ jobject android_context) {
+ return jptr(new android::dvr::RenderThread(
+ env, class_loader, android_context));
+}
+
+JNI_METHOD(void, nativeDestroy)
+(JNIEnv* /*env*/, jclass /*clazz*/, jlong native_render_thread) {
+ delete native(native_render_thread);
+}
+
+JNI_METHOD(void, nativeEnableDebug)
+(JNIEnv* /*env*/, jclass /*clazz*/, jlong native_render_thread) {
+ native(native_render_thread)->EnableDebug(true);
+}
+
+JNI_METHOD(void, nativeDisableDebug)
+(JNIEnv* /*env*/, jclass /*clazz*/, jlong native_render_thread) {
+ native(native_render_thread)->EnableDebug(false);
+}
+
+JNI_METHOD(void, nativeEnterVrMode)
+(JNIEnv* /*env*/, jclass /*clazz*/, jlong native_render_thread) {
+ native(native_render_thread)->VrMode(true);
+}
+
+JNI_METHOD(void, nativeExitVrMode)
+(JNIEnv* /*env*/, jclass /*clazz*/, jlong native_render_thread) {
+ native(native_render_thread)->VrMode(false);
+}
+
+} // extern "C"