Merge "Remove libsigchain from LD_PRELOADs"
diff --git a/fs_mgr/fs_mgr_verity.c b/fs_mgr/fs_mgr_verity.c
index 1d2e43f..a3023a6 100644
--- a/fs_mgr/fs_mgr_verity.c
+++ b/fs_mgr/fs_mgr_verity.c
@@ -371,6 +371,7 @@
goto out;
}
+ verity_table = verity_table_signature = NULL;
// read the verity block at the end of the block device
if (read_verity_metadata(fstab->blk_device,
&verity_table_signature,
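The one-line fix above matters because of the shared error path: if read_verity_metadata() fails partway, the cleanup code frees both pointers. A minimal sketch of the bug class (hypothetical helper names, not the actual fs_mgr code):

```cpp
#include <cstdlib>

// Stand-in for read_verity_metadata(); assume it can fail before allocating
// either output buffer.
static int read_metadata(char** table, char** sig) {
    (void)table;
    (void)sig;
    return -1;  // simulate the early "goto out" failure path
}

int main() {
    char* verity_table;
    char* verity_table_signature;
    // The fix above: start both pointers at NULL so the shared cleanup path
    // can free them unconditionally (free(NULL) is a defined no-op).
    verity_table = verity_table_signature = NULL;
    int rc = read_metadata(&verity_table, &verity_table_signature);
    // Shared cleanup, reached on success and failure alike. Without the NULL
    // initialization, a failing read would free two uninitialized pointers.
    free(verity_table);
    free(verity_table_signature);
    return rc == 0 ? 0 : 1;
}
```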
diff --git a/libcutils/Android.mk b/libcutils/Android.mk
index 469cdcf..6987e9c 100644
--- a/libcutils/Android.mk
+++ b/libcutils/Android.mk
@@ -121,6 +121,9 @@
LOCAL_SRC_FILES_arm += \
arch-arm/memset32.S \
+# arch-arm/memset32.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
+
LOCAL_SRC_FILES_arm64 += \
arch-arm64/android_memset.S \
diff --git a/libpixelflinger/Android.mk b/libpixelflinger/Android.mk
index 6a3a58f..da9829d 100644
--- a/libpixelflinger/Android.mk
+++ b/libpixelflinger/Android.mk
@@ -69,6 +69,10 @@
LOCAL_SHARED_LIBRARIES += libhardware_legacy
LOCAL_CFLAGS += -DWITH_LIB_HARDWARE
endif
+# t32cb16blend.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
+# arch-arm64/col32cb16blend.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
include $(BUILD_SHARED_LIBRARY)
#
@@ -82,6 +86,10 @@
LOCAL_SRC_FILES_arm64 := $(PIXELFLINGER_SRC_FILES_arm64)
LOCAL_SRC_FILES_mips := $(PIXELFLINGER_SRC_FILES_mips)
LOCAL_CFLAGS := $(PIXELFLINGER_CFLAGS)
+# t32cb16blend.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
+# arch-arm64/col32cb16blend.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
include $(BUILD_STATIC_LIBRARY)
diff --git a/libpixelflinger/tests/arch-arm64/assembler/Android.mk b/libpixelflinger/tests/arch-arm64/assembler/Android.mk
index eca36ef..961f323 100644
--- a/libpixelflinger/tests/arch-arm64/assembler/Android.mk
+++ b/libpixelflinger/tests/arch-arm64/assembler/Android.mk
@@ -5,6 +5,9 @@
arm64_assembler_test.cpp\
asm_test_jacket.S
+# asm_test_jacket.S does not compile with Clang.
+LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
+
LOCAL_SHARED_LIBRARIES := \
libcutils \
libpixelflinger
diff --git a/libpixelflinger/tests/arch-arm64/col32cb16blend/Android.mk b/libpixelflinger/tests/arch-arm64/col32cb16blend/Android.mk
index 3368eb0..5d69203 100644
--- a/libpixelflinger/tests/arch-arm64/col32cb16blend/Android.mk
+++ b/libpixelflinger/tests/arch-arm64/col32cb16blend/Android.mk
@@ -5,6 +5,8 @@
col32cb16blend_test.c \
../../../arch-arm64/col32cb16blend.S
+LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
+
LOCAL_SHARED_LIBRARIES :=
LOCAL_C_INCLUDES :=
diff --git a/libpixelflinger/tests/arch-arm64/t32cb16blend/Android.mk b/libpixelflinger/tests/arch-arm64/t32cb16blend/Android.mk
index 8e5ec5e..2c1379b 100644
--- a/libpixelflinger/tests/arch-arm64/t32cb16blend/Android.mk
+++ b/libpixelflinger/tests/arch-arm64/t32cb16blend/Android.mk
@@ -5,6 +5,8 @@
t32cb16blend_test.c \
../../../arch-arm64/t32cb16blend.S
+LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
+
LOCAL_SHARED_LIBRARIES :=
LOCAL_C_INCLUDES :=
diff --git a/libsync/tests/Android.mk b/libsync/tests/Android.mk
new file mode 100644
index 0000000..ad20e50
--- /dev/null
+++ b/libsync/tests/Android.mk
@@ -0,0 +1,31 @@
+#
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+include external/libcxx/libcxx.mk
+LOCAL_CLANG := true
+LOCAL_MODULE := sync-unit-tests
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+LOCAL_CFLAGS += -g -Wall -Werror -std=gnu++11 -Wno-missing-field-initializers -Wno-sign-compare
+LOCAL_SHARED_LIBRARIES += libsync
+LOCAL_STATIC_LIBRARIES += libgtest_main
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/..
+LOCAL_SRC_FILES := \
+ sync_test.cpp
+include $(BUILD_NATIVE_TEST)
diff --git a/libsync/tests/sync_test.cpp b/libsync/tests/sync_test.cpp
new file mode 100644
index 0000000..55cd687
--- /dev/null
+++ b/libsync/tests/sync_test.cpp
@@ -0,0 +1,615 @@
+#include <gtest/gtest.h>
+#include <sync/sync.h>
+#include <sw_sync.h>
+#include <fcntl.h>
+#include <vector>
+#include <string>
+#include <cassert>
+#include <cerrno>
+#include <iostream>
+#include <iterator>
+#include <limits>
+#include <unistd.h>
+#include <thread>
+#include <poll.h>
+#include <sys/select.h>
+#include <mutex>
+#include <algorithm>
+#include <tuple>
+#include <random>
+#include <unordered_map>
+
+
+// TODO: better stress tests?
+// Handle more than 64 fds simultaneously, i.e. fix sync_fence_info's 4k limit.
+// Handle wraparound in timelines like nvidia.
+
+using namespace std;
+
+namespace {
+
+// C++ wrapper class for sync timeline.
+class SyncTimeline {
+ int m_fd = -1;
+ bool m_fdInitialized = false;
+public:
+ SyncTimeline(const SyncTimeline &) = delete;
+ SyncTimeline& operator=(SyncTimeline&) = delete;
+ SyncTimeline() noexcept {
+ int fd = sw_sync_timeline_create();
+ if (fd == -1)
+ return;
+ m_fdInitialized = true;
+ m_fd = fd;
+ }
+ void destroy() {
+ if (m_fdInitialized) {
+ close(m_fd);
+ m_fd = -1;
+ m_fdInitialized = false;
+ }
+ }
+ ~SyncTimeline() {
+ destroy();
+ }
+    bool isValid() const {
+        if (!m_fdInitialized) {
+            return false;
+        }
+        // F_GETFD returns 0 for an open fd with no flags set.
+        return fcntl(m_fd, F_GETFD, 0) == 0;
+    }
+ int getFd() const {
+ return m_fd;
+ }
+ int inc(int val = 1) {
+ return sw_sync_timeline_inc(m_fd, val);
+ }
+};
+
+struct SyncPointInfo {
+ std::string driverName;
+ std::string objectName;
+ uint64_t timeStampNs;
+    int status; // 1: signaled, 0: active, negative: error
+};
+
+// Wrapper class for sync fence.
+class SyncFence {
+ int m_fd = -1;
+ bool m_fdInitialized = false;
+ static int s_fenceCount;
+
+ void setFd(int fd) {
+ m_fd = fd;
+ m_fdInitialized = true;
+ }
+ void clearFd() {
+ m_fd = -1;
+ m_fdInitialized = false;
+ }
+public:
+    bool isValid() const {
+        if (!m_fdInitialized) {
+            return false;
+        }
+        // F_GETFD returns 0 for an open fd with no flags set.
+        return fcntl(m_fd, F_GETFD, 0) == 0;
+    }
+ SyncFence& operator=(SyncFence &&rhs) noexcept {
+ destroy();
+ if (rhs.isValid()) {
+ setFd(rhs.getFd());
+ rhs.clearFd();
+ }
+ return *this;
+ }
+ SyncFence(SyncFence &&fence) noexcept {
+ if (fence.isValid()) {
+ setFd(fence.getFd());
+ fence.clearFd();
+ }
+ }
+ SyncFence(const SyncFence &fence) noexcept {
+ // This is ok, as sync fences are immutable after construction, so a dup
+ // is basically the same thing as a copy.
+ if (fence.isValid()) {
+ int fd = dup(fence.getFd());
+ if (fd == -1)
+ return;
+ setFd(fd);
+ }
+ }
+ SyncFence(const SyncTimeline &timeline,
+ int value,
+ const char *name = nullptr) noexcept {
+ std::string autoName = "allocFence";
+        autoName += std::to_string(s_fenceCount);
+ s_fenceCount++;
+ int fd = sw_sync_fence_create(timeline.getFd(), name ? name : autoName.c_str(), value);
+ if (fd == -1)
+ return;
+ setFd(fd);
+ }
+ SyncFence(const SyncFence &a, const SyncFence &b, const char *name = nullptr) noexcept {
+ std::string autoName = "mergeFence";
+        autoName += std::to_string(s_fenceCount);
+ s_fenceCount++;
+ int fd = sync_merge(name ? name : autoName.c_str(), a.getFd(), b.getFd());
+ if (fd == -1)
+ return;
+ setFd(fd);
+ }
+ SyncFence(const vector<SyncFence> &sources) noexcept {
+ assert(sources.size());
+ SyncFence temp(*begin(sources));
+        for (auto itr = next(begin(sources)); itr != end(sources); ++itr) {
+ temp = SyncFence(*itr, temp);
+ }
+ if (temp.isValid()) {
+ setFd(temp.getFd());
+ temp.clearFd();
+ }
+ }
+ void destroy() {
+ if (isValid()) {
+ close(m_fd);
+ clearFd();
+ }
+ }
+ ~SyncFence() {
+ destroy();
+ }
+ int getFd() const {
+ return m_fd;
+ }
+ int wait(int timeout = -1) {
+ return sync_wait(m_fd, timeout);
+ }
+ vector<SyncPointInfo> getInfo() const {
+ struct sync_pt_info *pointInfo = nullptr;
+ vector<SyncPointInfo> fenceInfo;
+ sync_fence_info_data *info = sync_fence_info(getFd());
+ if (!info) {
+ return fenceInfo;
+ }
+ while ((pointInfo = sync_pt_info(info, pointInfo))) {
+ fenceInfo.push_back(SyncPointInfo{
+ pointInfo->driver_name,
+ pointInfo->obj_name,
+ pointInfo->timestamp_ns,
+ pointInfo->status});
+ }
+ sync_fence_info_free(info);
+ return fenceInfo;
+ }
+ int getSize() const {
+ return getInfo().size();
+ }
+ int getSignaledCount() const {
+ return countWithStatus(1);
+ }
+ int getActiveCount() const {
+ return countWithStatus(0);
+ }
+ int getErrorCount() const {
+ return countWithStatus(-1);
+ }
+private:
+ int countWithStatus(int status) const {
+ int count = 0;
+ for (auto &info : getInfo()) {
+ if (info.status == status) {
+ count++;
+ }
+ }
+ return count;
+ }
+};
+
+int SyncFence::s_fenceCount = 0;
+
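A quick orientation sketch for the wrappers above — an illustration only, using the classes defined in this file; the gtest cases below verify the same behavior more thoroughly. Merging two points from one timeline collapses them into the later point:

```cpp
static void mergeSketch() {
    SyncTimeline timeline;
    SyncFence early(timeline, 1);
    SyncFence late(timeline, 3);
    SyncFence merged(early, late);  // collapses to one sync point, at 3
    timeline.inc(2);                // "early" signals; "merged" still pending
    timeline.inc(1);                // timeline reaches 3: "merged" signals
    merged.wait(0);                 // now returns 0 without blocking
}
```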
+TEST(AllocTest, Timeline) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+}
+
+TEST(AllocTest, Fence) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fence(timeline, 1);
+ ASSERT_TRUE(fence.isValid());
+}
+
+TEST(AllocTest, FenceNegative) {
+ int timeline = sw_sync_timeline_create();
+ ASSERT_GT(timeline, 0);
+
+ // bad fd.
+ ASSERT_LT(sw_sync_fence_create(-1, "fence", 1), 0);
+
+ // No name - segfaults in user space.
+ // Maybe we should be friendlier here?
+ /*
+ ASSERT_LT(sw_sync_fence_create(timeline, nullptr, 1), 0);
+ */
+ close(timeline);
+}
+
+TEST(FenceTest, OneTimelineWait) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fence(timeline, 5);
+ ASSERT_TRUE(fence.isValid());
+
+ // Wait on fence until timeout.
+ ASSERT_EQ(fence.wait(0), -1);
+ ASSERT_EQ(errno, ETIME);
+
+ // Advance timeline from 0 -> 1
+ ASSERT_EQ(timeline.inc(1), 0);
+
+ // Wait on fence until timeout.
+ ASSERT_EQ(fence.wait(0), -1);
+ ASSERT_EQ(errno, ETIME);
+
+ // Signal the fence.
+ ASSERT_EQ(timeline.inc(4), 0);
+
+ // Wait successfully.
+ ASSERT_EQ(fence.wait(0), 0);
+
+    // Go even further, and confirm wait still succeeds.
+ ASSERT_EQ(timeline.inc(10), 0);
+ ASSERT_EQ(fence.wait(0), 0);
+}
+
+TEST(FenceTest, OneTimelinePoll) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fence(timeline, 100);
+ ASSERT_TRUE(fence.isValid());
+
+ fd_set set;
+ FD_ZERO(&set);
+ FD_SET(fence.getFd(), &set);
+
+ // Poll the fence, and wait till timeout.
+ timeval time = {0};
+ ASSERT_EQ(select(fence.getFd() + 1, &set, nullptr, nullptr, &time), 0);
+
+ // Advance the timeline.
+ timeline.inc(100);
+ timeline.inc(100);
+
+    // Select should return that the fd is ready for reading.
+ FD_ZERO(&set);
+ FD_SET(fence.getFd(), &set);
+
+ ASSERT_EQ(select(fence.getFd() + 1, &set, nullptr, nullptr, &time), 1);
+ ASSERT_TRUE(FD_ISSET(fence.getFd(), &set));
+}
+
+TEST(FenceTest, OneTimelineMerge) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ // create fence a,b,c and then merge them all into fence d.
+ SyncFence a(timeline, 1), b(timeline, 2), c(timeline, 3);
+ ASSERT_TRUE(a.isValid());
+ ASSERT_TRUE(b.isValid());
+ ASSERT_TRUE(c.isValid());
+
+ SyncFence d({a,b,c});
+ ASSERT_TRUE(d.isValid());
+
+ // confirm all fences have one active point (even d).
+ ASSERT_EQ(a.getActiveCount(), 1);
+ ASSERT_EQ(b.getActiveCount(), 1);
+ ASSERT_EQ(c.getActiveCount(), 1);
+ ASSERT_EQ(d.getActiveCount(), 1);
+
+ // confirm that d is not signaled until the max of a,b,c
+ timeline.inc(1);
+ ASSERT_EQ(a.getSignaledCount(), 1);
+ ASSERT_EQ(d.getActiveCount(), 1);
+
+ timeline.inc(1);
+ ASSERT_EQ(b.getSignaledCount(), 1);
+ ASSERT_EQ(d.getActiveCount(), 1);
+
+ timeline.inc(1);
+ ASSERT_EQ(c.getSignaledCount(), 1);
+ ASSERT_EQ(d.getActiveCount(), 0);
+ ASSERT_EQ(d.getSignaledCount(), 1);
+}
+
+TEST(FenceTest, MergeSameFence) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fence(timeline, 5);
+ ASSERT_TRUE(fence.isValid());
+
+ SyncFence selfMergeFence(fence, fence);
+ ASSERT_TRUE(selfMergeFence.isValid());
+
+ ASSERT_EQ(selfMergeFence.getSignaledCount(), 0);
+
+ timeline.inc(5);
+ ASSERT_EQ(selfMergeFence.getSignaledCount(), 1);
+}
+
+TEST(FenceTest, WaitOnDestroyedTimeline) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fenceSig(timeline, 100);
+ SyncFence fenceKill(timeline, 200);
+
+ // Spawn a thread to wait on a fence when the timeline is killed.
+ thread waitThread{
+ [&]() {
+ ASSERT_EQ(timeline.inc(100), 0);
+
+ ASSERT_EQ(fenceKill.wait(-1), -1);
+ ASSERT_EQ(errno, ENOENT);
+ }
+ };
+
+ // Wait for the thread to spool up.
+ fenceSig.wait();
+
+ // Kill the timeline.
+ timeline.destroy();
+
+    // Wait for the thread to clean up.
+ waitThread.join();
+}
+
+TEST(FenceTest, PollOnDestroyedTimeline) {
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ SyncFence fenceSig(timeline, 100);
+ SyncFence fenceKill(timeline, 200);
+
+ // Spawn a thread to wait on a fence when the timeline is killed.
+ thread waitThread{
+ [&]() {
+ ASSERT_EQ(timeline.inc(100), 0);
+
+ // Wait on the fd.
+ struct pollfd fds;
+ fds.fd = fenceKill.getFd();
+ fds.events = POLLIN | POLLERR;
+ ASSERT_EQ(poll(&fds, 1, -1), 1);
+ ASSERT_TRUE(fds.revents & POLLERR);
+ }
+ };
+
+ // Wait for the thread to spool up.
+ fenceSig.wait();
+
+ // Kill the timeline.
+ timeline.destroy();
+
+    // Wait for the thread to clean up.
+ waitThread.join();
+}
+
+TEST(FenceTest, MultiTimelineWait) {
+ SyncTimeline timelineA, timelineB, timelineC;
+
+ SyncFence fenceA(timelineA, 5);
+ SyncFence fenceB(timelineB, 5);
+ SyncFence fenceC(timelineC, 5);
+
+ // Make a larger fence using 3 other fences from different timelines.
+ SyncFence mergedFence({fenceA, fenceB, fenceC});
+ ASSERT_TRUE(mergedFence.isValid());
+
+ // Confirm fence isn't signaled
+ ASSERT_EQ(mergedFence.getActiveCount(), 3);
+ ASSERT_EQ(mergedFence.wait(0), -1);
+ ASSERT_EQ(errno, ETIME);
+
+ timelineA.inc(5);
+ ASSERT_EQ(mergedFence.getActiveCount(), 2);
+ ASSERT_EQ(mergedFence.getSignaledCount(), 1);
+
+ timelineB.inc(5);
+ ASSERT_EQ(mergedFence.getActiveCount(), 1);
+ ASSERT_EQ(mergedFence.getSignaledCount(), 2);
+
+ timelineC.inc(5);
+ ASSERT_EQ(mergedFence.getActiveCount(), 0);
+ ASSERT_EQ(mergedFence.getSignaledCount(), 3);
+
+ // confirm you can successfully wait.
+ ASSERT_EQ(mergedFence.wait(100), 0);
+}
+
+TEST(StressTest, TwoThreadsSharedTimeline) {
+ const int iterations = 1 << 16;
+ int counter = 0;
+ SyncTimeline timeline;
+ ASSERT_TRUE(timeline.isValid());
+
+ // Use a single timeline to synchronize two threads
+    // hammering on the same counter.
+ auto threadMain = [&](int threadId) {
+ for (int i = 0; i < iterations; i++) {
+ SyncFence fence(timeline, i * 2 + threadId);
+ ASSERT_TRUE(fence.isValid());
+
+ // Wait on the prior thread to complete.
+ ASSERT_EQ(fence.wait(), 0);
+
+ // Confirm the previous thread's writes are visible and then inc.
+ ASSERT_EQ(counter, i * 2 + threadId);
+ counter++;
+
+ // Kick off the other thread.
+ ASSERT_EQ(timeline.inc(), 0);
+ }
+ };
+
+ thread a{threadMain, 0};
+ thread b{threadMain, 1};
+ a.join();
+ b.join();
+
+ // make sure the threads did not trample on one another.
+ ASSERT_EQ(counter, iterations * 2);
+}
+
+class ConsumerStressTest : public ::testing::TestWithParam<int> {};
+
+TEST_P(ConsumerStressTest, MultiProducerSingleConsumer) {
+ mutex lock;
+ int counter = 0;
+ int iterations = 1 << 12;
+
+ vector<SyncTimeline> producerTimelines(GetParam());
+ vector<thread> threads;
+ SyncTimeline consumerTimeline;
+
+ // Producer threads run this lambda.
+ auto threadMain = [&](int threadId) {
+ for (int i = 0; i < iterations; i++) {
+ SyncFence fence(consumerTimeline, i);
+ ASSERT_TRUE(fence.isValid());
+
+ // Wait for the consumer to finish. Use alternate
+ // means of waiting on the fence.
+            if ((i + threadId) % 8 != 0) {
+ ASSERT_EQ(fence.wait(), 0);
+ }
+ else {
+ while (fence.getSignaledCount() != 1) {
+ ASSERT_EQ(fence.getErrorCount(), 0);
+ }
+ }
+
+ // Every producer increments the counter, the consumer checks + erases it.
+ lock.lock();
+ counter++;
+ lock.unlock();
+
+ ASSERT_EQ(producerTimelines[threadId].inc(), 0);
+ }
+ };
+
+ for (int i = 0; i < GetParam(); i++) {
+ threads.push_back(thread{threadMain, i});
+ }
+
+ // Consumer thread runs this loop.
+ for (int i = 1; i <= iterations; i++) {
+        // Create a fence representing all producers' final timelines.
+ vector<SyncFence> fences;
+ for (auto& timeline : producerTimelines) {
+ fences.push_back(SyncFence(timeline, i));
+ }
+ SyncFence mergeFence(fences);
+ ASSERT_TRUE(mergeFence.isValid());
+
+ // Make sure we see an increment from every producer thread. Vary
+ // the means by which we wait.
+        if (i % 8 != 0) {
+ ASSERT_EQ(mergeFence.wait(), 0);
+ }
+ else {
+ while (mergeFence.getSignaledCount() != mergeFence.getSize()) {
+ ASSERT_EQ(mergeFence.getErrorCount(), 0);
+ }
+ }
+ ASSERT_EQ(counter, GetParam()*i);
+
+ // Release the producer threads.
+ ASSERT_EQ(consumerTimeline.inc(), 0);
+ }
+
+ for_each(begin(threads), end(threads), [](thread& thread) { thread.join(); });
+}
+INSTANTIATE_TEST_CASE_P(
+ ParameterizedStressTest,
+ ConsumerStressTest,
+ ::testing::Values(2,4,16));
+
+class MergeStressTest : public ::testing::TestWithParam<tuple<int, int>> {};
+
+template <typename K, typename V> using dict = unordered_map<K,V>;
+
+TEST_P(MergeStressTest, RandomMerge) {
+ int timelineCount = get<0>(GetParam());
+ int mergeCount = get<1>(GetParam());
+
+ vector<SyncTimeline> timelines(timelineCount);
+
+ default_random_engine generator;
+ uniform_int_distribution<int> timelineDist(0, timelines.size()-1);
+ uniform_int_distribution<int> syncPointDist(0, numeric_limits<int>::max());
+
+ SyncFence fence(timelines[0], 0);
+ ASSERT_TRUE(fence.isValid());
+
+ unordered_map<int, int> fenceMap;
+    fenceMap.insert(make_pair(0, 0));
+
+ // Randomly create syncpoints out of a fixed set of timelines, and merge them together.
+ for (int i = 0; i < mergeCount; i++) {
+
+ // Generate syncpoint.
+ int timelineOffset = timelineDist(generator);
+ const SyncTimeline& timeline = timelines[timelineOffset];
+ int syncPoint = syncPointDist(generator);
+
+        // Keep track of the latest syncpoint in each timeline.
+        auto itr = fenceMap.find(timelineOffset);
+        if (itr == end(fenceMap)) {
+            fenceMap.insert(make_pair(timelineOffset, syncPoint));
+        }
+        else {
+            itr->second = max(syncPoint, itr->second);
+        }
+
+ // Merge.
+ fence = SyncFence(fence, SyncFence(timeline, syncPoint));
+ ASSERT_TRUE(fence.isValid());
+ }
+
+ // Confirm our map matches the fence.
+ ASSERT_EQ(fence.getSize(), fenceMap.size());
+
+ // Trigger the merged fence.
+ for (auto& item: fenceMap) {
+ ASSERT_EQ(fence.wait(0), -1);
+ ASSERT_EQ(errno, ETIME);
+
+ // Increment the timeline to the last syncpoint.
+ timelines[item.first].inc(item.second);
+ }
+
+ // Check that the fence is triggered.
+ ASSERT_EQ(fence.wait(0), 0);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ ParameterizedMergeStressTest,
+ MergeStressTest,
+ ::testing::Combine(::testing::Values(16,32), ::testing::Values(32, 1024, 1024*32)));
+
+}  // namespace
+
diff --git a/logd/LogBuffer.cpp b/logd/LogBuffer.cpp
index cd9ea20..8c1c344 100644
--- a/logd/LogBuffer.cpp
+++ b/logd/LogBuffer.cpp
@@ -93,9 +93,9 @@
}
LogBuffer::LogBuffer(LastLogTimes *times)
- : mTimes(*times) {
+ : dgramQlenStatistics(false)
+ , mTimes(*times) {
pthread_mutex_init(&mLogElementsLock, NULL);
- dgram_qlen_statistics = false;
static const char global_tuneable[] = "persist.logd.size"; // Settings App
static const char global_default[] = "ro.logd.size"; // BoardConfig.mk
@@ -150,10 +150,10 @@
while (--it != mLogElements.begin()) {
if ((*it)->getRealTime() <= realtime) {
// halves the peak performance, use with caution
- if (dgram_qlen_statistics) {
+ if (dgramQlenStatistics) {
LogBufferElementCollection::iterator ib = it;
unsigned short buckets, num = 1;
- for (unsigned short i = 0; (buckets = stats.dgram_qlen(i)); ++i) {
+ for (unsigned short i = 0; (buckets = stats.dgramQlen(i)); ++i) {
buckets -= num;
num += buckets;
while (buckets && (--ib != mLogElements.begin())) {
@@ -267,8 +267,7 @@
if (uid == caller_uid) {
it = mLogElements.erase(it);
- unsigned short len = e->getMsgLen();
- stats.subtract(len, id, uid, e->getPid());
+ stats.subtract(e->getMsgLen(), id, uid, e->getPid());
delete e;
pruneRows--;
if (pruneRows == 0) {
@@ -318,23 +317,19 @@
uid_t uid = e->getUid();
- if (uid == worst) {
+ if ((uid == worst) || mPrune.naughty(e)) { // Worst or BlackListed
it = mLogElements.erase(it);
unsigned short len = e->getMsgLen();
- stats.subtract(len, id, worst, e->getPid());
- delete e;
- kick = true;
- pruneRows--;
- if ((pruneRows == 0) || (worst_sizes < second_worst_sizes)) {
- break;
- }
- worst_sizes -= len;
- } else if (mPrune.naughty(e)) { // BlackListed
- it = mLogElements.erase(it);
- stats.subtract(e->getMsgLen(), id, uid, e->getPid());
+ stats.subtract(len, id, uid, e->getPid());
delete e;
pruneRows--;
- if (pruneRows == 0) {
+ if (uid == worst) {
+ kick = true;
+ if ((pruneRows == 0) || (worst_sizes < second_worst_sizes)) {
+ break;
+ }
+ worst_sizes -= len;
+ } else if (pruneRows == 0) {
break;
}
} else {
diff --git a/logd/LogBuffer.h b/logd/LogBuffer.h
index 4b982a8..879baea 100644
--- a/logd/LogBuffer.h
+++ b/logd/LogBuffer.h
@@ -37,8 +37,7 @@
pthread_mutex_t mLogElementsLock;
LogStatistics stats;
-
- bool dgram_qlen_statistics;
+ bool dgramQlenStatistics;
PruneList mPrune;
@@ -66,7 +65,11 @@
void enableDgramQlenStatistics() {
stats.enableDgramQlenStatistics();
- dgram_qlen_statistics = true;
+ dgramQlenStatistics = true;
+ }
+
+ void enableStatistics() {
+ stats.enableStatistics();
}
int initPrune(char *cp) { return mPrune.init(cp); }
diff --git a/logd/LogStatistics.cpp b/logd/LogStatistics.cpp
index 82f9165..6f3a088 100644
--- a/logd/LogStatistics.cpp
+++ b/logd/LogStatistics.cpp
@@ -51,11 +51,11 @@
}
bool PidStatistics::pidGone() {
- if (mGone) {
+ if (mGone || (pid == gone)) {
return true;
}
- if (pid == gone) {
- return true;
+ if (pid == 0) {
+ return false;
}
if (kill(pid, 0) && (errno != EPERM)) {
mGone = true;
@@ -90,9 +90,14 @@
}
// must call free to release return value
+// If only we could sniff our own logs for:
+// <time> <pid> <pid> E AndroidRuntime: Process: <name>, PID: <pid>
+// which debuggerd prints as a process is crashing.
char *PidStatistics::pidToName(pid_t pid) {
char *retval = NULL;
- if (pid != gone) {
+ if (pid == 0) { // special case from auditd for kernel
+ retval = strdup("logd.auditd");
+ } else if (pid != gone) {
char buffer[512];
snprintf(buffer, sizeof(buffer), "/proc/%u/cmdline", pid);
int fd = open(buffer, O_RDONLY);
@@ -122,7 +127,7 @@
PidStatisticsCollection::iterator it;
for (it = begin(); it != end();) {
delete (*it);
- it = Pids.erase(it);
+ it = erase(it);
}
}
@@ -130,7 +135,7 @@
mSizes += size;
++mElements;
- PidStatistics *p;
+ PidStatistics *p = NULL;
PidStatisticsCollection::iterator last;
PidStatisticsCollection::iterator it;
for (last = it = begin(); it != end(); last = it, ++it) {
@@ -141,12 +146,12 @@
}
}
// insert before the trailing "gone" entry, if present.
- bool insert = (last != it) && (p->getPid() == p->gone);
+ bool insert_before_last = (last != it) && p && (p->getPid() == p->gone);
p = new PidStatistics(pid, pidToName(pid));
- if (insert) {
- Pids.insert(last, p);
+ if (insert_before_last) {
+ insert(last, p);
} else {
- Pids.push_back(p);
+ push_back(p);
}
p->add(size);
}
@@ -163,17 +168,17 @@
size_t szsTotal = p->sizesTotal();
size_t elsTotal = p->elementsTotal();
delete p;
- Pids.erase(it);
+ erase(it);
it = end();
--it;
if (it == end()) {
p = new PidStatistics(p->gone);
- Pids.push_back(p);
+ push_back(p);
} else {
p = *it;
if (p->getPid() != p->gone) {
p = new PidStatistics(p->gone);
- Pids.push_back(p);
+ push_back(p);
}
}
p->addTotal(szsTotal, elsTotal);
@@ -194,8 +199,8 @@
PidStatistics *n = (*it);
if ((n->getPid() != n->gone) && (n->sizes() > l->sizes())) {
pass = true;
- Pids.erase(it);
- Pids.insert(lt, n);
+ erase(it);
+ insert(lt, n);
it = lt;
n = l;
}
@@ -302,6 +307,10 @@
}
void LidStatistics::subtract(unsigned short size, uid_t uid, pid_t pid) {
+ if (uid == (uid_t) -1) { // init
+ uid = (uid_t) AID_ROOT;
+ }
+
UidStatisticsCollection::iterator it;
for (it = begin(); it != end(); ++it) {
UidStatistics *u = *it;
@@ -384,24 +393,25 @@
}
LogStatistics::LogStatistics()
- : start(CLOCK_MONOTONIC) {
+ : mStatistics(false)
+ , dgramQlenStatistics(false)
+ , start(CLOCK_MONOTONIC) {
log_id_for_each(i) {
mSizes[i] = 0;
mElements[i] = 0;
}
- dgram_qlen_statistics = false;
- for(unsigned short bucket = 0; dgram_qlen(bucket); ++bucket) {
+ for(unsigned short bucket = 0; dgramQlen(bucket); ++bucket) {
mMinimum[bucket].tv_sec = mMinimum[bucket].tv_sec_max;
mMinimum[bucket].tv_nsec = mMinimum[bucket].tv_nsec_max;
}
}
-// Each bucket below represents a dgram_qlen of log messages. By
+// Each bucket below represents a dgramQlen of log messages. By
// finding the minimum period of time from start to finish
-// of each dgram_qlen, we can get a performance expectation for
+// of each dgramQlen, we can get a performance expectation for
// the user space logger. The net result is that the period
-// of time divided by the dgram_qlen will give us the average time
+// of time divided by the dgramQlen will give us the average time
// between log messages; at the point where the average time
// is greater than the throughput capability of the logger
// we will no longer require the benefits of the FIFO formed
@@ -412,7 +422,7 @@
//
// for example (reformatted):
//
-// Minimum time between log events per dgram_qlen:
+// Minimum time between log events per dgramQlen:
// 1 2 3 5 10 20 30 50 100 200 300 400 500 600
// 5u2 12u 13u 15u 16u 27u 30u 36u 407u 3m1 3m3 3m9 3m9 5m5
//
@@ -426,12 +436,12 @@
// a large engineering margin so the rule of thumb that lead us to 100 is
// fine.
//
-// bucket dgram_qlen are tuned for /proc/sys/net/unix/max_dgram_qlen = 300
+// bucket dgramQlen are tuned for /proc/sys/net/unix/max_dgram_qlen = 300
const unsigned short LogStatistics::mBuckets[] = {
1, 2, 3, 5, 10, 20, 30, 50, 100, 200, 300, 400, 500, 600
};
-unsigned short LogStatistics::dgram_qlen(unsigned short bucket) {
+unsigned short LogStatistics::dgramQlen(unsigned short bucket) {
if (bucket >= sizeof(mBuckets) / sizeof(mBuckets[0])) {
return 0;
}
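To make the comment's arithmetic concrete, a standalone sketch — the numbers are taken from the sample table in the comment above; nothing here is logd API:

```cpp
#include <cstdio>

int main() {
    // Sample datum from the table above: at dgramQlen 100, the minimum
    // observed window start-to-finish was 407us.
    const unsigned long long minimum_ns = 407000;
    const unsigned short qlen = 100;
    // Period divided by dgramQlen: ~4070ns average spacing between log
    // messages. The logger keeps up while its per-message cost stays
    // below this figure.
    std::printf("%lluns per message\n", minimum_ns / qlen);
    return 0;
}
```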
@@ -455,6 +465,9 @@
log_id_t log_id, uid_t uid, pid_t pid) {
mSizes[log_id] += size;
++mElements[log_id];
+ if (!mStatistics) {
+ return;
+ }
id(log_id).add(size, uid, pid);
}
@@ -462,6 +475,9 @@
log_id_t log_id, uid_t uid, pid_t pid) {
mSizes[log_id] -= size;
--mElements[log_id];
+ if (!mStatistics) {
+ return;
+ }
id(log_id).subtract(size, uid, pid);
}
@@ -545,25 +561,28 @@
spaces = 1;
log_time t(CLOCK_MONOTONIC);
- unsigned long long d = t.nsec() - start.nsec();
- string.appendFormat("\nTotal%4llu:%02llu:%02llu.%09llu",
+ unsigned long long d;
+ if (mStatistics) {
+ d = t.nsec() - start.nsec();
+ string.appendFormat("\nTotal%4llu:%02llu:%02llu.%09llu",
d / NS_PER_SEC / 60 / 60, (d / NS_PER_SEC / 60) % 60,
(d / NS_PER_SEC) % 60, d % NS_PER_SEC);
- log_id_for_each(i) {
- if (!(logMask & (1 << i))) {
- continue;
+ log_id_for_each(i) {
+ if (!(logMask & (1 << i))) {
+ continue;
+ }
+ oldLength = string.length();
+ if (spaces < 0) {
+ spaces = 0;
+ }
+ string.appendFormat("%*s%zu/%zu", spaces, "",
+ sizesTotal(i), elementsTotal(i));
+ spaces += spaces_total + oldLength - string.length();
}
- oldLength = string.length();
- if (spaces < 0) {
- spaces = 0;
- }
- string.appendFormat("%*s%zu/%zu", spaces, "",
- sizesTotal(i), elementsTotal(i));
- spaces += spaces_total + oldLength - string.length();
+ spaces = 1;
}
- spaces = 1;
d = t.nsec() - oldest.nsec();
string.appendFormat("\nNow%6llu:%02llu:%02llu.%09llu",
d / NS_PER_SEC / 60 / 60, (d / NS_PER_SEC / 60) % 60,
@@ -689,23 +708,23 @@
pids.clear();
}
- if (dgram_qlen_statistics) {
+ if (dgramQlenStatistics) {
const unsigned short spaces_time = 6;
const unsigned long long max_seconds = 100000;
spaces = 0;
- string.append("\n\nMinimum time between log events per dgram_qlen:\n");
- for(unsigned short i = 0; dgram_qlen(i); ++i) {
+ string.append("\n\nMinimum time between log events per max_dgram_qlen:\n");
+ for(unsigned short i = 0; dgramQlen(i); ++i) {
oldLength = string.length();
if (spaces < 0) {
spaces = 0;
}
- string.appendFormat("%*s%u", spaces, "", dgram_qlen(i));
+ string.appendFormat("%*s%u", spaces, "", dgramQlen(i));
spaces += spaces_time + oldLength - string.length();
}
string.append("\n");
spaces = 0;
unsigned short n;
- for(unsigned short i = 0; (n = dgram_qlen(i)); ++i) {
+ for(unsigned short i = 0; (n = dgramQlen(i)); ++i) {
unsigned long long duration = minimum(i);
if (duration) {
duration /= n;
@@ -727,8 +746,8 @@
/ (NS_PER_SEC / 1000));
} else if (duration >= (NS_PER_SEC / (1000000 / 10))) {
string.appendFormat("%lluu",
- (duration + (NS_PER_SEC / 2 / 1000000))
- / (NS_PER_SEC / 1000000));
+ (duration + (NS_PER_SEC / 2 / 1000000))
+ / (NS_PER_SEC / 1000000));
} else {
string.appendFormat("%llun", duration);
}
diff --git a/logd/LogStatistics.h b/logd/LogStatistics.h
index 3733137..f6c4329 100644
--- a/logd/LogStatistics.h
+++ b/logd/LogStatistics.h
@@ -72,6 +72,10 @@
PidStatisticsCollection Pids;
+ void insert(PidStatisticsCollection::iterator i, PidStatistics *p)
+ { Pids.insert(i, p); }
+ void push_back(PidStatistics *p) { Pids.push_back(p); }
+
size_t mSizes;
size_t mElements;
@@ -81,6 +85,8 @@
PidStatisticsCollection::iterator begin() { return Pids.begin(); }
PidStatisticsCollection::iterator end() { return Pids.end(); }
+ PidStatisticsCollection::iterator erase(PidStatisticsCollection::iterator i)
+ { return Pids.erase(i); }
uid_t getUid() { return uid; }
@@ -138,7 +144,8 @@
size_t mSizes[LOG_ID_MAX];
size_t mElements[LOG_ID_MAX];
- bool dgram_qlen_statistics;
+ bool mStatistics;
+ bool dgramQlenStatistics;
static const unsigned short mBuckets[14];
log_time mMinimum[sizeof(mBuckets) / sizeof(mBuckets[0])];
@@ -150,8 +157,9 @@
LidStatistics &id(log_id_t log_id) { return LogIds[log_id]; }
- void enableDgramQlenStatistics() { dgram_qlen_statistics = true; }
- static unsigned short dgram_qlen(unsigned short bucket);
+ void enableDgramQlenStatistics() { dgramQlenStatistics = true; }
+ void enableStatistics() { mStatistics = true; }
+ static unsigned short dgramQlen(unsigned short bucket);
unsigned long long minimum(unsigned short bucket);
void recordDiff(log_time diff, unsigned short bucket);
diff --git a/logd/README.property b/logd/README.property
index f4b3c3c..b7fcece 100644
--- a/logd/README.property
+++ b/logd/README.property
@@ -4,6 +4,9 @@
logd.auditd bool true Enable selinux audit daemon
logd.auditd.dmesg bool true selinux audit messages duplicated and
sent on to dmesg log
+logd.statistics bool depends Enable logcat -S statistics.
+ro.config.low_ram bool false if true, logd.statistics default false
+ro.build.type string if user, logd.statistics default false
logd.statistics.dgram_qlen bool false Record dgram_qlen statistics. This
represents a performance impact and
is used to determine the platform's
diff --git a/logd/main.cpp b/logd/main.cpp
index 1e1a718..54da7e3 100644
--- a/logd/main.cpp
+++ b/logd/main.cpp
@@ -152,6 +152,15 @@
if (property_get_bool("logd.statistics.dgram_qlen", false)) {
logBuf->enableDgramQlenStatistics();
}
+ {
+ char property[PROPERTY_VALUE_MAX];
+ property_get("ro.build.type", property, "");
+ if (property_get_bool("logd.statistics",
+ !!strcmp(property, "user")
+ && !property_get_bool("ro.config.low_ram", false))) {
+ logBuf->enableStatistics();
+ }
+ }
// LogReader listens on /dev/socket/logdr. When a client
// connects, log entries in the LogBuffer are written to the client.
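The default computed above reads as: statistics are on unless this is a "user" build or a low-RAM device, and an explicit logd.statistics property overrides the computed default. A sketch of that truth table (statistics_default is a hypothetical stand-in for the default expression, not a logd function):

```cpp
#include <string>

// Hypothetical stand-in for the default passed to property_get_bool() above.
static bool statistics_default(const std::string& build_type, bool low_ram) {
    return build_type != "user" && !low_ram;
}

int main() {
    // userdebug/eng builds with normal RAM default to statistics on;
    // user builds and low-RAM devices default off.
    bool on = statistics_default("userdebug", false);   // true
    bool off = statistics_default("user", false);       // false
    return (on && !off) ? 0 : 1;
}
```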
diff --git a/toolbox/top.c b/toolbox/top.c
index 280a032..b1a275c 100644
--- a/toolbox/top.c
+++ b/toolbox/top.c
@@ -32,6 +32,7 @@
#include <ctype.h>
#include <dirent.h>
#include <grp.h>
+#include <inttypes.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
@@ -59,13 +60,13 @@
char name[PROC_NAME_LEN];
char tname[THREAD_NAME_LEN];
char state;
- long unsigned utime;
- long unsigned stime;
- long unsigned delta_utime;
- long unsigned delta_stime;
- long unsigned delta_time;
- long vss;
- long rss;
+ uint64_t utime;
+ uint64_t stime;
+ uint64_t delta_utime;
+ uint64_t delta_stime;
+ uint64_t delta_time;
+ uint64_t vss;
+ uint64_t rss;
int prs;
int num_threads;
char policy[POLICY_NAME_LEN];
@@ -344,10 +345,19 @@
proc->tname[THREAD_NAME_LEN-1] = 0;
/* Scan rest of string. */
- sscanf(close_paren + 1, " %c %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d "
- "%lu %lu %*d %*d %*d %*d %*d %*d %*d %lu %ld "
- "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %d",
- &proc->state, &proc->utime, &proc->stime, &proc->vss, &proc->rss, &proc->prs);
+ sscanf(close_paren + 1,
+ " %c " "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d "
+ "%" SCNu64
+ "%" SCNu64 "%*d %*d %*d %*d %*d %*d %*d "
+ "%" SCNu64
+ "%" SCNu64 "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d "
+ "%d",
+ &proc->state,
+ &proc->utime,
+ &proc->stime,
+ &proc->vss,
+ &proc->rss,
+ &proc->prs);
return 0;
}
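The %lu to SCNu64 change above is not cosmetic: on an LP32 build, unsigned long is 32 bits, so fields such as vsize can silently overflow when parsed from a 64-bit kernel's /proc/<pid>/stat. A small self-contained sketch of the difference, with illustrative values only:

```cpp
#include <cinttypes>
#include <cstdio>

int main() {
    uint64_t vsize = 0;
    // 4 GiB + 1: representable in uint64_t on every ABI, but it would wrap
    // if parsed into a 32-bit unsigned long with %lu.
    std::sscanf("4294967297", "%" SCNu64, &vsize);
    std::printf("%" PRIu64 "\n", vsize);  // prints 4294967297
    return 0;
}
```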
@@ -470,12 +480,15 @@
snprintf(user_buf, 20, "%d", proc->uid);
user_str = user_buf;
}
- if (!threads)
- printf("%5d %2d %3ld%% %c %5d %6ldK %6ldK %3s %-8.8s %s\n", proc->pid, proc->prs, proc->delta_time * 100 / total_delta_time, proc->state, proc->num_threads,
- proc->vss / 1024, proc->rss * getpagesize() / 1024, proc->policy, user_str, proc->name[0] != 0 ? proc->name : proc->tname);
- else
- printf("%5d %5d %2d %3ld%% %c %6ldK %6ldK %3s %-8.8s %-15s %s\n", proc->pid, proc->tid, proc->prs, proc->delta_time * 100 / total_delta_time, proc->state,
- proc->vss / 1024, proc->rss * getpagesize() / 1024, proc->policy, user_str, proc->tname, proc->name);
+ if (!threads) {
+ printf("%5d %2d %3" PRIu64 "%% %c %5d %6" PRIu64 "K %6" PRIu64 "K %3s %-8.8s %s\n",
+ proc->pid, proc->prs, proc->delta_time * 100 / total_delta_time, proc->state, proc->num_threads,
+ proc->vss / 1024, proc->rss * getpagesize() / 1024, proc->policy, user_str, proc->name[0] != 0 ? proc->name : proc->tname);
+ } else {
+ printf("%5d %5d %2d %3" PRIu64 "%% %c %6" PRIu64 "K %6" PRIu64 "K %3s %-8.8s %-15s %s\n",
+ proc->pid, proc->tid, proc->prs, proc->delta_time * 100 / total_delta_time, proc->state,
+ proc->vss / 1024, proc->rss * getpagesize() / 1024, proc->policy, user_str, proc->tname, proc->name);
+ }
}
}