Cache only a small portion of page map data.
The scudo memory allocator can reserve very large virtual address ranges,
which causes procrank to allocate a huge amount of memory to read the page
map data for a single vma. Modify the code so that it caches the data a
small chunk at a time.
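A minimal sketch of the chunked read, assuming an open fd for the pid's
pagemap file (the function name and arguments below are illustrative, not
part of this patch):

    #include <unistd.h>

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Read pagemap entries for [first_page, first_page + num_pages) in chunks
    // of at most kMaxPages entries so the buffer stays small for huge vmas.
    bool ReadPagemapChunked(int pagemap_fd, size_t first_page, size_t num_pages) {
        static constexpr size_t kMaxPages = 2048;  // same chunk size as the change below
        std::vector<uint64_t> page_cache;
        for (size_t cur = first_page; cur < first_page + num_pages;) {
            size_t chunk = std::min(first_page + num_pages - cur, kMaxPages);
            page_cache.resize(chunk);
            size_t total_bytes = chunk * sizeof(uint64_t);
            ssize_t bytes = pread64(pagemap_fd, page_cache.data(), total_bytes,
                                    cur * sizeof(uint64_t));
            // Error or short read; the real code logs and bails out here.
            if (bytes != static_cast<ssize_t>(total_bytes)) return false;
            // ... examine page_cache[0 .. chunk) here ...
            cur += chunk;
        }
        return true;
    }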
Remove some allocations that are not necessary since the data is only used
in the first loop.
Fix a few pread64 checks to compare the return value against the number of
bytes requested, rather than only checking whether the return value is < 0.
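In pattern form (pageflags_fd and pfn below stand in for the kpageflags_fd_
member and the page frame number; this is only an illustration of the check):

    #include <unistd.h>

    #include <cstdint>

    bool ReadPageFlags(int pageflags_fd, uint64_t pfn, uint64_t* flags) {
        ssize_t n = pread64(pageflags_fd, flags, sizeof(uint64_t), pfn * sizeof(uint64_t));
        // n < 0 only catches outright errors; a return of 0 (offset past the end
        // of the file) or a partial read is non-negative and would slip through,
        // so compare against the number of bytes that were requested.
        return n == static_cast<ssize_t>(sizeof(uint64_t));
    }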
Fix sort by rss in procrank.
Bug: 135694447
Test: Ran unit tests (only one unit test actually runs the modified code).
Test: Ran the old version of procrank using a static libmeminfo and verified
Test: that it reports the same data as the new version.
Change-Id: I8adc169b5607ec994ff13a9e161a479350d84c4d
diff --git a/libmeminfo/pageacct.cpp b/libmeminfo/pageacct.cpp
index 0a26c08..cb17af8 100644
--- a/libmeminfo/pageacct.cpp
+++ b/libmeminfo/pageacct.cpp
@@ -81,7 +81,8 @@
if (!InitPageAcct()) return false;
}
- if (pread64(kpageflags_fd_, flags, sizeof(uint64_t), pfn * sizeof(uint64_t)) < 0) {
+ if (pread64(kpageflags_fd_, flags, sizeof(uint64_t), pfn * sizeof(uint64_t)) !=
+ sizeof(uint64_t)) {
PLOG(ERROR) << "Failed to read page flags for page " << pfn;
return false;
}
@@ -95,7 +96,8 @@
if (!InitPageAcct()) return false;
}
- if (pread64(kpagecount_fd_, mapcount, sizeof(uint64_t), pfn * sizeof(uint64_t)) < 0) {
+ if (pread64(kpagecount_fd_, mapcount, sizeof(uint64_t), pfn * sizeof(uint64_t)) !=
+ sizeof(uint64_t)) {
PLOG(ERROR) << "Failed to read map count for page " << pfn;
return false;
}
@@ -130,7 +132,7 @@
off64_t offset = pfn_to_idle_bitmap_offset(pfn);
uint64_t idle_bits;
- if (pread64(pageidle_fd_, &idle_bits, sizeof(uint64_t), offset) < 0) {
+ if (pread64(pageidle_fd_, &idle_bits, sizeof(uint64_t), offset) != sizeof(uint64_t)) {
PLOG(ERROR) << "Failed to read page idle bitmap for page " << pfn;
return -errno;
}
diff --git a/libmeminfo/procmeminfo.cpp b/libmeminfo/procmeminfo.cpp
index 934d65c..caf6e86 100644
--- a/libmeminfo/procmeminfo.cpp
+++ b/libmeminfo/procmeminfo.cpp
@@ -27,6 +27,7 @@
#include <memory>
#include <string>
#include <utility>
+#include <vector>
#include <android-base/file.h>
#include <android-base/logging.h>
@@ -278,68 +279,89 @@
bool ProcMemInfo::ReadVmaStats(int pagemap_fd, Vma& vma, bool get_wss, bool use_pageidle) {
PageAcct& pinfo = PageAcct::Instance();
- uint64_t pagesz = getpagesize();
- uint64_t num_pages = (vma.end - vma.start) / pagesz;
-
- std::unique_ptr<uint64_t[]> pg_frames(new uint64_t[num_pages]);
- uint64_t first = vma.start / pagesz;
- if (pread64(pagemap_fd, pg_frames.get(), num_pages * sizeof(uint64_t),
- first * sizeof(uint64_t)) < 0) {
- PLOG(ERROR) << "Failed to read page frames from page map for pid: " << pid_;
+ if (get_wss && use_pageidle && !pinfo.InitPageAcct(true)) {
+ LOG(ERROR) << "Failed to init idle page accounting";
return false;
}
- if (get_wss && use_pageidle) {
- if (!pinfo.InitPageAcct(true)) {
- LOG(ERROR) << "Failed to init idle page accounting";
- return false;
- }
- }
+ uint64_t pagesz = getpagesize();
+ size_t num_pages = (vma.end - vma.start) / pagesz;
+ size_t first_page = vma.start / pagesz;
- std::unique_ptr<uint64_t[]> pg_flags(new uint64_t[num_pages]);
- std::unique_ptr<uint64_t[]> pg_counts(new uint64_t[num_pages]);
- for (uint64_t i = 0; i < num_pages; ++i) {
+ std::vector<uint64_t> page_cache;
+ size_t cur_page_cache_index = 0;
+ size_t num_in_page_cache = 0;
+ size_t num_leftover_pages = num_pages;
+ for (size_t cur_page = first_page; cur_page < first_page + num_pages; ++cur_page) {
if (!get_wss) {
vma.usage.vss += pagesz;
}
- uint64_t p = pg_frames[i];
- if (!PAGE_PRESENT(p) && !PAGE_SWAPPED(p)) continue;
- if (PAGE_SWAPPED(p)) {
+ // Cache page map data.
+ if (cur_page_cache_index == num_in_page_cache) {
+ static constexpr size_t kMaxPages = 2048;
+ num_leftover_pages -= num_in_page_cache;
+ if (num_leftover_pages > kMaxPages) {
+ num_in_page_cache = kMaxPages;
+ } else {
+ num_in_page_cache = num_leftover_pages;
+ }
+ page_cache.resize(num_in_page_cache);
+ size_t total_bytes = page_cache.size() * sizeof(uint64_t);
+ ssize_t bytes = pread64(pagemap_fd, page_cache.data(), total_bytes,
+ cur_page * sizeof(uint64_t));
+ if (bytes != total_bytes) {
+ if (bytes == -1) {
+ PLOG(ERROR) << "Failed to read page data at offset "
+ << cur_page * sizeof(uint64_t);
+ } else {
+ LOG(ERROR) << "Failed to read page data at offset "
+ << cur_page * sizeof(uint64_t) << " read bytes " << bytes
+ << " expected bytes " << total_bytes;
+ }
+ return false;
+ }
+ cur_page_cache_index = 0;
+ }
+
+ uint64_t page_info = page_cache[cur_page_cache_index++];
+ if (!PAGE_PRESENT(page_info) && !PAGE_SWAPPED(page_info)) continue;
+
+ if (PAGE_SWAPPED(page_info)) {
vma.usage.swap += pagesz;
- swap_offsets_.emplace_back(PAGE_SWAP_OFFSET(p));
+ swap_offsets_.emplace_back(PAGE_SWAP_OFFSET(page_info));
continue;
}
- uint64_t page_frame = PAGE_PFN(p);
- if (!pinfo.PageFlags(page_frame, &pg_flags[i])) {
+ uint64_t page_frame = PAGE_PFN(page_info);
+ uint64_t cur_page_flags;
+ if (!pinfo.PageFlags(page_frame, &cur_page_flags)) {
LOG(ERROR) << "Failed to get page flags for " << page_frame << " in process " << pid_;
swap_offsets_.clear();
return false;
}
// skip unwanted pages from the count
- if ((pg_flags[i] & pgflags_mask_) != pgflags_) continue;
+ if ((cur_page_flags & pgflags_mask_) != pgflags_) continue;
- if (!pinfo.PageMapCount(page_frame, &pg_counts[i])) {
+ uint64_t cur_page_counts;
+ if (!pinfo.PageMapCount(page_frame, &cur_page_counts)) {
LOG(ERROR) << "Failed to get page count for " << page_frame << " in process " << pid_;
swap_offsets_.clear();
return false;
}
// Page was unmapped between the presence check at the beginning of the loop and here.
- if (pg_counts[i] == 0) {
- pg_frames[i] = 0;
- pg_flags[i] = 0;
+ if (cur_page_counts == 0) {
continue;
}
- bool is_dirty = !!(pg_flags[i] & (1 << KPF_DIRTY));
- bool is_private = (pg_counts[i] == 1);
+ bool is_dirty = !!(cur_page_flags & (1 << KPF_DIRTY));
+ bool is_private = (cur_page_counts == 1);
// Working set
if (get_wss) {
bool is_referenced = use_pageidle ? (pinfo.IsPageIdle(page_frame) == 1)
- : !!(pg_flags[i] & (1 << KPF_REFERENCED));
+ : !!(cur_page_flags & (1 << KPF_REFERENCED));
if (!is_referenced) {
continue;
}
@@ -351,7 +373,7 @@
vma.usage.rss += pagesz;
vma.usage.uss += is_private ? pagesz : 0;
- vma.usage.pss += pagesz / pg_counts[i];
+ vma.usage.pss += pagesz / cur_page_counts;
if (is_private) {
vma.usage.private_dirty += is_dirty ? pagesz : 0;
vma.usage.private_clean += is_dirty ? 0 : pagesz;
diff --git a/libmeminfo/tools/procrank.cpp b/libmeminfo/tools/procrank.cpp
index cb3757d..1e44ff9 100644
--- a/libmeminfo/tools/procrank.cpp
+++ b/libmeminfo/tools/procrank.cpp
@@ -348,7 +348,7 @@
auto rss_sort = [](ProcessRecord& a, ProcessRecord& b) {
MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
- return reverse_sort ? stats_a.rss < stats_b.pss : stats_a.pss > stats_b.pss;
+ return reverse_sort ? stats_a.rss < stats_b.rss : stats_a.rss > stats_b.rss;
};
auto vss_sort = [](ProcessRecord& a, ProcessRecord& b) {