storaged: Fix up private variable names in uid_monitor.
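
Rename uid_monitor's private data members to carry a trailing
underscore (last_uid_io_stats_, curr_io_stats_, io_history_,
charger_stat_, uidm_mutex_, start_ts_, enabled_), and drop the
redundant "struct" keyword from the touched declarations. The unit
test used to reach into the private io_history member directly, so
add an io_history() accessor returning a reference to io_history_
and switch the test over to it. A minimal sketch of the test-side
usage (names as in storaged_test.cpp):

    uid_monitor uidm;
    auto& io_history = uidm.io_history();  // binds to the private io_history_
    io_history.clear();                    // mutates uidm's history in place
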
Bug: N/A
Test: storaged builds
Change-Id: Ifd21e25baa7b1c6ce41c5e0ec5247f47ba716e6e
Merged-In: Ifd21e25baa7b1c6ce41c5e0ec5247f47ba716e6e
diff --git a/storaged/include/storaged_uid_monitor.h b/storaged/include/storaged_uid_monitor.h
index 3a718fa..b56e71a 100644
--- a/storaged/include/storaged_uid_monitor.h
+++ b/storaged/include/storaged_uid_monitor.h
@@ -68,31 +68,31 @@
struct uid_record {
string name;
- struct uid_io_usage ios;
+ uid_io_usage ios;
};
struct uid_records {
uint64_t start_ts;
- vector<struct uid_record> entries;
+ vector<uid_record> entries;
};
class uid_monitor {
private:
FRIEND_TEST(storaged_test, uid_monitor);
// last dump from /proc/uid_io/stats, uid -> uid_info
- unordered_map<uint32_t, uid_info> last_uid_io_stats;
+ unordered_map<uint32_t, uid_info> last_uid_io_stats_;
// current io usage for next report, app name -> uid_io_usage
- unordered_map<string, struct uid_io_usage> curr_io_stats;
+ unordered_map<string, uid_io_usage> curr_io_stats_;
// io usage records, end timestamp -> {start timestamp, vector of records}
- map<uint64_t, struct uid_records> io_history;
+ map<uint64_t, uid_records> io_history_;
// charger ON/OFF
- charger_stat_t charger_stat;
+ charger_stat_t charger_stat_;
- // protects curr_io_stats, last_uid_io_stats, records and charger_stat
- Mutex uidm_mutex;
+ // protects curr_io_stats_, last_uid_io_stats_, io_history_ and charger_stat_
+ Mutex uidm_mutex_;
// start time for IO records
- uint64_t start_ts;
+ uint64_t start_ts_;
// true if UID_IO_STATS_PATH is accessible
- const bool enable;
+ const bool enabled_;
// reads from /proc/uid_io/stats
unordered_map<uint32_t, uid_info> get_uid_io_stats_locked();
@@ -110,16 +110,18 @@
// called by storaged -u
unordered_map<uint32_t, uid_info> get_uid_io_stats();
// called by dumpsys
- map<uint64_t, struct uid_records> dump(
+ map<uint64_t, uid_records> dump(
double hours, uint64_t threshold, bool force_report);
// called by battery properties listener
void set_charger_state(charger_stat_t stat);
// called by storaged periodic_chore or dump with force_report
- bool enabled() { return enable; };
+ bool enabled() { return enabled_; }
void report(unordered_map<int, StoragedProto>* protos);
// restores io_history from protobuf
void load_uid_io_proto(const UidIOUsage& proto);
void clear_user_history(userid_t user_id);
+
+ map<uint64_t, uid_records>& io_history() { return io_history_; }
};
#endif /* _STORAGED_UID_MONITOR_H_ */
diff --git a/storaged/storaged_uid_monitor.cpp b/storaged/storaged_uid_monitor.cpp
index 5745782..32b1568 100644
--- a/storaged/storaged_uid_monitor.cpp
+++ b/storaged/storaged_uid_monitor.cpp
@@ -50,7 +50,7 @@
std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats()
{
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
return get_uid_io_stats_locked();
};
@@ -178,10 +178,10 @@
uid_io_stats[u.uid].name = std::to_string(u.uid);
uids.push_back(u.uid);
uid_names.push_back(&uid_io_stats[u.uid].name);
- if (last_uid_io_stats.find(u.uid) == last_uid_io_stats.end()) {
+ if (last_uid_io_stats_.find(u.uid) == last_uid_io_stats_.end()) {
refresh_uid_names = true;
} else {
- uid_io_stats[u.uid].name = last_uid_io_stats[u.uid].name;
+ uid_io_stats[u.uid].name = last_uid_io_stats_[u.uid].name;
}
} else {
task_info t;
@@ -218,12 +218,12 @@
{
// remove records more than 5 days old
if (curr_ts > 5 * DAY_TO_SEC) {
- auto it = io_history.lower_bound(curr_ts - 5 * DAY_TO_SEC);
- io_history.erase(io_history.begin(), it);
+ auto it = io_history_.lower_bound(curr_ts - 5 * DAY_TO_SEC);
+ io_history_.erase(io_history_.begin(), it);
}
struct uid_records new_records;
- for (const auto& p : curr_io_stats) {
+ for (const auto& p : curr_io_stats_) {
struct uid_record record = {};
record.name = p.first;
if (!p.second.uid_ios.is_zero()) {
@@ -237,23 +237,23 @@
}
}
- curr_io_stats.clear();
- new_records.start_ts = start_ts;
- start_ts = curr_ts;
+ curr_io_stats_.clear();
+ new_records.start_ts = start_ts_;
+ start_ts_ = curr_ts;
if (new_records.entries.empty())
return;
// make some room for new records
- ssize_t overflow = history_size(io_history) +
+ ssize_t overflow = history_size(io_history_) +
new_records.entries.size() - MAX_UID_RECORDS_SIZE;
- while (overflow > 0 && io_history.size() > 0) {
- auto del_it = io_history.begin();
+ while (overflow > 0 && io_history_.size() > 0) {
+ auto del_it = io_history_.begin();
overflow -= del_it->second.entries.size();
- io_history.erase(io_history.begin());
+ io_history_.erase(io_history_.begin());
}
- io_history[curr_ts] = new_records;
+ io_history_[curr_ts] = new_records;
}
std::map<uint64_t, struct uid_records> uid_monitor::dump(
@@ -263,7 +263,7 @@
report(nullptr);
}
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
std::map<uint64_t, struct uid_records> dump_records;
uint64_t first_ts = 0;
@@ -272,7 +272,7 @@
first_ts = time(NULL) - hours * HOUR_TO_SEC;
}
- for (auto it = io_history.lower_bound(first_ts); it != io_history.end(); ++it) {
+ for (auto it = io_history_.lower_bound(first_ts); it != io_history_.end(); ++it) {
const std::vector<struct uid_record>& recs = it->second.entries;
struct uid_records filtered;
@@ -311,29 +311,29 @@
for (const auto& it : uid_io_stats) {
const uid_info& uid = it.second;
- if (curr_io_stats.find(uid.name) == curr_io_stats.end()) {
- curr_io_stats[uid.name] = {};
+ if (curr_io_stats_.find(uid.name) == curr_io_stats_.end()) {
+ curr_io_stats_[uid.name] = {};
}
- struct uid_io_usage& usage = curr_io_stats[uid.name];
+ struct uid_io_usage& usage = curr_io_stats_[uid.name];
usage.user_id = multiuser_get_user_id(uid.uid);
int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
- last_uid_io_stats[uid.uid].io[FOREGROUND].read_bytes;
+ last_uid_io_stats_[uid.uid].io[FOREGROUND].read_bytes;
int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
- last_uid_io_stats[uid.uid].io[BACKGROUND].read_bytes;
+ last_uid_io_stats_[uid.uid].io[BACKGROUND].read_bytes;
int64_t fg_wr_delta = uid.io[FOREGROUND].write_bytes -
- last_uid_io_stats[uid.uid].io[FOREGROUND].write_bytes;
+ last_uid_io_stats_[uid.uid].io[FOREGROUND].write_bytes;
int64_t bg_wr_delta = uid.io[BACKGROUND].write_bytes -
- last_uid_io_stats[uid.uid].io[BACKGROUND].write_bytes;
+ last_uid_io_stats_[uid.uid].io[BACKGROUND].write_bytes;
- usage.uid_ios.bytes[READ][FOREGROUND][charger_stat] +=
+ usage.uid_ios.bytes[READ][FOREGROUND][charger_stat_] +=
(fg_rd_delta < 0) ? 0 : fg_rd_delta;
- usage.uid_ios.bytes[READ][BACKGROUND][charger_stat] +=
+ usage.uid_ios.bytes[READ][BACKGROUND][charger_stat_] +=
(bg_rd_delta < 0) ? 0 : bg_rd_delta;
- usage.uid_ios.bytes[WRITE][FOREGROUND][charger_stat] +=
+ usage.uid_ios.bytes[WRITE][FOREGROUND][charger_stat_] +=
(fg_wr_delta < 0) ? 0 : fg_wr_delta;
- usage.uid_ios.bytes[WRITE][BACKGROUND][charger_stat] +=
+ usage.uid_ios.bytes[WRITE][BACKGROUND][charger_stat_] +=
(bg_wr_delta < 0) ? 0 : bg_wr_delta;
for (const auto& task_it : uid.tasks) {
@@ -341,34 +341,34 @@
const pid_t pid = task_it.first;
const std::string& comm = task_it.second.comm;
int64_t task_fg_rd_delta = task.io[FOREGROUND].read_bytes -
- last_uid_io_stats[uid.uid].tasks[pid].io[FOREGROUND].read_bytes;
+ last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].read_bytes;
int64_t task_bg_rd_delta = task.io[BACKGROUND].read_bytes -
- last_uid_io_stats[uid.uid].tasks[pid].io[BACKGROUND].read_bytes;
+ last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].read_bytes;
int64_t task_fg_wr_delta = task.io[FOREGROUND].write_bytes -
- last_uid_io_stats[uid.uid].tasks[pid].io[FOREGROUND].write_bytes;
+ last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].write_bytes;
int64_t task_bg_wr_delta = task.io[BACKGROUND].write_bytes -
- last_uid_io_stats[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;
+ last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;
io_usage& task_usage = usage.task_ios[comm];
- task_usage.bytes[READ][FOREGROUND][charger_stat] +=
+ task_usage.bytes[READ][FOREGROUND][charger_stat_] +=
(task_fg_rd_delta < 0) ? 0 : task_fg_rd_delta;
- task_usage.bytes[READ][BACKGROUND][charger_stat] +=
+ task_usage.bytes[READ][BACKGROUND][charger_stat_] +=
(task_bg_rd_delta < 0) ? 0 : task_bg_rd_delta;
- task_usage.bytes[WRITE][FOREGROUND][charger_stat] +=
+ task_usage.bytes[WRITE][FOREGROUND][charger_stat_] +=
(task_fg_wr_delta < 0) ? 0 : task_fg_wr_delta;
- task_usage.bytes[WRITE][BACKGROUND][charger_stat] +=
+ task_usage.bytes[WRITE][BACKGROUND][charger_stat_] +=
(task_bg_wr_delta < 0) ? 0 : task_bg_wr_delta;
}
}
- last_uid_io_stats = uid_io_stats;
+ last_uid_io_stats_ = uid_io_stats;
}
void uid_monitor::report(unordered_map<int, StoragedProto>* protos)
{
if (!enabled()) return;
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
update_curr_io_stats_locked();
add_records_locked(time(NULL));
@@ -408,7 +408,7 @@
void uid_monitor::update_uid_io_proto(unordered_map<int, StoragedProto>* protos)
{
- for (const auto& item : io_history) {
+ for (const auto& item : io_history_) {
const uint64_t& end_ts = item.first;
const struct uid_records& recs = item.second;
unordered_map<userid_t, UidIOItem*> user_items;
@@ -448,9 +448,9 @@
void uid_monitor::clear_user_history(userid_t user_id)
{
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
- for (auto& item : io_history) {
+ for (auto& item : io_history_) {
vector<uid_record>* entries = &item.second.entries;
entries->erase(
remove_if(entries->begin(), entries->end(),
@@ -459,9 +459,9 @@
entries->end());
}
- for (auto it = io_history.begin(); it != io_history.end(); ) {
+ for (auto it = io_history_.begin(); it != io_history_.end(); ) {
if (it->second.entries.empty()) {
- it = io_history.erase(it);
+ it = io_history_.erase(it);
} else {
it++;
}
@@ -472,11 +472,11 @@
{
if (!enabled()) return;
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
for (const auto& item_proto : uid_io_proto.uid_io_items()) {
const UidIORecords& records_proto = item_proto.records();
- struct uid_records* recs = &io_history[item_proto.end_ts()];
+ struct uid_records* recs = &io_history_[item_proto.end_ts()];
recs->start_ts = records_proto.start_ts();
for (const auto& rec_proto : records_proto.entries()) {
@@ -497,24 +497,24 @@
void uid_monitor::set_charger_state(charger_stat_t stat)
{
- Mutex::Autolock _l(uidm_mutex);
+ Mutex::Autolock _l(uidm_mutex_);
- if (charger_stat == stat) {
+ if (charger_stat_ == stat) {
return;
}
update_curr_io_stats_locked();
- charger_stat = stat;
+ charger_stat_ = stat;
}
void uid_monitor::init(charger_stat_t stat)
{
- charger_stat = stat;
+ charger_stat_ = stat;
- start_ts = time(NULL);
- last_uid_io_stats = get_uid_io_stats();
+ start_ts_ = time(NULL);
+ last_uid_io_stats_ = get_uid_io_stats();
}
uid_monitor::uid_monitor()
- : enable(!access(UID_IO_STATS_PATH, R_OK)) {
+ : enabled_(!access(UID_IO_STATS_PATH, R_OK)) {
}
diff --git a/storaged/tests/storaged_test.cpp b/storaged/tests/storaged_test.cpp
index ec47b65..e08c9ee 100644
--- a/storaged/tests/storaged_test.cpp
+++ b/storaged/tests/storaged_test.cpp
@@ -443,8 +443,9 @@
TEST(storaged_test, uid_monitor) {
uid_monitor uidm;
+ auto& io_history = uidm.io_history();
- uidm.io_history[200] = {
+ io_history[200] = {
.start_ts = 100,
.entries = {
{ "app1", {
@@ -466,7 +467,7 @@
},
};
- uidm.io_history[300] = {
+ io_history[300] = {
.start_ts = 200,
.entries = {
{ "app1", {
@@ -526,9 +527,9 @@
EXPECT_EQ(user_1_item_1.records().entries(0).user_id(), 1UL);
EXPECT_EQ(user_1_item_1.records().entries(0).uid_io().wr_fg_chg_off(), 1000UL);
- uidm.io_history.clear();
+ io_history.clear();
- uidm.io_history[300] = {
+ io_history[300] = {
.start_ts = 200,
.entries = {
{ "app1", {
@@ -539,7 +540,7 @@
},
};
- uidm.io_history[400] = {
+ io_history[400] = {
.start_ts = 300,
.entries = {
{ "app1", {
@@ -553,13 +554,13 @@
uidm.load_uid_io_proto(protos[0].uid_io_usage());
uidm.load_uid_io_proto(protos[1].uid_io_usage());
- EXPECT_EQ(uidm.io_history.size(), 3UL);
- EXPECT_EQ(uidm.io_history.count(200), 1UL);
- EXPECT_EQ(uidm.io_history.count(300), 1UL);
- EXPECT_EQ(uidm.io_history.count(400), 1UL);
+ EXPECT_EQ(io_history.size(), 3UL);
+ EXPECT_EQ(io_history.count(200), 1UL);
+ EXPECT_EQ(io_history.count(300), 1UL);
+ EXPECT_EQ(io_history.count(400), 1UL);
- EXPECT_EQ(uidm.io_history[200].start_ts, 100UL);
- const vector<struct uid_record>& entries_0 = uidm.io_history[200].entries;
+ EXPECT_EQ(io_history[200].start_ts, 100UL);
+ const vector<struct uid_record>& entries_0 = io_history[200].entries;
EXPECT_EQ(entries_0.size(), 3UL);
EXPECT_EQ(entries_0[0].name, "app1");
EXPECT_EQ(entries_0[0].ios.user_id, 0UL);
@@ -572,8 +573,8 @@
EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);
- EXPECT_EQ(uidm.io_history[300].start_ts, 200UL);
- const vector<struct uid_record>& entries_1 = uidm.io_history[300].entries;
+ EXPECT_EQ(io_history[300].start_ts, 200UL);
+ const vector<struct uid_record>& entries_1 = io_history[300].entries;
EXPECT_EQ(entries_1.size(), 3UL);
EXPECT_EQ(entries_1[0].name, "app1");
EXPECT_EQ(entries_1[0].ios.user_id, 0UL);
@@ -585,8 +586,8 @@
EXPECT_EQ(entries_1[2].ios.user_id, 1UL);
EXPECT_EQ(entries_1[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);
- EXPECT_EQ(uidm.io_history[400].start_ts, 300UL);
- const vector<struct uid_record>& entries_2 = uidm.io_history[400].entries;
+ EXPECT_EQ(io_history[400].start_ts, 300UL);
+ const vector<struct uid_record>& entries_2 = io_history[400].entries;
EXPECT_EQ(entries_2.size(), 1UL);
EXPECT_EQ(entries_2[0].name, "app1");
EXPECT_EQ(entries_2[0].ios.user_id, 0UL);
@@ -615,14 +616,14 @@
uidm.clear_user_history(0);
- EXPECT_EQ(uidm.io_history.size(), 2UL);
- EXPECT_EQ(uidm.io_history.count(200), 1UL);
- EXPECT_EQ(uidm.io_history.count(300), 1UL);
+ EXPECT_EQ(io_history.size(), 2UL);
+ EXPECT_EQ(io_history.count(200), 1UL);
+ EXPECT_EQ(io_history.count(300), 1UL);
- EXPECT_EQ(uidm.io_history[200].entries.size(), 1UL);
- EXPECT_EQ(uidm.io_history[300].entries.size(), 1UL);
+ EXPECT_EQ(io_history[200].entries.size(), 1UL);
+ EXPECT_EQ(io_history[300].entries.size(), 1UL);
uidm.clear_user_history(1);
- EXPECT_EQ(uidm.io_history.size(), 0UL);
+ EXPECT_EQ(io_history.size(), 0UL);
}