Merge "adb: Fix missing shell protocol usage."
diff --git a/crash_reporter/Android.mk b/crash_reporter/Android.mk
index 467432a..6cd34ab 100644
--- a/crash_reporter/Android.mk
+++ b/crash_reporter/Android.mk
@@ -81,7 +81,7 @@
LOCAL_MODULE := crash_sender
LOCAL_MODULE_CLASS := EXECUTABLES
LOCAL_MODULE_PATH := $(TARGET_OUT_EXECUTABLES)
-LOCAL_REQUIRED_MODULES := curl periodic_scheduler
+LOCAL_REQUIRED_MODULES := curl grep periodic_scheduler
LOCAL_SRC_FILES := crash_sender
include $(BUILD_PREBUILT)
diff --git a/crash_reporter/crash_sender b/crash_reporter/crash_sender
index 7f9062a..d0d6772 100755
--- a/crash_reporter/crash_sender
+++ b/crash_reporter/crash_sender
@@ -63,11 +63,8 @@
# Must be stateful to enable testing kernel crashes.
PAUSE_CRASH_SENDING="${CRASH_STATE_DIR}/lock/crash_sender_paused"
-# URL to send official build crash reports to.
-REPORT_UPLOAD_PROD_URL="https://clients2.google.com/cr/report"
-
# Path to a directory of restricted certificates which includes
-# a certificate for ${REPORT_UPLOAD_PROD_URL}.
+# a certificate for the crash server.
RESTRICTED_CERTIFICATES_PATH="/system/etc/security/cacerts"
# File whose existence implies we're running and not to start again.
@@ -79,6 +76,9 @@
# Set this to 1 to allow uploading of device coredumps.
DEVCOREDUMP_UPLOAD_FLAG_FILE="${CRASH_STATE_DIR}/device_coredump_upload_allowed"
+# The weave configuration file.
+WEAVE_CONF_FILE="/etc/weaved/weaved.conf"
+
# The syslog tag for all logging we emit.
TAG="$(basename $0)[$$]"
@@ -180,10 +180,21 @@
}
# Generate a uniform random number in 0..max-1.
+# POSIX arithmetic expansion requires support of at least signed long integers.
+# On 32-bit systems, that may mean 32-bit signed integers, in which case the
+# 32-bit random number read from /dev/urandom may be interpreted as negative
+# when used inside an arithmetic expansion (since the high bit might be set).
+# mksh at least is known to behave this way.
+# For this case, simply take the absolute value, which will still give a
+# roughly uniform random distribution for the modulo (as we are merely ignoring
+# the high/sign bit).
+# See corresponding Arithmetic Expansion and Arithmetic Expression sections:
+# POSIX: http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_06_04
+# mksh: http://linux.die.net/man/1/mksh
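+# For example (illustrative values, assuming 32-bit signed shell arithmetic):
+# a raw value of 3221225472 read from /dev/urandom is seen as -1073741824
+# inside $((...)); taking its absolute value gives 1073741824 before the modulo.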
generate_uniform_random() {
local max=$1
local random="$(od -An -N4 -tu /dev/urandom)"
- echo $((random % max))
+ echo $(((random < 0 ? -random : random) % max))
}
# Check if sending a crash now does not exceed the maximum 24hr rate and
@@ -277,7 +288,7 @@
local report_payload="$(get_key_value "${meta_path}" "payload")"
local kind="$(get_kind "${meta_path}")"
local exec_name="$(get_key_value "${meta_path}" "exec_name")"
- local url="${REPORT_UPLOAD_PROD_URL}"
+ local url="$(getprop crash_reporter.server)"
local brillo_version="$(get_key_value "${meta_path}" "ver")"
local hwclass="$(get_hardware_class)"
local write_payload_size="$(get_key_value "${meta_path}" "payload_size")"
@@ -288,6 +299,13 @@
local version="$(get_key_value "${meta_path}" "upload_var_ver")"
local upload_prefix="$(get_key_value "${meta_path}" "upload_prefix")"
local guid
+ local model_manifest_id="$(get_key_value "${WEAVE_CONF_FILE}" "model_id")"
+
+ # If crash_reporter.server is not set, return with an error.
+ if [ -z "${url}" ]; then
+ lecho "Configuration error: crash_reporter.server not set."
+ return 1
+ fi
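+ # How crash_reporter.server is provisioned is product-specific; for example, it
+ # could be set from an init .rc file ("setprop crash_reporter.server <url>") or
+ # from the build's default system properties (illustrative only).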
set -- \
-F "write_payload_size=${write_payload_size}" \
@@ -441,6 +459,7 @@
-F "ver=${version}" \
-F "hwclass=${hwclass}" \
-F "exec_name=${exec_name}" \
+ -F "model_manifest_id=${model_manifest_id}" \
${image_type:+-F "image_type=${image_type}"} \
${boot_mode:+-F "boot_mode=${boot_mode}"} \
${error_type:+-F "error_type=${error_type}"} \
diff --git a/crash_reporter/init.crash_reporter.rc b/crash_reporter/init.crash_reporter.rc
index db9bb6f..30e87f5 100644
--- a/crash_reporter/init.crash_reporter.rc
+++ b/crash_reporter/init.crash_reporter.rc
@@ -27,3 +27,4 @@
service crash_sender /system/bin/periodic_scheduler 3600 14400 crash_sender \
/system/bin/crash_sender
class late_start
+ group system
diff --git a/crash_reporter/periodic_scheduler b/crash_reporter/periodic_scheduler
index 7fdb5c9..5408da7 100755
--- a/crash_reporter/periodic_scheduler
+++ b/crash_reporter/periodic_scheduler
@@ -22,8 +22,7 @@
set -e -u
SCRIPT_NAME="$(basename "$0")"
-#CHECK_DELAY=300 # Check every 5 minutes.
-CHECK_DELAY=15 # Check every 5 minutes.
+CHECK_DELAY=300 # Check every 5 minutes.
KILL_DELAY=10 # How long to let the job clean up after a timeout.
# Let the unittests override.
: ${SPOOL_DIR:=/data/misc/crash_reporter/spool/cron-lite}
diff --git a/metricsd/Android.mk b/metricsd/Android.mk
index c5d5281..c219ab1 100644
--- a/metricsd/Android.mk
+++ b/metricsd/Android.mk
@@ -84,6 +84,7 @@
LOCAL_MODULE := libmetrics
LOCAL_C_INCLUDES := $(metrics_includes)
LOCAL_CFLAGS := $(metrics_CFLAGS)
+LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := $(metrics_cpp_extension)
LOCAL_CPPFLAGS := $(metrics_CPPFLAGS)
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
@@ -97,6 +98,7 @@
LOCAL_MODULE := metrics_client
LOCAL_C_INCLUDES := $(metrics_includes)
LOCAL_CFLAGS := $(metrics_CFLAGS)
+LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := $(metrics_cpp_extension)
LOCAL_CPPFLAGS := $(metrics_CPPFLAGS)
LOCAL_SHARED_LIBRARIES := $(metrics_shared_libraries) \
@@ -132,7 +134,10 @@
libchromeos-http \
libchromeos-dbus \
libcutils \
- libdbus
+ libdbus \
+ librootdev
+
+LOCAL_CLANG := true
LOCAL_SRC_FILES := $(metrics_daemon_sources)
LOCAL_STATIC_LIBRARIES := metrics_daemon_protos
include $(BUILD_EXECUTABLE)
diff --git a/metricsd/constants.h b/metricsd/constants.h
index 15c15d9..717e5d2 100644
--- a/metricsd/constants.h
+++ b/metricsd/constants.h
@@ -19,10 +19,12 @@
namespace metrics {
static const char kMetricsDirectory[] = "/data/misc/metrics/";
-static const char kMetricsEventsFilePath[] = "/data/misc/metrics/uma-events";
-static const char kMetricsGUIDFilePath[] = "/data/misc/metrics/Sysinfo.GUID";
+static const char kMetricsEventsFileName[] = "uma-events";
+static const char kMetricsGUIDFileName[] = "Sysinfo.GUID";
static const char kMetricsServer[] = "https://clients4.google.com/uma/v2";
-static const char kConsentFilePath[] = "/data/misc/metrics/enabled";
+static const char kConsentFileName[] = "enabled";
+static const char kStagedLogName[] = "staged_log";
+static const char kFailedUploadCountName[] = "failed_upload_count";
static const char kDefaultVersion[] = "0.0.0.0";
// System properties used.
diff --git a/metricsd/include/metrics/metrics_library.h b/metricsd/include/metrics/metrics_library.h
index a956b69..26df2f4 100644
--- a/metricsd/include/metrics/metrics_library.h
+++ b/metricsd/include/metrics/metrics_library.h
@@ -22,6 +22,7 @@
#include <unistd.h>
#include <base/compiler_specific.h>
+#include <base/files/file_path.h>
#include <base/macros.h>
#include <base/memory/scoped_ptr.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
@@ -122,6 +123,7 @@
private:
friend class CMetricsLibraryTest;
friend class MetricsLibraryTest;
+ friend class UploadServiceTest;
FRIEND_TEST(MetricsLibraryTest, AreMetricsEnabled);
FRIEND_TEST(MetricsLibraryTest, FormatChromeMessage);
FRIEND_TEST(MetricsLibraryTest, FormatChromeMessageTooLong);
@@ -129,8 +131,7 @@
FRIEND_TEST(MetricsLibraryTest, SendMessageToChrome);
FRIEND_TEST(MetricsLibraryTest, SendMessageToChromeUMAEventsBadFileLocation);
- void InitForTest(const std::string& uma_events_file,
- const std::string& consent_file);
+ void InitForTest(const base::FilePath& metrics_directory);
// Sets |*result| to whether or not the |mounts_file| indicates that
// the |device_name| is currently mounted. Uses |buffer| of
@@ -146,8 +147,8 @@
// Cached state of whether or not metrics were enabled.
static bool cached_enabled_;
- std::string uma_events_file_;
- std::string consent_file_;
+ base::FilePath uma_events_file_;
+ base::FilePath consent_file_;
DISALLOW_COPY_AND_ASSIGN(MetricsLibrary);
};
diff --git a/metricsd/metrics_client.cc b/metricsd/metrics_client.cc
index f658b22..78174ef 100644
--- a/metricsd/metrics_client.cc
+++ b/metricsd/metrics_client.cc
@@ -140,11 +140,13 @@
}
static int DumpLogs() {
- printf("Metrics from %s\n\n", metrics::kMetricsEventsFilePath);
+ base::FilePath events_file = base::FilePath(
+ metrics::kMetricsDirectory).Append(metrics::kMetricsEventsFileName);
+ printf("Metrics from %s\n\n", events_file.value().data());
ScopedVector<metrics::MetricSample> metrics;
- metrics::SerializationUtils::ReadMetricsFromFile(
- metrics::kMetricsEventsFilePath, &metrics);
+ metrics::SerializationUtils::ReadMetricsFromFile(events_file.value(),
+ &metrics);
for (ScopedVector<metrics::MetricSample>::const_iterator i = metrics.begin();
i != metrics.end(); ++i) {
diff --git a/metricsd/metrics_daemon.cc b/metricsd/metrics_daemon.cc
index 2838119..de7f2ea 100644
--- a/metricsd/metrics_daemon.cc
+++ b/metricsd/metrics_daemon.cc
@@ -84,6 +84,8 @@
const int kMetricStatsShortInterval = 1; // seconds
const int kMetricStatsLongInterval = 30; // seconds
+const int kMetricMeminfoInterval = 30; // seconds
+
// Assume a max rate of 250Mb/s for reads (worse for writes) and 512 byte
// sectors.
const int kMetricSectorsIOMax = 500000; // sectors/second
@@ -110,6 +112,7 @@
const char kVmStatFileName[] = "/proc/vmstat";
const char kMeminfoFileName[] = "/proc/meminfo";
const int kMetricsProcStatFirstLineItemsCount = 11;
+const int kDiskMetricsStatItemCount = 11;
// Thermal CPU throttling.
@@ -184,10 +187,10 @@
void MetricsDaemon::RunUploaderTest() {
upload_service_.reset(new UploadService(
- new SystemProfileCache(true, base::FilePath(config_root_)),
+ new SystemProfileCache(true, metrics_directory_),
metrics_lib_,
server_));
- upload_service_->Init(upload_interval_, metrics_file_);
+ upload_service_->Init(upload_interval_, metrics_directory_);
upload_service_->UploadEvent();
}
@@ -215,22 +218,21 @@
bool uploader_active,
bool dbus_enabled,
MetricsLibraryInterface* metrics_lib,
+ const string& diskstats_path,
const string& scaling_max_freq_path,
const string& cpuinfo_max_freq_path,
const base::TimeDelta& upload_interval,
const string& server,
- const string& metrics_file,
- const string& config_root) {
+ const base::FilePath& metrics_directory) {
CHECK(metrics_lib);
testing_ = testing;
uploader_active_ = uploader_active;
dbus_enabled_ = dbus_enabled;
- config_root_ = config_root;
+ metrics_directory_ = metrics_directory;
metrics_lib_ = metrics_lib;
upload_interval_ = upload_interval;
server_ = server;
- metrics_file_ = metrics_file;
// Get ticks per second (HZ) on this system.
// Sysconf cannot fail, so no sanity checks are needed.
@@ -273,8 +275,13 @@
weekly_cycle_.reset(new PersistentInteger("weekly.cycle"));
version_cycle_.reset(new PersistentInteger("version.cycle"));
+ diskstats_path_ = diskstats_path;
scaling_max_freq_path_ = scaling_max_freq_path;
cpuinfo_max_freq_path_ = cpuinfo_max_freq_path;
+
+ # If testing, initialize the stats reporter without connecting to DBus.
+ if (testing_)
+ StatsReporterInit();
}
int MetricsDaemon::OnInit() {
@@ -283,6 +290,13 @@
if (return_code != EX_OK)
return return_code;
+ StatsReporterInit();
+
+ // Start collecting meminfo stats.
+ ScheduleMeminfoCallback(kMetricMeminfoInterval);
+ memuse_final_time_ = GetActiveTime() + kMemuseIntervals[0];
+ ScheduleMemuseCallback(kMemuseIntervals[0]);
+
if (testing_)
return EX_OK;
@@ -313,10 +327,15 @@
}
}
+ base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ base::Bind(&MetricsDaemon::HandleUpdateStatsTimeout,
+ base::Unretained(this)),
+ base::TimeDelta::FromMilliseconds(kUpdateStatsIntervalMs));
+
if (uploader_active_) {
upload_service_.reset(
new UploadService(new SystemProfileCache(), metrics_lib_, server_));
- upload_service_->Init(upload_interval_, metrics_file_);
+ upload_service_->Init(upload_interval_, metrics_directory_);
}
return EX_OK;
@@ -494,6 +513,40 @@
base::TimeDelta::FromSeconds(wait));
}
+bool MetricsDaemon::DiskStatsReadStats(uint64_t* read_sectors,
+ uint64_t* write_sectors) {
+ CHECK(read_sectors);
+ CHECK(write_sectors);
+ std::string line;
+ if (diskstats_path_.empty()) {
+ return false;
+ }
+
+ if (!base::ReadFileToString(base::FilePath(diskstats_path_), &line)) {
+ PLOG(WARNING) << "Could not read disk stats from " << diskstats_path_;
+ return false;
+ }
+
+ std::vector<std::string> parts = base::SplitString(
+ line, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
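+ // The stat file is expected to hold kDiskMetricsStatItemCount (11) fields;
+ // sectors read is field 3 (index 2) and sectors written is field 7 (index 6).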
+ if (parts.size() != kDiskMetricsStatItemCount) {
+ LOG(ERROR) << "Could not parse disk stat correctly. Expected "
+ << kDiskMetricsStatItemCount << " elements but got "
+ << parts.size();
+ return false;
+ }
+ if (!base::StringToUint64(parts[2], read_sectors)) {
+ LOG(ERROR) << "Couldn't convert read sectors " << parts[2] << " to uint64";
+ return false;
+ }
+ if (!base::StringToUint64(parts[6], write_sectors)) {
+ LOG(ERROR) << "Couldn't convert write sectors " << parts[6] << " to uint64";
+ return false;
+ }
+
+ return true;
+}
+
bool MetricsDaemon::VmStatsParseStats(const char* stats,
struct VmstatRecord* record) {
CHECK(stats);
@@ -712,11 +765,7 @@
return;
}
// Make both calls even if the first one fails.
- bool success = ProcessMeminfo(meminfo_raw);
- bool reschedule =
- ReportZram(base::FilePath(FILE_PATH_LITERAL("/sys/block/zram0"))) &&
- success;
- if (reschedule) {
+ if (ProcessMeminfo(meminfo_raw)) {
base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
base::Bind(&MetricsDaemon::MeminfoCallback, base::Unretained(this),
wait),
diff --git a/metricsd/metrics_daemon.h b/metricsd/metrics_daemon.h
index 9180e23..b363c5e 100644
--- a/metricsd/metrics_daemon.h
+++ b/metricsd/metrics_daemon.h
@@ -45,12 +45,12 @@
bool uploader_active,
bool dbus_enabled,
MetricsLibraryInterface* metrics_lib,
+ const std::string& diskstats_path,
const std::string& cpuinfo_max_freq_path,
const std::string& scaling_max_freq_path,
const base::TimeDelta& upload_interval,
const std::string& server,
- const std::string& metrics_file,
- const std::string& config_root);
+ const base::FilePath& metrics_directory);
// Initializes DBus and MessageLoop variables before running the MessageLoop.
int OnInit() override;
@@ -78,6 +78,7 @@
FRIEND_TEST(MetricsDaemonTest, GetHistogramPath);
FRIEND_TEST(MetricsDaemonTest, IsNewEpoch);
FRIEND_TEST(MetricsDaemonTest, MessageFilter);
+ FRIEND_TEST(MetricsDaemonTest, ParseDiskStats);
FRIEND_TEST(MetricsDaemonTest, ParseVmStats);
FRIEND_TEST(MetricsDaemonTest, ProcessKernelCrash);
FRIEND_TEST(MetricsDaemonTest, ProcessMeminfo);
@@ -86,7 +87,6 @@
FRIEND_TEST(MetricsDaemonTest, ProcessUserCrash);
FRIEND_TEST(MetricsDaemonTest, ReportCrashesDailyFrequency);
FRIEND_TEST(MetricsDaemonTest, ReadFreqToInt);
- FRIEND_TEST(MetricsDaemonTest, ReportDiskStats);
FRIEND_TEST(MetricsDaemonTest, ReportKernelCrashInterval);
FRIEND_TEST(MetricsDaemonTest, ReportUncleanShutdownInterval);
FRIEND_TEST(MetricsDaemonTest, ReportUserCrashInterval);
@@ -267,7 +267,7 @@
bool dbus_enabled_;
// Root of the configuration files to use.
- std::string config_root_;
+ base::FilePath metrics_directory_;
// The metrics library handle.
MetricsLibraryInterface* metrics_lib_;
@@ -324,12 +324,12 @@
scoped_ptr<PersistentInteger> unclean_shutdowns_daily_count_;
scoped_ptr<PersistentInteger> unclean_shutdowns_weekly_count_;
+ std::string diskstats_path_;
std::string scaling_max_freq_path_;
std::string cpuinfo_max_freq_path_;
base::TimeDelta upload_interval_;
std::string server_;
- std::string metrics_file_;
scoped_ptr<UploadService> upload_service_;
};
diff --git a/metricsd/metrics_daemon_main.cc b/metricsd/metrics_daemon_main.cc
index 7f9ec43..c2e794e 100644
--- a/metricsd/metrics_daemon_main.cc
+++ b/metricsd/metrics_daemon_main.cc
@@ -20,6 +20,7 @@
#include <base/strings/string_util.h>
#include <chromeos/flag_helper.h>
#include <chromeos/syslog_logging.h>
+#include <rootdev.h>
#include "constants.h"
#include "metrics_daemon.h"
@@ -29,6 +30,28 @@
const char kCpuinfoMaxFreqPath[] =
"/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq";
+// Returns the path to the root device's disk stats file in sysfs, or an empty
+// string if the root device cannot be determined.
+static const std::string MetricsMainDiskStatsPath() {
+ char dev_path_cstr[PATH_MAX];
+ std::string dev_prefix = "/dev/block/";
+ std::string dev_path;
+
+ int ret = rootdev(dev_path_cstr, sizeof(dev_path_cstr), true, true);
+ if (ret != 0) {
+ LOG(WARNING) << "error " << ret << " determining root device";
+ return "";
+ }
+ dev_path = dev_path_cstr;
+ // Check that rootdev begins with "/dev/block/".
+ if (!base::StartsWithASCII(dev_path, dev_prefix, false)) {
+ LOG(WARNING) << "unexpected root device " << dev_path;
+ return "";
+ }
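+ // Illustrative example: a root device of /dev/block/mmcblk0 maps to
+ // /sys/class/block/mmcblk0/stat; the actual device name is platform-specific.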
+ return "/sys/class/block/" + dev_path.substr(dev_prefix.length()) + "/stat";
+}
+
int main(int argc, char** argv) {
DEFINE_bool(daemon, true, "run as daemon (use -nodaemon for debugging)");
@@ -52,11 +75,9 @@
DEFINE_string(server,
metrics::kMetricsServer,
"Server to upload the metrics to. (needs -uploader)");
- DEFINE_string(metrics_file,
- metrics::kMetricsEventsFilePath,
- "File to use as a proxy for uploading the metrics");
- DEFINE_string(config_root,
- "/", "Root of the configuration files (testing only)");
+ DEFINE_string(metrics_directory,
+ metrics::kMetricsDirectory,
+ "Root of the configuration files (testing only)");
chromeos::FlagHelper::Init(argc, argv, "Chromium OS Metrics Daemon");
@@ -75,12 +96,12 @@
FLAGS_uploader | FLAGS_uploader_test,
FLAGS_withdbus,
&metrics_lib,
+ MetricsMainDiskStatsPath(),
kScalingMaxFreqPath,
kCpuinfoMaxFreqPath,
base::TimeDelta::FromSeconds(FLAGS_upload_interval_secs),
FLAGS_server,
- FLAGS_metrics_file,
- FLAGS_config_root);
+ base::FilePath(FLAGS_metrics_directory));
if (FLAGS_uploader_test) {
daemon.RunUploaderTest();
diff --git a/metricsd/metrics_daemon_test.cc b/metricsd/metrics_daemon_test.cc
index 0d2229c..476d0f3 100644
--- a/metricsd/metrics_daemon_test.cc
+++ b/metricsd/metrics_daemon_test.cc
@@ -82,12 +82,12 @@
false,
true,
&metrics_lib_,
+ disk_stats_path_.value(),
scaling_max_freq_path_.value(),
cpu_max_freq_path_.value(),
base::TimeDelta::FromMinutes(30),
metrics::kMetricsServer,
- metrics::kMetricsEventsFilePath,
- "/");
+ temp_dir_.path());
}
// Adds a metrics library mock expectation that the specified metric
@@ -198,6 +198,21 @@
/* min */ 1, /* max */ 100, /* buckets */ 50);
}
+TEST_F(MetricsDaemonTest, ParseDiskStats) {
+ uint64_t read_sectors_now, write_sectors_now;
+ CreateFakeDiskStatsFile(kFakeDiskStats0);
+ ASSERT_TRUE(daemon_.DiskStatsReadStats(&read_sectors_now,
+ &write_sectors_now));
+ EXPECT_EQ(read_sectors_now, kFakeReadSectors[0]);
+ EXPECT_EQ(write_sectors_now, kFakeWriteSectors[0]);
+
+ CreateFakeDiskStatsFile(kFakeDiskStats1);
+ ASSERT_TRUE(daemon_.DiskStatsReadStats(&read_sectors_now,
+ &write_sectors_now));
+ EXPECT_EQ(read_sectors_now, kFakeReadSectors[1]);
+ EXPECT_EQ(write_sectors_now, kFakeWriteSectors[1]);
+}
+
TEST_F(MetricsDaemonTest, ProcessMeminfo) {
string meminfo =
"MemTotal: 2000000 kB\nMemFree: 500000 kB\n"
diff --git a/metricsd/metrics_library.cc b/metricsd/metrics_library.cc
index 5687f1b..6449a24 100644
--- a/metricsd/metrics_library.cc
+++ b/metricsd/metrics_library.cc
@@ -56,7 +56,7 @@
time_t MetricsLibrary::cached_enabled_time_ = 0;
bool MetricsLibrary::cached_enabled_ = false;
-MetricsLibrary::MetricsLibrary() : consent_file_(metrics::kConsentFilePath) {}
+MetricsLibrary::MetricsLibrary() {}
MetricsLibrary::~MetricsLibrary() {}
// We take buffer and buffer_size as parameters in order to simplify testing
@@ -131,19 +131,20 @@
time_t this_check_time = time(nullptr);
if (this_check_time != cached_enabled_time_) {
cached_enabled_time_ = this_check_time;
- cached_enabled_ = stat(consent_file_.c_str(), &stat_buffer) >= 0;
+ cached_enabled_ = stat(consent_file_.value().data(), &stat_buffer) >= 0;
}
return cached_enabled_;
}
void MetricsLibrary::Init() {
- uma_events_file_ = metrics::kMetricsEventsFilePath;
+ base::FilePath dir = base::FilePath(metrics::kMetricsDirectory);
+ uma_events_file_ = dir.Append(metrics::kMetricsEventsFileName);
+ consent_file_ = dir.Append(metrics::kConsentFileName);
}
-void MetricsLibrary::InitForTest(const std::string& uma_events_file,
- const std::string& consent_file) {
- uma_events_file_ = uma_events_file;
- consent_file_ = consent_file;
+void MetricsLibrary::InitForTest(const base::FilePath& metrics_directory) {
+ uma_events_file_ = metrics_directory.Append(metrics::kMetricsEventsFileName);
+ consent_file_ = metrics_directory.Append(metrics::kConsentFileName);
}
bool MetricsLibrary::SendToUMA(const std::string& name,
@@ -154,30 +155,32 @@
return metrics::SerializationUtils::WriteMetricToFile(
*metrics::MetricSample::HistogramSample(name, sample, min, max, nbuckets)
.get(),
- metrics::kMetricsEventsFilePath);
+ uma_events_file_.value());
}
bool MetricsLibrary::SendEnumToUMA(const std::string& name, int sample,
int max) {
return metrics::SerializationUtils::WriteMetricToFile(
*metrics::MetricSample::LinearHistogramSample(name, sample, max).get(),
- metrics::kMetricsEventsFilePath);
+ uma_events_file_.value());
}
bool MetricsLibrary::SendSparseToUMA(const std::string& name, int sample) {
return metrics::SerializationUtils::WriteMetricToFile(
*metrics::MetricSample::SparseHistogramSample(name, sample).get(),
- metrics::kMetricsEventsFilePath);
+ uma_events_file_.value());
}
bool MetricsLibrary::SendUserActionToUMA(const std::string& action) {
return metrics::SerializationUtils::WriteMetricToFile(
- *metrics::MetricSample::UserActionSample(action).get(), metrics::kMetricsEventsFilePath);
+ *metrics::MetricSample::UserActionSample(action).get(),
+ uma_events_file_.value());
}
bool MetricsLibrary::SendCrashToUMA(const char *crash_kind) {
return metrics::SerializationUtils::WriteMetricToFile(
- *metrics::MetricSample::CrashSample(crash_kind).get(), metrics::kMetricsEventsFilePath);
+ *metrics::MetricSample::CrashSample(crash_kind).get(),
+ uma_events_file_.value());
}
bool MetricsLibrary::SendCrosEventToUMA(const std::string& event) {
diff --git a/metricsd/metrics_library_test.cc b/metricsd/metrics_library_test.cc
index 7ade6ee..f300d17 100644
--- a/metricsd/metrics_library_test.cc
+++ b/metricsd/metrics_library_test.cc
@@ -28,19 +28,17 @@
protected:
virtual void SetUp() {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- consent_file_ = temp_dir_.path().Append("consent");
- uma_events_file_ = temp_dir_.path().Append("events");
- lib_.InitForTest(uma_events_file_.value(), consent_file_.value());
- EXPECT_EQ(0, WriteFile(uma_events_file_, "", 0));
+ lib_.InitForTest(temp_dir_.path());
+ EXPECT_EQ(0, WriteFile(lib_.uma_events_file_, "", 0));
// Defeat metrics enabled caching between tests.
lib_.cached_enabled_time_ = 0;
}
void SetMetricsConsent(bool enabled) {
if (enabled) {
- ASSERT_EQ(base::WriteFile(consent_file_, "", 0), 0);
+ ASSERT_EQ(base::WriteFile(lib_.consent_file_, "", 0), 0);
} else {
- ASSERT_TRUE(base::DeleteFile(consent_file_, false));
+ ASSERT_TRUE(base::DeleteFile(lib_.consent_file_, false));
}
}
@@ -49,8 +47,6 @@
MetricsLibrary lib_;
base::ScopedTempDir temp_dir_;
- base::FilePath consent_file_;
- base::FilePath uma_events_file_;
};
TEST_F(MetricsLibraryTest, AreMetricsEnabledFalse) {
diff --git a/metricsd/uploader/system_profile_cache.cc b/metricsd/uploader/system_profile_cache.cc
index 8635fb0..e3f6339 100644
--- a/metricsd/uploader/system_profile_cache.cc
+++ b/metricsd/uploader/system_profile_cache.cc
@@ -55,16 +55,16 @@
SystemProfileCache::SystemProfileCache()
: initialized_(false),
testing_(false),
- config_root_("/"),
+ metrics_directory_(metrics::kMetricsDirectory),
session_id_(new chromeos_metrics::PersistentInteger(
kPersistentSessionIdFilename)) {
}
SystemProfileCache::SystemProfileCache(bool testing,
- const base::FilePath& config_root)
+ const base::FilePath& metrics_directory)
: initialized_(false),
testing_(testing),
- config_root_(config_root),
+ metrics_directory_(metrics_directory),
session_id_(new chromeos_metrics::PersistentInteger(
kPersistentSessionIdFilename)) {
}
@@ -91,9 +91,11 @@
channel = "";
profile_.version = metrics::kDefaultVersion;
}
- profile_.client_id =
- testing_ ? "client_id_test" :
- GetPersistentGUID(metrics::kMetricsGUIDFilePath);
+ std::string guid_path = metrics_directory_.Append(
+ metrics::kMetricsGUIDFileName).value();
+ profile_.client_id = testing_ ?
+ "client_id_test" :
+ GetPersistentGUID(guid_path);
profile_.hardware_class = "unknown";
profile_.channel = ProtoChannelFromString(channel);
@@ -155,7 +157,7 @@
std::string SystemProfileCache::GetProperty(const std::string& name) {
if (testing_) {
std::string content;
- base::ReadFileToString(config_root_.Append(name), &content);
+ base::ReadFileToString(metrics_directory_.Append(name), &content);
return content;
} else {
char value[PROPERTY_VALUE_MAX];
diff --git a/metricsd/uploader/system_profile_cache.h b/metricsd/uploader/system_profile_cache.h
index 7157810..1d22fa1 100644
--- a/metricsd/uploader/system_profile_cache.h
+++ b/metricsd/uploader/system_profile_cache.h
@@ -50,7 +50,7 @@
public:
SystemProfileCache();
- SystemProfileCache(bool testing, const base::FilePath& config_root);
+ SystemProfileCache(bool testing, const base::FilePath& metrics_directory);
// Populates the ProfileSystem protobuf with system information.
bool Populate(metrics::ChromeUserMetricsExtension* metrics_proto) override;
@@ -77,13 +77,13 @@
bool InitializeOrCheck();
// Gets a system property as a string.
- // When |testing_| is true, reads the value from |config_root_|/|name|
+ // When |testing_| is true, reads the value from |metrics_directory_|/|name|
// instead.
std::string GetProperty(const std::string& name);
bool initialized_;
bool testing_;
- base::FilePath config_root_;
+ base::FilePath metrics_directory_;
scoped_ptr<chromeos_metrics::PersistentInteger> session_id_;
SystemProfile profile_;
};
diff --git a/metricsd/uploader/upload_service.cc b/metricsd/uploader/upload_service.cc
index 2335630..b630cec 100644
--- a/metricsd/uploader/upload_service.cc
+++ b/metricsd/uploader/upload_service.cc
@@ -19,6 +19,7 @@
#include <string>
#include <base/bind.h>
+#include <base/files/file_util.h>
#include <base/logging.h>
#include <base/memory/scoped_vector.h>
#include <base/message_loop/message_loop.h>
@@ -29,6 +30,7 @@
#include <base/metrics/statistics_recorder.h>
#include <base/sha1.h>
+#include "constants.h"
#include "serialization/metric_sample.h"
#include "serialization/serialization_utils.h"
#include "uploader/metrics_log.h"
@@ -44,6 +46,7 @@
metrics_lib_(metrics_lib),
histogram_snapshot_manager_(this),
sender_(new HttpSender(server)),
+ failed_upload_count_(metrics::kFailedUploadCountName),
testing_(false) {
}
@@ -56,9 +59,10 @@
}
void UploadService::Init(const base::TimeDelta& upload_interval,
- const std::string& metrics_file) {
+ const base::FilePath& metrics_directory) {
base::StatisticsRecorder::Initialize();
- metrics_file_ = metrics_file;
+ metrics_file_ = metrics_directory.Append(metrics::kMetricsEventsFileName);
+ staged_log_path_ = metrics_directory.Append(metrics::kStagedLogName);
if (!testing_) {
base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
@@ -70,8 +74,8 @@
}
void UploadService::StartNewLog() {
- CHECK(!staged_log_) << "the staged log should be discarded before starting "
- "a new metrics log";
+ CHECK(!HasStagedLog()) << "the staged log should be discarded before "
+ << "starting a new metrics log";
MetricsLog* log = new MetricsLog();
current_log_.reset(log);
}
@@ -87,7 +91,11 @@
}
void UploadService::UploadEvent() {
- if (staged_log_) {
+ // If the system shut down or crashed while uploading a report, we may not have
+ // deleted an old log.
+ RemoveFailedLog();
+
+ if (HasStagedLog()) {
// Previous upload failed, retry sending the logs.
SendStagedLog();
return;
@@ -99,51 +107,48 @@
StageCurrentLog();
// If a log is available for upload, upload it.
- if (staged_log_) {
+ if (HasStagedLog()) {
SendStagedLog();
}
}
void UploadService::SendStagedLog() {
- CHECK(staged_log_) << "staged_log_ must exist to be sent";
-
// If metrics are not enabled, discard the log and exit.
if (!metrics_lib_->AreMetricsEnabled()) {
LOG(INFO) << "Metrics disabled. Don't upload metrics samples.";
- staged_log_.reset();
+ base::DeleteFile(staged_log_path_, false);
return;
}
- std::string log_text;
- staged_log_->GetEncodedLog(&log_text);
- if (!sender_->Send(log_text, base::SHA1HashString(log_text))) {
- ++failed_upload_count_;
- if (failed_upload_count_ <= kMaxFailedUpload) {
- LOG(WARNING) << "log upload failed " << failed_upload_count_
- << " times. It will be retried later.";
- return;
- }
- LOG(WARNING) << "log failed more than " << kMaxFailedUpload << " times.";
+ std::string staged_log;
+ CHECK(base::ReadFileToString(staged_log_path_, &staged_log));
+
+ // Increase the failed count in case the daemon crashes while sending the log.
+ failed_upload_count_.Add(1);
+
+ if (!sender_->Send(staged_log, base::SHA1HashString(staged_log))) {
+ LOG(WARNING) << "log failed to upload";
} else {
- LOG(INFO) << "uploaded " << log_text.length() << " bytes";
+ VLOG(1) << "uploaded " << staged_log.length() << " bytes";
+ base::DeleteFile(staged_log_path_, false);
}
- // Discard staged log.
- staged_log_.reset();
+
+ RemoveFailedLog();
}
void UploadService::Reset() {
- staged_log_.reset();
+ base::DeleteFile(staged_log_path_, false);
current_log_.reset();
- failed_upload_count_ = 0;
+ failed_upload_count_.Set(0);
}
void UploadService::ReadMetrics() {
- CHECK(!staged_log_)
- << "cannot read metrics until the old logs have been discarded";
+ CHECK(!HasStagedLog()) << "cannot read metrics until the old logs have been "
+ << "discarded";
ScopedVector<metrics::MetricSample> vector;
metrics::SerializationUtils::ReadAndTruncateMetricsFromFile(
- metrics_file_, &vector);
+ metrics_file_.value(), &vector);
int i = 0;
for (ScopedVector<metrics::MetricSample>::iterator it = vector.begin();
@@ -152,7 +157,7 @@
AddSample(*sample);
i++;
}
- DLOG(INFO) << i << " samples read";
+ VLOG(1) << i << " samples read";
}
void UploadService::AddSample(const metrics::MetricSample& sample) {
@@ -216,19 +221,27 @@
}
void UploadService::StageCurrentLog() {
- CHECK(!staged_log_)
- << "staged logs must be discarded before another log can be staged";
+ // If we haven't logged anything since the last upload, don't upload an empty
+ // report.
+ if (!current_log_)
+ return;
- if (!current_log_) return;
-
- staged_log_.swap(current_log_);
- staged_log_->CloseLog();
- if (!staged_log_->PopulateSystemProfile(system_profile_setter_.get())) {
+ scoped_ptr<MetricsLog> staged_log;
+ staged_log.swap(current_log_);
+ staged_log->CloseLog();
+ if (!staged_log->PopulateSystemProfile(system_profile_setter_.get())) {
LOG(WARNING) << "Error while adding metadata to the log. Discarding the "
<< "log.";
- staged_log_.reset();
+ return;
}
- failed_upload_count_ = 0;
+ std::string encoded_log;
+ staged_log->GetEncodedLog(&encoded_log);
+
+ failed_upload_count_.Set(0);
+ if (static_cast<int>(encoded_log.size()) != base::WriteFile(
+ staged_log_path_, encoded_log.data(), encoded_log.size())) {
+ LOG(ERROR) << "failed to persist to " << staged_log_path_.value();
+ }
}
MetricsLog* UploadService::GetOrCreateCurrentLog() {
@@ -237,3 +250,16 @@
}
return current_log_.get();
}
+
+bool UploadService::HasStagedLog() {
+ return base::PathExists(staged_log_path_);
+}
+
+void UploadService::RemoveFailedLog() {
+ if (failed_upload_count_.Get() > kMaxFailedUpload) {
+ LOG(INFO) << "log failed more than " << kMaxFailedUpload << " times.";
+ CHECK(base::DeleteFile(staged_log_path_, false))
+ << "failed to delete staged log at " << staged_log_path_.value();
+ failed_upload_count_.Set(0);
+ }
+}
diff --git a/metricsd/uploader/upload_service.h b/metricsd/uploader/upload_service.h
index 7f2f413..a4d0a1e 100644
--- a/metricsd/uploader/upload_service.h
+++ b/metricsd/uploader/upload_service.h
@@ -24,6 +24,7 @@
#include "base/metrics/histogram_snapshot_manager.h"
#include "metrics/metrics_library.h"
+#include "persistent_integer.h"
#include "uploader/metrics_log.h"
#include "uploader/sender.h"
#include "uploader/system_profile_cache.h"
@@ -73,7 +74,7 @@
const std::string& server);
void Init(const base::TimeDelta& upload_interval,
- const std::string& metrics_file);
+ const base::FilePath& metrics_directory);
// Starts a new log. The log needs to be regenerated after each successful
// launch as it is destroyed when staging the log.
@@ -106,6 +107,7 @@
FRIEND_TEST(UploadServiceTest, LogContainsAggregatedValues);
FRIEND_TEST(UploadServiceTest, LogEmptyAfterUpload);
FRIEND_TEST(UploadServiceTest, LogEmptyByDefault);
+ FRIEND_TEST(UploadServiceTest, LogFromTheMetricsLibrary);
FRIEND_TEST(UploadServiceTest, LogKernelCrash);
FRIEND_TEST(UploadServiceTest, LogUncleanShutdown);
FRIEND_TEST(UploadServiceTest, LogUserCrash);
@@ -146,6 +148,12 @@
// system information.
void StageCurrentLog();
+ // Returns true iff a log is staged.
+ bool HasStagedLog();
+
+ // Removes the staged log iff the upload failed more than |kMaxFailedUpload| times.
+ void RemoveFailedLog();
+
// Returns the current log. If there is no current log, creates it first.
MetricsLog* GetOrCreateCurrentLog();
@@ -153,11 +161,11 @@
MetricsLibraryInterface* metrics_lib_;
base::HistogramSnapshotManager histogram_snapshot_manager_;
scoped_ptr<Sender> sender_;
- int failed_upload_count_;
+ chromeos_metrics::PersistentInteger failed_upload_count_;
scoped_ptr<MetricsLog> current_log_;
- scoped_ptr<MetricsLog> staged_log_;
- std::string metrics_file_;
+ base::FilePath metrics_file_;
+ base::FilePath staged_log_path_;
bool testing_;
};
diff --git a/metricsd/uploader/upload_service_test.cc b/metricsd/uploader/upload_service_test.cc
index 40c235d..873953e 100644
--- a/metricsd/uploader/upload_service_test.cc
+++ b/metricsd/uploader/upload_service_test.cc
@@ -24,6 +24,7 @@
#include "constants.h"
#include "metrics_library_mock.h"
+#include "persistent_integer.h"
#include "serialization/metric_sample.h"
#include "uploader/metrics_log.h"
#include "uploader/mock/mock_system_profile_setter.h"
@@ -38,17 +39,16 @@
protected:
virtual void SetUp() {
CHECK(dir_.CreateUniqueTempDir());
+ chromeos_metrics::PersistentInteger::SetMetricsDirectory(
+ dir_.path().value());
+ metrics_lib_.InitForTest(dir_.path());
upload_service_.reset(new UploadService(new MockSystemProfileSetter(),
&metrics_lib_, "", true));
upload_service_->sender_.reset(new SenderMock);
- event_file_ = dir_.path().Append("event");
- upload_service_->Init(base::TimeDelta::FromMinutes(30), event_file_.value());
+ upload_service_->Init(base::TimeDelta::FromMinutes(30), dir_.path());
upload_service_->GatherHistograms();
upload_service_->Reset();
-
- chromeos_metrics::PersistentInteger::SetMetricsDirectory(
- dir_.path().value());
}
scoped_ptr<metrics::MetricSample> Crash(const std::string& name) {
@@ -61,11 +61,9 @@
base::WriteFile(dir_.path().Append(name), value.data(), value.size()));
}
- base::FilePath event_file_;
-
base::ScopedTempDir dir_;
scoped_ptr<UploadService> upload_service_;
- MetricsLibraryMock metrics_lib_;
+ MetricsLibrary metrics_lib_;
scoped_ptr<base::AtExitManager> exit_manager_;
};
@@ -135,9 +133,17 @@
upload_service_->UploadEvent();
}
- EXPECT_TRUE(upload_service_->staged_log_);
+ EXPECT_TRUE(upload_service_->HasStagedLog());
upload_service_->UploadEvent();
- EXPECT_FALSE(upload_service_->staged_log_);
+ EXPECT_FALSE(upload_service_->HasStagedLog());
+
+ // Log a new sample. The failed upload counter should be reset.
+ upload_service_->AddSample(*Crash("user"));
+ for (int i = 0; i < UploadService::kMaxFailedUpload; i++) {
+ upload_service_->UploadEvent();
+ }
+ // The log is not discarded after multiple failed uploads.
+ EXPECT_TRUE(upload_service_->HasStagedLog());
}
TEST_F(UploadServiceTest, EmptyLogsAreNotSent) {
@@ -269,3 +275,18 @@
cache.Initialize();
EXPECT_EQ(cache.profile_.session_id, session_id + 1);
}
+
+// Test that we can log metrics from the metrics library and have the uploader
+// upload them.
+TEST_F(UploadServiceTest, LogFromTheMetricsLibrary) {
+ SenderMock* sender = new SenderMock();
+ upload_service_->sender_.reset(sender);
+
+ upload_service_->UploadEvent();
+ EXPECT_EQ(0, sender->send_call_count());
+
+ metrics_lib_.SendEnumToUMA("testname", 2, 10);
+ upload_service_->UploadEvent();
+
+ EXPECT_EQ(1, sender->send_call_count());
+}