Split buckets on boot complete

Also cleaned up some of the code for splitting buckets on app upgrades.
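
At a high level, the flow introduced by this change (condensed from the
diff below; locking and error handling elided) is:

    // StatsService now routes the boot-complete path through a MultiConditionTrigger:
    //   mBootCompleteTrigger({kBootCompleteTag, kUidMapReceivedTag, kAllPullersRegisteredTag},
    //                        [this]() { mProcessor->onStatsdInitCompleted(getElapsedRealtimeNs()); })
    //
    // Once bootCompleted(), informAllUidData() and allPullersFromBootRegistered() have each
    // marked their condition complete, the trigger fires and the split propagates:
    //   StatsLogProcessor::onStatsdInitCompleted()
    //     -> MetricsManager::onStatsdInitCompleted()
    //       -> MetricProducer::onStatsdInitCompleted()  // flushLocked() creates the partial bucket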

Piggy-backed off the app upgrade tests, adding parameterized tests that
also cover the boot complete event.
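
The parameterization uses the standard gtest TEST_P pattern; a trimmed
illustration, mirroring the CountMetricProducer_test.cpp changes below
(BucketSplitEvent and its APP_UPGRADE/BOOT_COMPLETE values are assumed
to come from the shared test utilities):

    class CountMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};

    INSTANTIATE_TEST_SUITE_P(CountMetricProducerTest_PartialBucket,
                             CountMetricProducerTest_PartialBucket,
                             testing::Values(APP_UPGRADE, BOOT_COMPLETE));

    // Each TEST_P switches on GetParam() to call either notifyAppUpgrade() or
    // onStatsdInitCompleted(), then runs the same assertions for both split events.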

Refactored some value metric test code to increase code reuse and add
more assertions.

Fixed a broken value metric test that had assertions commented out.

Refactored NamedLatch into MultiConditionTrigger so that a thread is
not created until it is actually needed.
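
A minimal usage sketch of the new class (API names taken from
MultiConditionTrigger.h in this change; unlike NamedLatch::wait(), no
caller thread blocks, and the trigger lambda runs on a detached thread
only once the last condition is marked complete):

    #include "utils/MultiConditionTrigger.h"

    MultiConditionTrigger trigger(
            /*conditionNames=*/{"UID_MAP", "PULLERS_REGISTERED", "BOOT_COMPLETE"},
            /*trigger=*/[] { /* e.g. forward to StatsLogProcessor::onStatsdInitCompleted() */ });

    trigger.markComplete("UID_MAP");
    trigger.markComplete("PULLERS_REGISTERED");
    trigger.markComplete("BOOT_COMPLETE");  // last condition; the lambda now runs asynchronously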

Test: atest statsd_test
Test: push a simple test config, reboot, wait, get data; verify that
the bucket was split
Bug: 144099206
Bug: 154511974

Change-Id: I73858b5db08e8cda762bd8091b30da8738d1fd88
diff --git a/cmds/statsd/Android.bp b/cmds/statsd/Android.bp
index 6e8ceb7..c544510 100644
--- a/cmds/statsd/Android.bp
+++ b/cmds/statsd/Android.bp
@@ -104,7 +104,7 @@
         "src/subscriber/IncidentdReporter.cpp",
         "src/subscriber/SubscriberReporter.cpp",
         "src/uid_data.proto",
-        "src/utils/NamedLatch.cpp",
+        "src/utils/MultiConditionTrigger.cpp",
     ],
 
     local_include_dirs: [
@@ -362,7 +362,7 @@
         "tests/StatsService_test.cpp",
         "tests/storage/StorageManager_test.cpp",
         "tests/UidMap_test.cpp",
-        "tests/utils/NamedLatch_test.cpp",
+        "tests/utils/MultiConditionTrigger_test.cpp",
     ],
 
     static_libs: [
diff --git a/cmds/statsd/src/StatsLogProcessor.cpp b/cmds/statsd/src/StatsLogProcessor.cpp
index 325cbc7..fc7b950 100644
--- a/cmds/statsd/src/StatsLogProcessor.cpp
+++ b/cmds/statsd/src/StatsLogProcessor.cpp
@@ -1052,8 +1052,8 @@
 void StatsLogProcessor::notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk,
                                          const int uid, const int64_t version) {
     std::lock_guard<std::mutex> lock(mMetricsMutex);
-    ALOGW("Received app upgrade");
-    for (auto it : mMetricsManagers) {
+    VLOG("Received app upgrade");
+    for (const auto& it : mMetricsManagers) {
         it.second->notifyAppUpgrade(eventTimeNs, apk, uid, version);
     }
 }
@@ -1061,20 +1061,28 @@
 void StatsLogProcessor::notifyAppRemoved(const int64_t& eventTimeNs, const string& apk,
                                          const int uid) {
     std::lock_guard<std::mutex> lock(mMetricsMutex);
-    ALOGW("Received app removed");
-    for (auto it : mMetricsManagers) {
+    VLOG("Received app removed");
+    for (const auto& it : mMetricsManagers) {
         it.second->notifyAppRemoved(eventTimeNs, apk, uid);
     }
 }
 
 void StatsLogProcessor::onUidMapReceived(const int64_t& eventTimeNs) {
     std::lock_guard<std::mutex> lock(mMetricsMutex);
-    ALOGW("Received uid map");
-    for (auto it : mMetricsManagers) {
+    VLOG("Received uid map");
+    for (const auto& it : mMetricsManagers) {
         it.second->onUidMapReceived(eventTimeNs);
     }
 }
 
+void StatsLogProcessor::onStatsdInitCompleted(const int64_t& elapsedTimeNs) {
+    std::lock_guard<std::mutex> lock(mMetricsMutex);
+    VLOG("Received boot completed signal");
+    for (const auto& it : mMetricsManagers) {
+        it.second->onStatsdInitCompleted(elapsedTimeNs);
+    }
+}
+
 void StatsLogProcessor::noteOnDiskData(const ConfigKey& key) {
     std::lock_guard<std::mutex> lock(mMetricsMutex);
     mOnDiskDataConfigs.insert(key);
diff --git a/cmds/statsd/src/StatsLogProcessor.h b/cmds/statsd/src/StatsLogProcessor.h
index 97512ed..ffd83ba 100644
--- a/cmds/statsd/src/StatsLogProcessor.h
+++ b/cmds/statsd/src/StatsLogProcessor.h
@@ -120,6 +120,11 @@
     /* Notify all MetricsManagers of uid map snapshots received */
     void onUidMapReceived(const int64_t& eventTimeNs) override;
 
+    /* Notify all MetricsManagers of boot completion.
+     * This will force a bucket split once boot is finished.
+     */
+    void onStatsdInitCompleted(const int64_t& elapsedTimeNs);
+
     // Reset all configs.
     void resetConfigs();
 
diff --git a/cmds/statsd/src/StatsService.cpp b/cmds/statsd/src/StatsService.cpp
index ae7a8d0..f8cdcff 100644
--- a/cmds/statsd/src/StatsService.cpp
+++ b/cmds/statsd/src/StatsService.cpp
@@ -118,7 +118,8 @@
                   }
               })),
       mEventQueue(queue),
-      mBootCompleteLatch({kBootCompleteTag, kUidMapReceivedTag, kAllPullersRegisteredTag}),
+      mBootCompleteTrigger({kBootCompleteTag, kUidMapReceivedTag, kAllPullersRegisteredTag},
+                           [this]() { mProcessor->onStatsdInitCompleted(getElapsedRealtimeNs()); }),
       mStatsCompanionServiceDeathRecipient(
               AIBinder_DeathRecipient_new(StatsService::statsCompanionServiceDied)) {
     mUidMap = UidMap::getInstance();
@@ -165,12 +166,6 @@
         std::thread pushedEventThread([this] { readLogs(); });
         pushedEventThread.detach();
     }
-
-    std::thread bootCompletedThread([this] {
-        mBootCompleteLatch.wait();
-        VLOG("In the boot completed thread");
-    });
-    bootCompletedThread.detach();
 }
 
 StatsService::~StatsService() {
@@ -946,7 +941,7 @@
                        packageNames,
                        installers);
 
-    mBootCompleteLatch.countDown(kUidMapReceivedTag);
+    mBootCompleteTrigger.markComplete(kUidMapReceivedTag);
     VLOG("StatsService::informAllUidData UidData proto parsed successfully.");
     return Status::ok();
 }
@@ -1066,7 +1061,7 @@
     ENFORCE_UID(AID_SYSTEM);
 
     VLOG("StatsService::bootCompleted was called");
-    mBootCompleteLatch.countDown(kBootCompleteTag);
+    mBootCompleteTrigger.markComplete(kBootCompleteTag);
     return Status::ok();
 }
 
@@ -1235,7 +1230,7 @@
     ENFORCE_UID(AID_SYSTEM);
 
     VLOG("StatsService::allPullersFromBootRegistered was called");
-    mBootCompleteLatch.countDown(kAllPullersRegisteredTag);
+    mBootCompleteTrigger.markComplete(kAllPullersRegisteredTag);
     return Status::ok();
 }
 
diff --git a/cmds/statsd/src/StatsService.h b/cmds/statsd/src/StatsService.h
index 79324d8..e27966b 100644
--- a/cmds/statsd/src/StatsService.h
+++ b/cmds/statsd/src/StatsService.h
@@ -33,7 +33,7 @@
 #include "packages/UidMap.h"
 #include "shell/ShellSubscriber.h"
 #include "statscompanion_util.h"
-#include "utils/NamedLatch.h"
+#include "utils/MultiConditionTrigger.h"
 
 using namespace android;
 using namespace android::os;
@@ -386,7 +386,7 @@
     mutable mutex mShellSubscriberMutex;
     std::shared_ptr<LogEventQueue> mEventQueue;
 
-    NamedLatch mBootCompleteLatch;
+    MultiConditionTrigger mBootCompleteTrigger;
     static const inline string kBootCompleteTag = "BOOT_COMPLETE";
     static const inline string kUidMapReceivedTag = "UID_MAP";
     static const inline string kAllPullersRegisteredTag = "PULLERS_REGISTERED";
@@ -399,11 +399,14 @@
     FRIEND_TEST(StatsServiceTest, TestAddConfig_invalid);
     FRIEND_TEST(StatsServiceTest, TestGetUidFromArgs);
     FRIEND_TEST(PartialBucketE2eTest, TestCountMetricNoSplitOnNewApp);
+    FRIEND_TEST(PartialBucketE2eTest, TestCountMetricSplitOnBoot);
     FRIEND_TEST(PartialBucketE2eTest, TestCountMetricSplitOnUpgrade);
     FRIEND_TEST(PartialBucketE2eTest, TestCountMetricSplitOnRemoval);
     FRIEND_TEST(PartialBucketE2eTest, TestCountMetricWithoutSplit);
+    FRIEND_TEST(PartialBucketE2eTest, TestValueMetricOnBootWithoutMinPartialBucket);
     FRIEND_TEST(PartialBucketE2eTest, TestValueMetricWithoutMinPartialBucket);
     FRIEND_TEST(PartialBucketE2eTest, TestValueMetricWithMinPartialBucket);
+    FRIEND_TEST(PartialBucketE2eTest, TestGaugeMetricOnBootWithoutMinPartialBucket);
     FRIEND_TEST(PartialBucketE2eTest, TestGaugeMetricWithoutMinPartialBucket);
     FRIEND_TEST(PartialBucketE2eTest, TestGaugeMetricWithMinPartialBucket);
 };
diff --git a/cmds/statsd/src/metrics/CountMetricProducer.h b/cmds/statsd/src/metrics/CountMetricProducer.h
index a4711e8..f9a8842 100644
--- a/cmds/statsd/src/metrics/CountMetricProducer.h
+++ b/cmds/statsd/src/metrics/CountMetricProducer.h
@@ -109,10 +109,11 @@
     FRIEND_TEST(CountMetricProducerTest, TestEventsWithNonSlicedCondition);
     FRIEND_TEST(CountMetricProducerTest, TestEventsWithSlicedCondition);
     FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
-    FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgrade);
-    FRIEND_TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket);
     FRIEND_TEST(CountMetricProducerTest, TestFirstBucket);
     FRIEND_TEST(CountMetricProducerTest, TestOneWeekTimeUnit);
+
+    FRIEND_TEST(CountMetricProducerTest_PartialBucket, TestSplitInCurrentBucket);
+    FRIEND_TEST(CountMetricProducerTest_PartialBucket, TestSplitInNextBucket);
 };
 
 }  // namespace statsd
diff --git a/cmds/statsd/src/metrics/DurationMetricProducer.h b/cmds/statsd/src/metrics/DurationMetricProducer.h
index cc48f99..6f84076 100644
--- a/cmds/statsd/src/metrics/DurationMetricProducer.h
+++ b/cmds/statsd/src/metrics/DurationMetricProducer.h
@@ -154,12 +154,14 @@
     FRIEND_TEST(DurationMetricTrackerTest, TestNoCondition);
     FRIEND_TEST(DurationMetricTrackerTest, TestNonSlicedCondition);
     FRIEND_TEST(DurationMetricTrackerTest, TestNonSlicedConditionUnknownState);
-    FRIEND_TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade);
-    FRIEND_TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket);
-    FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade);
-    FRIEND_TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket);
     FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicates);
     FRIEND_TEST(DurationMetricTrackerTest, TestFirstBucket);
+
+    FRIEND_TEST(DurationMetricProducerTest_PartialBucket, TestSumDuration);
+    FRIEND_TEST(DurationMetricProducerTest_PartialBucket,
+                TestSumDurationWithSplitInFollowingBucket);
+    FRIEND_TEST(DurationMetricProducerTest_PartialBucket, TestMaxDuration);
+    FRIEND_TEST(DurationMetricProducerTest_PartialBucket, TestMaxDurationWithSplitInNextBucket);
 };
 
 }  // namespace statsd
diff --git a/cmds/statsd/src/metrics/GaugeMetricProducer.h b/cmds/statsd/src/metrics/GaugeMetricProducer.h
index 79ec711..e2bff21 100644
--- a/cmds/statsd/src/metrics/GaugeMetricProducer.h
+++ b/cmds/statsd/src/metrics/GaugeMetricProducer.h
@@ -73,18 +73,23 @@
                       bool pullSuccess, int64_t originalPullTimeNs) override;
 
     // GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
-    void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
-                          const int64_t version) override {
+    void notifyAppUpgrade(const int64_t& eventTimeNs) override {
         std::lock_guard<std::mutex> lock(mMutex);
 
         if (!mSplitBucketForAppUpgrade) {
             return;
         }
-        if (eventTimeNs > getCurrentBucketEndTimeNs()) {
-            // Flush full buckets on the normal path up to the latest bucket boundary.
-            flushIfNeededLocked(eventTimeNs);
+        flushLocked(eventTimeNs);
+        if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+            pullAndMatchEventsLocked(eventTimeNs);
         }
-        flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
+    };
+
+    // GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
+    void onStatsdInitCompleted(const int64_t& eventTimeNs) override {
+        std::lock_guard<std::mutex> lock(mMutex);
+
+        flushLocked(eventTimeNs);
         if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
             pullAndMatchEventsLocked(eventTimeNs);
         }
@@ -190,13 +195,14 @@
     FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsWithCondition);
     FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsWithSlicedCondition);
     FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsNoCondition);
-    FRIEND_TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade);
-    FRIEND_TEST(GaugeMetricProducerTest, TestPulledWithUpgrade);
     FRIEND_TEST(GaugeMetricProducerTest, TestPulledWithAppUpgradeDisabled);
     FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsAnomalyDetection);
     FRIEND_TEST(GaugeMetricProducerTest, TestFirstBucket);
     FRIEND_TEST(GaugeMetricProducerTest, TestPullOnTrigger);
     FRIEND_TEST(GaugeMetricProducerTest, TestRemoveDimensionInOutput);
+
+    FRIEND_TEST(GaugeMetricProducerTest_PartialBucket, TestPushedEvents);
+    FRIEND_TEST(GaugeMetricProducerTest_PartialBucket, TestPulled);
 };
 
 }  // namespace statsd
diff --git a/cmds/statsd/src/metrics/MetricProducer.h b/cmds/statsd/src/metrics/MetricProducer.h
index 4550e65..7d33d5a 100644
--- a/cmds/statsd/src/metrics/MetricProducer.h
+++ b/cmds/statsd/src/metrics/MetricProducer.h
@@ -141,30 +141,25 @@
     }
 
     /**
-     * Forces this metric to split into a partial bucket right now. If we're past a full bucket, we
-     * first call the standard flushing code to flush up to the latest full bucket. Then we call
-     * the flush again when the end timestamp is forced to be now, and then after flushing, update
-     * the start timestamp to be now.
+     * Force a partial bucket split on app upgrade.
      */
-    virtual void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
-                          const int64_t version) {
+    virtual void notifyAppUpgrade(const int64_t& eventTimeNs) {
         std::lock_guard<std::mutex> lock(mMutex);
-
-        if (eventTimeNs > getCurrentBucketEndTimeNs()) {
-            // Flush full buckets on the normal path up to the latest bucket boundary.
-            flushIfNeededLocked(eventTimeNs);
-        }
-        // Now flush a partial bucket.
-        flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
-        // Don't update the current bucket number so that the anomaly tracker knows this bucket
-        // is a partial bucket and can merge it with the previous bucket.
+        flushLocked(eventTimeNs);
     };
 
-    void notifyAppRemoved(const int64_t& eventTimeNs, const string& apk, const int uid) {
+    void notifyAppRemoved(const int64_t& eventTimeNs) {
         // Force buckets to split on removal also.
-        notifyAppUpgrade(eventTimeNs, apk, uid, 0);
+        notifyAppUpgrade(eventTimeNs);
     };
 
+    /**
+     * Force a partial bucket split on boot complete.
+     */
+    virtual void onStatsdInitCompleted(const int64_t& eventTimeNs) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        flushLocked(eventTimeNs);
+    }
     // Consume the parsed stats log entry that already matched the "what" of the metric.
     void onMatchedLogEvent(const size_t matcherIndex, const LogEvent& event) {
         std::lock_guard<std::mutex> lock(mMutex);
@@ -292,8 +287,7 @@
     // End: getters/setters
 protected:
     /**
-     * Flushes the current bucket if the eventTime is after the current bucket's end time. This will
-       also flush the current partial bucket in memory.
+     * Flushes the current bucket if the eventTime is after the current bucket's end time.
      */
     virtual void flushIfNeededLocked(const int64_t& eventTime){};
 
diff --git a/cmds/statsd/src/metrics/MetricsManager.cpp b/cmds/statsd/src/metrics/MetricsManager.cpp
index d832ed8..bcca1fd9 100644
--- a/cmds/statsd/src/metrics/MetricsManager.cpp
+++ b/cmds/statsd/src/metrics/MetricsManager.cpp
@@ -231,8 +231,8 @@
 void MetricsManager::notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
                                       const int64_t version) {
     // Inform all metric producers.
-    for (auto it : mAllMetricProducers) {
-        it->notifyAppUpgrade(eventTimeNs, apk, uid, version);
+    for (const auto& it : mAllMetricProducers) {
+        it->notifyAppUpgrade(eventTimeNs);
     }
     // check if we care this package
     if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) != mAllowedPkg.end()) {
@@ -252,8 +252,8 @@
 void MetricsManager::notifyAppRemoved(const int64_t& eventTimeNs, const string& apk,
                                       const int uid) {
     // Inform all metric producers.
-    for (auto it : mAllMetricProducers) {
-        it->notifyAppRemoved(eventTimeNs, apk, uid);
+    for (const auto& it : mAllMetricProducers) {
+        it->notifyAppRemoved(eventTimeNs);
     }
     // check if we care this package
     if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) != mAllowedPkg.end()) {
@@ -282,6 +282,13 @@
     initLogSourceWhiteList();
 }
 
+void MetricsManager::onStatsdInitCompleted(const int64_t& eventTimeNs) {
+    // Inform all metric producers.
+    for (const auto& it : mAllMetricProducers) {
+        it->onStatsdInitCompleted(eventTimeNs);
+    }
+}
+
 void MetricsManager::init() {
     for (const auto& producer : mAllMetricProducers) {
         producer->prepareFirstBucket();
diff --git a/cmds/statsd/src/metrics/MetricsManager.h b/cmds/statsd/src/metrics/MetricsManager.h
index 1fd6572..ef03d20 100644
--- a/cmds/statsd/src/metrics/MetricsManager.h
+++ b/cmds/statsd/src/metrics/MetricsManager.h
@@ -70,6 +70,8 @@
 
     void onUidMapReceived(const int64_t& eventTimeNs);
 
+    void onStatsdInitCompleted(const int64_t& elapsedTimeNs);
+
     void init();
 
     vector<int32_t> getPullAtomUids(int32_t atomId) override;
diff --git a/cmds/statsd/src/metrics/ValueMetricProducer.h b/cmds/statsd/src/metrics/ValueMetricProducer.h
index e9273dc..c8dc8cc 100644
--- a/cmds/statsd/src/metrics/ValueMetricProducer.h
+++ b/cmds/statsd/src/metrics/ValueMetricProducer.h
@@ -69,8 +69,7 @@
                       bool pullSuccess, int64_t originalPullTimeNs) override;
 
     // ValueMetric needs special logic if it's a pulled atom.
-    void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
-                          const int64_t version) override {
+    void notifyAppUpgrade(const int64_t& eventTimeNs) override {
         std::lock_guard<std::mutex> lock(mMutex);
         if (!mSplitBucketForAppUpgrade) {
             return;
@@ -81,6 +80,15 @@
         flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
     };
 
+    // ValueMetric needs special logic if it's a pulled atom.
+    void onStatsdInitCompleted(const int64_t& eventTimeNs) override {
+        std::lock_guard<std::mutex> lock(mMutex);
+        if (mIsPulled && mCondition) {
+            pullAndMatchEventsLocked(eventTimeNs);
+        }
+        flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
+    };
+
     void onStateChanged(int64_t eventTimeNs, int32_t atomId, const HashableDimensionKey& primaryKey,
                         int oldState, int newState) override;
 
@@ -256,7 +264,6 @@
 
     FRIEND_TEST(ValueMetricProducerTest, TestAnomalyDetection);
     FRIEND_TEST(ValueMetricProducerTest, TestBaseSetOnConditionChange);
-    FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundariesOnAppUpgrade);
     FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundariesOnConditionChange);
     FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundaryNoCondition);
     FRIEND_TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition);
@@ -269,10 +276,8 @@
     FRIEND_TEST(ValueMetricProducerTest, TestEmptyDataResetsBase_onDataPulled);
     FRIEND_TEST(ValueMetricProducerTest, TestEventsWithNonSlicedCondition);
     FRIEND_TEST(ValueMetricProducerTest, TestFirstBucket);
-    FRIEND_TEST(ValueMetricProducerTest, TestFullBucketResetWhenLastBucketInvalid);
     FRIEND_TEST(ValueMetricProducerTest, TestLateOnDataPulledWithDiff);
     FRIEND_TEST(ValueMetricProducerTest, TestLateOnDataPulledWithoutDiff);
-    FRIEND_TEST(ValueMetricProducerTest, TestPartialBucketCreated);
     FRIEND_TEST(ValueMetricProducerTest, TestPartialResetOnBucketBoundaries);
     FRIEND_TEST(ValueMetricProducerTest, TestPulledData_noDiff_bucketBoundaryFalse);
     FRIEND_TEST(ValueMetricProducerTest, TestPulledData_noDiff_bucketBoundaryTrue);
@@ -283,15 +288,12 @@
     FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsTakeAbsoluteValueOnReset);
     FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsTakeZeroOnReset);
     FRIEND_TEST(ValueMetricProducerTest, TestPulledEventsWithFiltering);
-    FRIEND_TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade);
-    FRIEND_TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse);
     FRIEND_TEST(ValueMetricProducerTest, TestPulledWithAppUpgradeDisabled);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateAvg);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateMax);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateMin);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedAggregateSum);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedEventsWithCondition);
-    FRIEND_TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade);
     FRIEND_TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition);
     FRIEND_TEST(ValueMetricProducerTest, TestResetBaseOnPullDelayExceeded);
     FRIEND_TEST(ValueMetricProducerTest, TestResetBaseOnPullFailAfterConditionChange);
@@ -313,6 +315,14 @@
     FRIEND_TEST(ValueMetricProducerTest_BucketDrop, TestInvalidBucketWhenGuardRailHit);
     FRIEND_TEST(ValueMetricProducerTest_BucketDrop,
                 TestInvalidBucketWhenAccumulateEventWrongBucket);
+
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestBucketBoundariesOnPartialBucket);
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestFullBucketResetWhenLastBucketInvalid);
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestPartialBucketCreated);
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestPushedEvents);
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestPulledValue);
+    FRIEND_TEST(ValueMetricProducerTest_PartialBucket, TestPulledValueWhileConditionFalse);
+
     friend class ValueMetricProducerTestHelper;
 };
 
diff --git a/cmds/statsd/src/utils/MultiConditionTrigger.cpp b/cmds/statsd/src/utils/MultiConditionTrigger.cpp
new file mode 100644
index 0000000..43a6933
--- /dev/null
+++ b/cmds/statsd/src/utils/MultiConditionTrigger.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define DEBUG false  // STOPSHIP if true
+
+#include "MultiConditionTrigger.h"
+
+#include <thread>
+
+using namespace std;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+MultiConditionTrigger::MultiConditionTrigger(const set<string>& conditionNames,
+                                             function<void()> trigger)
+    : mRemainingConditionNames(conditionNames),
+      mTrigger(trigger),
+      mCompleted(mRemainingConditionNames.empty()) {
+    if (mCompleted) {
+        thread executorThread([this] { mTrigger(); });
+        executorThread.detach();
+    }
+}
+
+void MultiConditionTrigger::markComplete(const string& conditionName) {
+    bool doTrigger = false;
+    {
+        lock_guard<mutex> lg(mMutex);
+        if (mCompleted) {
+            return;
+        }
+        mRemainingConditionNames.erase(conditionName);
+        mCompleted = mRemainingConditionNames.empty();
+        doTrigger = mCompleted;
+    }
+    if (doTrigger) {
+        std::thread executorThread([this] { mTrigger(); });
+        executorThread.detach();
+    }
+}
+}  // namespace statsd
+}  // namespace os
+}  // namespace android
diff --git a/cmds/statsd/src/utils/MultiConditionTrigger.h b/cmds/statsd/src/utils/MultiConditionTrigger.h
new file mode 100644
index 0000000..51f6029
--- /dev/null
+++ b/cmds/statsd/src/utils/MultiConditionTrigger.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <gtest/gtest_prod.h>
+
+#include <mutex>
+#include <set>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+/**
+ * This class provides a utility to wait for a set of named conditions to occur.
+ *
+ * It will execute the trigger runnable in a detached thread once all conditions have been marked
+ * true.
+ */
+class MultiConditionTrigger {
+public:
+    explicit MultiConditionTrigger(const std::set<std::string>& conditionNames,
+                                   std::function<void()> trigger);
+
+    MultiConditionTrigger(const MultiConditionTrigger&) = delete;
+    MultiConditionTrigger& operator=(const MultiConditionTrigger&) = delete;
+
+    // Mark a specific condition as true. If this condition was already marked complete, or if it
+    // was not specified in the constructor, the call is a no-op.
+    void markComplete(const std::string& conditionName);
+
+private:
+    mutable std::mutex mMutex;
+    std::set<std::string> mRemainingConditionNames;
+    std::function<void()> mTrigger;
+    bool mCompleted;
+
+    FRIEND_TEST(MultiConditionTriggerTest, TestCountDownCalledBySameEventName);
+};
+}  // namespace statsd
+}  // namespace os
+}  // namespace android
diff --git a/cmds/statsd/src/utils/NamedLatch.cpp b/cmds/statsd/src/utils/NamedLatch.cpp
deleted file mode 100644
index 6e77977..0000000
--- a/cmds/statsd/src/utils/NamedLatch.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#define DEBUG false  // STOPSHIP if true
-
-#include "NamedLatch.h"
-
-using namespace std;
-
-namespace android {
-namespace os {
-namespace statsd {
-
-NamedLatch::NamedLatch(const set<string>& eventNames) : mRemainingEventNames(eventNames) {
-}
-
-void NamedLatch::countDown(const string& eventName) {
-    bool notify = false;
-    {
-        lock_guard<mutex> lg(mMutex);
-        mRemainingEventNames.erase(eventName);
-        notify = mRemainingEventNames.empty();
-    }
-    if (notify) {
-        mConditionVariable.notify_all();
-    }
-}
-
-void NamedLatch::wait() const {
-    unique_lock<mutex> unique_lk(mMutex);
-    mConditionVariable.wait(unique_lk, [this] { return mRemainingEventNames.empty(); });
-}
-
-}  // namespace statsd
-}  // namespace os
-}  // namespace android
diff --git a/cmds/statsd/src/utils/NamedLatch.h b/cmds/statsd/src/utils/NamedLatch.h
deleted file mode 100644
index 70238370..0000000
--- a/cmds/statsd/src/utils/NamedLatch.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#pragma once
-
-#include <gtest/gtest_prod.h>
-
-#include <condition_variable>
-#include <mutex>
-#include <set>
-
-namespace android {
-namespace os {
-namespace statsd {
-
-/**
- * This class provides a threading primitive similar to a latch.
- * The primary difference is that it waits for named events to occur instead of waiting for
- * N threads to reach a certain point.
- *
- * It uses a condition variable under the hood.
- */
-class NamedLatch {
-public:
-    explicit NamedLatch(const std::set<std::string>& eventNames);
-
-    NamedLatch(const NamedLatch&) = delete;
-    NamedLatch& operator=(const NamedLatch&) = delete;
-
-    // Mark a specific event as completed. If this event has called countDown already or if the
-    // event was not specified in the constructor, the function is a no-op.
-    void countDown(const std::string& eventName);
-
-    // Blocks the calling thread until all events in eventNames have called countDown.
-    void wait() const;
-
-private:
-    mutable std::mutex mMutex;
-    mutable std::condition_variable mConditionVariable;
-    std::set<std::string> mRemainingEventNames;
-
-    FRIEND_TEST(NamedLatchTest, TestCountDownCalledBySameEventName);
-};
-}  // namespace statsd
-}  // namespace os
-}  // namespace android
diff --git a/cmds/statsd/tests/e2e/PartialBucket_e2e_test.cpp b/cmds/statsd/tests/e2e/PartialBucket_e2e_test.cpp
index b173ee0..9117623 100644
--- a/cmds/statsd/tests/e2e/PartialBucket_e2e_test.cpp
+++ b/cmds/statsd/tests/e2e/PartialBucket_e2e_test.cpp
@@ -89,6 +89,7 @@
     valueMetric->set_bucket(FIVE_MINUTES);
     valueMetric->set_min_bucket_size_nanos(minTime);
     valueMetric->set_use_absolute_value_on_reset(true);
+    valueMetric->set_skip_zero_diff_output(false);
     return config;
 }
 
@@ -217,6 +218,35 @@
     EXPECT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info(0).count());
 }
 
+TEST(PartialBucketE2eTest, TestCountMetricSplitOnBoot) {
+    shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+    SendConfig(service, MakeConfig());
+    int64_t start = getElapsedRealtimeNs();  // This is the start-time the metrics producers are
+                                             // initialized with.
+
+    // Goes into the first bucket
+    service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + NS_PER_SEC, 100).get());
+    int64_t bootCompleteTimeNs = start + 2 * NS_PER_SEC;
+    service->mProcessor->onStatsdInitCompleted(bootCompleteTimeNs);
+    // Goes into the second bucket.
+    service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3 * NS_PER_SEC, 100).get());
+
+    ConfigMetricsReport report = GetReports(service->mProcessor, start + 4 * NS_PER_SEC);
+    backfillStartEndTimestamp(&report);
+
+    ASSERT_EQ(1, report.metrics_size());
+    ASSERT_EQ(1, report.metrics(0).count_metrics().data_size());
+    ASSERT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info_size());
+    EXPECT_TRUE(report.metrics(0)
+                        .count_metrics()
+                        .data(0)
+                        .bucket_info(0)
+                        .has_start_bucket_elapsed_nanos());
+    EXPECT_EQ(MillisToNano(NanoToMillis(bootCompleteTimeNs)),
+              report.metrics(0).count_metrics().data(0).bucket_info(0).end_bucket_elapsed_nanos());
+    EXPECT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info(0).count());
+}
+
 TEST(PartialBucketE2eTest, TestValueMetricWithoutMinPartialBucket) {
     shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
     service->mPullerManager->RegisterPullAtomCallback(
@@ -229,13 +259,22 @@
                                              // initialized with.
 
     service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
-    service->mUidMap->updateApp(5 * 60 * NS_PER_SEC + start + 2, String16(kApp1.c_str()), 1, 2,
-                                String16("v2"), String16(""));
+    int64_t appUpgradeTimeNs = 5 * 60 * NS_PER_SEC + start + 2 * NS_PER_SEC;
+    service->mUidMap->updateApp(appUpgradeTimeNs, String16(kApp1.c_str()), 1, 2, String16("v2"),
+                                String16(""));
 
     ConfigMetricsReport report =
-            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100, true);
+            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
+    backfillStartEndTimestamp(&report);
+
     EXPECT_EQ(1, report.metrics_size());
     EXPECT_EQ(0, report.metrics(0).value_metrics().skipped_size());
+
+    // The fake subsystem state sleep puller returns two atoms.
+    ASSERT_EQ(2, report.metrics(0).value_metrics().data_size());
+    ASSERT_EQ(2, report.metrics(0).value_metrics().data(0).bucket_info_size());
+    EXPECT_EQ(MillisToNano(NanoToMillis(appUpgradeTimeNs)),
+              report.metrics(0).value_metrics().data(0).bucket_info(1).end_bucket_elapsed_nanos());
 }
 
 TEST(PartialBucketE2eTest, TestValueMetricWithMinPartialBucket) {
@@ -249,13 +288,13 @@
     int64_t start = getElapsedRealtimeNs();  // This is the start-time the metrics producers are
                                              // initialized with.
 
-    const int64_t endSkipped = 5 * 60 * NS_PER_SEC + start + 2;
+    const int64_t endSkipped = 5 * 60 * NS_PER_SEC + start + 2 * NS_PER_SEC;
     service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
     service->mUidMap->updateApp(endSkipped, String16(kApp1.c_str()), 1, 2, String16("v2"),
                                String16(""));
 
     ConfigMetricsReport report =
-            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC, true);
+            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
     backfillStartEndTimestamp(&report);
 
     ASSERT_EQ(1, report.metrics_size());
@@ -264,10 +303,49 @@
     // Can't test the start time since it will be based on the actual time when the pulling occurs.
     EXPECT_EQ(MillisToNano(NanoToMillis(endSkipped)),
               report.metrics(0).value_metrics().skipped(0).end_bucket_elapsed_nanos());
+
+    ASSERT_EQ(2, report.metrics(0).value_metrics().data_size());
+    EXPECT_EQ(1, report.metrics(0).value_metrics().data(0).bucket_info_size());
+}
+
+TEST(PartialBucketE2eTest, TestValueMetricOnBootWithoutMinPartialBucket) {
+    shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+    // Initial pull will fail since puller is not registered.
+    SendConfig(service, MakeValueMetricConfig(0));
+    int64_t start = getElapsedRealtimeNs();  // This is the start-time the metrics producers are
+                                             // initialized with.
+
+    service->mPullerManager->RegisterPullAtomCallback(
+            /*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
+            SharedRefBase::make<FakeSubsystemSleepCallback>());
+
+    int64_t bootCompleteTimeNs = start + NS_PER_SEC;
+    service->mProcessor->onStatsdInitCompleted(bootCompleteTimeNs);
+
+    service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
+
+    ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+    backfillStartEndTimestamp(&report);
+
+    // First bucket is dropped due to the initial pull failing
+    ASSERT_EQ(1, report.metrics_size());
+    EXPECT_EQ(1, report.metrics(0).value_metrics().skipped_size());
+    EXPECT_EQ(MillisToNano(NanoToMillis(bootCompleteTimeNs)),
+              report.metrics(0).value_metrics().skipped(0).end_bucket_elapsed_nanos());
+
+    // The fake subsystem state sleep puller returns two atoms.
+    ASSERT_EQ(2, report.metrics(0).value_metrics().data_size());
+    ASSERT_EQ(1, report.metrics(0).value_metrics().data(0).bucket_info_size());
+    EXPECT_EQ(
+            MillisToNano(NanoToMillis(bootCompleteTimeNs)),
+            report.metrics(0).value_metrics().data(0).bucket_info(0).start_bucket_elapsed_nanos());
 }
 
 TEST(PartialBucketE2eTest, TestGaugeMetricWithoutMinPartialBucket) {
     shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+    service->mPullerManager->RegisterPullAtomCallback(
+            /*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
+            SharedRefBase::make<FakeSubsystemSleepCallback>());
     // Partial buckets don't occur when app is first installed.
     service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""));
     SendConfig(service, MakeGaugeMetricConfig(0));
@@ -278,16 +356,22 @@
     service->mUidMap->updateApp(5 * 60 * NS_PER_SEC + start + 2, String16(kApp1.c_str()), 1, 2,
                                String16("v2"), String16(""));
 
-    ConfigMetricsReport report =
-            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100, true);
-    EXPECT_EQ(1, report.metrics_size());
+    ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+    backfillStartEndTimestamp(&report);
+    ASSERT_EQ(1, report.metrics_size());
     EXPECT_EQ(0, report.metrics(0).gauge_metrics().skipped_size());
+    // The fake subsystem state sleep puller returns two atoms.
+    ASSERT_EQ(2, report.metrics(0).gauge_metrics().data_size());
+    EXPECT_EQ(2, report.metrics(0).gauge_metrics().data(0).bucket_info_size());
 }
 
 TEST(PartialBucketE2eTest, TestGaugeMetricWithMinPartialBucket) {
     shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
     // Partial buckets don't occur when app is first installed.
     service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""));
+    service->mPullerManager->RegisterPullAtomCallback(
+            /*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
+            SharedRefBase::make<FakeSubsystemSleepCallback>());
     SendConfig(service, MakeGaugeMetricConfig(60 * NS_PER_SEC /* One minute */));
     int64_t start = getElapsedRealtimeNs();  // This is the start-time the metrics producers are
                                              // initialized with.
@@ -298,7 +382,7 @@
                                 String16(""));
 
     ConfigMetricsReport report =
-            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC, true);
+            GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
     backfillStartEndTimestamp(&report);
     ASSERT_EQ(1, report.metrics_size());
     ASSERT_EQ(1, report.metrics(0).gauge_metrics().skipped_size());
@@ -306,6 +390,38 @@
     EXPECT_TRUE(report.metrics(0).gauge_metrics().skipped(0).has_start_bucket_elapsed_nanos());
     EXPECT_EQ(MillisToNano(NanoToMillis(endSkipped)),
               report.metrics(0).gauge_metrics().skipped(0).end_bucket_elapsed_nanos());
+    ASSERT_EQ(2, report.metrics(0).gauge_metrics().data_size());
+    EXPECT_EQ(1, report.metrics(0).gauge_metrics().data(0).bucket_info_size());
+}
+
+TEST(PartialBucketE2eTest, TestGaugeMetricOnBootWithoutMinPartialBucket) {
+    shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+    // Initial pull will fail since puller hasn't been registered.
+    SendConfig(service, MakeGaugeMetricConfig(0));
+    int64_t start = getElapsedRealtimeNs();  // This is the start-time the metrics producers are
+                                             // initialized with.
+
+    service->mPullerManager->RegisterPullAtomCallback(
+            /*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
+            SharedRefBase::make<FakeSubsystemSleepCallback>());
+
+    int64_t bootCompleteTimeNs = start + NS_PER_SEC;
+    service->mProcessor->onStatsdInitCompleted(bootCompleteTimeNs);
+
+    service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
+
+    ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+    backfillStartEndTimestamp(&report);
+
+    ASSERT_EQ(1, report.metrics_size());
+    EXPECT_EQ(0, report.metrics(0).gauge_metrics().skipped_size());
+    // The fake subsystem state sleep puller returns two atoms.
+    ASSERT_EQ(2, report.metrics(0).gauge_metrics().data_size());
+    // No data in the first bucket, so nothing is reported
+    ASSERT_EQ(1, report.metrics(0).gauge_metrics().data(0).bucket_info_size());
+    EXPECT_EQ(
+            MillisToNano(NanoToMillis(bootCompleteTimeNs)),
+            report.metrics(0).gauge_metrics().data(0).bucket_info(0).start_bucket_elapsed_nanos());
 }
 
 #else
diff --git a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
index 65f8de6..8131725 100644
--- a/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/CountMetricProducer_test.cpp
@@ -38,9 +38,9 @@
 namespace os {
 namespace statsd {
 
-const ConfigKey kConfigKey(0, 12345);
 
 namespace {
+const ConfigKey kConfigKey(0, 12345);
 
 void makeLogEvent(LogEvent* logEvent, int64_t timestampNs, int atomId) {
     AStatsEvent* statsEvent = AStatsEvent_obtain();
@@ -61,6 +61,13 @@
 
 }  // namespace
 
+// Setup for parameterized tests.
+class CountMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};
+
+INSTANTIATE_TEST_SUITE_P(CountMetricProducerTest_PartialBucket,
+                         CountMetricProducerTest_PartialBucket,
+                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));
+
 TEST(CountMetricProducerTest, TestFirstBucket) {
     CountMetric metric;
     metric.set_id(1);
@@ -237,11 +244,11 @@
     EXPECT_EQ(1LL, bucketInfo.mCount);
 }
 
-TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
+TEST_P(CountMetricProducerTest_PartialBucket, TestSplitInCurrentBucket) {
     sp<AlarmMonitor> alarmMonitor;
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    int64_t eventTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
 
     int tagId = 1;
     int conditionTagId = 2;
@@ -260,22 +267,30 @@
     sp<AnomalyTracker> anomalyTracker = countProducer.addAnomalyTracker(alert, alarmMonitor);
     EXPECT_TRUE(anomalyTracker != nullptr);
 
-    // Bucket is flushed yet.
+    // Bucket is not flushed yet.
     LogEvent event1(/*uid=*/0, /*pid=*/0);
     makeLogEvent(&event1, bucketStartTimeNs + 1, tagId, /*uid=*/"111");
     countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
     EXPECT_EQ(0UL, countProducer.mPastBuckets.size());
     EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 
-    // App upgrade forces bucket flush.
+    // App upgrade or boot complete forces bucket flush.
     // Check that there's a past bucket and the bucket end is not adjusted.
-    countProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            countProducer.notifyAppUpgrade(eventTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            countProducer.onStatsdInitCompleted(eventTimeNs);
+            break;
+    }
     EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ((long long)bucketStartTimeNs,
+    EXPECT_EQ(bucketStartTimeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
-    EXPECT_EQ((long long)eventUpgradeTimeNs,
+    EXPECT_EQ(eventTimeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
-    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
+    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);
     // Anomaly tracker only contains full buckets.
     EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 
@@ -285,7 +300,8 @@
     makeLogEvent(&event2, bucketStartTimeNs + 59 * NS_PER_SEC + 10, tagId, /*uid=*/"222");
     countProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
     EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, countProducer.getCurrentBucketNum());
     EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 
     // Third event in following bucket.
@@ -294,13 +310,14 @@
     countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
     EXPECT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     EXPECT_EQ(lastEndTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, countProducer.getCurrentBucketNum());
     EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 }
 
-TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
+TEST_P(CountMetricProducerTest_PartialBucket, TestSplitInNextBucket) {
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+    int64_t eventTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
 
     int tagId = 1;
     int conditionTagId = 2;
@@ -319,15 +336,23 @@
     countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
     EXPECT_EQ(0UL, countProducer.mPastBuckets.size());
 
-    // App upgrade forces bucket flush.
-    // Check that there's a past bucket and the bucket end is not adjusted.
-    countProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    // App upgrade or boot complete forces bucket flush.
+    // Check that there's a past bucket and the bucket end is not adjusted since the split event
+    // occurred after the bucket end time.
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            countProducer.notifyAppUpgrade(eventTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            countProducer.onStatsdInitCompleted(eventTimeNs);
+            break;
+    }
     EXPECT_EQ(1UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ((int64_t)bucketStartTimeNs,
+    EXPECT_EQ(bucketStartTimeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
-    EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(eventTimeNs, countProducer.mCurrentBucketStartTimeNs);
 
     // Next event occurs in same bucket as partial bucket created.
     LogEvent event2(/*uid=*/0, /*pid=*/0);
@@ -340,7 +365,7 @@
     makeLogEvent(&event3, bucketStartTimeNs + 121 * NS_PER_SEC + 10, tagId, /*uid=*/"333");
     countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
     EXPECT_EQ(2UL, countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ((int64_t)eventUpgradeTimeNs,
+    EXPECT_EQ((int64_t)eventTimeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs,
               countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
diff --git a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
index 30f8159..8ef2519 100644
--- a/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/DurationMetricProducer_test.cpp
@@ -41,10 +41,10 @@
 namespace os {
 namespace statsd {
 
-const ConfigKey kConfigKey(0, 12345);
 
 namespace {
 
+const ConfigKey kConfigKey(0, 12345);
 void makeLogEvent(LogEvent* logEvent, int64_t timestampNs, int atomId) {
     AStatsEvent* statsEvent = AStatsEvent_obtain();
     AStatsEvent_setAtomId(statsEvent, atomId);
@@ -55,6 +55,13 @@
 
 }  // namespace
 
+// Setup for parameterized tests.
+class DurationMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};
+
+INSTANTIATE_TEST_SUITE_P(DurationMetricProducerTest_PartialBucket,
+                         DurationMetricProducerTest_PartialBucket,
+                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));
+
 TEST(DurationMetricTrackerTest, TestFirstBucket) {
     sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
     DurationMetric metric;
@@ -205,7 +212,7 @@
     EXPECT_EQ(1LL, buckets2[0].mDuration);
 }
 
-TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
+TEST_P(DurationMetricProducerTest_PartialBucket, TestSumDuration) {
     /**
      * The duration starts from the first bucket, through the two partial buckets (10-70sec),
      * another bucket, and ends at the beginning of the next full bucket.
@@ -217,15 +224,7 @@
      */
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
-    int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
-    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
-
     int tagId = 1;
-    LogEvent event1(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event1, startTimeNs, tagId);
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event2, endTimeNs, tagId);
 
     DurationMetric metric;
     metric.set_id(1);
@@ -238,32 +237,47 @@
                                             3 /* stop_all index */, false /*nesting*/, wizard,
                                             dimensions, bucketStartTimeNs, bucketStartTimeNs);
 
+    int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+    LogEvent event1(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event1, startTimeNs, tagId);
     durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
     EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
 
-    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            durationProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            durationProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(1UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     std::vector<DurationBucket> buckets =
             durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
     EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
-    EXPECT_EQ(eventUpgradeTimeNs, buckets[0].mBucketEndNs);
-    EXPECT_EQ(eventUpgradeTimeNs - startTimeNs, buckets[0].mDuration);
-    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, buckets[0].mBucketEndNs);
+    EXPECT_EQ(partialBucketSplitTimeNs - startTimeNs, buckets[0].mDuration);
+    EXPECT_EQ(partialBucketSplitTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, durationProducer.getCurrentBucketNum());
 
     // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event2, endTimeNs, tagId);
     durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
     buckets = durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
     EXPECT_EQ(3UL, buckets.size());
-    EXPECT_EQ(eventUpgradeTimeNs, buckets[1].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, buckets[1].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketEndNs);
-    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - eventUpgradeTimeNs, buckets[1].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - partialBucketSplitTimeNs, buckets[1].mDuration);
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[2].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[2].mBucketEndNs);
     EXPECT_EQ(bucketSizeNs, buckets[2].mDuration);
 }
 
-TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket) {
+TEST_P(DurationMetricProducerTest_PartialBucket, TestSumDurationWithSplitInFollowingBucket) {
     /**
      * Expected buckets (start at 11s, upgrade at 75s, end at 135s):
      *  - [10,70]: 59 secs
@@ -272,15 +286,7 @@
      */
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
-    int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
-    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
-
     int tagId = 1;
-    LogEvent event1(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event1, startTimeNs, tagId);
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event2, endTimeNs, tagId);
 
     DurationMetric metric;
     metric.set_id(1);
@@ -293,11 +299,22 @@
                                             3 /* stop_all index */, false /*nesting*/, wizard,
                                             dimensions, bucketStartTimeNs, bucketStartTimeNs);
 
+    int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
+    LogEvent event1(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event1, startTimeNs, tagId);
     durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
     EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
 
-    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            durationProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            durationProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(2UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     std::vector<DurationBucket> buckets =
             durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
@@ -305,32 +322,29 @@
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[0].mBucketEndNs);
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs, buckets[0].mDuration);
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketStartNs);
-    EXPECT_EQ(eventUpgradeTimeNs, buckets[1].mBucketEndNs);
-    EXPECT_EQ(eventUpgradeTimeNs - (bucketStartTimeNs + bucketSizeNs), buckets[1].mDuration);
-    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, buckets[1].mBucketEndNs);
+    EXPECT_EQ(partialBucketSplitTimeNs - (bucketStartTimeNs + bucketSizeNs), buckets[1].mDuration);
+    EXPECT_EQ(partialBucketSplitTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, durationProducer.getCurrentBucketNum());
 
     // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event2, endTimeNs, tagId);
     durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
     buckets = durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
     EXPECT_EQ(3UL, buckets.size());
-    EXPECT_EQ(eventUpgradeTimeNs, buckets[2].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, buckets[2].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[2].mBucketEndNs);
-    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs - eventUpgradeTimeNs, buckets[2].mDuration);
+    EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs - partialBucketSplitTimeNs,
+              buckets[2].mDuration);
 }
 
-TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
+TEST_P(DurationMetricProducerTest_PartialBucket, TestSumDurationAnomaly) {
     sp<AlarmMonitor> alarmMonitor;
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
-    int64_t startTimeNs = bucketStartTimeNs + 1;
-    int64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
-
     int tagId = 1;
-    LogEvent event1(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event1, startTimeNs, tagId);
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event2, endTimeNs, tagId);
 
     // Setup metric with alert.
     DurationMetric metric;
@@ -351,27 +365,35 @@
     sp<AnomalyTracker> anomalyTracker = durationProducer.addAnomalyTracker(alert, alarmMonitor);
     EXPECT_TRUE(anomalyTracker != nullptr);
 
+    int64_t startTimeNs = bucketStartTimeNs + 1;
+    LogEvent event1(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event1, startTimeNs, tagId);
     durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
-    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            durationProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            durationProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
 
     // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    int64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event2, endTimeNs, tagId);
     durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
+
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs,
               anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 }
 
-TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
+TEST_P(DurationMetricProducerTest_PartialBucket, TestMaxDuration) {
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
-    int64_t startTimeNs = bucketStartTimeNs + 1;
-    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
-
     int tagId = 1;
-    LogEvent event1(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event1, startTimeNs, tagId);
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event2, endTimeNs, tagId);
 
     DurationMetric metric;
     metric.set_id(1);
@@ -385,15 +407,30 @@
                                             3 /* stop_all index */, false /*nesting*/, wizard,
                                             dimensions, bucketStartTimeNs, bucketStartTimeNs);
 
+    int64_t startTimeNs = bucketStartTimeNs + 1;
+    LogEvent event1(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event1, startTimeNs, tagId);
     durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
     EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
 
-    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            durationProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            durationProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, durationProducer.getCurrentBucketNum());
 
     // We skip ahead one bucket, so we fill in the first two partial buckets and one full bucket.
+    int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event2, endTimeNs, tagId);
     durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
 
@@ -406,18 +443,10 @@
     EXPECT_EQ(endTimeNs - startTimeNs, buckets[0].mDuration);
 }
 
-TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket) {
+TEST_P(DurationMetricProducerTest_PartialBucket, TestMaxDurationWithSplitInNextBucket) {
     int64_t bucketStartTimeNs = 10000000000;
     int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
-    int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
-    int64_t startTimeNs = bucketStartTimeNs + 1;
-    int64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
-
     int tagId = 1;
-    LogEvent event1(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event1, startTimeNs, tagId);
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    makeLogEvent(&event2, endTimeNs, tagId);
 
     DurationMetric metric;
     metric.set_id(1);
@@ -431,24 +460,39 @@
                                             3 /* stop_all index */, false /*nesting*/, wizard,
                                             dimensions, bucketStartTimeNs, bucketStartTimeNs);
 
+    int64_t startTimeNs = bucketStartTimeNs + 1;
+    LogEvent event1(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event1, startTimeNs, tagId);
     durationProducer.onMatchedLogEvent(1 /* start index*/, event1);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets.size());
     EXPECT_EQ(bucketStartTimeNs, durationProducer.mCurrentBucketStartTimeNs);
 
-    durationProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            durationProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            durationProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, durationProducer.getCurrentBucketNum());
 
     // Stop occurs in the same partial bucket as created for the app upgrade.
+    int64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    makeLogEvent(&event2, endTimeNs, tagId);
     durationProducer.onMatchedLogEvent(2 /* stop index*/, event2);
     EXPECT_EQ(0UL, durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(eventUpgradeTimeNs, durationProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, durationProducer.mCurrentBucketStartTimeNs);
 
     durationProducer.flushIfNeededLocked(bucketStartTimeNs + 2 * bucketSizeNs + 1);
     std::vector<DurationBucket> buckets =
             durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
     EXPECT_EQ(1UL, buckets.size());
-    EXPECT_EQ(eventUpgradeTimeNs, buckets[0].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, buckets[0].mBucketStartNs);
     EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[0].mBucketEndNs);
     EXPECT_EQ(endTimeNs - startTimeNs, buckets[0].mDuration);
 }
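The TEST_P cases above rely on a DurationMetricProducerTest_PartialBucket fixture and an INSTANTIATE_TEST_SUITE_P registration earlier in DurationMetricProducer_test.cpp; that wiring sits outside these hunks. A minimal sketch of what it presumably looks like, mirroring the Gauge and Value test files changed below:

// Assumed wiring (not visible in this diff); mirrors GaugeMetricProducerTest_PartialBucket
// and ValueMetricProducerTest_PartialBucket in the hunks below.
class DurationMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};

INSTANTIATE_TEST_SUITE_P(DurationMetricProducerTest_PartialBucket,
                         DurationMetricProducerTest_PartialBucket,
                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));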
diff --git a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
index 42d0d5d..9d2ec88 100644
--- a/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/GaugeMetricProducer_test.cpp
@@ -42,6 +42,8 @@
 namespace os {
 namespace statsd {
 
+namespace {
+
 const ConfigKey kConfigKey(0, 12345);
 const int tagId = 1;
 const int64_t metricId = 123;
@@ -52,9 +54,8 @@
 const int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
 const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
 const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
-const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
+const int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
 
-namespace {
 shared_ptr<LogEvent> makeLogEvent(int32_t atomId, int64_t timestampNs, int32_t value1, string str1,
                                   int32_t value2) {
     AStatsEvent* statsEvent = AStatsEvent_obtain();
@@ -71,6 +72,13 @@
 }
 }  // anonymous namespace
 
+// Setup for parameterized tests.
+class GaugeMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};
+
+INSTANTIATE_TEST_SUITE_P(GaugeMetricProducerTest_PartialBucket,
+                         GaugeMetricProducerTest_PartialBucket,
+                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));
+
 /*
  * Tests that the first bucket works correctly
  */
@@ -194,7 +202,7 @@
     EXPECT_EQ(25L, it->mValue.int_value);
 }
 
-TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
+TEST_P(GaugeMetricProducerTest_PartialBucket, TestPushedEvents) {
     sp<AlarmMonitor> alarmMonitor;
     GaugeMetric metric;
     metric.set_id(metricId);
@@ -230,11 +238,22 @@
     gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
     EXPECT_EQ(1UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
 
-    gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            gaugeProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            gaugeProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(0UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
     EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(bucketStartTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
     EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
-    EXPECT_EQ(eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
     // Partial buckets are not sent to anomaly tracker.
     EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 
@@ -244,7 +263,11 @@
     gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
     EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
     EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(bucketStartTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
+    EXPECT_EQ((int64_t)partialBucketSplitTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
     // Partial buckets are not sent to anomaly tracker.
     EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 
@@ -267,7 +290,7 @@
     EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
 }
 
-TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
+TEST_P(GaugeMetricProducerTest_PartialBucket, TestPulled) {
     GaugeMetric metric;
     metric.set_id(metricId);
     metric.set_bucket(ONE_MINUTE);
@@ -293,7 +316,8 @@
             .WillOnce(Invoke(
                     [](int tagId, const ConfigKey&, vector<std::shared_ptr<LogEvent>>* data, bool) {
                         data->clear();
-                        data->push_back(CreateRepeatedValueLogEvent(tagId, eventUpgradeTimeNs, 2));
+                        data->push_back(
+                                CreateRepeatedValueLogEvent(tagId, partialBucketSplitTimeNs, 2));
                         return true;
                     }));
 
@@ -311,10 +335,21 @@
                          .mFields->begin()
                          ->mValue.int_value);
 
-    gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            gaugeProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            gaugeProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    EXPECT_EQ(bucketStartTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs,
+              gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
     EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
-    EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
     EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
     EXPECT_EQ(2, gaugeProducer.mCurrentSlicedBucket->begin()
                          ->second.front()
@@ -370,7 +405,7 @@
                          .mFields->begin()
                          ->mValue.int_value);
 
-    gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
+    gaugeProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
     EXPECT_EQ(0UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
     EXPECT_EQ(bucketStartTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
diff --git a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
index 3b4d646..f493cc4 100644
--- a/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
+++ b/cmds/statsd/tests/metrics/ValueMetricProducer_test.cpp
@@ -41,6 +41,8 @@
 namespace os {
 namespace statsd {
 
+namespace {
+
 const ConfigKey kConfigKey(0, 12345);
 const int tagId = 1;
 const int64_t metricId = 123;
@@ -58,10 +60,18 @@
 static void assertPastBucketValuesSingleKey(
         const std::unordered_map<MetricDimensionKey, std::vector<ValueBucket>>& mPastBuckets,
         const std::initializer_list<int>& expectedValuesList,
-        const std::initializer_list<int64_t>& expectedDurationNsList) {
-    std::vector<int> expectedValues(expectedValuesList);
-    std::vector<int64_t> expectedDurationNs(expectedDurationNsList);
+        const std::initializer_list<int64_t>& expectedDurationNsList,
+        const std::initializer_list<int64_t>& expectedStartTimeNsList,
+        const std::initializer_list<int64_t>& expectedEndTimeNsList) {
+    vector<int> expectedValues(expectedValuesList);
+    vector<int64_t> expectedDurationNs(expectedDurationNsList);
+    vector<int64_t> expectedStartTimeNs(expectedStartTimeNsList);
+    vector<int64_t> expectedEndTimeNs(expectedEndTimeNsList);
+
     ASSERT_EQ(expectedValues.size(), expectedDurationNs.size());
+    ASSERT_EQ(expectedValues.size(), expectedStartTimeNs.size());
+    ASSERT_EQ(expectedValues.size(), expectedEndTimeNs.size());
+
     if (expectedValues.size() == 0) {
         ASSERT_EQ(0, mPastBuckets.size());
         return;
@@ -70,15 +80,21 @@
     ASSERT_EQ(1, mPastBuckets.size());
     ASSERT_EQ(expectedValues.size(), mPastBuckets.begin()->second.size());
 
-    auto buckets = mPastBuckets.begin()->second;
+    const vector<ValueBucket>& buckets = mPastBuckets.begin()->second;
     for (int i = 0; i < expectedValues.size(); i++) {
         EXPECT_EQ(expectedValues[i], buckets[i].values[0].long_value)
                 << "Values differ at index " << i;
         EXPECT_EQ(expectedDurationNs[i], buckets[i].mConditionTrueNs)
                 << "Condition duration value differ at index " << i;
+        EXPECT_EQ(expectedStartTimeNs[i], buckets[i].mBucketStartNs)
+                << "Start time differs at index " << i;
+        EXPECT_EQ(expectedEndTimeNs[i], buckets[i].mBucketEndNs)
+                << "End time differs at index " << i;
     }
 }
 
+}  // anonymous namespace
+
 class ValueMetricProducerTestHelper {
 public:
     static sp<ValueMetricProducer> createValueProducerNoConditions(
@@ -191,6 +207,13 @@
     }
 };
 
+// Setup for parameterized tests.
+class ValueMetricProducerTest_PartialBucket : public TestWithParam<BucketSplitEvent> {};
+
+INSTANTIATE_TEST_SUITE_P(ValueMetricProducerTest_PartialBucket,
+                         ValueMetricProducerTest_PartialBucket,
+                         testing::Values(APP_UPGRADE, BOOT_COMPLETE));
+
 /*
  * Tests that the first bucket works correctly
  */
@@ -325,9 +348,10 @@
     EXPECT_EQ(bucketSizeNs, valueProducer->mPastBuckets.begin()->second[2].mConditionTrueNs);
 }
 
-TEST(ValueMetricProducerTest, TestPartialBucketCreated) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestPartialBucketCreated) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetric();
     sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+    int64_t partialBucketSplitTimeNs = bucket2StartTimeNs + 2;
     EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
             // Initialize bucket.
             .WillOnce(Invoke([](int tagId, const ConfigKey&,
@@ -337,10 +361,12 @@
                 return true;
             }))
             // Partial bucket.
-            .WillOnce(Invoke([](int tagId, const ConfigKey&,
-                                vector<std::shared_ptr<LogEvent>>* data, bool) {
+            .WillOnce(Invoke([partialBucketSplitTimeNs](int tagId, const ConfigKey&,
+                                                        vector<std::shared_ptr<LogEvent>>* data,
+                                                        bool) {
                 data->clear();
-                data->push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 10, 5));
+                data->push_back(
+                        CreateRepeatedValueLogEvent(tagId, partialBucketSplitTimeNs + 8, 5));
                 return true;
             }));
 
@@ -354,19 +380,21 @@
     valueProducer->onDataPulled(allData, /** success */ true, bucket2StartTimeNs);
 
     // Partial buckets created in 2nd bucket.
-    valueProducer->notifyAppUpgrade(bucket2StartTimeNs + 2, "com.foo", 10000, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer->notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer->onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer->mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, valueProducer->getCurrentBucketNum());
 
-    // One full bucket and one partial bucket.
-    EXPECT_EQ(1UL, valueProducer->mPastBuckets.size());
-    vector<ValueBucket> buckets = valueProducer->mPastBuckets.begin()->second;
-    EXPECT_EQ(2UL, buckets.size());
-    // Full bucket (2 - 1)
-    EXPECT_EQ(1, buckets[0].values[0].long_value);
-    EXPECT_EQ(bucketSizeNs, buckets[0].mConditionTrueNs);
-    // Full bucket (5 - 3)
-    EXPECT_EQ(3, buckets[1].values[0].long_value);
-    // partial bucket [bucket2StartTimeNs, bucket2StartTimeNs + 2]
-    EXPECT_EQ(2, buckets[1].mConditionTrueNs);
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {1, 3},
+                                    {bucketSizeNs, partialBucketSplitTimeNs - bucket2StartTimeNs},
+                                    {bucketStartTimeNs, bucket2StartTimeNs},
+                                    {bucket2StartTimeNs, partialBucketSplitTimeNs});
 }
 
 /*
@@ -613,7 +641,8 @@
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 
     // has one slice
     EXPECT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
@@ -625,7 +654,8 @@
     EXPECT_EQ(10, curInterval.value.long_value);
 
     valueProducer->onConditionChanged(false, bucket2StartTimeNs + 1);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 
     // has one slice
     EXPECT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
@@ -636,10 +666,12 @@
     EXPECT_EQ(false, curBaseInfo.hasBase);
 
     valueProducer->onConditionChanged(true, bucket3StartTimeNs + 1);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10, 20}, {bucketSizeNs - 8, 1});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10, 20}, {bucketSizeNs - 8, 1},
+                                    {bucketStartTimeNs, bucket2StartTimeNs},
+                                    {bucket2StartTimeNs, bucket3StartTimeNs});
 }
 
-TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestPushedEvents) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetric();
 
     UidMap uidMap;
@@ -660,25 +692,46 @@
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
     EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
 
-    valueProducer.notifyAppUpgrade(bucketStartTimeNs + 150, "ANY.APP", 1, 1);
-    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(bucketStartTimeNs + 150, valueProducer.mCurrentBucketStartTimeNs);
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + 150;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {10},
+                                    {partialBucketSplitTimeNs - bucketStartTimeNs},
+                                    {bucketStartTimeNs}, {partialBucketSplitTimeNs});
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, valueProducer.getCurrentBucketNum());
 
+    // Event arrives after the bucket split.
     LogEvent event2(/*uid=*/0, /*pid=*/0);
-    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 59 * NS_PER_SEC, 10);
+    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 59 * NS_PER_SEC, 20);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
-    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(bucketStartTimeNs + 150, valueProducer.mCurrentBucketStartTimeNs);
+
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {10},
+                                    {partialBucketSplitTimeNs - bucketStartTimeNs},
+                                    {bucketStartTimeNs}, {partialBucketSplitTimeNs});
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, valueProducer.getCurrentBucketNum());
 
     // Next value should create a new bucket.
     LogEvent event3(/*uid=*/0, /*pid=*/0);
-    CreateRepeatedValueLogEvent(&event3, tagId, bucketStartTimeNs + 65 * NS_PER_SEC, 10);
+    CreateRepeatedValueLogEvent(&event3, tagId, bucket2StartTimeNs + 5 * NS_PER_SEC, 10);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
-    EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {10, 20},
+                                    {partialBucketSplitTimeNs - bucketStartTimeNs,
+                                     bucket2StartTimeNs - partialBucketSplitTimeNs},
+                                    {bucketStartTimeNs, partialBucketSplitTimeNs},
+                                    {partialBucketSplitTimeNs, bucket2StartTimeNs});
     EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, valueProducer.getCurrentBucketNum());
 }
 
-TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestPulledValue) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetric();
 
     UidMap uidMap;
@@ -689,14 +742,16 @@
                     atomMatcherId, logEventMatcherIndex, atomMatcher, uidMap)});
     sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
     sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+    int64_t partialBucketSplitTimeNs = bucket2StartTimeNs + 150;
     EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, kConfigKey, _, _, _)).WillOnce(Return());
     EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, kConfigKey, _)).WillOnce(Return());
     EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
             .WillOnce(Return(true))
-            .WillOnce(Invoke([](int tagId, const ConfigKey&,
-                                vector<std::shared_ptr<LogEvent>>* data, bool) {
+            .WillOnce(Invoke([partialBucketSplitTimeNs](int tagId, const ConfigKey&,
+                                                        vector<std::shared_ptr<LogEvent>>* data,
+                                                        bool) {
                 data->clear();
-                data->push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 149, 120));
+                data->push_back(CreateRepeatedValueLogEvent(tagId, partialBucketSplitTimeNs, 120));
                 return true;
             }));
     ValueMetricProducer valueProducer(kConfigKey, metric, -1, wizard, logEventMatcherIndex,
@@ -711,20 +766,27 @@
     valueProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
     EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
 
-    valueProducer.notifyAppUpgrade(bucket2StartTimeNs + 150, "ANY.APP", 1, 1);
-    EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(bucket2StartTimeNs + 150, valueProducer.mCurrentBucketStartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {20}, {150});
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer.notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer.onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer.mCurrentBucketStartTimeNs);
+    EXPECT_EQ(1, valueProducer.getCurrentBucketNum());
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {20}, {150}, {bucket2StartTimeNs},
+                                    {partialBucketSplitTimeNs});
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 150));
     valueProducer.onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
-    EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     EXPECT_EQ(bucket3StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
-    EXPECT_EQ(20L,
-              valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].values[0].long_value);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {20, 30},
-                                    {150, bucketSizeNs - 150});
+    EXPECT_EQ(2, valueProducer.getCurrentBucketNum());
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {20, 30}, {150, bucketSizeNs - 150},
+                                    {bucket2StartTimeNs, partialBucketSplitTimeNs},
+                                    {partialBucketSplitTimeNs, bucket3StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPulledWithAppUpgradeDisabled) {
@@ -754,12 +816,12 @@
     valueProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
     EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
 
-    valueProducer.notifyAppUpgrade(bucket2StartTimeNs + 150, "ANY.APP", 1, 1);
+    valueProducer.notifyAppUpgrade(bucket2StartTimeNs + 150);
     EXPECT_EQ(0UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
     EXPECT_EQ(bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
 }
 
-TEST(ValueMetricProducerTest, TestPulledValueWithUpgradeWhileConditionFalse) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestPulledValueWhileConditionFalse) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetricWithCondition();
 
     sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
@@ -784,14 +846,21 @@
     valueProducer->onConditionChanged(false, bucket2StartTimeNs - 100);
     EXPECT_FALSE(valueProducer->mCondition);
 
-    valueProducer->notifyAppUpgrade(bucket2StartTimeNs - 50, "ANY.APP", 1, 1);
+    int64_t partialBucketSplitTimeNs = bucket2StartTimeNs - 50;
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer->notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer->onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
     // Expect one full buckets already done and starting a partial bucket.
-    EXPECT_EQ(bucket2StartTimeNs - 50, valueProducer->mCurrentBucketStartTimeNs);
-    EXPECT_EQ(1UL, valueProducer->mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
-    EXPECT_EQ(bucketStartTimeNs,
-              valueProducer->mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer->mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, valueProducer->getCurrentBucketNum());
     assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20},
-                                    {(bucket2StartTimeNs - 100) - (bucketStartTimeNs + 1)});
+                                    {(bucket2StartTimeNs - 100) - (bucketStartTimeNs + 1)},
+                                    {bucketStartTimeNs}, {partialBucketSplitTimeNs});
     EXPECT_FALSE(valueProducer->mCondition);
 }
 TEST(ValueMetricProducerTest, TestPushedEventsWithoutCondition) {
@@ -834,7 +903,8 @@
     EXPECT_EQ(30, curInterval.value.long_value);
 
     valueProducer.flushIfNeededLocked(bucket2StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {30}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {30}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPushedEventsWithCondition) {
@@ -895,7 +965,8 @@
     EXPECT_EQ(50, curInterval.value.long_value);
 
     valueProducer.flushIfNeededLocked(bucket2StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {50}, {20});
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {50}, {20}, {bucketStartTimeNs},
+                                    {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestAnomalyDetection) {
@@ -1012,7 +1083,8 @@
     EXPECT_EQ(true, curBaseInfo.hasBase);
     EXPECT_EQ(23, curBaseInfo.base.long_value);
     EXPECT_EQ(false, curInterval.hasValue);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {12}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {12}, {bucketSizeNs},
+                                    {bucket2StartTimeNs}, {bucket3StartTimeNs});
 
     // pull 3 come late.
     // The previous bucket gets closed with error. (Has start value 23, no ending)
@@ -1028,7 +1100,15 @@
     EXPECT_EQ(true, curBaseInfo.hasBase);
     EXPECT_EQ(36, curBaseInfo.base.long_value);
     EXPECT_EQ(false, curInterval.hasValue);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {12}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {12}, {bucketSizeNs},
+                                    {bucket2StartTimeNs}, {bucket3StartTimeNs});
+    // The 3rd bucket is dropped due to multiple buckets being skipped.
+    ASSERT_EQ(1, valueProducer->mSkippedBuckets.size());
+    EXPECT_EQ(bucket3StartTimeNs, valueProducer->mSkippedBuckets[0].bucketStartTimeNs);
+    EXPECT_EQ(bucket4StartTimeNs, valueProducer->mSkippedBuckets[0].bucketEndTimeNs);
+    ASSERT_EQ(1, valueProducer->mSkippedBuckets[0].dropEvents.size());
+    EXPECT_EQ(MULTIPLE_BUCKETS_SKIPPED, valueProducer->mSkippedBuckets[0].dropEvents[0].reason);
+    EXPECT_EQ(bucket6StartTimeNs, valueProducer->mSkippedBuckets[0].dropEvents[0].dropTimeNs);
 }
 
 /*
@@ -1073,7 +1153,8 @@
     valueProducer->onConditionChanged(false, bucket2StartTimeNs + 1);
     curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     EXPECT_EQ(false, curBaseInfo.hasBase);
 
     // Now the alarm is delivered.
@@ -1082,7 +1163,8 @@
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
     EXPECT_EQ(false, curBaseInfo.hasBase);
@@ -1090,10 +1172,9 @@
 }
 
 /*
-* Test pulled event with non sliced condition. The pull on boundary come late, after the
-condition
-* change to false, and then true again. This is due to alarm delivered late.
-*/
+ * Test pulled event with non sliced condition. The pull on boundary come late, after the condition
+ * change to false, and then true again. This is due to alarm delivered late.
+ */
 TEST(ValueMetricProducerTest, TestBucketBoundaryWithCondition2) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetricWithCondition();
 
@@ -1139,7 +1220,8 @@
 
     // pull on bucket boundary come late, condition change happens before it
     valueProducer->onConditionChanged(false, bucket2StartTimeNs + 1);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     EXPECT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
     curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
@@ -1148,7 +1230,8 @@
 
     // condition changed to true again, before the pull alarm is delivered
     valueProducer->onConditionChanged(true, bucket2StartTimeNs + 25);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
     EXPECT_EQ(true, curBaseInfo.hasBase);
@@ -1167,13 +1250,15 @@
     EXPECT_EQ(140, curBaseInfo.base.long_value);
     EXPECT_EQ(true, curInterval.hasValue);
     EXPECT_EQ(10, curInterval.value.long_value);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 
     allData.clear();
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 160));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20, 30},
-                                    {bucketSizeNs - 8, bucketSizeNs - 24});
+    assertPastBucketValuesSingleKey(
+            valueProducer->mPastBuckets, {20, 30}, {bucketSizeNs - 8, bucketSizeNs - 24},
+            {bucketStartTimeNs, bucket2StartTimeNs}, {bucket2StartTimeNs, bucket3StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPushedAggregateMin) {
@@ -1216,7 +1301,8 @@
     EXPECT_EQ(10, curInterval.value.long_value);
 
     valueProducer.flushIfNeededLocked(bucket2StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {10}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {10}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPushedAggregateMax) {
@@ -1239,9 +1325,6 @@
 
     LogEvent event1(/*uid=*/0, /*pid=*/0);
     CreateRepeatedValueLogEvent(&event1, tagId, bucketStartTimeNs + 10, 10);
-
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 20, 20);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
 
     // has one slice
@@ -1251,6 +1334,8 @@
     EXPECT_EQ(10, curInterval.value.long_value);
     EXPECT_EQ(true, curInterval.hasValue);
 
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 20, 20);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
 
     // has one slice
@@ -1258,10 +1343,9 @@
     curInterval = valueProducer.mCurrentSlicedBucket.begin()->second[0];
     EXPECT_EQ(20, curInterval.value.long_value);
 
-    valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
-    /* EXPECT_EQ(1UL, valueProducer.mPastBuckets.size()); */
-    /* EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size()); */
-    /* EXPECT_EQ(20, valueProducer.mPastBuckets.begin()->second.back().values[0].long_value); */
+    valueProducer.flushIfNeededLocked(bucket2StartTimeNs);
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {20}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPushedAggregateAvg) {
@@ -1351,7 +1435,8 @@
     EXPECT_EQ(25, curInterval.value.long_value);
 
     valueProducer.flushIfNeededLocked(bucket2StartTimeNs);
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {25}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {25}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestSkipZeroDiffOutput) {
@@ -1375,10 +1460,8 @@
 
     LogEvent event1(/*uid=*/0, /*pid=*/0);
     CreateRepeatedValueLogEvent(&event1, tagId, bucketStartTimeNs + 10, 10);
-
-    LogEvent event2(/*uid=*/0, /*pid=*/0);
-    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 15, 15);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
+
     // has one slice
     EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
     ValueMetricProducer::Interval curInterval =
@@ -1388,6 +1471,8 @@
     EXPECT_EQ(10, curBaseInfo.base.long_value);
     EXPECT_EQ(false, curInterval.hasValue);
 
+    LogEvent event2(/*uid=*/0, /*pid=*/0);
+    CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 15, 15);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event2);
 
     // has one slice
@@ -1400,12 +1485,14 @@
     LogEvent event3(/*uid=*/0, /*pid=*/0);
     CreateRepeatedValueLogEvent(&event3, tagId, bucket2StartTimeNs + 10, 15);
     valueProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
+
     EXPECT_EQ(1UL, valueProducer.mCurrentSlicedBucket.size());
     curInterval = valueProducer.mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer.mCurrentBaseInfo.begin()->second[0];
     EXPECT_EQ(true, curBaseInfo.hasBase);
     EXPECT_EQ(15, curBaseInfo.base.long_value);
     EXPECT_EQ(true, curInterval.hasValue);
+    EXPECT_EQ(0, curInterval.value.long_value);
 
     LogEvent event4(/*uid=*/0, /*pid=*/0);
     CreateRepeatedValueLogEvent(&event4, tagId, bucket2StartTimeNs + 15, 15);
@@ -1416,11 +1503,11 @@
     EXPECT_EQ(true, curBaseInfo.hasBase);
     EXPECT_EQ(15, curBaseInfo.base.long_value);
     EXPECT_EQ(true, curInterval.hasValue);
+    EXPECT_EQ(0, curInterval.value.long_value);
 
     valueProducer.flushIfNeededLocked(bucket3StartTimeNs);
-    EXPECT_EQ(1UL, valueProducer.mPastBuckets.size());
-    EXPECT_EQ(1UL, valueProducer.mPastBuckets.begin()->second.size());
-    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {5}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer.mPastBuckets, {5}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestSkipZeroDiffOutputMultiValue) {
@@ -1740,8 +1827,8 @@
     EXPECT_EQ(3, baseInfo1.base.long_value);
     EXPECT_EQ(false, interval1.hasValue);
     EXPECT_EQ(0UL, valueProducer->mPastBuckets.size());
-    vector<shared_ptr<LogEvent>> allData;
 
+    vector<shared_ptr<LogEvent>> allData;
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
@@ -1753,7 +1840,8 @@
     EXPECT_EQ(false, interval1.hasValue);
     EXPECT_EQ(8, interval1.value.long_value);
     EXPECT_FALSE(interval1.seenNewData);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {8}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {8}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 
     auto it = valueProducer->mCurrentSlicedBucket.begin();
     for (; it != valueProducer->mCurrentSlicedBucket.end(); it++) {
@@ -1769,14 +1857,13 @@
     }
     EXPECT_TRUE(it != iter);
     EXPECT_TRUE(itBase != iterBase);
-    auto& interval2 = it->second[0];
-    auto& baseInfo2 = itBase->second[0];
+    auto interval2 = it->second[0];
+    auto baseInfo2 = itBase->second[0];
     EXPECT_EQ(2, it->first.getDimensionKeyInWhat().getValues()[0].mValue.int_value);
     EXPECT_EQ(true, baseInfo2.hasBase);
     EXPECT_EQ(4, baseInfo2.base.long_value);
     EXPECT_EQ(false, interval2.hasValue);
     EXPECT_FALSE(interval2.seenNewData);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {8}, {bucketSizeNs});
 
     // next pull somehow did not happen, skip to end of bucket 3
     allData.clear();
@@ -1791,7 +1878,8 @@
     EXPECT_EQ(5, baseInfo2.base.long_value);
     EXPECT_EQ(false, interval2.hasValue);
     EXPECT_FALSE(interval2.seenNewData);
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {8}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {8}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 
     allData.clear();
     allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 2, 14));
@@ -1805,9 +1893,13 @@
     EXPECT_FALSE(interval2.seenNewData);
     ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
     auto iterator = valueProducer->mPastBuckets.begin();
+    EXPECT_EQ(bucket4StartTimeNs, iterator->second[0].mBucketStartNs);
+    EXPECT_EQ(bucket5StartTimeNs, iterator->second[0].mBucketEndNs);
     EXPECT_EQ(9, iterator->second[0].values[0].long_value);
     EXPECT_EQ(bucketSizeNs, iterator->second[0].mConditionTrueNs);
     iterator++;
+    EXPECT_EQ(bucketStartTimeNs, iterator->second[0].mBucketStartNs);
+    EXPECT_EQ(bucket2StartTimeNs, iterator->second[0].mBucketEndNs);
     EXPECT_EQ(8, iterator->second[0].values[0].long_value);
     EXPECT_EQ(bucketSizeNs, iterator->second[0].mConditionTrueNs);
 }
@@ -2414,7 +2506,8 @@
     EXPECT_EQ(true, valueProducer->mHasGlobalBase);
 
     EXPECT_EQ(1UL, valueProducer->mPastBuckets.size());
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {1}, {bucketSizeNs - 12 + 1});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {1}, {bucketSizeNs - 12 + 1},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPartialResetOnBucketBoundaries) {
@@ -2461,10 +2554,11 @@
     EXPECT_EQ(true, valueProducer->mHasGlobalBase);
 }
 
-TEST(ValueMetricProducerTest, TestFullBucketResetWhenLastBucketInvalid) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestFullBucketResetWhenLastBucketInvalid) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetric();
 
     sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+    int64_t partialBucketSplitTimeNs = bucketStartTimeNs + bucketSizeNs / 2;
     EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
             // Initialization.
             .WillOnce(Invoke(
@@ -2474,23 +2568,41 @@
                         return true;
                     }))
             // notifyAppUpgrade.
-            .WillOnce(Invoke([](int tagId, const ConfigKey&,
-                                vector<std::shared_ptr<LogEvent>>* data, bool) {
+            .WillOnce(Invoke([partialBucketSplitTimeNs](int tagId, const ConfigKey&,
+                                                        vector<std::shared_ptr<LogEvent>>* data,
+                                                        bool) {
                 data->clear();
-                data->push_back(CreateRepeatedValueLogEvent(
-                        tagId, bucketStartTimeNs + bucketSizeNs / 2, 10));
+                data->push_back(CreateRepeatedValueLogEvent(tagId, partialBucketSplitTimeNs, 10));
                 return true;
             }));
     sp<ValueMetricProducer> valueProducer =
             ValueMetricProducerTestHelper::createValueProducerNoConditions(pullerManager, metric);
     ASSERT_EQ(0UL, valueProducer->mCurrentFullBucket.size());
 
-    valueProducer->notifyAppUpgrade(bucketStartTimeNs + bucketSizeNs / 2, "com.foo", 10000, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer->notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer->onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer->mCurrentBucketStartTimeNs);
+    EXPECT_EQ(0, valueProducer->getCurrentBucketNum());
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9},
+                                    {partialBucketSplitTimeNs - bucketStartTimeNs},
+                                    {bucketStartTimeNs}, {partialBucketSplitTimeNs});
     ASSERT_EQ(1UL, valueProducer->mCurrentFullBucket.size());
 
     vector<shared_ptr<LogEvent>> allData;
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 4));
     valueProducer->onDataPulled(allData, /** fails */ false, bucket3StartTimeNs + 1);
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9},
+                                    {partialBucketSplitTimeNs - bucketStartTimeNs},
+                                    {bucketStartTimeNs}, {partialBucketSplitTimeNs});
+    ASSERT_EQ(1, valueProducer->mSkippedBuckets.size());
+    EXPECT_EQ(partialBucketSplitTimeNs, valueProducer->mSkippedBuckets[0].bucketStartTimeNs);
+    EXPECT_EQ(bucket2StartTimeNs, valueProducer->mSkippedBuckets[0].bucketEndTimeNs);
     ASSERT_EQ(0UL, valueProducer->mCurrentFullBucket.size());
 }
 
@@ -2537,7 +2649,8 @@
     valueProducer->onConditionChanged(false, bucket3StartTimeNs + 10);
 
     // Bucket should have been completed.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {bucketSizeNs - 10});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {bucketSizeNs - 10},
+                                    {bucket2StartTimeNs}, {bucket3StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestLateOnDataPulledWithoutDiff) {
@@ -2557,7 +2670,8 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
     // Bucket should have been completed.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestLateOnDataPulledWithDiff) {
@@ -2585,12 +2699,14 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
     // Bucket should have been completed.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {19}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {19}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
-TEST(ValueMetricProducerTest, TestBucketBoundariesOnAppUpgrade) {
+TEST_P(ValueMetricProducerTest_PartialBucket, TestBucketBoundariesOnPartialBucket) {
     ValueMetric metric = ValueMetricProducerTestHelper::createMetric();
 
+    int64_t partialBucketSplitTimeNs = bucket2StartTimeNs + 2;
     sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
     EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
             // Initialization.
@@ -2601,20 +2717,29 @@
                         return true;
                     }))
             // notifyAppUpgrade.
-            .WillOnce(Invoke([](int tagId, const ConfigKey&,
-                                vector<std::shared_ptr<LogEvent>>* data, bool) {
+            .WillOnce(Invoke([partialBucketSplitTimeNs](int tagId, const ConfigKey&,
+                                                        vector<std::shared_ptr<LogEvent>>* data,
+                                                        bool) {
                 data->clear();
-                data->push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 2, 10));
+                data->push_back(CreateRepeatedValueLogEvent(tagId, partialBucketSplitTimeNs, 10));
                 return true;
             }));
 
     sp<ValueMetricProducer> valueProducer =
             ValueMetricProducerTestHelper::createValueProducerNoConditions(pullerManager, metric);
 
-    valueProducer->notifyAppUpgrade(bucket2StartTimeNs + 2, "com.foo", 10000, 1);
+    switch (GetParam()) {
+        case APP_UPGRADE:
+            valueProducer->notifyAppUpgrade(partialBucketSplitTimeNs);
+            break;
+        case BOOT_COMPLETE:
+            valueProducer->onStatsdInitCompleted(partialBucketSplitTimeNs);
+            break;
+    }
 
     // Bucket should have been completed.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestDataIsNotUpdatedWhenNoConditionChanged) {
@@ -2642,7 +2767,7 @@
 
     valueProducer->onConditionChanged(true, bucketStartTimeNs + 8);
     valueProducer->onConditionChanged(false, bucketStartTimeNs + 10);
-    valueProducer->onConditionChanged(false, bucketStartTimeNs + 10);
+    valueProducer->onConditionChanged(false, bucketStartTimeNs + 12);
 
     EXPECT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
     auto curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
@@ -2654,7 +2779,8 @@
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 10));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 1);
 
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {2});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {2}, {bucketStartTimeNs},
+                                    {bucket2StartTimeNs});
 }
 
 // TODO: b/145705635 fix or delete this test
@@ -2705,7 +2831,7 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
     // There was not global base available so all buckets are invalid.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {});
 }
 
 TEST(ValueMetricProducerTest, TestPullNeededFastDump) {
@@ -2849,7 +2975,8 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 30);
 
     // Bucket should have been completed.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
 }
 
 TEST(ValueMetricProducerTest, TestPulledData_noDiff_withMultipleConditionChanges) {
@@ -2892,7 +3019,8 @@
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {50 - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {50 - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     curInterval = valueProducer->mCurrentSlicedBucket.begin()->second[0];
     curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
     EXPECT_EQ(false, curBaseInfo.hasBase);
@@ -2923,7 +3051,8 @@
     allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs - 8});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs - 8},
+                                    {bucketStartTimeNs}, {bucket2StartTimeNs});
     ValueMetricProducer::Interval curInterval =
             valueProducer->mCurrentSlicedBucket.begin()->second[0];
     ValueMetricProducer::BaseInfo curBaseInfo = valueProducer->mCurrentBaseInfo.begin()->second[0];
@@ -2946,7 +3075,7 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
     // Condition was always false.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {});
 }
 
 TEST(ValueMetricProducerTest, TestPulledData_noDiff_withFailure) {
@@ -2976,7 +3105,7 @@
     valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
 
     // No buckets, we had a failure.
-    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {});
+    assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {});
 }
 
 /*
diff --git a/cmds/statsd/tests/statsd_test_util.h b/cmds/statsd/tests/statsd_test_util.h
index 37b9889..4d68ea2 100644
--- a/cmds/statsd/tests/statsd_test_util.h
+++ b/cmds/statsd/tests/statsd_test_util.h
@@ -42,6 +42,8 @@
 const int SCREEN_STATE_ATOM_ID = util::SCREEN_STATE_CHANGED;
 const int UID_PROCESS_STATE_ATOM_ID = util::UID_PROCESS_STATE_CHANGED;
 
+enum BucketSplitEvent { APP_UPGRADE, BOOT_COMPLETE };
+
 // Converts a ProtoOutputStream to a StatsLogReport proto.
 StatsLogReport outputStreamToProto(ProtoOutputStream* proto);
 
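The BucketSplitEvent enum added above drives the GetParam() switch that each parameterized test repeats verbatim. A hypothetical helper, not part of this change, could centralize that dispatch; a sketch, templated on the producer type so it makes no assumption about a shared base-class signature:

// Hypothetical helper (illustration only, not in this change): forwards a
// partial-bucket split to a producer based on the parameterized event.
template <typename ProducerT>
void triggerPartialBucketSplit(ProducerT& producer, BucketSplitEvent splitEvent,
                               int64_t splitTimeNs) {
    switch (splitEvent) {
        case APP_UPGRADE:
            producer.notifyAppUpgrade(splitTimeNs);
            break;
        case BOOT_COMPLETE:
            producer.onStatsdInitCompleted(splitTimeNs);
            break;
    }
}

A TEST_P body would then call, e.g., triggerPartialBucketSplit(durationProducer, GetParam(), partialBucketSplitTimeNs).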
diff --git a/cmds/statsd/tests/utils/MultiConditionTrigger_test.cpp b/cmds/statsd/tests/utils/MultiConditionTrigger_test.cpp
new file mode 100644
index 0000000..db402a0
--- /dev/null
+++ b/cmds/statsd/tests/utils/MultiConditionTrigger_test.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "utils/MultiConditionTrigger.h"
+
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <set>
+#include <string>
+#include <thread>
+#include <vector>
+
+#ifdef __ANDROID__
+
+using namespace std;
+using std::this_thread::sleep_for;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+TEST(MultiConditionTrigger, TestMultipleConditions) {
+    int numConditions = 5;
+    string t1 = "t1", t2 = "t2", t3 = "t3", t4 = "t4", t5 = "t5";
+    set<string> conditionNames = {t1, t2, t3, t4, t5};
+
+    mutex lock;
+    condition_variable cv;
+    bool triggerCalled = false;
+
+    // The trigger callback marks triggerCalled true under the lock and notifies the waiter.
+    MultiConditionTrigger trigger(conditionNames, [&lock, &cv, &triggerCalled] {
+        {
+            lock_guard lg(lock);
+            triggerCalled = true;
+        }
+        cv.notify_all();
+    });
+
+    vector<thread> threads;
+    // Use vector<int>: vector<bool> packs bits, so concurrent writes to different elements would race.
+    vector<int> done(numConditions, 0);
+
+    int i = 0;
+    for (const string& conditionName : conditionNames) {
+        threads.emplace_back([&done, &conditionName, &trigger, i] {
+            sleep_for(chrono::milliseconds(3));
+            done[i] = true;
+            trigger.markComplete(conditionName);
+        });
+        i++;
+    }
+
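+    // The trigger callback runs only after every condition is marked complete; wait for it here.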
+    unique_lock<mutex> unique_lk(lock);
+    cv.wait(unique_lk, [&triggerCalled] {
+        return triggerCalled;
+    });
+
+    for (i = 0; i < numConditions; i++) {
+        EXPECT_EQ(done[i], 1);
+    }
+
+    for (i = 0; i < numConditions; i++) {
+        threads[i].join();
+    }
+}
+
+TEST(MultiConditionTrigger, TestNoConditions) {
+    mutex lock;
+    condition_variable cv;
+    bool triggerCalled = false;
+
+    MultiConditionTrigger trigger({}, [&lock, &cv, &triggerCalled] {
+        {
+            lock_guard lg(lock);
+            triggerCalled = true;
+        }
+        cv.notify_all();
+    });
+
+    // The trigger should fire immediately when there are no conditions to mark complete.
+    unique_lock<mutex> unique_lk(lock);
+    cv.wait(unique_lk, [&triggerCalled] { return triggerCalled; });
+    EXPECT_TRUE(triggerCalled);
+}
+
+TEST(MultiConditionTrigger, TestMarkCompleteCalledBySameCondition) {
+    string t1 = "t1", t2 = "t2";
+    set<string> conditionNames = {t1, t2};
+
+    mutex lock;
+    condition_variable cv;
+    bool triggerCalled = false;
+
+    MultiConditionTrigger trigger(conditionNames, [&lock, &cv, &triggerCalled] {
+        {
+            lock_guard lg(lock);
+            triggerCalled = true;
+        }
+        cv.notify_all();
+    });
+
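+    // Marking the same condition complete multiple times should count only once.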
+    trigger.markComplete(t1);
+    trigger.markComplete(t1);
+
+    // Ensure that the trigger still hasn't fired.
+    {
+        lock_guard lg(lock);
+        EXPECT_FALSE(triggerCalled);
+    }
+
+    trigger.markComplete(t2);
+    unique_lock<mutex> unique_lk(lock);
+    cv.wait(unique_lk, [&triggerCalled] { return triggerCalled; });
+    EXPECT_TRUE(triggerCalled);
+}
+
+TEST(MultiConditionTrigger, TestTriggerOnlyCalledOnce) {
+    string t1 = "t1";
+    set<string> conditionNames = {t1};
+
+    mutex lock;
+    condition_variable cv;
+    bool triggerCalled = false;
+    int triggerCount = 0;
+
+    MultiConditionTrigger trigger(conditionNames, [&lock, &cv, &triggerCalled, &triggerCount] {
+        {
+            lock_guard lg(lock);
+            triggerCount++;
+            triggerCalled = true;
+        }
+        cv.notify_all();
+    });
+
+    trigger.markComplete(t1);
+
+    // Ensure that the trigger fired.
+    {
+        unique_lock<mutex> unique_lk(lock);
+        cv.wait(unique_lk, [&triggerCalled] { return triggerCalled; });
+        EXPECT_TRUE(triggerCalled);
+        EXPECT_EQ(triggerCount, 1);
+        triggerCalled = false;
+    }
+
+    trigger.markComplete(t1);
+
+    // Ensure that the trigger does not fire again.
+    {
+        unique_lock<mutex> unique_lk(lock);
+        cv.wait_for(unique_lk, chrono::milliseconds(5), [&triggerCalled] { return triggerCalled; });
+        EXPECT_FALSE(triggerCalled);
+        EXPECT_EQ(triggerCount, 1);
+    }
+}
+
+}  // namespace statsd
+}  // namespace os
+}  // namespace android
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
diff --git a/cmds/statsd/tests/utils/NamedLatch_test.cpp b/cmds/statsd/tests/utils/NamedLatch_test.cpp
deleted file mode 100644
index de48a13..0000000
--- a/cmds/statsd/tests/utils/NamedLatch_test.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "utils/NamedLatch.h"
-
-#include <gtest/gtest.h>
-
-#include <chrono>
-#include <set>
-#include <thread>
-#include <vector>
-
-#ifdef __ANDROID__
-
-using namespace std;
-using std::this_thread::sleep_for;
-
-namespace android {
-namespace os {
-namespace statsd {
-
-TEST(NamedLatchTest, TestWait) {
-    int numEvents = 5;
-    string t1 = "t1", t2 = "t2", t3 = "t3", t4 = "t4", t5 = "t5";
-    set<string> eventNames = {t1, t2, t3, t4, t5};
-
-    NamedLatch latch(eventNames);
-    vector<thread> threads;
-    vector<bool> done(numEvents, false);
-
-    int i = 0;
-    for (const string& eventName : eventNames) {
-        threads.emplace_back([&done, &eventName, &latch, i] {
-            sleep_for(chrono::milliseconds(3));
-            done[i] = true;
-            latch.countDown(eventName);
-        });
-        i++;
-    }
-
-    latch.wait();
-
-    for (i = 0; i < numEvents; i++) {
-        EXPECT_EQ(done[i], 1);
-    }
-
-    for (i = 0; i < numEvents; i++) {
-        threads[i].join();
-    }
-}
-
-TEST(NamedLatchTest, TestNoWorkers) {
-    NamedLatch latch({});
-    latch.wait();
-    // Ensure that latch does not wait if no events need to countDown.
-}
-
-TEST(NamedLatchTest, TestCountDownCalledBySameEventName) {
-    string t1 = "t1", t2 = "t2";
-    set<string> eventNames = {t1, t2};
-
-    NamedLatch latch(eventNames);
-
-    thread waiterThread([&latch] { latch.wait(); });
-
-    latch.countDown(t1);
-    latch.countDown(t1);
-
-    // Ensure that the latch's remaining threads still has t2.
-    latch.mMutex.lock();
-    ASSERT_EQ(latch.mRemainingEventNames.size(), 1);
-    EXPECT_NE(latch.mRemainingEventNames.find(t2), latch.mRemainingEventNames.end());
-    latch.mMutex.unlock();
-
-    latch.countDown(t2);
-    waiterThread.join();
-}
-
-}  // namespace statsd
-}  // namespace os
-}  // namespace android
-#else
-GTEST_LOG_(INFO) << "This test does nothing.\n";
-#endif