Merge changes I8c5ab552,If8546dea

* changes:
  libsnapshot: tests use common MapUpdateSnapshot/WriteSnapshotAndHash
  libsnapshot: Add test for hash tree accounting
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
index 7411e5a..8e3875f 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
@@ -72,6 +72,8 @@
 
 static constexpr const std::string_view kCowGroupName = "cow";
 
+bool SourceCopyOperationIsClone(const chromeos_update_engine::InstallOperation& operation);
+
 enum class UpdateState : unsigned int {
     // No update or merge is in progress.
     None,
diff --git a/fs_mgr/libsnapshot/partition_cow_creator.cpp b/fs_mgr/libsnapshot/partition_cow_creator.cpp
index 0ba2a88..61f5c0c 100644
--- a/fs_mgr/libsnapshot/partition_cow_creator.cpp
+++ b/fs_mgr/libsnapshot/partition_cow_creator.cpp
@@ -62,6 +62,19 @@
     return false;
 }
 
+bool SourceCopyOperationIsClone(const InstallOperation& operation) {
+    using ChromeOSExtent = chromeos_update_engine::Extent;
+    if (operation.src_extents().size() != operation.dst_extents().size()) {
+        return false;
+    }
+    return std::equal(operation.src_extents().begin(), operation.src_extents().end(),
+                      operation.dst_extents().begin(),
+                      [](const ChromeOSExtent& src, const ChromeOSExtent& dst) {
+                          return src.start_block() == dst.start_block() &&
+                                 src.num_blocks() == dst.num_blocks();
+                      });
+}
+
 void WriteExtent(DmSnapCowSizeCalculator* sc, const chromeos_update_engine::Extent& de,
                  unsigned int sectors_per_block) {
     const auto block_boundary = de.start_block() + de.num_blocks();
@@ -88,6 +101,11 @@
     if (operations == nullptr) return sc.cow_size_bytes();
 
     for (const auto& iop : *operations) {
+        // Do not allocate space for operations that are going to be skipped
+        // during OTA application.
+        if (iop.type() == InstallOperation::SOURCE_COPY && SourceCopyOperationIsClone(iop))
+            continue;
+
         for (const auto& de : iop.dst_extents()) {
             WriteExtent(&sc, de, sectors_per_block);
         }
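
SourceCopyOperationIsClone() returns true exactly when every source extent is
identical to the corresponding destination extent, i.e. the SOURCE_COPY moves
no blocks. update_engine skips such operations when applying the OTA, so the
estimator above reserves no COW space for them. A minimal sketch of both cases,
assuming the generated protobuf API from update_engine's update_metadata.proto
(the include path is an assumption, not part of this change):

    #include <update_engine/update_metadata.pb.h>

    using chromeos_update_engine::InstallOperation;

    // Build a SOURCE_COPY with a single extent on each side.
    static InstallOperation MakeSourceCopy(uint64_t src_start, uint64_t dst_start,
                                           uint64_t num_blocks) {
        InstallOperation op;
        op.set_type(InstallOperation::SOURCE_COPY);
        auto* src = op.add_src_extents();
        src->set_start_block(src_start);
        src->set_num_blocks(num_blocks);
        auto* dst = op.add_dst_extents();
        dst->set_start_block(dst_start);
        dst->set_num_blocks(num_blocks);
        return op;
    }

    // MakeSourceCopy(10, 10, 4): src == dst, a clone; skipped, no COW space.
    // MakeSourceCopy(10, 20, 4): blocks move; COW space is still accounted.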
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index 5573abc..33c122b 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -129,6 +129,11 @@
     auto file = LockExclusive();
     if (!file) return false;
 
+    // Purge the ImageManager just in case there is a corrupt lp_metadata file
+    // lying around. (NB: no need to return false on an error, we can let the
+    // update try to progress.)
+    images_->RemoveAllImages();
+
     auto state = ReadUpdateState(file.get());
     if (state != UpdateState::None) {
         LOG(ERROR) << "An update is already in progress, cannot begin a new update";
@@ -1182,8 +1187,26 @@
     }
 
     bool ok = true;
+    bool has_mapped_cow_images = false;
     for (const auto& name : snapshots) {
-        ok &= (UnmapPartitionWithSnapshot(lock, name) && DeleteSnapshot(lock, name));
+        if (!UnmapPartitionWithSnapshot(lock, name) || !DeleteSnapshot(lock, name)) {
+            // Remember whether or not we were able to unmap the cow image.
+            auto cow_image_device = GetCowImageDeviceName(name);
+            has_mapped_cow_images |= images_->IsImageMapped(cow_image_device);
+
+            ok = false;
+        }
+    }
+
+    if (ok || !has_mapped_cow_images) {
+        // Delete any image artifacts as a precaution, in case an update is
+        // being cancelled due to some corrupted state in an lp_metadata file.
+        // Note that we do not do this if some cow images are still mapped,
+        // since we must not remove backing storage if it's in use.
+        if (!images_->RemoveAllImages()) {
+            LOG(ERROR) << "Could not remove all snapshot artifacts";
+            return false;
+        }
     }
     return ok;
 }
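
The deletion loop above may leave some snapshots behind, so the purge is
gated: backing images are removed either when every snapshot was unmapped and
deleted, or when a deletion failed but no COW image is still mapped. A short
sketch of the guard condition (function name hypothetical):

    // Backing storage must never be removed while a mapped COW device is
    // still using it; doing so would pull the storage out from under the
    // device-mapper target.
    static bool CanPurgeAllImages(bool all_deleted, bool has_mapped_cow_images) {
        return all_deleted || !has_mapped_cow_images;
    }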
diff --git a/libutils/StrongPointer.cpp b/libutils/StrongPointer.cpp
index ba52502..ef46723 100644
--- a/libutils/StrongPointer.cpp
+++ b/libutils/StrongPointer.cpp
@@ -21,4 +21,7 @@
 namespace android {
 
 void sp_report_race() { LOG_ALWAYS_FATAL("sp<> assignment detected data race"); }
+
+void sp_report_stack_pointer() { LOG_ALWAYS_FATAL("sp<> constructed with stack pointer argument"); }
+
 }
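
sp_report_stack_pointer() is the out-of-line abort helper for the new
check_not_on_stack() test added to StrongPointer.h below; like
sp_report_race(), it is kept non-inline and non-template for code size. A
sketch of the misuse it catches (Foo is a hypothetical RefBase subclass):

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    struct Foo : public android::RefBase {};

    void Bad() {
        Foo on_stack;
        // &on_stack is, in practice, on the same 4K page as the sp<>
        // constructor's frame, so sp_report_stack_pointer() aborts here.
        // Without the check, the refcount would eventually delete an object
        // that was never heap-allocated.
        android::sp<Foo> p(&on_stack);
    }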
diff --git a/libutils/include/utils/StrongPointer.h b/libutils/include/utils/StrongPointer.h
index 9cd7c75..07dd3f1 100644
--- a/libutils/include/utils/StrongPointer.h
+++ b/libutils/include/utils/StrongPointer.h
@@ -122,26 +122,54 @@
         return o != *this;
     }
 
-private:    
+private:
     template<typename Y> friend class sp;
     template<typename Y> friend class wp;
     void set_pointer(T* ptr);
+    static inline void check_not_on_stack(const void* ptr);
     T* m_ptr;
 };
 
-// For code size reasons, we do not want this inlined or templated.
+// For code size reasons, we do not want these inlined or templated.
 void sp_report_race();
+void sp_report_stack_pointer();
 
 #undef COMPARE
 
 // ---------------------------------------------------------------------------
 // No user serviceable parts below here.
 
+// Check whether address is definitely on the calling stack.  We actually check whether it is on
+// the same 4K page as the frame pointer.
+//
+// Assumptions:
+// - Pages are never smaller than 4K (MIN_PAGE_SIZE)
+// - Malloced memory never shares a page with a stack.
+//
+// It does not appear safe to broaden this check to include adjacent pages; apparently this code
+// is used in environments where there may not be a guard page below (at higher addresses than)
+// the bottom of the stack.
+//
+// TODO: Consider adding make_sp<T>() to allocate an object and wrap the resulting pointer safely
+// without checking overhead.
+template <typename T>
+void sp<T>::check_not_on_stack(const void* ptr) {
+    static constexpr int MIN_PAGE_SIZE = 0x1000;  // 4K. Safer than including sys/user.h.
+    static constexpr uintptr_t MIN_PAGE_MASK = ~static_cast<uintptr_t>(MIN_PAGE_SIZE - 1);
+    uintptr_t my_frame_address =
+            reinterpret_cast<uintptr_t>(__builtin_frame_address(0 /* this frame */));
+    if (((reinterpret_cast<uintptr_t>(ptr) ^ my_frame_address) & MIN_PAGE_MASK) == 0) {
+        sp_report_stack_pointer();
+    }
+}
+
 template<typename T>
 sp<T>::sp(T* other)
         : m_ptr(other) {
-    if (other)
+    if (other) {
+        check_not_on_stack(other);
         other->incStrong(this);
+    }
 }
 
 template<typename T>
@@ -159,8 +187,10 @@
 template<typename T> template<typename U>
 sp<T>::sp(U* other)
         : m_ptr(other) {
-    if (other)
+    if (other) {
+        check_not_on_stack(other);
         (static_cast<T*>(other))->incStrong(this);
+    }
 }
 
 template<typename T> template<typename U>
@@ -207,7 +237,10 @@
 template<typename T>
 sp<T>& sp<T>::operator =(T* other) {
     T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
-    if (other) other->incStrong(this);
+    if (other) {
+        check_not_on_stack(other);
+        other->incStrong(this);
+    }
     if (oldPtr) oldPtr->decStrong(this);
     if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
     m_ptr = other;
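
The heuristic rests on simple address arithmetic: two addresses lie on the
same 4K page exactly when they agree on every bit at or above bit 12, i.e.
their XOR masked by ~0xFFF is zero. A standalone sketch of the same check,
not libutils code:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kMinPageSize = 0x1000;
    constexpr uintptr_t kMinPageMask = ~(kMinPageSize - 1);

    static bool SamePage(const void* a, const void* b) {
        // Identical bits above the low 12 <=> same 4K-aligned page.
        return ((reinterpret_cast<uintptr_t>(a) ^ reinterpret_cast<uintptr_t>(b)) &
                kMinPageMask) == 0;
    }

    int main() {
        int local;                 // lives in main()'s stack frame
        int* heaped = new int(0);  // heap never shares a page with a stack
        // __builtin_frame_address is a GCC/Clang builtin, as in the header.
        printf("local vs frame: %d\n",
               SamePage(&local, __builtin_frame_address(0)));   // 1
        printf("heap  vs frame: %d\n",
               SamePage(heaped, __builtin_frame_address(0)));   // 0, in practice
        delete heaped;
    }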