Add support for clobbered blocks

In ext4 filesystems, some blocks might be changed even when the
filesystem is mounted R/O, such as the superblock (block 0). We need
to exclude such blocks from integrity verification. In addition, such
blocks should always be written to the target by copying instead of
patching.

Bug: http://b/20939131
Change-Id: I991169ec307dfb231b2fe8908a0668595ecb2060
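
As background for the TotalSha1() change below, the verification hash
is meant to cover every block in care_map except the clobbered_blocks.
A minimal sketch of that rule, using plain block-index sets as a
stand-in for the RangeSet objects in blockimgdiff.py (total_sha1()
here is illustrative, not the module's actual method):

from hashlib import sha1

BLOCK_SIZE = 4096

def total_sha1(image_data, care_blocks, clobbered_blocks):
    # Hash every cared-for block except the clobbered ones, so that a
    # superblock rewritten by the live filesystem cannot fail the
    # verification step.
    h = sha1()
    for blk in sorted(care_blocks - clobbered_blocks):
        h.update(image_data[blk * BLOCK_SIZE:(blk + 1) * BLOCK_SIZE])
    return h.hexdigest()

# Block 0 (the ext4 superblock) is clobbered: it does not contribute
# to the verification hash even if it changes on the device.
image = b"\x00" * (4 * BLOCK_SIZE)
print(total_sha1(image, care_blocks={0, 1, 2, 3}, clobbered_blocks={0}))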
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 8eb249a..0a387ec 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -82,6 +82,7 @@
   """A zero-length image."""
   blocksize = 4096
   care_map = RangeSet()
+  clobbered_blocks = RangeSet()
   total_blocks = 0
   file_map = {}
   def ReadRangeSet(self, ranges):
@@ -114,6 +115,7 @@
 
     self.total_blocks = len(self.data) / self.blocksize
     self.care_map = RangeSet(data=(0, self.total_blocks))
+    self.clobbered_blocks = RangeSet()
 
     zero_blocks = []
     nonzero_blocks = []
@@ -135,6 +137,8 @@
     return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
 
   def TotalSha1(self):
+    # DataImage always carries empty clobbered_blocks.
+    assert self.clobbered_blocks.size() == 0
     return sha1(self.data).hexdigest()
 
 
@@ -184,6 +188,10 @@
 #      (Typically a domain is a file, and the key in file_map is the
 #      pathname.)
 #
+#    clobbered_blocks: a RangeSet of blocks that contain data but may
+#      be altered by the filesystem. They need to be excluded when
+#      verifying the partition integrity.
+#
 #    ReadRangeSet(): a function that takes a RangeSet and returns the
 #      data contained in the image blocks of that RangeSet.  The data
 #      is returned as a list or tuple of strings; concatenating the
@@ -193,7 +201,7 @@
 #
 #    TotalSha1(): a function that returns (as a hex string) the SHA-1
 #      hash of all the data in the image (ie, all the blocks in the
-#      care_map)
+#      care_map minus clobbered_blocks).
 #
 # When creating a BlockImageDiff, the src image may be None, in which
 # case the list of transfers produced will never read from the
@@ -445,7 +453,6 @@
       if free_string:
         out.append("".join(free_string))
 
-
      # sanity check: abort if we're going to need more than 512 MB of
       # stash space
       assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
@@ -845,6 +852,12 @@
                  "zero", self.transfers)
         continue
 
+      elif tgt_fn == "__COPY":
+        # "__COPY" domain includes all the blocks not contained in any
+        # file and that need to be copied unconditionally to the target.
+        Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+        continue
+
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
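
The "__COPY" branch above assumes that whatever builds file_map has
already pulled the clobbered blocks out of the per-file domains and
filed them under that key. A minimal sketch of that bookkeeping, using
plain block-index sets and a hypothetical build_file_map() helper in
place of the real sparse-image loader:

def build_file_map(per_file_blocks, clobbered_blocks):
    # Group image blocks into transfer domains. Clobbered blocks are
    # kept out of ordinary file domains and filed under "__COPY", which
    # FindTransfers() turns into an unconditional "new" (copy) transfer
    # instead of a patch.
    file_map = {}
    for path, blocks in per_file_blocks.items():
        remaining = blocks - clobbered_blocks
        if remaining:
            file_map[path] = remaining
    if clobbered_blocks:
        file_map["__COPY"] = set(clobbered_blocks)
    return file_map

fm = build_file_map({"/system/app/Foo.apk": {10, 11, 12}}, {0})
assert fm["__COPY"] == {0}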