Add support for clobbered blocks
In ext4 filesystems, some blocks might be changed even when the
filesystem is mounted R/O, such as the superblock (block 0). We need
to exclude such blocks from integrity verification. In addition, such
blocks should always be written to the target by copying instead of
patching.
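
For illustration only, the new parameter is intended to be used roughly
as below. The image and map paths are hypothetical, and the import
assumes the script runs from tools/releasetools like the other
releasetools scripts:

    import sparse_img

    # Mark block 0 (the ext4 superblock) as clobbered: it is skipped by
    # TotalSha1() and surfaced as "__COPY" in the file map.
    simg = sparse_img.SparseImage("system.img", "system.map",
                                  clobbered_blocks="0")
    print(simg.TotalSha1())                  # hash excludes block 0
    copy_blocks = simg.file_map["__COPY"]    # RangeSet covering block 0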
Bug: http://b/20939131
Change-Id: I657025b7b1ad50d4365e7b18dc39308facfe864e
(cherry picked from commit ff7778166bd13a90c89fa333591ee2037f587a11)
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index b97bb84..2ac97ac 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -21,10 +21,17 @@
class SparseImage(object):
- """Wraps a sparse image file (and optional file map) into an image
- object suitable for passing to BlockImageDiff."""
+ """Wraps a sparse image file into an image object.
- def __init__(self, simg_fn, file_map_fn=None):
+ Wraps a sparse image file (and optional file map and clobbered_blocks) into
+ an image object suitable for passing to BlockImageDiff. file_map contains
+ the mapping between files and their blocks. clobbered_blocks contains the set
+ of blocks that should always be written to the target regardless of the old
+ contents (i.e. copying instead of patching). clobbered_blocks should be in
+ the form of a string like "0" or "0 1-5 8".
+ """
+
+ def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
self.simg_f = f = open(simg_fn, "rb")
header_bin = f.read(28)
@@ -57,6 +64,7 @@
pos = 0 # in blocks
care_data = []
self.offset_map = offset_map = []
+ self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)
for i in range(total_chunks):
header_bin = f.read(12)
@@ -103,7 +111,7 @@
self.offset_index = [i[0] for i in offset_map]
if file_map_fn:
- self.LoadFileBlockMap(file_map_fn)
+ self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
else:
self.file_map = {"__DATA": self.care_map}
@@ -111,9 +119,10 @@
return [d for d in self._GetRangeData(ranges)]
def TotalSha1(self):
- """Return the SHA-1 hash of all data in the 'care' regions of this image."""
+ """Return the SHA-1 hash of all data in the 'care' regions but not in
+ clobbered_blocks of this image."""
h = sha1()
- for d in self._GetRangeData(self.care_map):
+ for d in self._GetRangeData(self.care_map.subtract(self.clobbered_blocks)):
h.update(d)
return h.hexdigest()
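
The subtraction above is plain RangeSet set math. A tiny sketch, with
made-up block numbers, assuming rangelib.RangeSet accepts the
"0 1-5 8"-style strings described in the new docstring:

    import rangelib

    care = rangelib.RangeSet(data="0-1023")   # the image's 'care' region
    clobbered = rangelib.RangeSet(data="0")   # e.g. the ext4 superblock
    hashed = care.subtract(clobbered)         # blocks 1-1023; block 0 skipped
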
@@ -156,7 +165,7 @@
yield fill_data * (this_read * (self.blocksize >> 2))
to_read -= this_read
- def LoadFileBlockMap(self, fn):
+ def LoadFileBlockMap(self, fn, clobbered_blocks):
remaining = self.care_map
self.file_map = out = {}
@@ -166,14 +175,20 @@
ranges = rangelib.RangeSet.parse(ranges)
out[fn] = ranges
assert ranges.size() == ranges.intersect(remaining).size()
+
+ # Currently we assume that blocks in clobbered_blocks are not part of
+ # any file.
+ assert not clobbered_blocks.overlaps(ranges)
remaining = remaining.subtract(ranges)
+ remaining = remaining.subtract(clobbered_blocks)
+
# For all the remaining blocks in the care_map (ie, those that
- # aren't part of the data for any file), divide them into blocks
- # that are all zero and blocks that aren't. (Zero blocks are
- # handled specially because (1) there are usually a lot of them
- # and (2) bsdiff handles files with long sequences of repeated
- # bytes especially poorly.)
+ # aren't part of the data for any file nor part of the clobbered_blocks),
+ # divide them into blocks that are all zero and blocks that aren't.
+ # (Zero blocks are handled specially because (1) there are usually
+ # a lot of them and (2) bsdiff handles files with long sequences of
+ # repeated bytes especially poorly.)
zero_blocks = []
nonzero_blocks = []
@@ -203,6 +218,7 @@
out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
+ out["__COPY"] = clobbered_blocks
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as