# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import bisect
import os
import struct
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
    self.simg_f = f = open(simg_fn, "rb")

    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)
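    # The header fields, in order: a 32-bit magic, four 16-bit fields (major
    # version, minor version, file header size, chunk header size) and four
    # 32-bit fields (block size, total output blocks, total chunks, and an
    # image checksum that is not used here).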

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
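    # Each offset_map entry is a tuple of (start block, number of blocks,
    # file offset of the chunk data or None, 4-byte fill pattern or None),
    # so any block can later be resolved to either a file position or a
    # fill value.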
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

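    # Chunk types in the sparse format: 0xCAC1 is a raw chunk, 0xCAC2 a fill
    # chunk, 0xCAC3 a "don't care" chunk, and 0xCAC4 a CRC32 chunk (which is
    # rejected below).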
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]
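    # offset_index holds the start block of every chunk, in order, so that a
    # block number can be mapped back to its chunk with a binary search.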

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    h = sha1()
    for d in self._GetRangeData(ranges):
      h.update(d)
    return h.hexdigest()

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""
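    # Each requested range is mapped onto chunks by a binary search over
    # offset_index. Raw chunks are read straight from the image file, while
    # fill chunks are synthesized by repeating the 4-byte fill pattern
    # (blocksize >> 2) times per block.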

    f = self.simg_f
    for s, e in ranges:
      to_read = e-s
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # for the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    remaining = self.care_map
    self.file_map = out = {}

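    # Each line of the block map file is expected to be of the form
    # "<name> <block ranges>", where the ranges use the syntax accepted by
    # rangelib.RangeSet.parse(), e.g. (hypothetically)
    # "/system/build.prop 8190-8191".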
    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e., those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize
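    # reference is one full block of zero bytes; blocks equal to it are
    # classified as __ZERO, everything else as __NONZERO.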

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

    assert zero_blocks or nonzero_blocks or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_blocks:
      out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}
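

# Minimal usage sketch, assuming a sparse image path is passed on the command
# line (the "system.img" default below is only a placeholder) and that the
# sibling rangelib module is importable; it just prints summary information.
if __name__ == "__main__":
  import sys

  image_path = sys.argv[1] if len(sys.argv) > 1 else "system.img"
  image = SparseImage(image_path)

  # care_map covers every block backed by a raw or fill chunk.
  print("care_map covers %d blocks" % image.care_map.size())
  print("file_map has %d entries" % len(image.file_map))
  # TotalSha1() hashes the care regions, excluding clobbered blocks.
  print("SHA-1 of care regions: %s" % image.TotalSha1())

  if image.care_map.size():
    # ReadRangeSet() returns the data for a RangeSet as a list of strings.
    start, _ = next(iter(image.care_map))
    first_block = rangelib.RangeSet(data=(start, start + 1))
    data = b"".join(image.ReadRangeSet(first_block))
    print("first care block is %d bytes" % len(data))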