blob: 013044f6ff007551d4831c89fe85f201785828d1 [file] [log] [blame]
Doug Zongker424296a2014-09-02 08:53:09 -07001# Copyright (C) 2014 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
Doug Zongkerfc44a512014-08-26 13:10:25 -070015import bisect
16import os
Doug Zongkerfc44a512014-08-26 13:10:25 -070017import struct
Doug Zongkerfc44a512014-08-26 13:10:25 -070018from hashlib import sha1
19
Dan Albert8b72aef2015-03-23 19:13:21 -070020import rangelib
21
Doug Zongkerfc44a512014-08-26 13:10:25 -070022
class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the set
  of blocks that should be always written to the target regardless of the old
  contents (i.e. copying instead of patching). clobbered_blocks should be in
  the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
    """Parse the sparse image header and chunk table.

    Args:
      simg_fn: Path to the sparse image file. The file object is kept open
          for the lifetime of this object (self.simg_f) and is re-read by
          _GetRangeData() and LoadFileBlockMap().
      file_map_fn: Optional path to a block map file; each line is a file
          name followed by a rangelib range string (see LoadFileBlockMap).
      clobbered_blocks: Optional range string (e.g. "0 1-5 8") of blocks
          that must always be copied verbatim to the target.

    Raises:
      ValueError: If the magic, version, header sizes, or a chunk entry do
          not match the sparse image format this parser understands.
    """
    self.simg_f = f = open(simg_fn, "rb")

    # Sparse image file header: 28 bytes, little-endian
    # (magic, major, minor, file_hdr_sz, chunk_hdr_sz, blk_sz, total_blks,
    # total_chunks).
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]        # bytes per output block
    self.total_blocks = total_blks = header[6] # size of the expanded image
    total_chunks = header[7]

    # Only the exact layout we know how to parse is accepted: magic
    # 0xED26FF3A, version 1.0, 28-byte file header, 12-byte chunk headers.
    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    pos = 0   # in blocks
    care_data = []
    # offset_map entries are 4-tuples:
    #   (start block, length in blocks, file offset or None, fill data or None)
    # Exactly one of the last two is set: a file offset for raw chunks, a
    # 4-byte fill pattern for fill chunks.
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    # Walk the chunk table. Each chunk header is
    # (chunk_type, reserved, chunk_sz_in_blocks, total_sz_in_bytes); the
    # chunk's payload is total_sz minus the 12-byte header.
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        # Raw chunk: payload is chunk_sz full blocks of literal data.
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          # Record where the payload starts, then skip past it rather than
          # reading it now; _GetRangeData() seeks back here on demand.
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        # Fill chunk: a 4-byte pattern repeated to cover chunk_sz blocks.
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        # Don't-care chunk: no payload; just advances the output position.
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    # care_map covers every block backed by raw or fill data; offset_index
    # holds the (sorted) chunk start blocks for bisect lookups.
    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      # No map provided: treat the whole care region as one opaque file.
      self.file_map = {"__DATA": self.care_map}

  def ReadRangeSet(self, ranges):
    """Return the data in 'ranges' as a list of byte-string pieces."""
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    h = sha1()
    for d in self._GetRangeData(ranges):
      h.update(d)
    return h.hexdigest()

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'.  The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges'.

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e-s
      # Find the chunk containing block s: the last offset_map entry whose
      # start block is <= s.
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # for the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        # Raw chunk: seek to the block's byte offset within the payload.
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        # Fill chunk: replicate the 4-byte pattern (blocksize >> 2 copies
        # per block) instead of touching the file.
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    """Populate self.file_map from the block map file 'fn'.

    Each line of 'fn' is a file name followed by its block ranges. Blocks
    in the care_map not claimed by any file are classified into the
    synthetic entries "__ZERO" (all-zero blocks), "__NONZERO-%d" (groups of
    non-zero blocks), and "__COPY" (the clobbered_blocks, if any).
    """
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        # Line format: "<filename> <range string>".
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        # Every block a file claims must lie inside the care_map, and no
        # two files may claim the same block (each claim is subtracted
        # from 'remaining' below).
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (ie, those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize  # one block of zeroes, for comparison

    # Workaround for bug 23227672. For squashfs, we don't have a system.map. So
    # the whole system image will be treated as a single file. But for some
    # unknown bug, the updater will be killed due to OOM when writing back the
    # patched image to flash (observed on lenok-userdebug MEA49). Prior to
    # getting a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    # Examine each unclaimed block individually to decide zero vs non-zero.
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          # Raw chunk: read the block's bytes from the image file.
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            # Non-zero fill pattern; data is only compared against
            # 'reference' below, so None suffices to mark it non-zero.
            data = None

        # Zero and non-zero block lists are built as flat
        # [start, end, start, end, ...] pairs for RangeSet(data=...).
        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

        if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
          nonzero_groups.append(nonzero_blocks)
          # Clear the list.
          nonzero_blocks = []

    # Flush the final, partially-filled group of non-zero blocks.
    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}