Make releasetools pylint clean.
This cleanup caught a few real bugs and syntax errors: several
character classes in regex patterns were not properly escaped, some
indentation was illegal, and so on.
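
For reference, the most interesting bug class was backslash escapes
such as \S inside plain (non-raw) string literals used as regex
patterns. A minimal sketch of the before/after (the input line below is
made up for illustration; the real patterns touched are in
check_target_files_signatures.py and common.py):

    import re

    line = "service flash_recovery /system/bin/install-recovery.sh"

    # Before: \S and \s are undefined string escapes. CPython happens to
    # pass them through unchanged, so the pattern works by accident, but
    # pylint flags it (W1401, anomalous-backslash-in-string).
    m = re.match("^service flash_recovery /system/(\S+)\s*$", line)

    # After: a raw string leaves the backslashes to the regex engine.
    m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
    assert m and m.group(1) == "bin/install-recovery.sh"

The same sweep converts "except Foo, e" to "except Foo as e", replaces
old-style "raise ValueError, msg" with the call form, and rewrites
octal literals (0644 -> 0o644), all of which are also steps toward
Python 3 compatibility.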
Change-Id: I50637607524e68c4fb9cad7167f58a46b8d26b2c
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 8bbe452..97ed873 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -55,14 +55,14 @@
return
def output_sink(fn, data):
- ofile = open(os.path.join(OPTIONS.input_tmp,"SYSTEM",fn), "w")
- ofile.write(data)
- ofile.close()
+ ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w")
+ ofile.write(data)
+ ofile.close()
if OPTIONS.rebuild_recovery:
- print("Building new recovery patch")
- common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, boot_img,
- info_dict=OPTIONS.info_dict)
+ print "Building new recovery patch"
+ common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
+ boot_img, info_dict=OPTIONS.info_dict)
block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
@@ -88,7 +88,7 @@
block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict,
- block_list=block_list)
+ block_list=block_list)
common.ZipWrite(output_zip, imgname, prefix + "vendor.img")
common.ZipWrite(output_zip, block_list, prefix + "vendor.map")
@@ -110,18 +110,18 @@
try:
os.symlink(os.path.join(input_dir, what.upper()),
os.path.join(input_dir, what))
- except OSError, e:
- # bogus error on my mac version?
- # File "./build/tools/releasetools/img_from_target_files", line 86, in AddSystem
- # os.path.join(OPTIONS.input_tmp, "system"))
- # OSError: [Errno 17] File exists
- if (e.errno == errno.EEXIST):
+ except OSError as e:
+ # bogus error on my mac version?
+ # File "./build/tools/releasetools/img_from_target_files"
+ # os.path.join(OPTIONS.input_tmp, "system"))
+ # OSError: [Errno 17] File exists
+ if e.errno == errno.EEXIST:
pass
image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"]
if fstab:
- image_props["fs_type" ] = fstab["/" + what].fs_type
+ image_props["fs_type"] = fstab["/" + what].fs_type
if what == "system":
fs_config_prefix = ""
@@ -130,10 +130,12 @@
fs_config = os.path.join(
input_dir, "META/" + fs_config_prefix + "filesystem_config.txt")
- if not os.path.exists(fs_config): fs_config = None
+ if not os.path.exists(fs_config):
+ fs_config = None
fc_config = os.path.join(input_dir, "BOOT/RAMDISK/file_contexts")
- if not os.path.exists(fc_config): fc_config = None
+ if not os.path.exists(fc_config):
+ fc_config = None
succ = build_image.BuildImage(os.path.join(input_dir, what),
image_props, img,
@@ -173,7 +175,7 @@
fstab = OPTIONS.info_dict["fstab"]
if fstab:
- image_props["fs_type" ] = fstab["/data"].fs_type
+ image_props["fs_type"] = fstab["/data"].fs_type
succ = build_image.BuildImage(user_dir, image_props, img.name)
assert succ, "build userdata.img image failed"
@@ -210,7 +212,7 @@
fstab = OPTIONS.info_dict["fstab"]
if fstab:
- image_props["fs_type" ] = fstab["/cache"].fs_type
+ image_props["fs_type"] = fstab["/cache"].fs_type
succ = build_image.BuildImage(user_dir, image_props, img.name)
assert succ, "build cache.img image failed"
@@ -289,7 +291,7 @@
output_zip.close()
def main(argv):
- def option_handler(o, a):
+ def option_handler(o, _):
if o in ("-a", "--add_missing"):
OPTIONS.add_missing = True
elif o in ("-r", "--rebuild_recovery",):
@@ -298,12 +300,10 @@
return False
return True
- args = common.ParseOptions(argv, __doc__,
- extra_opts="ar",
- extra_long_opts=["add_missing",
- "rebuild_recovery",
- ],
- extra_option_handler=option_handler)
+ args = common.ParseOptions(
+ argv, __doc__, extra_opts="ar",
+ extra_long_opts=["add_missing", "rebuild_recovery"],
+ extra_option_handler=option_handler)
if len(args) != 1:
@@ -317,7 +317,7 @@
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError, e:
+ except common.ExternalError as e:
print
print " ERROR: %s" % (e,)
print
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 5b5c4cc..75379cd 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -20,17 +20,17 @@
import itertools
import multiprocessing
import os
-import pprint
import re
import subprocess
-import sys
import threading
import tempfile
-from rangelib import *
+from rangelib import RangeSet
+
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+
def compute_patch(src, tgt, imgdiff=False):
srcfd, srcfile = tempfile.mkstemp(prefix="src-")
tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
@@ -69,7 +69,16 @@
except OSError:
pass
-class EmptyImage(object):
+
+class Image(object):
+ def ReadRangeSet(self, ranges):
+ raise NotImplementedError
+
+ def TotalSha1(self):
+ raise NotImplementedError
+
+
+class EmptyImage(Image):
"""A zero-length image."""
blocksize = 4096
care_map = RangeSet()
@@ -81,7 +90,7 @@
return sha1().hexdigest()
-class DataImage(object):
+class DataImage(Image):
"""An image wrapped around a single string of data."""
def __init__(self, data, trim=False, pad=False):
@@ -126,9 +135,7 @@
return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
def TotalSha1(self):
- if not hasattr(self, "sha1"):
- self.sha1 = sha1(self.data).hexdigest()
- return self.sha1
+ return sha1(self.data).hexdigest()
class Transfer(object):
@@ -196,9 +203,13 @@
def __init__(self, tgt, src=None, threads=None, version=3):
if threads is None:
threads = multiprocessing.cpu_count() // 2
- if threads == 0: threads = 1
+ if threads == 0:
+ threads = 1
self.threads = threads
self.version = version
+ self.transfers = []
+ self.src_basenames = {}
+ self.src_numpatterns = {}
assert version in (1, 2, 3)
@@ -247,7 +258,7 @@
self.ComputePatches(prefix)
self.WriteTransfers(prefix)
- def HashBlocks(self, source, ranges):
+ def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
@@ -300,7 +311,7 @@
free_string = []
if self.version == 1:
- src_string = xf.src_ranges.to_string_raw()
+ src_str = xf.src_ranges.to_string_raw()
elif self.version >= 2:
# <# blocks> <src ranges>
@@ -310,7 +321,7 @@
# <# blocks> - <stash refs...>
size = xf.src_ranges.size()
- src_string = [str(size)]
+ src_str = [str(size)]
unstashed_src_ranges = xf.src_ranges
mapped_stashes = []
@@ -322,10 +333,10 @@
sr = xf.src_ranges.map_within(sr)
mapped_stashes.append(sr)
if self.version == 2:
- src_string.append("%d:%s" % (sid, sr.to_string_raw()))
+ src_str.append("%d:%s" % (sid, sr.to_string_raw()))
else:
assert sh in stashes
- src_string.append("%s:%s" % (sh, sr.to_string_raw()))
+ src_str.append("%s:%s" % (sh, sr.to_string_raw()))
stashes[sh] -= 1
if stashes[sh] == 0:
free_string.append("free %s\n" % (sh))
@@ -333,17 +344,17 @@
heapq.heappush(free_stash_ids, sid)
if unstashed_src_ranges:
- src_string.insert(1, unstashed_src_ranges.to_string_raw())
+ src_str.insert(1, unstashed_src_ranges.to_string_raw())
if xf.use_stash:
mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
- src_string.insert(2, mapped_unstashed.to_string_raw())
+ src_str.insert(2, mapped_unstashed.to_string_raw())
mapped_stashes.append(mapped_unstashed)
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
else:
- src_string.insert(1, "-")
+ src_str.insert(1, "-")
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- src_string = " ".join(src_string)
+ src_str = " ".join(src_str)
# all versions:
# zero <rangeset>
@@ -356,14 +367,14 @@
# move <src rangeset> <tgt rangeset>
#
# version 2:
- # bsdiff patchstart patchlen <tgt rangeset> <src_string>
- # imgdiff patchstart patchlen <tgt rangeset> <src_string>
- # move <tgt rangeset> <src_string>
+ # bsdiff patchstart patchlen <tgt rangeset> <src_str>
+ # imgdiff patchstart patchlen <tgt rangeset> <src_str>
+ # move <tgt rangeset> <src_str>
#
# version 3:
- # bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_string>
- # imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_string>
- # move hash <tgt rangeset> <src_string>
+ # bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
+ # imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
+ # move hash <tgt rangeset> <src_str>
tgt_size = xf.tgt_ranges.size()
@@ -383,12 +394,12 @@
elif self.version == 2:
out.append("%s %s %s\n" % (
xf.style,
- xf.tgt_ranges.to_string_raw(), src_string))
+ xf.tgt_ranges.to_string_raw(), src_str))
elif self.version >= 3:
out.append("%s %s %s %s\n" % (
xf.style,
self.HashBlocks(self.tgt, xf.tgt_ranges),
- xf.tgt_ranges.to_string_raw(), src_string))
+ xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style in ("bsdiff", "imgdiff"):
performs_read = True
@@ -401,14 +412,14 @@
elif self.version == 2:
out.append("%s %d %d %s %s\n" % (
xf.style, xf.patch_start, xf.patch_len,
- xf.tgt_ranges.to_string_raw(), src_string))
+ xf.tgt_ranges.to_string_raw(), src_str))
elif self.version >= 3:
out.append("%s %d %d %s %s %s %s\n" % (
xf.style,
xf.patch_start, xf.patch_len,
self.HashBlocks(self.src, xf.src_ranges),
self.HashBlocks(self.tgt, xf.tgt_ranges),
- xf.tgt_ranges.to_string_raw(), src_string))
+ xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style == "zero":
assert xf.tgt_ranges
@@ -417,7 +428,7 @@
out.append("%s %s\n" % (xf.style, to_zero.to_string_raw()))
total += to_zero.size()
else:
- raise ValueError, "unknown transfer style '%s'\n" % (xf.style,)
+ raise ValueError("unknown transfer style '%s'\n" % xf.style)
if free_string:
out.append("".join(free_string))
@@ -527,11 +538,13 @@
patches = [None] * patch_num
+ # TODO: Rewrite with multiprocessing.ThreadPool?
lock = threading.Lock()
def diff_worker():
while True:
with lock:
- if not diff_q: return
+ if not diff_q:
+ return
tgt_size, src, tgt, xf, patchnum = diff_q.pop()
patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
size = len(patch)
@@ -543,7 +556,7 @@
xf.tgt_name + " (from " + xf.src_name + ")")))
threads = [threading.Thread(target=diff_worker)
- for i in range(self.threads)]
+ for _ in range(self.threads)]
for th in threads:
th.start()
while threads:
@@ -670,8 +683,6 @@
stash_size = 0
for xf in self.transfers:
- lost = 0
- size = xf.src_ranges.size()
for u in xf.goes_before.copy():
# xf should go before u
if xf.order < u.order:
@@ -737,7 +748,8 @@
# Put all sinks at the end of the sequence.
while True:
sinks = [u for u in G if not u.outgoing]
- if not sinks: break
+ if not sinks:
+ break
for u in sinks:
s2.appendleft(u)
del G[u]
@@ -747,14 +759,16 @@
# Put all the sources at the beginning of the sequence.
while True:
sources = [u for u in G if not u.incoming]
- if not sources: break
+ if not sources:
+ break
for u in sources:
s1.append(u)
del G[u]
for iu in u.outgoing:
del iu.incoming[u]
- if not G: break
+ if not G:
+ break
# Find the "best" vertex to put next. "Best" is the one that
# maximizes the net difference in source blocks saved we get by
@@ -792,7 +806,8 @@
print("Generating digraph...")
for a in self.transfers:
for b in self.transfers:
- if a is b: continue
+ if a is b:
+ continue
# If the blocks written by A are read by B, then B needs to go before A.
i = a.tgt_ranges.intersect(b.src_ranges)
@@ -807,7 +822,6 @@
a.goes_after[b] = size
def FindTransfers(self):
- self.transfers = []
empty = RangeSet()
for tgt_fn, tgt_ranges in self.tgt.file_map.items():
if tgt_fn == "__ZERO":
@@ -847,9 +861,6 @@
Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
def AbbreviateSourceNames(self):
- self.src_basenames = {}
- self.src_numpatterns = {}
-
for k in self.src.file_map.keys():
b = os.path.basename(k)
self.src_basenames[b] = k
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 55f0058..359489f 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -72,14 +72,15 @@
"""
success, verity_tree_size = GetVerityTreeSize(partition_size)
if not success:
- return 0;
+ return 0
success, verity_metadata_size = GetVerityMetadataSize(partition_size)
if not success:
return 0
return partition_size - verity_tree_size - verity_metadata_size
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
- cmd = ("build_verity_tree -A %s %s %s" % (FIXED_SALT, sparse_image_path, verity_image_path))
+ cmd = "build_verity_tree -A %s %s %s" % (
+ FIXED_SALT, sparse_image_path, verity_image_path)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
@@ -92,14 +93,10 @@
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key):
- cmd = ("system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s" %
- (image_size,
- verity_metadata_path,
- root_hash,
- salt,
- block_device,
- signer_path,
- key))
+ cmd_template = (
+ "system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s")
+ cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
+ block_device, signer_path, key)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
@@ -125,10 +122,13 @@
return False
return True
-def BuildVerifiedImage(data_image_path, verity_image_path, verity_metadata_path):
- if not Append2Simg(data_image_path, verity_metadata_path, "Could not append verity metadata!"):
+def BuildVerifiedImage(data_image_path, verity_image_path,
+ verity_metadata_path):
+ if not Append2Simg(data_image_path, verity_metadata_path,
+ "Could not append verity metadata!"):
return False
- if not Append2Simg(data_image_path, verity_image_path, "Could not append verity tree!"):
+ if not Append2Simg(data_image_path, verity_image_path,
+ "Could not append verity tree!"):
return False
return True
@@ -153,7 +153,8 @@
Args:
out_file: the location to write the verifiable image at
- prop_dict: a dictionary of properties required for image creation and verification
+ prop_dict: a dictionary of properties required for image creation and
+ verification
Returns:
True on success, False otherwise.
"""
@@ -178,13 +179,8 @@
# build the metadata blocks
root_hash = prop_dict["verity_root_hash"]
salt = prop_dict["verity_salt"]
- if not BuildVerityMetadata(image_size,
- verity_metadata_path,
- root_hash,
- salt,
- block_dev,
- signer_path,
- signer_key):
+ if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
+ block_dev, signer_path, signer_key):
shutil.rmtree(tempdir_name, ignore_errors=True)
return False
@@ -223,7 +219,8 @@
is_verity_partition = "verity_block_device" in prop_dict
verity_supported = prop_dict.get("verity") == "true"
- # adjust the partition size to make room for the hashes if this is to be verified
+ # adjust the partition size to make room for the hashes if this is to be
+ # verified
if verity_supported and is_verity_partition:
partition_size = int(prop_dict.get("partition_size"))
adjusted_size = AdjustPartitionSizeForVerity(partition_size)
@@ -329,7 +326,8 @@
d["mount_point"] = mount_point
if mount_point == "system":
copy_prop("fs_type", "fs_type")
- # Copy the generic sysetem fs type first, override with specific one if available.
+ # Copy the generic system fs type first, override with specific one if
+ # available.
copy_prop("system_fs_type", "fs_type")
copy_prop("system_size", "partition_size")
copy_prop("system_journal_size", "journal_size")
@@ -397,7 +395,8 @@
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
if not BuildImage(in_dir, image_properties, out_file):
- print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir)
+ print >> sys.stderr, "error: failed to build %s from %s" % (out_file,
+ in_dir)
exit(1)
diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures
new file mode 120000
index 0000000..9f62aa3
--- /dev/null
+++ b/tools/releasetools/check_target_files_signatures
@@ -0,0 +1 @@
+check_target_files_signatures.py
\ No newline at end of file
diff --git a/tools/releasetools/check_target_files_signatures b/tools/releasetools/check_target_files_signatures.py
similarity index 96%
rename from tools/releasetools/check_target_files_signatures
rename to tools/releasetools/check_target_files_signatures.py
index b2f46c1..dd57033 100755
--- a/tools/releasetools/check_target_files_signatures
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -49,14 +49,8 @@
import re
import shutil
import subprocess
-import tempfile
import zipfile
-try:
- from hashlib import sha1 as sha1
-except ImportError:
- from sha import sha as sha1
-
import common
# Work around a bug in python's zipfile module that prevents opening
@@ -127,7 +121,7 @@
def FindLocalCerts(self):
to_load = []
for top in OPTIONS.local_cert_dirs:
- for dirpath, dirnames, filenames in os.walk(top):
+ for dirpath, _, filenames in os.walk(top):
certs = [os.path.join(dirpath, i)
for i in filenames if i.endswith(".x509.pem")]
if certs:
@@ -172,6 +166,10 @@
class APK(object):
def __init__(self, full_filename, filename):
self.filename = filename
+ self.certs = None
+ self.shared_uid = None
+ self.package = None
+
Push(filename+":")
try:
self.RecordCerts(full_filename)
@@ -212,7 +210,7 @@
for line in manifest.split("\n"):
line = line.strip()
- m = re.search('A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line)
+ m = re.search(r'A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line)
if m:
name = m.group(1)
if name == "android:sharedUserId":
@@ -232,13 +230,16 @@
def __init__(self):
self.max_pkg_len = 30
self.max_fn_len = 20
+ self.apks = None
+ self.apks_by_basename = None
+ self.certmap = None
def LoadZipFile(self, filename):
d, z = common.UnzipTemp(filename, '*.apk')
try:
self.apks = {}
self.apks_by_basename = {}
- for dirpath, dirnames, filenames in os.walk(d):
+ for dirpath, _, filenames in os.walk(d):
for fn in filenames:
if fn.endswith(".apk"):
fullname = os.path.join(dirpath, fn)
@@ -323,8 +324,8 @@
"""Look for instances where a given package that exists in both
self and other have different certs."""
- all = set(self.apks.keys())
- all.update(other.apks.keys())
+ all_apks = set(self.apks.keys())
+ all_apks.update(other.apks.keys())
max_pkg_len = max(self.max_pkg_len, other.max_pkg_len)
@@ -434,7 +435,7 @@
try:
r = main(sys.argv[1:])
sys.exit(r)
- except common.ExternalError, e:
+ except common.ExternalError as e:
print
print " ERROR: %s" % (e,)
print
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 6903dc66..63e438a 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -30,44 +30,45 @@
import zipfile
import blockimgdiff
-from rangelib import *
+import rangelib
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
-# missing in Python 2.4 and before
-if not hasattr(os, "SEEK_SET"):
- os.SEEK_SET = 0
-class Options(object): pass
-OPTIONS = Options()
-
-DEFAULT_SEARCH_PATH_BY_PLATFORM = {
- "linux2": "out/host/linux-x86",
- "darwin": "out/host/darwin-x86",
+class Options(object):
+ def __init__(self):
+ platform_search_path = {
+ "linux2": "out/host/linux-x86",
+ "darwin": "out/host/darwin-x86",
}
-OPTIONS.search_path = DEFAULT_SEARCH_PATH_BY_PLATFORM.get(sys.platform, None)
-OPTIONS.signapk_path = "framework/signapk.jar" # Relative to search_path
-OPTIONS.extra_signapk_args = []
-OPTIONS.java_path = "java" # Use the one on the path by default.
-OPTIONS.java_args = "-Xmx2048m" # JVM Args
-OPTIONS.public_key_suffix = ".x509.pem"
-OPTIONS.private_key_suffix = ".pk8"
-OPTIONS.verbose = False
-OPTIONS.tempfiles = []
-OPTIONS.device_specific = None
-OPTIONS.extras = {}
-OPTIONS.info_dict = None
+ self.search_path = platform_search_path.get(sys.platform, None)
+ self.signapk_path = "framework/signapk.jar" # Relative to search_path
+ self.extra_signapk_args = []
+ self.java_path = "java" # Use the one on the path by default.
+ self.java_args = "-Xmx2048m" # JVM Args
+ self.public_key_suffix = ".x509.pem"
+ self.private_key_suffix = ".pk8"
+ self.verbose = False
+ self.tempfiles = []
+ self.device_specific = None
+ self.extras = {}
+ self.info_dict = None
+ self.worker_threads = None
+
+
+OPTIONS = Options()
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
-class ExternalError(RuntimeError): pass
+class ExternalError(RuntimeError):
+ pass
def Run(args, **kwargs):
@@ -94,19 +95,19 @@
pass
-def LoadInfoDict(input):
+def LoadInfoDict(input_file):
"""Read and parse the META/misc_info.txt key/value pairs from the
input target files and return a dict."""
def read_helper(fn):
- if isinstance(input, zipfile.ZipFile):
- return input.read(fn)
+ if isinstance(input_file, zipfile.ZipFile):
+ return input_file.read(fn)
else:
- path = os.path.join(input, *fn.split("/"))
+ path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
return f.read()
- except IOError, e:
+ except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError(fn)
d = {}
@@ -122,14 +123,16 @@
if "mkyaffs2_extra_flags" not in d:
try:
- d["mkyaffs2_extra_flags"] = read_helper("META/mkyaffs2-extra-flags.txt").strip()
+ d["mkyaffs2_extra_flags"] = read_helper(
+ "META/mkyaffs2-extra-flags.txt").strip()
except KeyError:
# ok if flags don't exist
pass
if "recovery_api_version" not in d:
try:
- d["recovery_api_version"] = read_helper("META/recovery-api-version.txt").strip()
+ d["recovery_api_version"] = read_helper(
+ "META/recovery-api-version.txt").strip()
except KeyError:
raise ValueError("can't find recovery API version in input target-files")
@@ -146,9 +149,11 @@
try:
data = read_helper("META/imagesizes.txt")
for line in data.split("\n"):
- if not line: continue
+ if not line:
+ continue
name, value = line.split(" ", 1)
- if not value: continue
+ if not value:
+ continue
if name == "blocksize":
d[name] = value
else:
@@ -186,7 +191,8 @@
d = {}
for line in lines:
line = line.strip()
- if not line or line.startswith("#"): continue
+ if not line or line.startswith("#"):
+ continue
if "=" in line:
name, value = line.split("=", 1)
d[name] = value
@@ -194,7 +200,12 @@
def LoadRecoveryFSTab(read_helper, fstab_version):
class Partition(object):
- pass
+ def __init__(self, mount_point, fs_type, device, length, device2):
+ self.mount_point = mount_point
+ self.fs_type = fs_type
+ self.device = device
+ self.length = length
+ self.device2 = device2
try:
data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
@@ -206,68 +217,65 @@
d = {}
for line in data.split("\n"):
line = line.strip()
- if not line or line.startswith("#"): continue
+ if not line or line.startswith("#"):
+ continue
pieces = line.split()
- if not (3 <= len(pieces) <= 4):
+ if not 3 <= len(pieces) <= 4:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
-
- p = Partition()
- p.mount_point = pieces[0]
- p.fs_type = pieces[1]
- p.device = pieces[2]
- p.length = 0
options = None
if len(pieces) >= 4:
if pieces[3].startswith("/"):
- p.device2 = pieces[3]
+ device2 = pieces[3]
if len(pieces) >= 5:
options = pieces[4]
else:
- p.device2 = None
+ device2 = None
options = pieces[3]
else:
- p.device2 = None
+ device2 = None
+ mount_point = pieces[0]
+ length = 0
if options:
options = options.split(",")
for i in options:
if i.startswith("length="):
- p.length = int(i[7:])
+ length = int(i[7:])
else:
- print "%s: unknown option \"%s\"" % (p.mount_point, i)
+ print "%s: unknown option \"%s\"" % (mount_point, i)
- d[p.mount_point] = p
+ d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
+ device=pieces[2], length=length,
+ device2=device2)
elif fstab_version == 2:
d = {}
for line in data.split("\n"):
line = line.strip()
- if not line or line.startswith("#"): continue
+ if not line or line.startswith("#"):
+ continue
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# Ignore entries that are managed by vold
options = pieces[4]
- if "voldmanaged=" in options: continue
+ if "voldmanaged=" in options:
+ continue
# It's a good line, parse it
- p = Partition()
- p.device = pieces[0]
- p.mount_point = pieces[1]
- p.fs_type = pieces[2]
- p.device2 = None
- p.length = 0
-
+ length = 0
options = options.split(",")
for i in options:
if i.startswith("length="):
- p.length = int(i[7:])
+ length = int(i[7:])
else:
# Ignore all unknown options in the unified fstab
continue
- d[p.mount_point] = p
+ mount_point = pieces[1]
+ d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
+ device=pieces[0], length=length, device2=None)
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
@@ -279,6 +287,7 @@
for k, v in sorted(d.items()):
print "%-25s = (%s) %s" % (k, type(v).__name__, v)
+
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Return the image
@@ -305,8 +314,8 @@
p2.wait()
p1.wait()
- assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,)
- assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,)
+ assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
+ assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
@@ -347,7 +356,8 @@
if info_dict.get("verity_key", None):
path = "/" + os.path.basename(sourcedir).lower()
- cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8", info_dict["verity_key"] + ".x509.pem", img.name]
+ cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8",
+ info_dict["verity_key"] + ".x509.pem", img.name]
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "boot_signer of %s image failed" % path
@@ -453,7 +463,7 @@
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
+ _, stderr = p.communicate()
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
@@ -524,20 +534,23 @@
any, for the given target. Raise exception if the data is too big.
Print a warning if the data is nearing the maximum size."""
- if target.endswith(".img"): target = target[:-4]
+ if target.endswith(".img"):
+ target = target[:-4]
mount_point = "/" + target
fs_type = None
limit = None
if info_dict["fstab"]:
- if mount_point == "/userdata": mount_point = "/data"
+ if mount_point == "/userdata":
+ mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
if "/" in device:
device = device[device.rfind("/")+1:]
limit = info_dict.get(device + "_size", None)
- if not fs_type or not limit: return
+ if not fs_type or not limit:
+ return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
@@ -562,7 +575,8 @@
certmap = {}
for line in tf_zip.read("META/apkcerts.txt").split("\n"):
line = line.strip()
- if not line: continue
+ if not line:
+ continue
m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
r'private_key="(.*)"$', line)
if m:
@@ -622,13 +636,11 @@
"java_path=", "java_args=", "public_key_suffix=",
"private_key_suffix=", "device_specific=", "extra="] +
list(extra_long_opts))
- except getopt.GetoptError, err:
+ except getopt.GetoptError as err:
Usage(docstring)
print "**", str(err), "**"
sys.exit(2)
- path_specified = False
-
for o, a in opts:
if o in ("-h", "--help"):
Usage(docstring)
@@ -707,7 +719,8 @@
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
- if not missing: return current
+ if not missing:
+ return current
for i in missing:
current[i] = ""
@@ -721,7 +734,7 @@
current = self.UpdateAndReadFile(current)
- def PromptResult(self, current):
+ def PromptResult(self, current): # pylint: disable=no-self-use
"""Prompt the user to enter a value (password) for each key in
'current' whose value is false. Returns a new dict with all the
values.
@@ -732,9 +745,10 @@
result[k] = v
else:
while True:
- result[k] = getpass.getpass("Enter password for %s key> "
- % (k,)).strip()
- if result[k]: break
+ result[k] = getpass.getpass(
+ "Enter password for %s key> " % k).strip()
+ if result[k]:
+ break
return result
def UpdateAndReadFile(self, current):
@@ -742,14 +756,13 @@
return self.PromptResult(current)
f = open(self.pwfile, "w")
- os.chmod(self.pwfile, 0600)
+ os.chmod(self.pwfile, 0o600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
- sorted = [(not v, k, v) for (k, v) in current.iteritems()]
- sorted.sort()
- for i, (_, k, v) in enumerate(sorted):
+ sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
+ for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
@@ -763,19 +776,21 @@
def ReadFile(self):
result = {}
- if self.pwfile is None: return result
+ if self.pwfile is None:
+ return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
- if not line or line[0] == '#': continue
+ if not line or line[0] == '#':
+ continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print "failed to parse password file: ", line
else:
result[m.group(2)] = m.group(1)
f.close()
- except IOError, e:
+ except IOError as e:
if e.errno != errno.ENOENT:
print "error reading password file: ", str(e)
return result
@@ -821,16 +836,16 @@
zipfile.ZIP64_LIMIT = saved_zip64_limit
-def ZipWriteStr(zip, filename, data, perms=0644, compression=None):
+def ZipWriteStr(zip_file, filename, data, perms=0o644, compression=None):
# use a fixed timestamp so the output is repeatable.
zinfo = zipfile.ZipInfo(filename=filename,
date_time=(2009, 1, 1, 0, 0, 0))
if compression is None:
- zinfo.compress_type = zip.compression
+ zinfo.compress_type = zip_file.compression
else:
zinfo.compress_type = compression
zinfo.external_attr = perms << 16
- zip.writestr(zinfo, data)
+ zip_file.writestr(zinfo, data)
class DeviceSpecificParams(object):
@@ -845,7 +860,8 @@
if self.module is None:
path = OPTIONS.device_specific
- if not path: return
+ if not path:
+ return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
@@ -983,7 +999,8 @@
err = []
def run():
_, e = p.communicate()
- if e: err.append(e)
+ if e:
+ err.append(e)
th = threading.Thread(target=run)
th.start()
th.join(timeout=300) # 5 mins
@@ -1050,7 +1067,7 @@
print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
- except Exception, e:
+ except Exception as e:
print e
raise
@@ -1063,8 +1080,9 @@
threads.pop().join()
-class BlockDifference:
- def __init__(self, partition, tgt, src=None, check_first_block=False, version=None):
+class BlockDifference(object):
+ def __init__(self, partition, tgt, src=None, check_first_block=False,
+ version=None):
self.tgt = tgt
self.src = src
self.partition = partition
@@ -1094,7 +1112,8 @@
else:
script.Print("Patching %s image after verification." % (self.partition,))
- if progress: script.ShowProgress(progress, 0)
+ if progress:
+ script.ShowProgress(progress, 0)
self._WriteUpdate(script, output_zip)
def WriteVerifyScript(self, script):
@@ -1108,11 +1127,11 @@
'"%s.new.dat", "%s.patch.dat") then') %
(self.device, partition, partition, partition))
else:
- script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' %
- (self.device, self.src.care_map.to_string_raw(),
- self.src.TotalSha1()))
+ script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+ self.device, self.src.care_map.to_string_raw(),
+ self.src.TotalSha1()))
script.Print('Verified %s image...' % (partition,))
- script.AppendExtra('else');
+ script.AppendExtra('else')
# When generating incrementals for the system and vendor partitions,
# explicitly check the first block (which contains the superblock) of
@@ -1147,9 +1166,9 @@
'package_extract_file("{partition}.transfer.list"), '
'"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
device=self.device, partition=self.partition))
- script.AppendExtra(script._WordWrap(call))
+ script.AppendExtra(script.WordWrap(call))
- def _HashBlocks(self, source, ranges):
+ def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
@@ -1159,8 +1178,8 @@
return ctx.hexdigest()
def _CheckFirstBlock(self, script):
- r = RangeSet((0, 1))
- srchash = self._HashBlocks(self.src, r);
+ r = rangelib.RangeSet((0, 1))
+ srchash = self._HashBlocks(self.src, r)
script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
'abort("%s has been remounted R/W; '
@@ -1172,16 +1191,21 @@
# map recovery.fstab's fs_types to mount/format "partition types"
-PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD",
- "ext4": "EMMC", "emmc": "EMMC",
- "f2fs": "EMMC" }
+PARTITION_TYPES = {
+ "yaffs2": "MTD",
+ "mtd": "MTD",
+ "ext4": "EMMC",
+ "emmc": "EMMC",
+ "f2fs": "EMMC"
+}
def GetTypeAndDevice(mount_point, info):
fstab = info["fstab"]
if fstab:
- return PARTITION_TYPES[fstab[mount_point].fs_type], fstab[mount_point].device
+ return (PARTITION_TYPES[fstab[mount_point].fs_type],
+ fstab[mount_point].device)
else:
- return None
+ raise KeyError
def ParseCertificate(data):
@@ -1243,16 +1267,15 @@
else
log -t recovery "Recovery image already installed"
fi
-""" % { 'boot_size': boot_img.size,
- 'boot_sha1': boot_img.sha1,
- 'recovery_size': recovery_img.size,
- 'recovery_sha1': recovery_img.sha1,
- 'boot_type': boot_type,
- 'boot_device': boot_device,
- 'recovery_type': recovery_type,
- 'recovery_device': recovery_device,
- 'bonus_args': bonus_args,
- }
+""" % {'boot_size': boot_img.size,
+ 'boot_sha1': boot_img.sha1,
+ 'recovery_size': recovery_img.size,
+ 'recovery_sha1': recovery_img.sha1,
+ 'boot_type': boot_type,
+ 'boot_device': boot_device,
+ 'recovery_type': recovery_type,
+ 'recovery_device': recovery_device,
+ 'bonus_args': bonus_args}
# The install script location moved from /system/etc to /system/bin
# in the L release. Parse the init.rc file to find out where the
@@ -1261,12 +1284,12 @@
try:
with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f:
for line in f:
- m = re.match("^service flash_recovery /system/(\S+)\s*$", line)
+ m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
if m:
sh_location = m.group(1)
print "putting script in", sh_location
break
- except (OSError, IOError), e:
+ except (OSError, IOError) as e:
print "failed to read init.rc: %s" % (e,)
output_sink(sh_location, sh)
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 934d751..3d0da88 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
import re
import common
@@ -36,7 +35,7 @@
return x
@staticmethod
- def _WordWrap(cmd, linelen=80):
+ def WordWrap(cmd, linelen=80):
"""'cmd' should be a function call with null characters after each
parameter (eg, "somefun(foo,\0bar,\0baz)"). This function wraps cmd
to a given line length, replacing nulls with spaces and/or newlines
@@ -77,32 +76,30 @@
cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
'abort("This package expects the value \\"{value}\\" for '
'\\"{name}\\" on the OEM partition; this has value \\"" + '
- 'file_getprop("/oem/oem.prop", "{name}") + "\\".");'
- ).format(name=name, value=value)
+ 'file_getprop("/oem/oem.prop", "{name}") + "\\".");').format(
+ name=name, value=value)
self.script.append(cmd)
def AssertSomeFingerprint(self, *fp):
"""Assert that the current recovery build fingerprint is one of *fp."""
if not fp:
raise ValueError("must specify some fingerprints")
- cmd = (
- ' ||\n '.join([('getprop("ro.build.fingerprint") == "%s"')
- % i for i in fp]) +
+ cmd = (' ||\n '.join([('getprop("ro.build.fingerprint") == "%s"') % i
+ for i in fp]) +
' ||\n abort("Package expects build fingerprint of %s; this '
- 'device has " + getprop("ro.build.fingerprint") + ".");'
- ) % (" or ".join(fp),)
+ 'device has " + getprop("ro.build.fingerprint") + ".");') % (
+ " or ".join(fp))
self.script.append(cmd)
def AssertSomeThumbprint(self, *fp):
"""Assert that the current recovery build thumbprint is one of *fp."""
if not fp:
raise ValueError("must specify some thumbprints")
- cmd = (
- ' ||\n '.join([('getprop("ro.build.thumbprint") == "%s"')
- % i for i in fp]) +
+ cmd = (' ||\n '.join([('getprop("ro.build.thumbprint") == "%s"') % i
+ for i in fp]) +
' ||\n abort("Package expects build thumbprint of %s; this '
- 'device has " + getprop("ro.build.thumbprint") + ".");'
- ) % (" or ".join(fp),)
+ 'device has " + getprop("ro.build.thumbprint") + ".");') % (
+ " or ".join(fp))
self.script.append(cmd)
def AssertOlderBuild(self, timestamp, timestamp_text):
@@ -111,15 +108,15 @@
self.script.append(
('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
'abort("Can\'t install this package (%s) over newer '
- 'build (" + getprop("ro.build.date") + ").");'
- ) % (timestamp, timestamp_text))
+ 'build (" + getprop("ro.build.date") + ").");') % (timestamp,
+ timestamp_text))
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
cmd = ('getprop("ro.product.device") == "%s" || '
'abort("This package is for \\"%s\\" devices; '
- 'this is a \\"" + getprop("ro.product.device") + "\\".");'
- ) % (device, device)
+ 'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
+ device, device)
self.script.append(cmd)
def AssertSomeBootloader(self, *bootloaders):
@@ -128,7 +125,7 @@
" ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]) +
");")
- self.script.append(self._WordWrap(cmd))
+ self.script.append(self.WordWrap(cmd))
def ShowProgress(self, frac, dur):
"""Update the progress bar, advancing it over 'frac' over the next
@@ -180,9 +177,9 @@
if "=" in option:
key, value = option.split("=", 1)
mount_dict[key] = value
- self.script.append('mount("%s", "%s", "%s", "%s", "%s");' %
- (p.fs_type, common.PARTITION_TYPES[p.fs_type],
- p.device, p.mount_point, mount_dict.get(p.fs_type, "")))
+ self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % (
+ p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device,
+ p.mount_point, mount_dict.get(p.fs_type, "")))
self.mounts.add(p.mount_point)
def UnpackPackageDir(self, src, dst):
@@ -205,18 +202,17 @@
fstab = self.info.get("fstab", None)
if fstab:
p = fstab[partition]
- if (p.fs_type not in ( "ext2", "ext3", "ext4")):
+ if p.fs_type not in ("ext2", "ext3", "ext4"):
raise ValueError("Partition %s cannot be tuned\n" % (partition,))
- self.script.append('tune2fs(' +
- "".join(['"%s", ' % (i,) for i in options]) +
- '"%s") || abort("Failed to tune partition %s");'
- % ( p.device,partition));
+ self.script.append(
+ 'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
+ '"%s") || abort("Failed to tune partition %s");' % (
+ p.device, partition))
def FormatPartition(self, partition):
"""Format the given partition, specified by its mount point (eg,
"/system")."""
- reserve_size = 0
fstab = self.info.get("fstab", None)
if fstab:
p = fstab[partition]
@@ -235,9 +231,10 @@
def DeleteFiles(self, file_list):
"""Delete all files in file_list."""
- if not file_list: return
+ if not file_list:
+ return
cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
- self.script.append(self._WordWrap(cmd))
+ self.script.append(self.WordWrap(cmd))
def RenameFile(self, srcfile, tgtfile):
"""Moves a file from one location to another."""
@@ -251,7 +248,7 @@
skip the action if the file exists. Used when a patch
is later renamed."""
cmd = ('sha1_check(read_file("%s"), %s) || ' % (tgtfile, tgtsha1))
- self.script.append(self._WordWrap(cmd))
+ self.script.append(self.WordWrap(cmd))
def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
"""Apply binary patches (in *patchpairs) to the given srcfile to
@@ -265,7 +262,7 @@
cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2])
cmd.append(');')
cmd = "".join(cmd)
- self.script.append(self._WordWrap(cmd))
+ self.script.append(self.WordWrap(cmd))
def WriteRawImage(self, mount_point, fn, mapfn=None):
"""Write the given package file into the partition for the given
@@ -289,33 +286,37 @@
self.script.append(
'package_extract_file("%(fn)s", "%(device)s");' % args)
else:
- raise ValueError("don't know how to write \"%s\" partitions" % (p.fs_type,))
+ raise ValueError(
+ "don't know how to write \"%s\" partitions" % p.fs_type)
def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
"""Set file ownership and permissions."""
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
else:
- if capabilities is None: capabilities = "0x0"
+ if capabilities is None:
+ capabilities = "0x0"
cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
'"capabilities", %s' % (fn, uid, gid, mode, capabilities)
if selabel is not None:
- cmd += ', "selabel", "%s"' % ( selabel )
+ cmd += ', "selabel", "%s"' % selabel
cmd += ');'
self.script.append(cmd)
- def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel, capabilities):
+ def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
+ capabilities):
"""Recursively set path ownership and permissions."""
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
% (uid, gid, dmode, fmode, fn))
else:
- if capabilities is None: capabilities = "0x0"
+ if capabilities is None:
+ capabilities = "0x0"
cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
'"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
% (fn, uid, gid, dmode, fmode, capabilities)
if selabel is not None:
- cmd += ', "selabel", "%s"' % ( selabel )
+ cmd += ', "selabel", "%s"' % selabel
cmd += ');'
self.script.append(cmd)
@@ -328,15 +329,15 @@
for dest, links in sorted(by_dest.iteritems()):
cmd = ('symlink("%s", ' % (dest,) +
",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
- self.script.append(self._WordWrap(cmd))
+ self.script.append(self.WordWrap(cmd))
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
def Unmount(self, mount_point):
- self.script.append('unmount("%s");' % (mount_point,))
- self.mounts.remove(mount_point);
+ self.script.append('unmount("%s");' % mount_point)
+ self.mounts.remove(mount_point)
def UnmountAll(self):
for p in sorted(self.mounts):
@@ -359,4 +360,4 @@
else:
data = open(input_path, "rb").read()
common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
- data, perms=0755)
+ data, perms=0o755)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 4dda0b7..8c5acd8 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -32,18 +32,10 @@
print >> sys.stderr, "Python 2.7 or newer is required."
sys.exit(1)
-import errno
import os
-import re
import shutil
-import subprocess
-import tempfile
import zipfile
-# missing in Python 2.4 and before
-if not hasattr(os, "SEEK_SET"):
- os.SEEK_SET = 0
-
import common
OPTIONS = common.OPTIONS
@@ -58,7 +50,7 @@
def main(argv):
bootable_only = [False]
- def option_handler(o, a):
+ def option_handler(o, _):
if o in ("-z", "--bootable_zip"):
bootable_only[0] = True
else:
@@ -116,7 +108,7 @@
boot_image = common.GetBootableImage(
"boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
if boot_image:
- boot_image.AddToZip(output_zip)
+ boot_image.AddToZip(output_zip)
recovery_image = common.GetBootableImage(
"recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
if recovery_image:
@@ -157,7 +149,7 @@
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError, e:
+ except common.ExternalError as e:
print
print " ERROR: %s" % (e,)
print
diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch
new file mode 120000
index 0000000..45cec08
--- /dev/null
+++ b/tools/releasetools/make_recovery_patch
@@ -0,0 +1 @@
+make_recovery_patch.py
\ No newline at end of file
diff --git a/tools/releasetools/make_recovery_patch b/tools/releasetools/make_recovery_patch.py
similarity index 100%
rename from tools/releasetools/make_recovery_patch
rename to tools/releasetools/make_recovery_patch.py
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
new file mode 120000
index 0000000..6755a90
--- /dev/null
+++ b/tools/releasetools/ota_from_target_files
@@ -0,0 +1 @@
+ota_from_target_files.py
\ No newline at end of file
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files.py
similarity index 86%
rename from tools/releasetools/ota_from_target_files
rename to tools/releasetools/ota_from_target_files.py
index b71baf9..26fbaf0 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files.py
@@ -88,21 +88,13 @@
sys.exit(1)
import copy
-import errno
import multiprocessing
import os
-import re
-import subprocess
import tempfile
-import time
import zipfile
-from hashlib import sha1 as sha1
-
import common
import edify_generator
-import build_image
-import blockimgdiff
import sparse_img
OPTIONS = common.OPTIONS
@@ -130,7 +122,8 @@
"""Given a dict, return the key corresponding to the largest
value. Returns 'default' if the dict is empty."""
x = [(v, k) for (k, v) in d.iteritems()]
- if not x: return default
+ if not x:
+ return default
x.sort()
return x[-1][1]
@@ -138,12 +131,12 @@
def IsSymlink(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
symlink."""
- return (info.external_attr >> 16) == 0120777
+ return (info.external_attr >> 16) == 0o120777
def IsRegular(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
symlink."""
- return (info.external_attr >> 28) == 010
+ return (info.external_attr >> 28) == 0o10
def ClosestFileMatch(src, tgtfiles, existing):
"""Returns the closest file match between a source file and list
@@ -170,15 +163,15 @@
return result
return None
-class ItemSet:
+class ItemSet(object):
def __init__(self, partition, fs_config):
self.partition = partition
self.fs_config = fs_config
self.ITEMS = {}
- def Get(self, name, dir=False):
+ def Get(self, name, is_dir=False):
if name not in self.ITEMS:
- self.ITEMS[name] = Item(self, name, dir=dir)
+ self.ITEMS[name] = Item(self, name, is_dir=is_dir)
return self.ITEMS[name]
def GetMetadata(self, input_zip):
@@ -187,7 +180,8 @@
output = input_zip.read(self.fs_config)
for line in output.split("\n"):
- if not line: continue
+ if not line:
+ continue
columns = line.split()
name, uid, gid, mode = columns[:4]
selabel = None
@@ -209,20 +203,22 @@
i.mode = int(mode, 8)
i.selabel = selabel
i.capabilities = capabilities
- if i.dir:
+ if i.is_dir:
i.children.sort(key=lambda i: i.name)
# set metadata for the files generated by this script.
i = self.ITEMS.get("system/recovery-from-boot.p", None)
- if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0644, None, None
+ if i:
+ i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
i = self.ITEMS.get("system/etc/install-recovery.sh", None)
- if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0544, None, None
+ if i:
+ i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None
-class Item:
+class Item(object):
"""Items represent the metadata (user, group, mode) of files and
directories in the system image."""
- def __init__(self, itemset, name, dir=False):
+ def __init__(self, itemset, name, is_dir=False):
self.itemset = itemset
self.name = name
self.uid = None
@@ -230,22 +226,26 @@
self.mode = None
self.selabel = None
self.capabilities = None
- self.dir = dir
+ self.is_dir = is_dir
+ self.descendants = None
+ self.best_subtree = None
if name:
- self.parent = itemset.Get(os.path.dirname(name), dir=True)
+ self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
self.parent.children.append(self)
else:
self.parent = None
- if dir:
+ if self.is_dir:
self.children = []
def Dump(self, indent=0):
if self.uid is not None:
- print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
+ print "%s%s %d %d %o" % (
+ " " * indent, self.name, self.uid, self.gid, self.mode)
else:
- print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
- if self.dir:
+ print "%s%s %s %s %s" % (
+ " " * indent, self.name, self.uid, self.gid, self.mode)
+ if self.is_dir:
print "%s%s" % (" "*indent, self.descendants)
print "%s%s" % (" "*indent, self.best_subtree)
for i in self.children:
@@ -253,21 +253,24 @@
def CountChildMetadata(self):
"""Count up the (uid, gid, mode, selabel, capabilities) tuples for
- all children and determine the best strategy for using set_perm_recursive and
- set_perm to correctly chown/chmod all the files to their desired
+ all children and determine the best strategy for using set_perm_recursive
+ and set_perm to correctly chown/chmod all the files to their desired
values. Recursively calls itself for all descendants.
- Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count} counting up
- all descendants of this node. (dmode or fmode may be None.) Also
- sets the best_subtree of each directory Item to the (uid, gid,
- dmode, fmode, selabel, capabilities) tuple that will match the most
- descendants of that Item.
+ Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
+ counting up all descendants of this node. (dmode or fmode may be None.)
+ Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
+ fmode, selabel, capabilities) tuple that will match the most descendants of
+ that Item.
"""
- assert self.dir
- d = self.descendants = {(self.uid, self.gid, self.mode, None, self.selabel, self.capabilities): 1}
+ assert self.is_dir
+ key = (self.uid, self.gid, self.mode, None, self.selabel,
+ self.capabilities)
+ self.descendants = {key: 1}
+ d = self.descendants
for i in self.children:
- if i.dir:
+ if i.is_dir:
for k, v in i.CountChildMetadata().iteritems():
d[k] = d.get(k, 0) + v
else:
@@ -286,17 +289,23 @@
# Now find the dmode, fmode, selabel, and capabilities that match
# the most descendants with that (uid, gid), and choose those.
- best_dmode = (0, 0755)
- best_fmode = (0, 0644)
+ best_dmode = (0, 0o755)
+ best_fmode = (0, 0o644)
best_selabel = (0, None)
best_capabilities = (0, None)
for k, count in d.iteritems():
- if k[:2] != ug: continue
- if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2])
- if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3])
- if k[4] is not None and count >= best_selabel[0]: best_selabel = (count, k[4])
- if k[5] is not None and count >= best_capabilities[0]: best_capabilities = (count, k[5])
- self.best_subtree = ug + (best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])
+ if k[:2] != ug:
+ continue
+ if k[2] is not None and count >= best_dmode[0]:
+ best_dmode = (count, k[2])
+ if k[3] is not None and count >= best_fmode[0]:
+ best_fmode = (count, k[3])
+ if k[4] is not None and count >= best_selabel[0]:
+ best_selabel = (count, k[4])
+ if k[5] is not None and count >= best_capabilities[0]:
+ best_capabilities = (count, k[5])
+ self.best_subtree = ug + (
+ best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])
return d
@@ -308,11 +317,11 @@
self.CountChildMetadata()
def recurse(item, current):
- # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple that the current
- # item (and all its children) have already been set to. We only
- # need to issue set_perm/set_perm_recursive commands if we're
+ # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
+ # that the current item (and all its children) have already been set to.
+ # We only need to issue set_perm/set_perm_recursive commands if we're
# supposed to be something different.
- if item.dir:
+ if item.is_dir:
if current != item.best_subtree:
script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
current = item.best_subtree
@@ -367,9 +376,9 @@
data = input_zip.read(info.filename)
output_zip.writestr(info2, data)
if fn.endswith("/"):
- itemset.Get(fn[:-1], dir=True)
+ itemset.Get(fn[:-1], is_dir=True)
else:
- itemset.Get(fn, dir=False)
+ itemset.Get(fn)
symlinks.sort()
return symlinks
@@ -383,17 +392,19 @@
whole_file=True)
-def AppendAssertions(script, info_dict, oem_dict = None):
+def AppendAssertions(script, info_dict, oem_dict=None):
oem_props = info_dict.get("oem_fingerprint_properties")
if oem_props is None or len(oem_props) == 0:
device = GetBuildProp("ro.product.device", info_dict)
script.AssertDevice(device)
else:
if oem_dict is None:
- raise common.ExternalError("No OEM file provided to answer expected assertions")
+ raise common.ExternalError(
+ "No OEM file provided to answer expected assertions")
for prop in oem_props.split():
if oem_dict.get(prop) is None:
- raise common.ExternalError("The OEM file is missing the property %s" % prop)
+ raise common.ExternalError(
+ "The OEM file is missing the property %s" % prop)
script.AssertOemProperty(prop, oem_dict.get(prop))
@@ -421,10 +432,10 @@
if oem_props is None:
return GetBuildProp("ro.build.fingerprint", info_dict)
return "%s/%s/%s:%s" % (
- GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict),
- GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict),
- GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict),
- GetBuildProp("ro.build.thumbprint", info_dict))
+ GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict),
+ GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict),
+ GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict),
+ GetBuildProp("ro.build.thumbprint", info_dict))
def GetImage(which, tmpdir, info_dict):
@@ -474,15 +485,16 @@
if OPTIONS.oem_source is None:
raise common.ExternalError("OEM source required for this build")
script.Mount("/oem", recovery_mount_options)
- oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines())
+ oem_dict = common.LoadDictionaryFromLines(
+ open(OPTIONS.oem_source).readlines())
- metadata = {"post-build": CalculateFingerprint(
- oem_props, oem_dict, OPTIONS.info_dict),
- "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ metadata = {
+ "post-build": CalculateFingerprint(oem_props, oem_dict,
OPTIONS.info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.info_dict),
- }
+ "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ OPTIONS.info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
+ }
device_specific = common.DeviceSpecificParams(
input_zip=input_zip,
@@ -521,7 +533,8 @@
# do normal full package installation:
# wipe and install system, boot image, etc.
# set up system to update recovery partition on first boot
- # complete script normally (allow recovery to mark itself finished and reboot)
+ # complete script normally
+ # (allow recovery to mark itself finished and reboot)
recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
OPTIONS.input_tmp, "RECOVERY")
@@ -585,7 +598,7 @@
if not block_based:
def output_sink(fn, data):
common.ZipWriteStr(output_zip, "recovery/" + fn, data)
- system_items.Get("system/" + fn, dir=False)
+ system_items.Get("system/" + fn)
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
recovery_img, boot_img)
@@ -686,7 +699,7 @@
while len(dirs) > 0:
path = "/".join(dirs)
if path in known_paths:
- break;
+ break
known_paths.add(path)
dirs.pop()
@@ -701,11 +714,12 @@
script = edify_generator.EdifyGenerator(source_version,
OPTIONS.target_info_dict)
- metadata = {"pre-device": GetBuildProp("ro.product.device",
- OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
- }
+ metadata = {
+ "pre-device": GetBuildProp("ro.product.device",
+ OPTIONS.source_info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
@@ -730,12 +744,8 @@
updating_boot = (not OPTIONS.two_step and
(source_boot.data != target_boot.data))
- source_recovery = common.GetBootableImage(
- "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
- OPTIONS.source_info_dict)
target_recovery = common.GetBootableImage(
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- updating_recovery = (source_recovery.data != target_recovery.data)
system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)
@@ -753,8 +763,10 @@
if HasVendorPartition(target_zip):
if not HasVendorPartition(source_zip):
raise RuntimeError("can't generate incremental that adds /vendor")
- vendor_src = GetImage("vendor", OPTIONS.source_tmp, OPTIONS.source_info_dict)
- vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, OPTIONS.target_info_dict)
+ vendor_src = GetImage("vendor", OPTIONS.source_tmp,
+ OPTIONS.source_info_dict)
+ vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
+ OPTIONS.target_info_dict)
vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
check_first_block=True,
version=blockimgdiff_version)
@@ -762,13 +774,15 @@
vendor_diff = None
oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
- recovery_mount_options = OPTIONS.target_info_dict.get("recovery_mount_options")
+ recovery_mount_options = OPTIONS.target_info_dict.get(
+ "recovery_mount_options")
oem_dict = None
if oem_props is not None and len(oem_props) > 0:
if OPTIONS.oem_source is None:
raise common.ExternalError("OEM source required for this build")
script.Mount("/oem", recovery_mount_options)
- oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines())
+ oem_dict = common.LoadDictionaryFromLines(
+ open(OPTIONS.oem_source).readlines())
AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
device_specific.IncrementalOTA_Assertions()
@@ -792,7 +806,8 @@
# patch system files, etc.
# force full install of new boot image
# set up system to update recovery partition on first boot
- # complete script normally (allow recovery to mark itself finished and reboot)
+ # complete script normally
+ # (allow recovery to mark itself finished and reboot)
if OPTIONS.two_step:
if not OPTIONS.info_dict.get("multistage_support", None):
@@ -805,7 +820,7 @@
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
- script.AppendExtra("sleep(20);\n");
+ script.AppendExtra("sleep(20);\n")
script.WriteRawImage("/recovery", "recovery.img")
script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
@@ -929,8 +944,9 @@
WriteMetadata(metadata, output_zip)
-class FileDifference:
+class FileDifference(object):
def __init__(self, partition, source_zip, target_zip, output_zip):
+ self.deferred_patch_list = None
print "Loading target..."
self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
print "Loading source..."
@@ -1003,7 +1019,7 @@
def EmitVerification(self, script):
so_far = 0
- for tf, sf, size, patch_sha in self.patch_list:
+ for tf, sf, _, _ in self.patch_list:
if tf.name != sf.name:
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
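Rebinding the unused size and patch_sha fields to _ is the conventional fix for pylint's unused-variable warning while keeping the four-tuple unpack explicit; repeating _ within one target list is legal Python. A toy illustration with string stand-ins for the File objects:

    patch_list = [("system/app/A.apk", "system/app/A.apk", 1024, "deadbeef")]
    for tf, sf, _, _ in patch_list:          # size and patch sha1 unused here
        print("%s -> %s" % (sf, tf))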
@@ -1011,18 +1027,18 @@
return so_far
def EmitExplicitTargetVerification(self, script):
- for fn, size, sha1 in self.verbatim_targets:
- if (fn[-1] != "/"):
+ for fn, _, sha1 in self.verbatim_targets:
+ if fn[-1] != "/":
script.FileCheck("/"+fn, sha1)
for tf, _, _, _ in self.patch_list:
script.FileCheck(tf.name, tf.sha1)
def RemoveUnneededFiles(self, script, extras=()):
- script.DeleteFiles(["/"+i[0] for i in self.verbatim_targets] +
- ["/"+i for i in sorted(self.source_data)
- if i not in self.target_data and
- i not in self.renames] +
- list(extras))
+ script.DeleteFiles(
+ ["/" + i[0] for i in self.verbatim_targets] +
+ ["/" + i for i in sorted(self.source_data)
+ if i not in self.target_data and i not in self.renames] +
+ list(extras))
def TotalPatchSize(self):
return sum(i[1].size for i in self.patch_list)
@@ -1030,22 +1046,24 @@
def EmitPatches(self, script, total_patch_size, so_far):
self.deferred_patch_list = deferred_patch_list = []
for item in self.patch_list:
- tf, sf, size, _ = item
+ tf, sf, _, _ = item
if tf.name == "system/build.prop":
deferred_patch_list.append(item)
continue
- if (sf.name != tf.name):
+ if sf.name != tf.name:
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
- script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
+ script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
+ "patch/" + sf.name + ".p")
so_far += tf.size
script.SetProgress(so_far / total_patch_size)
return so_far
def EmitDeferredPatches(self, script):
for item in self.deferred_patch_list:
- tf, sf, size, _ = item
- script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
- script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
+ tf, sf, _, _ = item
+ script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
+ "patch/" + sf.name + ".p")
+ script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)
def EmitRenames(self, script):
if len(self.renames) > 0:
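The 0644 to 0o644 change is more than style: a bare leading-zero octal literal is a syntax error in Python 3, while the 0o prefix parses in Python 2.6+ and 3 and denotes the same mode bits:

    import os, stat, tempfile

    assert 0o644 == 6 * 64 + 4 * 8 + 4       # 420 decimal, i.e. rw-r--r--
    fd, path = tempfile.mkstemp()
    os.close(fd)
    os.chmod(path, 0o644)
    assert stat.S_IMODE(os.stat(path).st_mode) == 0o644
    os.remove(path)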
@@ -1055,8 +1073,6 @@
script.RenameFile(src, tgt.name)
-
-
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
target_has_recovery_patch = HasRecoveryPatch(target_zip)
source_has_recovery_patch = HasRecoveryPatch(source_zip)
@@ -1082,13 +1098,15 @@
if OPTIONS.oem_source is None:
raise common.ExternalError("OEM source required for this build")
script.Mount("/oem", recovery_mount_options)
- oem_dict = common.LoadDictionaryFromLines(open(OPTIONS.oem_source).readlines())
+ oem_dict = common.LoadDictionaryFromLines(
+ open(OPTIONS.oem_source).readlines())
- metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
- OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
- }
+ metadata = {
+ "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ OPTIONS.source_info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
@@ -1108,8 +1126,10 @@
else:
vendor_diff = None
- target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict)
- source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict)
+ target_fp = CalculateFingerprint(oem_props, oem_dict,
+ OPTIONS.target_info_dict)
+ source_fp = CalculateFingerprint(oem_props, oem_dict,
+ OPTIONS.source_info_dict)
if oem_props is None:
script.AssertSomeFingerprint(source_fp, target_fp)
@@ -1164,7 +1184,8 @@
# patch system files, etc.
# force full install of new boot image
# set up system to update recovery partition on first boot
- # complete script normally (allow recovery to mark itself finished and reboot)
+ # complete script normally
+ # (allow recovery to mark itself finished and reboot)
if OPTIONS.two_step:
if not OPTIONS.info_dict.get("multistage_support", None):
@@ -1177,7 +1198,7 @@
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
- script.AppendExtra("sleep(20);\n");
+ script.AppendExtra("sleep(20);\n")
script.WriteRawImage("/recovery", "recovery.img")
script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
@@ -1211,9 +1232,11 @@
so_far += source_boot.size
size = []
- if system_diff.patch_list: size.append(system_diff.largest_source_size)
+ if system_diff.patch_list:
+ size.append(system_diff.largest_source_size)
if vendor_diff:
- if vendor_diff.patch_list: size.append(vendor_diff.largest_source_size)
+ if vendor_diff.patch_list:
+ size.append(vendor_diff.largest_source_size)
if size or updating_recovery or updating_boot:
script.CacheFreeSpaceCheck(max(size))
@@ -1290,7 +1313,7 @@
if not target_has_recovery_patch:
def output_sink(fn, data):
common.ZipWriteStr(output_zip, "recovery/" + fn, data)
- system_items.Get("system/" + fn, dir=False)
+ system_items.Get("system/" + fn)
common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
target_recovery, target_boot)
@@ -1436,7 +1459,7 @@
OPTIONS.two_step = True
elif o == "--no_signing":
OPTIONS.no_signing = True
- elif o in ("--verify"):
+ elif o == "--verify":
OPTIONS.verify = True
elif o == "--block":
OPTIONS.block_based = True
@@ -1450,23 +1473,23 @@
args = common.ParseOptions(argv, __doc__,
extra_opts="b:k:i:d:wne:t:a:2o:",
- extra_long_opts=["board_config=",
- "package_key=",
- "incremental_from=",
- "wipe_user_data",
- "no_prereq",
- "extra_script=",
- "worker_threads=",
- "aslr_mode=",
- "two_step",
- "no_signing",
- "block",
- "binary=",
- "oem_settings=",
- "verify",
- "no_fallback_to_full",
- ],
- extra_option_handler=option_handler)
+ extra_long_opts=[
+ "board_config=",
+ "package_key=",
+ "incremental_from=",
+ "wipe_user_data",
+ "no_prereq",
+ "extra_script=",
+ "worker_threads=",
+ "aslr_mode=",
+ "two_step",
+ "no_signing",
+ "block",
+ "binary=",
+ "oem_settings=",
+ "verify",
+ "no_fallback_to_full",
+ ], extra_option_handler=option_handler)
if len(args) != 2:
common.Usage(__doc__)
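The reflowed extra_long_opts list follows getopt conventions (common.ParseOptions wraps getopt): a trailing '=' marks an option that takes a value. A minimal getopt sketch with made-up arguments:

    import getopt

    argv = ["--block", "--package_key=release", "target.zip", "ota.zip"]
    opts, args = getopt.getopt(argv, "k:2",
                               ["block", "package_key=", "two_step"])
    assert opts == [("--block", ""), ("--package_key", "release")]
    assert args == ["target.zip", "ota.zip"]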
@@ -1486,8 +1509,8 @@
# is in the out/ directory tree, but for repacking from target-files.zip it's
# in the root directory of the ramdisk.
if "selinux_fc" in OPTIONS.info_dict:
- OPTIONS.info_dict["selinux_fc"] = os.path.join(OPTIONS.input_tmp, "BOOT", "RAMDISK",
- "file_contexts")
+ OPTIONS.info_dict["selinux_fc"] = os.path.join(
+ OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts")
if OPTIONS.verbose:
print "--- target info ---"
@@ -1514,8 +1537,10 @@
while True:
if OPTIONS.no_signing:
- if os.path.exists(args[1]): os.unlink(args[1])
- output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
+ if os.path.exists(args[1]):
+ os.unlink(args[1])
+ output_zip = zipfile.ZipFile(args[1], "w",
+ compression=zipfile.ZIP_DEFLATED)
else:
temp_zip_file = tempfile.NamedTemporaryFile()
output_zip = zipfile.ZipFile(temp_zip_file, "w",
@@ -1531,12 +1556,13 @@
else:
print "unzipping source target-files..."
- OPTIONS.source_tmp, source_zip = common.UnzipTemp(OPTIONS.incremental_source)
+ OPTIONS.source_tmp, source_zip = common.UnzipTemp(
+ OPTIONS.incremental_source)
OPTIONS.target_info_dict = OPTIONS.info_dict
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
if "selinux_fc" in OPTIONS.source_info_dict:
- OPTIONS.source_info_dict["selinux_fc"] = os.path.join(OPTIONS.source_tmp, "BOOT", "RAMDISK",
- "file_contexts")
+ OPTIONS.source_info_dict["selinux_fc"] = os.path.join(
+ OPTIONS.source_tmp, "BOOT", "RAMDISK", "file_contexts")
if OPTIONS.package_key is None:
OPTIONS.package_key = OPTIONS.source_info_dict.get(
"default_system_dev_certificate",
@@ -1548,7 +1574,8 @@
WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
break
except ValueError:
- if not OPTIONS.fallback_to_full: raise
+ if not OPTIONS.fallback_to_full:
+ raise
print "--- failed to build incremental; falling back to full ---"
OPTIONS.incremental_source = None
output_zip.close()
@@ -1566,7 +1593,7 @@
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError, e:
+ except common.ExternalError as e:
print
print " ERROR: %s" % (e,)
print
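The except ... as rewrites are the one fix in this patch that affects which interpreters can parse the files at all: the comma form is Python-2-only, while as works in 2.6+ and 3:

    try:
        raise ValueError("boom")
    except ValueError as e:   # 'except ValueError, e:' fails to parse on Python 3
        print("caught: %s" % (e,))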
diff --git a/tools/releasetools/pylintrc b/tools/releasetools/pylintrc
new file mode 100644
index 0000000..90de1af
--- /dev/null
+++ b/tools/releasetools/pylintrc
@@ -0,0 +1,382 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma-separated values of python module names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from which C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect: the resulting
+# AST will differ from the one Python itself would produce.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=invalid-name,missing-docstring,too-many-branches,too-many-locals,too-many-arguments,too-many-statements,duplicate-code,too-few-public-methods,too-many-instance-attributes,too-many-lines,too-many-public-methods,locally-disabled,fixme
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file named "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a score of at most 10 (10 is the
+# highest score). You have access to the variables 'error', 'warning',
+# 'refactor', 'convention' and 'statement', which contain the number of
+# messages in each category and the total number of statements analyzed. This
+# is used by the global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+ignored-modules=
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint's inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtin function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the indicated private dictionary in
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=LF
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used, for
+# instance, to skip checking methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
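To exercise the new rcfile, pylint can be driven from Python as well as from the shell; a sketch using the long-standing lint.Run entry point (paths assumed, and note that Run() calls sys.exit() when it finishes):

    from pylint import lint

    lint.Run(["--rcfile=tools/releasetools/pylintrc",
              "tools/releasetools/ota_from_target_files.py"])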
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 7279c60..8b327fe 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -24,6 +24,7 @@
lots of runs."""
def __init__(self, data=None):
+ self.monotonic = False
if isinstance(data, str):
self._parse_internal(data)
elif data:
@@ -185,7 +186,7 @@
# This is like intersect, but we can stop as soon as we discover the
# output is going to be nonempty.
z = 0
- for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
+ for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 1 and d == 1) or (z == 2 and d == -1):
return True
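The renamed loop variable aside, this overlap test is a sweep over range boundaries: a RangeSet stores a sorted flat [start, end, start, end, ...] list, starts are tagged +1 and ends -1, and the nesting depth of the merged boundary stream reveals an intersection without materializing it. A standalone sketch (z += d is the depth update elided by the hunk):

    import heapq
    import itertools

    def overlaps(a_bounds, b_bounds):
        z = 0
        for _, d in heapq.merge(zip(a_bounds, itertools.cycle((+1, -1))),
                                zip(b_bounds, itertools.cycle((+1, -1)))):
            # A start while one set is open, or an end while both are open,
            # means the two sets share at least one block.
            if (z == 1 and d == 1) or (z == 2 and d == -1):
                return True
            z += d
        return False

    assert overlaps([0, 10], [5, 15])
    assert not overlaps([0, 10], [10, 20])   # bounds are half-open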
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks
new file mode 120000
index 0000000..b5ec59a
--- /dev/null
+++ b/tools/releasetools/sign_target_files_apks
@@ -0,0 +1 @@
+sign_target_files_apks.py
\ No newline at end of file
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks.py
similarity index 94%
rename from tools/releasetools/sign_target_files_apks
rename to tools/releasetools/sign_target_files_apks.py
index 9e61051..d47cc4f 100755
--- a/tools/releasetools/sign_target_files_apks
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -169,22 +169,25 @@
f.write(data)
for info in input_tf_zip.infolist():
- if info.filename.startswith("IMAGES/"): continue
+ if info.filename.startswith("IMAGES/"):
+ continue
data = input_tf_zip.read(info.filename)
out_info = copy.copy(info)
if (info.filename == "META/misc_info.txt" and
OPTIONS.replace_verity_private_key):
- ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info, OPTIONS.replace_verity_private_key[1])
+ ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info,
+ OPTIONS.replace_verity_private_key[1])
elif (info.filename == "BOOT/RAMDISK/verity_key" and
- OPTIONS.replace_verity_public_key):
- new_data = ReplaceVerityPublicKey(output_tf_zip, OPTIONS.replace_verity_public_key[1])
+ OPTIONS.replace_verity_public_key):
+ new_data = ReplaceVerityPublicKey(output_tf_zip,
+ OPTIONS.replace_verity_public_key[1])
write_to_temp(info.filename, info.external_attr, new_data)
elif (info.filename.startswith("BOOT/") or
- info.filename.startswith("RECOVERY/") or
- info.filename.startswith("META/") or
- info.filename == "SYSTEM/etc/recovery-resource.dat"):
+ info.filename.startswith("RECOVERY/") or
+ info.filename.startswith("META/") or
+ info.filename == "SYSTEM/etc/recovery-resource.dat"):
write_to_temp(info.filename, info.external_attr, data)
if info.filename.endswith(".apk"):
@@ -231,7 +234,7 @@
if OPTIONS.replace_ota_keys:
new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
if new_recovery_keys:
- write_to_temp("RECOVERY/RAMDISK/res/keys", 0755 << 16, new_recovery_keys)
+ write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys)
if rebuild_recovery:
recovery_img = common.GetBootableImage(
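The 0o755 << 16 passed to write_to_temp matches zipfile's convention of carrying the Unix mode in the high 16 bits of an entry's external_attr:

    import stat
    import zipfile

    info = zipfile.ZipInfo("RECOVERY/RAMDISK/res/keys")
    info.external_attr = 0o755 << 16           # rwxr-xr-x
    assert stat.S_IMODE(info.external_attr >> 16) == 0o755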
@@ -268,8 +271,8 @@
if OPTIONS.verbose:
print " Replaced %d occurence(s) of %s.x509.pem with " \
"%s.x509.pem" % (num, old, new)
- except IOError, e:
- if (e.errno == errno.ENOENT and not OPTIONS.verbose):
+ except IOError as e:
+ if e.errno == errno.ENOENT and not OPTIONS.verbose:
continue
print " Error accessing %s. %s. Skip replacing %s.x509.pem " \
@@ -303,7 +306,7 @@
pieces[-1] = EditTags(pieces[-1])
value = "/".join(pieces)
elif (key in ("ro.build.thumbprint", "ro.vendor.build.thumbprint")
- and misc_info.get("oem_fingerprint_properties") is not None):
+ and misc_info.get("oem_fingerprint_properties") is not None):
pieces = value.split("/")
pieces[-1] = EditTags(pieces[-1])
value = "/".join(pieces)
@@ -380,13 +383,13 @@
# put into a zipfile system/etc/security/otacerts.zip.
# We DO NOT include the extra_recovery_keys (if any) here.
- tempfile = cStringIO.StringIO()
- certs_zip = zipfile.ZipFile(tempfile, "w")
+ temp_file = cStringIO.StringIO()
+ certs_zip = zipfile.ZipFile(temp_file, "w")
for k in mapped_keys:
certs_zip.write(k)
certs_zip.close()
common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
- tempfile.getvalue())
+ temp_file.getvalue())
return new_recovery_keys
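Renaming tempfile to temp_file is a real fix, not cosmetics: the old local name shadowed the script's tempfile module import. The pattern itself, a zip assembled in memory, looks like this (io.BytesIO shown as the 2/3-portable spelling of cStringIO.StringIO):

    import io
    import zipfile

    buf = io.BytesIO()
    certs_zip = zipfile.ZipFile(buf, "w")
    certs_zip.writestr("releasekey.x509.pem", b"PEM bytes here")  # illustrative
    certs_zip.close()
    otacerts_payload = buf.getvalue()    # bytes to store in the output zip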
@@ -397,7 +400,8 @@
common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data)
return data
-def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip, misc_info, key_path):
+def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip,
+ misc_info, key_path):
print "Replacing verity private key with %s" % key_path
current_key = misc_info["verity_key"]
original_misc_info = targetfile_input_zip.read("META/misc_info.txt")
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 7574747..b97bb84 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -14,12 +14,11 @@
import bisect
import os
-import sys
import struct
-import pprint
from hashlib import sha1
-from rangelib import *
+import rangelib
+
class SparseImage(object):
"""Wraps a sparse image file (and optional file map) into an image
@@ -39,7 +38,6 @@
self.blocksize = blk_sz = header[5]
self.total_blocks = total_blks = header[6]
total_chunks = header[7]
- image_checksum = header[8]
if magic != 0xED26FF3A:
raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
@@ -64,7 +62,6 @@
header_bin = f.read(12)
header = struct.unpack("<2H2I", header_bin)
chunk_type = header[0]
- reserved1 = header[1]
chunk_sz = header[2]
total_sz = header[3]
data_sz = total_sz - 12
@@ -102,7 +99,7 @@
raise ValueError("Unknown chunk type 0x%04X not supported" %
(chunk_type,))
- self.care_map = RangeSet(care_data)
+ self.care_map = rangelib.RangeSet(care_data)
self.offset_index = [i[0] for i in offset_map]
if file_map_fn:
@@ -166,7 +163,7 @@
with open(fn) as f:
for line in f:
fn, ranges = line.split(None, 1)
- ranges = RangeSet.parse(ranges)
+ ranges = rangelib.RangeSet.parse(ranges)
out[fn] = ranges
assert ranges.size() == ranges.intersect(remaining).size()
remaining = remaining.subtract(ranges)
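Qualifying RangeSet through the rangelib module replaces the old wildcard import without changing behavior. The text form parsed from the map file uses inclusive start-end pairs and single blocks; a hedged usage sketch, assuming the releasetools directory is on sys.path:

    import rangelib

    ranges = rangelib.RangeSet.parse("0-9 20 30-39")
    assert ranges.size() == 21                  # 10 + 1 + 10 blocks

    remaining = rangelib.RangeSet.parse("0-99")
    assert ranges.intersect(remaining).size() == ranges.size()
    remaining = remaining.subtract(ranges)
    assert remaining.size() == 100 - 21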
@@ -186,7 +183,7 @@
for s, e in remaining:
for b in range(s, e):
idx = bisect.bisect_right(self.offset_index, b) - 1
- chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+ chunk_start, _, filepos, fill_data = self.offset_map[idx]
if filepos is not None:
filepos += (b-chunk_start) * self.blocksize
f.seek(filepos, os.SEEK_SET)
@@ -204,8 +201,8 @@
nonzero_blocks.append(b)
nonzero_blocks.append(b+1)
- out["__ZERO"] = RangeSet(data=zero_blocks)
- out["__NONZERO"] = RangeSet(data=nonzero_blocks)
+ out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
+ out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as