Revert "Generate care map after merging target_files"
This reverts commit c184fa1887df633645e85570c06428cd97671fc8.
Reason for revert: b/184541365
Change-Id: Icbcbb6deb92863f4a468b9fa54f18d824a6f0939
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index d7de85b..4fe10c6 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -59,11 +59,12 @@
import build_image
import build_super_image
import common
+import rangelib
+import sparse_img
import verity_utils
import ota_metadata_pb2
from apex_utils import GetSystemApexInfoFromTargetFiles
-from common import AddCareMapForAbOta
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -109,6 +110,45 @@
common.ZipWrite(self._output_zip, self.name, self._zip_name)
+def GetCareMap(which, imgname):
+ """Returns the care_map string for the given partition.
+
+ Args:
+ which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+ imgname: The filename of the image.
+
+ Returns:
+    A [which, care_map_ranges] list, where care_map_ranges is the raw string of
+    the care_map RangeSet; or None if the partition's image size is unknown.
+ """
+ assert which in common.PARTITIONS_WITH_CARE_MAP
+
+  # which + "_image_size" holds the size of the actual filesystem image, which
+  # is all that needs to be verified. The additional blocks in the image file
+  # contain verity metadata; reading them would trigger invalid reads, so they
+  # are excluded from the verification.
+ image_size = OPTIONS.info_dict.get(which + "_image_size")
+ if not image_size:
+ return None
+
+ image_blocks = int(image_size) // 4096 - 1
+ assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+ # For sparse images, we will only check the blocks that are listed in the care
+ # map, i.e. the ones with meaningful data.
+ if "extfs_sparse_flag" in OPTIONS.info_dict:
+ simg = sparse_img.SparseImage(imgname)
+ care_map_ranges = simg.care_map.intersect(
+ rangelib.RangeSet("0-{}".format(image_blocks)))
+
+ # Otherwise for non-sparse images, we read all the blocks in the filesystem
+ # image.
+ else:
+ care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+ return [which, care_map_ranges.to_string_raw()]
+
+
def AddSystem(output_zip, recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip. Returns the name of the system image file."""
@@ -134,13 +174,12 @@
"board_uses_vendorimage") == "true"
if (OPTIONS.rebuild_recovery and not board_uses_vendorimage and
- recovery_img is not None and boot_img is not None):
+ recovery_img is not None and boot_img is not None):
logger.info("Building new recovery patch on system at system/vendor")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
- block_list = OutputFile(output_zip, OPTIONS.input_tmp,
- "IMAGES", "system.map")
+ block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
block_list=block_list)
return img.name
@@ -183,13 +222,12 @@
"board_uses_vendorimage") == "true"
if (OPTIONS.rebuild_recovery and board_uses_vendorimage and
- recovery_img is not None and boot_img is not None):
+ recovery_img is not None and boot_img is not None):
logger.info("Building new recovery patch on vendor")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
- block_list = OutputFile(output_zip, OPTIONS.input_tmp,
- "IMAGES", "vendor.map")
+ block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
block_list=block_list)
return img.name
@@ -261,7 +299,6 @@
block_list=block_list)
return img.name
-
def AddOdmDlkm(output_zip):
"""Turn the contents of OdmDlkm into an odm_dlkm image and store it in output_zip."""
@@ -313,7 +350,6 @@
img.Write()
return img.name
-
def AddPvmfw(output_zip):
"""Adds the pvmfw image.
@@ -349,7 +385,6 @@
img.Write()
return img.name
-
def AddCustomImages(output_zip, partition_name):
"""Adds and signs custom images in IMAGES/.
@@ -378,16 +413,15 @@
key_path, algorithm, extra_args)
for img_name in OPTIONS.info_dict.get(
- "avb_{}_image_list".format(partition_name)).split():
- custom_image = OutputFile(
- output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
+ "avb_{}_image_list".format(partition_name)).split():
+ custom_image = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
if os.path.exists(custom_image.name):
continue
custom_image_prebuilt_path = os.path.join(
OPTIONS.input_tmp, "PREBUILT_IMAGES", img_name)
assert os.path.exists(custom_image_prebuilt_path), \
- "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
+ "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
shutil.copy(custom_image_prebuilt_path, custom_image.name)
@@ -610,6 +644,72 @@
assert available, "Failed to find " + img_name
+def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
+  """Generates and adds care_map.pb for A/B partitions that have a care_map.
+
+ Args:
+ output_zip: The output zip file (needs to be already open), or None to
+ write care_map.pb to OPTIONS.input_tmp/.
+ ab_partitions: The list of A/B partitions.
+ image_paths: A map from the partition name to the image path.
+ """
+ care_map_list = []
+ for partition in ab_partitions:
+ partition = partition.strip()
+ if partition not in common.PARTITIONS_WITH_CARE_MAP:
+ continue
+
+ verity_block_device = "{}_verity_block_device".format(partition)
+ avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+ if (verity_block_device in OPTIONS.info_dict or
+ OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+ image_path = image_paths[partition]
+ assert os.path.exists(image_path)
+
+ care_map = GetCareMap(partition, image_path)
+ if not care_map:
+ continue
+ care_map_list += care_map
+
+      # Adds the fingerprint field to the care_map.
+ # TODO(xunchang) revisit the fingerprint calculation for care_map.
+ partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+ prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+ "ro.{}.build.thumbprint".format(partition)]
+
+ present_props = [x for x in prop_name_list if
+ partition_props and partition_props.GetProp(x)]
+ if not present_props:
+ logger.warning("fingerprint is not present for partition %s", partition)
+ property_id, fingerprint = "unknown", "unknown"
+ else:
+ property_id = present_props[0]
+ fingerprint = partition_props.GetProp(property_id)
+ care_map_list += [property_id, fingerprint]
+
+ if not care_map_list:
+ return
+
+  # Converts the list into a protobuf message by calling care_map_generator,
+  # and writes the result to a temp file.
+ temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
+ suffix=".txt")
+ with open(temp_care_map_text, 'w') as text_file:
+ text_file.write('\n'.join(care_map_list))
+
+ temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
+ care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+ common.RunAndCheckOutput(care_map_gen_cmd)
+
+ care_map_path = "META/care_map.pb"
+ if output_zip and care_map_path not in output_zip.namelist():
+ common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
+ else:
+ shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+ if output_zip:
+ OPTIONS.replace_updated_files_list.append(care_map_path)
+
+
def AddPackRadioImages(output_zip, images):
"""Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/.
@@ -685,12 +785,11 @@
return ((os.path.isdir(
os.path.join(OPTIONS.input_tmp, partition_name.upper())) and
- OPTIONS.info_dict.get(
- "building_{}_image".format(partition_name)) == "true") or
- os.path.exists(
- os.path.join(OPTIONS.input_tmp, "IMAGES",
- "{}.img".format(partition_name))))
-
+ OPTIONS.info_dict.get(
+ "building_{}_image".format(partition_name)) == "true") or
+ os.path.exists(
+ os.path.join(OPTIONS.input_tmp, "IMAGES",
+ "{}.img".format(partition_name))))
def AddApexInfo(output_zip):
apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp)
@@ -779,7 +878,7 @@
boot_images = OPTIONS.info_dict.get("boot_images")
if boot_images is None:
boot_images = "boot.img"
- for index, b in enumerate(boot_images.split()):
+ for index,b in enumerate(boot_images.split()):
# common.GetBootableImage() returns the image directly if present.
boot_image = common.GetBootableImage(
"IMAGES/" + b, b, OPTIONS.input_tmp, "BOOT")
@@ -934,7 +1033,7 @@
if OPTIONS.info_dict.get("build_super_partition") == "true":
if OPTIONS.info_dict.get(
- "build_retrofit_dynamic_partitions_ota_package") == "true":
+ "build_retrofit_dynamic_partitions_ota_package") == "true":
banner("super split images")
AddSuperSplit(output_zip)
@@ -1000,7 +1099,6 @@
AddImagesToTargetFiles(args[0])
logger.info("done.")
-
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index c5e1ade..414ab97 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -41,7 +41,6 @@
from hashlib import sha1, sha256
import images
-import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff
@@ -138,7 +137,6 @@
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
-
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
update package installation.
@@ -227,7 +225,6 @@
def SetHostToolLocation(tool_name, location):
OPTIONS.host_tools[tool_name] = location
-
def FindHostToolPath(tool_name):
"""Finds the path to the host tool.
@@ -248,7 +245,6 @@
return tool_name
-
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
@@ -464,7 +460,7 @@
"""Returns the inquired build property for the provided partition."""
# Boot image uses ro.[product.]bootimage instead of boot.
- prop_partition = "bootimage" if partition == "boot" else partition
+ prop_partition = "bootimage" if partition == "boot" else partition
# If provided a partition for this property, only look within that
# partition's build.prop.
@@ -773,8 +769,7 @@
for partition in PARTITIONS_WITH_BUILD_PROP:
fingerprint = build_info.GetPartitionFingerprint(partition)
if fingerprint:
- d["avb_{}_salt".format(partition)] = sha256(
- fingerprint.encode()).hexdigest()
+ d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
try:
d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
except KeyError:
@@ -782,6 +777,7 @@
return d
+
def LoadListFromFile(file_path):
with open(file_path) as f:
return f.read().splitlines()
@@ -1095,7 +1091,7 @@
return " ".join(sorted(combined))
if (framework_dict.get("use_dynamic_partitions") !=
- "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+ "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
merged_dict = {"use_dynamic_partitions": "true"}
@@ -1573,7 +1569,7 @@
RunAndCheckOutput(cmd)
if (info_dict.get("boot_signer") == "true" and
- info_dict.get("verity_key")):
+ info_dict.get("verity_key")):
# Hard-code the path as "/boot" for two-step special recovery image (which
# will be loaded into /boot during the two-step OTA).
if two_step_image:
@@ -1738,19 +1734,15 @@
if os.access(fn, os.F_OK):
ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
for ramdisk_fragment in ramdisk_fragments:
- fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
- ramdisk_fragment, "mkbootimg_args")
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
- fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
- ramdisk_fragment, "prebuilt_ramdisk")
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
# Use prebuilt image if found, else create ramdisk from supplied files.
if os.access(fn, os.F_OK):
ramdisk_fragment_pathname = fn
else:
- ramdisk_fragment_root = os.path.join(
- sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
- ramdisk_fragment_img = _MakeRamdisk(
- ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+ ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+ ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, lz4_ramdisks=use_lz4)
ramdisk_fragment_imgs.append(ramdisk_fragment_img)
ramdisk_fragment_pathname = ramdisk_fragment_img.name
cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
@@ -3521,7 +3513,7 @@
for g in tgt_groups:
for p in shlex.split(info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
+ "super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in target super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
@@ -3529,7 +3521,7 @@
for g in src_groups:
for p in shlex.split(source_info_dict.get(
- "super_%s_partition_list" % g, "").strip()):
+ "super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in source super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
@@ -3638,7 +3630,7 @@
if u.src_size is not None and u.tgt_size is None:
append('remove_group %s' % g)
if (u.src_size is not None and u.tgt_size is not None and
- u.src_size > u.tgt_size):
+ u.src_size > u.tgt_size):
comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
@@ -3647,7 +3639,7 @@
comment('Add group %s with maximum size %d' % (g, u.tgt_size))
append('add_group %s %d' % (g, u.tgt_size))
if (u.src_size is not None and u.tgt_size is not None and
- u.src_size < u.tgt_size):
+ u.src_size < u.tgt_size):
comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
@@ -3681,8 +3673,7 @@
"""
tmp_dir = MakeTempDir('boot_', suffix='.img')
try:
- RunAndCheckOutput(['unpack_bootimg', '--boot_img',
- boot_img, '--out', tmp_dir])
+ RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
ramdisk = os.path.join(tmp_dir, 'ramdisk')
if not os.path.isfile(ramdisk):
logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
@@ -3695,14 +3686,13 @@
# Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
# the host environment.
RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
- cwd=extracted_ramdisk)
+ cwd=extracted_ramdisk)
for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
prop_file = os.path.join(extracted_ramdisk, search_path)
if os.path.isfile(prop_file):
return prop_file
- logger.warning(
- 'Unable to get boot image timestamp: no %s in ramdisk', search_path)
+ logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
return None
@@ -3735,116 +3725,9 @@
timestamp = props.GetProp('ro.bootimage.build.date.utc')
if timestamp:
return int(timestamp)
- logger.warning(
- 'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+ logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
return None
except ExternalError as e:
logger.warning('Unable to get boot image timestamp: %s', e)
return None
-
-
-def GetCareMap(which, imgname):
- """Returns the care_map string for the given partition.
-
- Args:
- which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
- imgname: The filename of the image.
-
- Returns:
- (which, care_map_ranges): care_map_ranges is the raw string of the care_map
- RangeSet; or None.
- """
- assert which in PARTITIONS_WITH_CARE_MAP
-
- # which + "_image_size" contains the size that the actual filesystem image
- # resides in, which is all that needs to be verified. The additional blocks in
- # the image file contain verity metadata, by reading which would trigger
- # invalid reads.
- image_size = OPTIONS.info_dict.get(which + "_image_size")
- if not image_size:
- return None
-
- image_blocks = int(image_size) // 4096 - 1
- assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
- # For sparse images, we will only check the blocks that are listed in the care
- # map, i.e. the ones with meaningful data.
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- simg = sparse_img.SparseImage(imgname)
- care_map_ranges = simg.care_map.intersect(
- rangelib.RangeSet("0-{}".format(image_blocks)))
-
- # Otherwise for non-sparse images, we read all the blocks in the filesystem
- # image.
- else:
- care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
- return [which, care_map_ranges.to_string_raw()]
-
-
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
- """Generates and adds care_map.pb for a/b partition that has care_map.
-
- Args:
- output_zip: The output zip file (needs to be already open), or None to
- write care_map.pb to OPTIONS.input_tmp/.
- ab_partitions: The list of A/B partitions.
- image_paths: A map from the partition name to the image path.
- """
- care_map_list = []
- for partition in ab_partitions:
- partition = partition.strip()
- if partition not in PARTITIONS_WITH_CARE_MAP:
- continue
-
- verity_block_device = "{}_verity_block_device".format(partition)
- avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
- if (verity_block_device in OPTIONS.info_dict or
- OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
- image_path = image_paths[partition]
- assert os.path.exists(image_path)
-
- care_map = GetCareMap(partition, image_path)
- if not care_map:
- continue
- care_map_list += care_map
-
- # adds fingerprint field to the care_map
- # TODO(xunchang) revisit the fingerprint calculation for care_map.
- partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
- prop_name_list = ["ro.{}.build.fingerprint".format(partition),
- "ro.{}.build.thumbprint".format(partition)]
-
- present_props = [x for x in prop_name_list if
- partition_props and partition_props.GetProp(x)]
- if not present_props:
- logger.warning(
- "fingerprint is not present for partition %s", partition)
- property_id, fingerprint = "unknown", "unknown"
- else:
- property_id = present_props[0]
- fingerprint = partition_props.GetProp(property_id)
- care_map_list += [property_id, fingerprint]
-
- if not care_map_list:
- return
-
- # Converts the list into proto buf message by calling care_map_generator; and
- # writes the result to a temp file.
- temp_care_map_text = MakeTempFile(prefix="caremap_text-",
- suffix=".txt")
- with open(temp_care_map_text, 'w') as text_file:
- text_file.write('\n'.join(care_map_list))
-
- temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
- care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- RunAndCheckOutput(care_map_gen_cmd)
-
- care_map_path = "META/care_map.pb"
- if output_zip and care_map_path not in output_zip.namelist():
- ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
- else:
- shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
- if output_zip:
- OPTIONS.replace_updated_files_list.append(care_map_path)
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index c2fd450..16cab4f 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -103,8 +103,6 @@
import find_shareduid_violation
import ota_from_target_files
-from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
-
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
@@ -358,7 +356,7 @@
has_error = True
if ('dynamic_partition_list' in framework_misc_info_keys) or (
- 'super_partition_groups' in framework_misc_info_keys):
+ 'super_partition_groups' in framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -450,7 +448,7 @@
# Merge misc info keys used for Dynamic Partitions.
if (merged_dict.get('use_dynamic_partitions') == 'true') and (
- framework_dict.get('use_dynamic_partitions') == 'true'):
+ framework_dict.get('use_dynamic_partitions') == 'true'):
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict, vendor_dict=merged_dict)
merged_dict.update(merged_dynamic_partitions_dict)
@@ -695,7 +693,7 @@
vendor_plat_version_file = get_file('vendor',
'etc/selinux/plat_sepolicy_vers.txt')
if not vendor_plat_version_file or not os.path.exists(
- vendor_plat_version_file):
+ vendor_plat_version_file):
raise ExternalError('Missing required sepolicy file %s',
vendor_plat_version_file)
with open(vendor_plat_version_file) as f:
@@ -1094,8 +1092,6 @@
temp_dir)
# Create the IMG package from the merged target files package.
- with zipfile.ZipFile(output_zip, allowZip64=True) as zfp:
- AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)
if output_img:
img_from_target_files.main([output_zip, output_img])
@@ -1168,8 +1164,7 @@
elif o == '--vendor-target-files':
OPTIONS.vendor_target_files = a
elif o == '--other-item-list':
- logger.warning(
- '--other-item-list has been renamed to --vendor-item-list')
+ logger.warning('--other-item-list has been renamed to --vendor-item-list')
OPTIONS.vendor_item_list = a
elif o == '--vendor-item-list':
OPTIONS.vendor_item_list = a
@@ -1225,7 +1220,7 @@
if (args or OPTIONS.framework_target_files is None or
OPTIONS.vendor_target_files is None or
(OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
- (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
+ (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
common.Usage(__doc__)
sys.exit(1)
@@ -1251,9 +1246,9 @@
output_item_list = None
if not validate_config_lists(
- framework_item_list=framework_item_list,
- framework_misc_info_keys=framework_misc_info_keys,
- vendor_item_list=vendor_item_list):
+ framework_item_list=framework_item_list,
+ framework_misc_info_keys=framework_misc_info_keys,
+ vendor_item_list=vendor_item_list):
sys.exit(1)
call_func_with_temp_dir(
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 3d5300e..6b7a7db 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,10 +21,9 @@
import common
import test_utils
from add_img_to_target_files import (
- AddPackRadioImages,
+ AddCareMapForAbOta, AddPackRadioImages,
CheckAbOtaImages, GetCareMap)
from rangelib import RangeSet
-from common import AddCareMapForAbOta
OPTIONS = common.OPTIONS
@@ -124,9 +123,9 @@
def _test_AddCareMapForAbOta():
"""Helper function to set up the test for test_AddCareMapForAbOta()."""
OPTIONS.info_dict = {
- 'extfs_sparse_flag': '-s',
- 'system_image_size': 65536,
- 'vendor_image_size': 40960,
+ 'extfs_sparse_flag' : '-s',
+ 'system_image_size' : 65536,
+ 'vendor_image_size' : 40960,
'system_verity_block_device': '/dev/block/system',
'vendor_verity_block_device': '/dev/block/vendor',
'system.build.prop': common.PartitionBuildProps.FromDictionary(
@@ -154,8 +153,8 @@
(0xCAC2, 12)])
image_paths = {
- 'system': system_image,
- 'vendor': vendor_image,
+ 'system' : system_image,
+ 'vendor' : vendor_image,
}
return image_paths
@@ -244,9 +243,9 @@
"""Tests the case for partitions without fingerprint."""
image_paths = self._test_AddCareMapForAbOta()
OPTIONS.info_dict = {
- 'extfs_sparse_flag': '-s',
- 'system_image_size': 65536,
- 'vendor_image_size': 40960,
+ 'extfs_sparse_flag' : '-s',
+ 'system_image_size' : 65536,
+ 'vendor_image_size' : 40960,
'system_verity_block_device': '/dev/block/system',
'vendor_verity_block_device': '/dev/block/vendor',
}
@@ -255,9 +254,8 @@
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
- "unknown", 'vendor', RangeSet(
- "0-9").to_string_raw(), "unknown",
- "unknown"]
+ "unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
+ "unknown"]
self._verifyCareMap(expected, care_map_file)
@@ -397,8 +395,8 @@
(0xCAC3, 4),
(0xCAC1, 6)])
OPTIONS.info_dict = {
- 'extfs_sparse_flag': '-s',
- 'system_image_size': 53248,
+ 'extfs_sparse_flag' : '-s',
+ 'system_image_size' : 53248,
}
name, care_map = GetCareMap('system', sparse_image)
self.assertEqual('system', name)
@@ -413,14 +411,14 @@
(0xCAC3, 4),
(0xCAC1, 6)])
OPTIONS.info_dict = {
- 'extfs_sparse_flag': '-s',
- 'system_image_size': -45056,
+ 'extfs_sparse_flag' : '-s',
+ 'system_image_size' : -45056,
}
self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image)
def test_GetCareMap_nonSparseImage(self):
OPTIONS.info_dict = {
- 'system_image_size': 53248,
+ 'system_image_size' : 53248,
}
# 'foo' is the image filename, which is expected to be not used by
# GetCareMap().