Merge "Avoid adding jacocoagent to bootclasspath twice"
diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml
new file mode 100644
index 0000000..2055df2
--- /dev/null
+++ b/core/rust_device_benchmark_config_template.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This test config file is auto-generated. -->
+<configuration description="Config to run {MODULE} rust benchmark tests.">
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="false" />
+ <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+ <option name="test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="{MODULE}" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
diff --git a/core/rust_host_benchmark_config_template.xml b/core/rust_host_benchmark_config_template.xml
new file mode 100644
index 0000000..bb7c1b5
--- /dev/null
+++ b/core/rust_host_benchmark_config_template.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Config to run {MODULE} rust benchmark host tests">
+ <test class="com.android.tradefed.testtype.rust.RustBinaryHostTest" >
+ <option name="test-file" value="{MODULE}" />
+ <option name="test-timeout" value="5m" />
+ <option name="is-benchmark" value="true" />
+ </test>
+</configuration>
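
These templates are consumed by the build system, which auto-generates a per-module test config by substituting the {MODULE} placeholder (the "auto-generated" comment in the device template refers to this step). A minimal sketch of that expansion, with a hypothetical helper name, module name, and output path:

    # Minimal sketch of template expansion; the helper name, module name, and
    # output path are illustrative, not the actual build-system code.
    def expand_test_config(template_path, module_name, output_path):
        with open(template_path) as template:
            config = template.read()
        # Every {MODULE} placeholder becomes the benchmark module's name.
        with open(output_path, "w") as out:
            out.write(config.replace("{MODULE}", module_name))

    expand_test_config("core/rust_device_benchmark_config_template.xml",
                       "libfoo_benchmark", "out/libfoo_benchmark.config")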
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index c9e3e80..4138277 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -240,7 +240,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2021-03-05
+ PLATFORM_SECURITY_PATCH := 2021-04-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index 18f2166..bf958fb 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -231,8 +231,8 @@
input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir]
# Find all the notice files and md5 them
+    files_with_same_hash = defaultdict(list)
     for input_dir in input_dirs:
-        files_with_same_hash = defaultdict(list)
for root, dir, files in os.walk(input_dir):
for file in files:
matched = True
@@ -254,8 +254,7 @@
file_md5sum = md5sum(filename)
files_with_same_hash[file_md5sum].append(filename)
-        filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
-
+    filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)
if html_output_file is not None:
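
The hunks above fix the notice deduplication scope: previously files_with_same_hash was re-created and filesets recomputed on every iteration of the input-dir loop, so only the last input dir's notices survived. Hoisting the dict above the loop and building filesets once lets identical notice files across all input dirs dedupe together. A self-contained sketch of the fixed grouping:

    import hashlib
    from collections import defaultdict

    def group_notices_by_md5(paths):
        # One dict for the whole run: duplicates across input dirs group together.
        files_with_same_hash = defaultdict(list)
        for path in paths:
            with open(path, "rb") as notice:
                digest = hashlib.md5(notice.read()).hexdigest()
            files_with_same_hash[digest].append(path)
        # Computed once, after all inputs are hashed, not inside the loop.
        return [sorted(files_with_same_hash[md5])
                for md5 in sorted(files_with_same_hash)]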
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 46bae29..efbf614 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -44,14 +44,12 @@
if not prop_list.get_value("persist.sys.usb.config"):
prop_list.put("persist.sys.usb.config", "none")
-def validate_and_add_grf_props(prop_list, sdk_version):
+def validate_grf_props(prop_list, sdk_version):
"""Validate GRF properties if exist.
If ro.board.first_api_level is defined, check if its value is valid for the
sdk version.
- Also, validate the value of ro.board.api_level if defined. If the
- ro.board.api_level property is not defined, define it with the required
- vendor API level for the GRF policy.
+ Also, validate the value of ro.board.api_level if defined.
Returns:
True if the GRF properties are valid.
@@ -74,10 +72,6 @@
% (grf_api_level, sdk_version))
return False
- grf_window = 4
- grf_required_api_level = (grf_api_level
- + grf_window * ((sdk_version - grf_api_level) // grf_window))
-
if board_api_level:
board_api_level = int(board_api_level)
if board_api_level < grf_api_level or board_api_level > sdk_version:
@@ -86,13 +80,6 @@
"ro.build.version.sdk(%d)\n"
% (board_api_level, grf_api_level, sdk_version))
return False
- if board_api_level < grf_required_api_level:
- sys.stderr.write("error: ro.board.api_level(%d) must be greater than or "
- "equal to %d based on GRF policy\n"
- % (board_api_level, grf_required_api_level))
- return False
- else:
- prop_list.put("ro.board.api_level", str(grf_required_api_level))
return True
@@ -278,7 +265,7 @@
mangle_build_prop(props)
if not override_optional_props(props, args.allow_dup):
sys.exit(1)
- if not validate_and_add_grf_props(props, args.sdk_version):
+ if not validate_grf_props(props, args.sdk_version):
sys.exit(1)
if not validate(props):
sys.exit(1)
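
After this change validate_grf_props enforces only the two surviving invariants: ro.board.first_api_level must not exceed the SDK version, and an explicit ro.board.api_level must fall between ro.board.first_api_level and the SDK version. It no longer computes a required GRF api level or injects ro.board.api_level itself. A condensed sketch of the remaining checks, with plain ints standing in for the prop list:

    def validate_grf(first_api_level, board_api_level, sdk_version):
        """Sketch of the surviving GRF checks; ints stand in for the props."""
        if first_api_level > sdk_version:
            return False  # ro.board.first_api_level newer than the SDK version
        if board_api_level is not None:
            # ro.board.api_level must lie in [first_api_level, sdk_version].
            if not first_api_level <= board_api_level <= sdk_version:
                return False
        return True

    assert validate_grf(25, None, 26)
    assert not validate_grf(25, 20, 26)  # api_level below first_api_level
    assert not validate_grf(25, 26, 25)  # api_level above the SDK version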
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 4fe10c6..a56c305 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -59,12 +59,11 @@
import build_image
import build_super_image
import common
-import rangelib
-import sparse_img
import verity_utils
import ota_metadata_pb2
from apex_utils import GetSystemApexInfoFromTargetFiles
+from common import AddCareMapForAbOta
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -110,45 +109,6 @@
common.ZipWrite(self._output_zip, self.name, self._zip_name)
-def GetCareMap(which, imgname):
- """Returns the care_map string for the given partition.
-
- Args:
- which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
- imgname: The filename of the image.
-
- Returns:
- (which, care_map_ranges): care_map_ranges is the raw string of the care_map
- RangeSet; or None.
- """
- assert which in common.PARTITIONS_WITH_CARE_MAP
-
- # which + "_image_size" contains the size that the actual filesystem image
- # resides in, which is all that needs to be verified. The additional blocks in
- # the image file contain verity metadata, by reading which would trigger
- # invalid reads.
- image_size = OPTIONS.info_dict.get(which + "_image_size")
- if not image_size:
- return None
-
- image_blocks = int(image_size) // 4096 - 1
- assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
- # For sparse images, we will only check the blocks that are listed in the care
- # map, i.e. the ones with meaningful data.
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- simg = sparse_img.SparseImage(imgname)
- care_map_ranges = simg.care_map.intersect(
- rangelib.RangeSet("0-{}".format(image_blocks)))
-
- # Otherwise for non-sparse images, we read all the blocks in the filesystem
- # image.
- else:
- care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
- return [which, care_map_ranges.to_string_raw()]
-
-
def AddSystem(output_zip, recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip. Returns the name of the system image file."""
@@ -644,72 +604,6 @@
assert available, "Failed to find " + img_name
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
- """Generates and adds care_map.pb for a/b partition that has care_map.
-
- Args:
- output_zip: The output zip file (needs to be already open), or None to
- write care_map.pb to OPTIONS.input_tmp/.
- ab_partitions: The list of A/B partitions.
- image_paths: A map from the partition name to the image path.
- """
- care_map_list = []
- for partition in ab_partitions:
- partition = partition.strip()
- if partition not in common.PARTITIONS_WITH_CARE_MAP:
- continue
-
- verity_block_device = "{}_verity_block_device".format(partition)
- avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
- if (verity_block_device in OPTIONS.info_dict or
- OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
- image_path = image_paths[partition]
- assert os.path.exists(image_path)
-
- care_map = GetCareMap(partition, image_path)
- if not care_map:
- continue
- care_map_list += care_map
-
- # adds fingerprint field to the care_map
- # TODO(xunchang) revisit the fingerprint calculation for care_map.
- partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
- prop_name_list = ["ro.{}.build.fingerprint".format(partition),
- "ro.{}.build.thumbprint".format(partition)]
-
- present_props = [x for x in prop_name_list if
- partition_props and partition_props.GetProp(x)]
- if not present_props:
- logger.warning("fingerprint is not present for partition %s", partition)
- property_id, fingerprint = "unknown", "unknown"
- else:
- property_id = present_props[0]
- fingerprint = partition_props.GetProp(property_id)
- care_map_list += [property_id, fingerprint]
-
- if not care_map_list:
- return
-
- # Converts the list into proto buf message by calling care_map_generator; and
- # writes the result to a temp file.
- temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
- suffix=".txt")
- with open(temp_care_map_text, 'w') as text_file:
- text_file.write('\n'.join(care_map_list))
-
- temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
- care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- common.RunAndCheckOutput(care_map_gen_cmd)
-
- care_map_path = "META/care_map.pb"
- if output_zip and care_map_path not in output_zip.namelist():
- common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
- else:
- shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
- if output_zip:
- OPTIONS.replace_updated_files_list.append(care_map_path)
-
-
def AddPackRadioImages(output_zip, images):
"""Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/.
@@ -1050,7 +944,9 @@
# Generate care_map.pb for ab_partitions, then write this file to
# target_files package.
- AddCareMapForAbOta(output_zip, ab_partitions, partitions)
+ output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
+ AddCareMapForAbOta(output_zip if output_zip else output_care_map,
+ ab_partitions, partitions)
# Radio images that need to be packed into IMAGES/, and product-img.zip.
pack_radioimages_txt = os.path.join(
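
With AddCareMapForAbOta moved into common.py, this call site now passes either the open output zip or a filesystem destination for care_map.pb; the dispatch on the argument type happens inside the function (see the common.py hunk below). A hedged sketch of the two calling modes, assuming the names used in this diff:

    import os
    from common import AddCareMapForAbOta

    def add_care_map(input_tmp, output_zip, ab_partitions, image_paths):
        # An open ZipFile embeds META/care_map.pb into the archive; a plain
        # path writes the .pb straight to disk.
        if output_zip:
            AddCareMapForAbOta(output_zip, ab_partitions, image_paths)
        else:
            AddCareMapForAbOta(os.path.join(input_tmp, "META", "care_map.pb"),
                               ab_partitions, image_paths)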
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index b6ed8a4..83425cc 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -41,6 +41,7 @@
from hashlib import sha1, sha256
import images
+import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff
@@ -3759,3 +3760,124 @@
except ExternalError as e:
logger.warning('Unable to get boot image timestamp: %s', e)
return None
+
+
+def GetCareMap(which, imgname):
+ """Returns the care_map string for the given partition.
+
+ Args:
+ which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+ imgname: The filename of the image.
+
+ Returns:
+    A list [which, care_map_ranges], where care_map_ranges is the raw string
+    of the care_map RangeSet; or None if the image size is not available.
+ """
+ assert which in PARTITIONS_WITH_CARE_MAP
+
+  # which + "_image_size" contains the size that the actual filesystem image
+  # resides in, which is all that needs to be verified. The additional blocks
+  # in the image file contain verity metadata; reading them would trigger
+  # invalid reads.
+ image_size = OPTIONS.info_dict.get(which + "_image_size")
+ if not image_size:
+ return None
+
+ image_blocks = int(image_size) // 4096 - 1
+ assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+ # For sparse images, we will only check the blocks that are listed in the care
+ # map, i.e. the ones with meaningful data.
+ if "extfs_sparse_flag" in OPTIONS.info_dict:
+ simg = sparse_img.SparseImage(imgname)
+ care_map_ranges = simg.care_map.intersect(
+ rangelib.RangeSet("0-{}".format(image_blocks)))
+
+ # Otherwise for non-sparse images, we read all the blocks in the filesystem
+ # image.
+ else:
+ care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+ return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
+ """Generates and adds care_map.pb for a/b partition that has care_map.
+
+ Args:
+ output_file: The output zip file (needs to be already open),
+ or file path to write care_map.pb.
+ ab_partitions: The list of A/B partitions.
+ image_paths: A map from the partition name to the image path.
+ """
+ if not output_file:
+ raise ExternalError('Expected output_file for AddCareMapForAbOta')
+
+ care_map_list = []
+ for partition in ab_partitions:
+ partition = partition.strip()
+ if partition not in PARTITIONS_WITH_CARE_MAP:
+ continue
+
+ verity_block_device = "{}_verity_block_device".format(partition)
+ avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+ if (verity_block_device in OPTIONS.info_dict or
+ OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+ if partition not in image_paths:
+ logger.warning('Potential partition with care_map missing from images: %s',
+ partition)
+ continue
+ image_path = image_paths[partition]
+ if not os.path.exists(image_path):
+ raise ExternalError('Expected image at path {}'.format(image_path))
+
+ care_map = GetCareMap(partition, image_path)
+ if not care_map:
+ continue
+ care_map_list += care_map
+
+ # adds fingerprint field to the care_map
+ # TODO(xunchang) revisit the fingerprint calculation for care_map.
+ partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+ prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+ "ro.{}.build.thumbprint".format(partition)]
+
+ present_props = [x for x in prop_name_list if
+ partition_props and partition_props.GetProp(x)]
+ if not present_props:
+ logger.warning(
+ "fingerprint is not present for partition %s", partition)
+ property_id, fingerprint = "unknown", "unknown"
+ else:
+ property_id = present_props[0]
+ fingerprint = partition_props.GetProp(property_id)
+ care_map_list += [property_id, fingerprint]
+
+ if not care_map_list:
+ return
+
+ # Converts the list into proto buf message by calling care_map_generator; and
+ # writes the result to a temp file.
+ temp_care_map_text = MakeTempFile(prefix="caremap_text-",
+ suffix=".txt")
+ with open(temp_care_map_text, 'w') as text_file:
+ text_file.write('\n'.join(care_map_list))
+
+ temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
+ care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+ RunAndCheckOutput(care_map_gen_cmd)
+
+ if not isinstance(output_file, zipfile.ZipFile):
+ shutil.copy(temp_care_map, output_file)
+ return
+ # output_file is a zip file
+ care_map_path = "META/care_map.pb"
+ if care_map_path in output_file.namelist():
+ # Copy the temp file into the OPTIONS.input_tmp dir and update the
+ # replace_updated_files_list used by add_img_to_target_files
+ if not OPTIONS.replace_updated_files_list:
+ OPTIONS.replace_updated_files_list = []
+ shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+ OPTIONS.replace_updated_files_list.append(care_map_path)
+ else:
+ ZipWrite(output_file, temp_care_map, arcname=care_map_path)
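
The block math in GetCareMap deserves a worked example: for a filesystem image of N 4096-byte blocks, image_blocks is N - 1 and the care map is the inclusive range "0-(N-1)", further intersected with the sparse image's care map when one exists. A minimal numeric check (numbers are illustrative):

    image_size = 100 * 4096                # a 100-block filesystem image
    image_blocks = image_size // 4096 - 1  # 99: index of the last usable block
    care_map_range = "0-{}".format(image_blocks)
    # The inclusive range "0-99" covers exactly the 100 filesystem blocks and
    # excludes any verity metadata appended after them in the image file.
    assert care_map_range == "0-99"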
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 16cab4f..17d3030 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -96,12 +96,17 @@
from xml.etree import ElementTree
import add_img_to_target_files
+import build_image
import build_super_image
import check_target_files_vintf
import common
import img_from_target_files
import find_shareduid_violation
import ota_from_target_files
+import sparse_img
+import verity_utils
+
+from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
logger = logging.getLogger(__name__)
@@ -355,8 +360,9 @@
' includes %s.', partition, partition)
has_error = True
- if ('dynamic_partition_list' in framework_misc_info_keys) or (
- 'super_partition_groups' in framework_misc_info_keys):
+ if ('dynamic_partition_list'
+ in framework_misc_info_keys) or ('super_partition_groups'
+ in framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -447,8 +453,8 @@
merged_dict[key] = framework_dict[key]
# Merge misc info keys used for Dynamic Partitions.
- if (merged_dict.get('use_dynamic_partitions') == 'true') and (
- framework_dict.get('use_dynamic_partitions') == 'true'):
+ if (merged_dict.get('use_dynamic_partitions')
+ == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dict, vendor_dict=merged_dict)
merged_dict.update(merged_dynamic_partitions_dict)
@@ -733,6 +739,42 @@
return cmd
+def generate_care_map(partitions, output_target_files_dir):
+ """Generates a merged META/care_map.pb file in the output target files dir.
+
+ Depends on the info dict from META/misc_info.txt, as well as built images
+ within IMAGES/.
+
+ Args:
+ partitions: A list of partitions to potentially include in the care map.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
+ """
+ OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+ partition_image_map = {}
+ for partition in partitions:
+ image_path = os.path.join(output_target_files_dir, 'IMAGES',
+ '{}.img'.format(partition))
+ if os.path.exists(image_path):
+ partition_image_map[partition] = image_path
+ # Regenerated images should have their image_size property already set.
+ image_size_prop = '{}_image_size'.format(partition)
+ if image_size_prop not in OPTIONS.info_dict:
+ # Images copied directly from input target files packages will need
+ # their image sizes calculated.
+ partition_size = sparse_img.GetImagePartitionSize(image_path)
+ image_props = build_image.ImagePropFromGlobalDict(
+ OPTIONS.info_dict, partition)
+ verity_image_builder = verity_utils.CreateVerityImageBuilder(
+ image_props)
+ image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
+ OPTIONS.info_dict[image_size_prop] = image_size
+
+ AddCareMapForAbOta(
+ os.path.join(output_target_files_dir, 'META', 'care_map.pb'),
+ PARTITIONS_WITH_CARE_MAP, partition_image_map)
+
+
def process_special_cases(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
@@ -1087,12 +1129,14 @@
if not output_target_files:
return
+  # Create the merged META/care_map.pb
+ generate_care_map(partition_map.keys(), output_target_files_temp_dir)
+
output_zip = create_target_files_archive(output_target_files,
output_target_files_temp_dir,
temp_dir)
# Create the IMG package from the merged target files package.
-
if output_img:
img_from_target_files.main([output_zip, output_img])
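
generate_care_map recomputes <partition>_image_size only for images copied verbatim from an input package, since regenerated images already carry the prop; the usable size comes from the verity builder, which subtracts hashtree/FEC overhead from the raw partition size. A self-contained sketch with a stand-in builder (the overhead figure is invented for illustration):

    class FakeVerityImageBuilder(object):
        """Stand-in for the result of verity_utils.CreateVerityImageBuilder."""
        def CalculateMaxImageSize(self, partition_size):
            # Invented overhead: reserve 64 blocks for hashtree/FEC metadata.
            return partition_size - 64 * 4096

    info_dict = {}
    partition_size = 1024 * 4096              # e.g. from the sparse image header
    if "vendor_image_size" not in info_dict:  # image copied from input package
        builder = FakeVerityImageBuilder()
        info_dict["vendor_image_size"] = builder.CalculateMaxImageSize(
            partition_size)
    assert info_dict["vendor_image_size"] < partition_size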
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 6b7a7db..a5850d3 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -21,9 +21,10 @@
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapForAbOta, AddPackRadioImages,
- CheckAbOtaImages, GetCareMap)
+ AddPackRadioImages,
+ CheckAbOtaImages)
from rangelib import RangeSet
+from common import AddCareMapForAbOta, GetCareMap
OPTIONS = common.OPTIONS
@@ -174,9 +175,9 @@
def test_AddCareMapForAbOta(self):
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -191,10 +192,10 @@
"""Partitions without care_map should be ignored."""
image_paths = self._test_AddCareMapForAbOta()
- AddCareMapForAbOta(
- None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(
+ care_map_file, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -226,9 +227,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys",
@@ -250,9 +251,9 @@
'vendor_verity_block_device': '/dev/block/vendor',
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
"unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
"unknown"]
@@ -281,9 +282,9 @@
),
}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.thumbprint",
"google/sailfish/123:user/dev-keys",
@@ -300,9 +301,9 @@
# Remove vendor_image_size to invalidate the care_map for vendor.img.
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
"ro.system.build.fingerprint",
"google/sailfish/12345:user/dev-keys"]
@@ -317,25 +318,26 @@
del OPTIONS.info_dict['system_image_size']
del OPTIONS.info_dict['vendor_image_size']
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
- self.assertFalse(
- os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')))
+ self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_verityNotEnabled(self):
"""No care_map.pb should be generated if verity not enabled."""
image_paths = self._test_AddCareMapForAbOta()
OPTIONS.info_dict = {}
- AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
self.assertFalse(os.path.exists(care_map_file))
def test_AddCareMapForAbOta_missingImageFile(self):
"""Missing image file should be considered fatal."""
image_paths = self._test_AddCareMapForAbOta()
image_paths['vendor'] = ''
- self.assertRaises(AssertionError, AddCareMapForAbOta, None,
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ self.assertRaises(common.ExternalError, AddCareMapForAbOta, care_map_file,
['system', 'vendor'], image_paths)
@test_utils.SkipIfExternalToolsUnavailable()
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index dd5f8ec..236f9ed 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -258,30 +258,20 @@
props.put("ro.board.first_api_level","25")
# ro.board.first_api_level must be less than or equal to the sdk version
- self.assertFalse(validate_and_add_grf_props(props, 20))
- self.assertTrue(validate_and_add_grf_props(props, 26))
- # ro.board.api_level is automatically set
- self.assertEqual(props.get_value("ro.board.api_level"), "25")
+ self.assertFalse(validate_grf_props(props, 20))
+ self.assertTrue(validate_grf_props(props, 26))
+ self.assertTrue(validate_grf_props(props, 35))
- props.get_all_props()[-1].make_as_comment()
- self.assertTrue(validate_and_add_grf_props(props, 35))
- # ro.board.api_level is automatically set to the required GRF version
- self.assertEqual(props.get_value("ro.board.api_level"), "33")
-
- props.get_all_props()[-1].make_as_comment()
# manually set ro.board.api_level to an invalid value
props.put("ro.board.api_level","20")
- self.assertFalse(validate_and_add_grf_props(props, 26))
+ self.assertFalse(validate_grf_props(props, 26))
props.get_all_props()[-1].make_as_comment()
# manually set ro.board.api_level to a valid value
props.put("ro.board.api_level","26")
- self.assertTrue(validate_and_add_grf_props(props, 26))
+ self.assertTrue(validate_grf_props(props, 26))
# ro.board.api_level must be less than or equal to the sdk version
- self.assertFalse(validate_and_add_grf_props(props, 25))
- # ro.board.api_level must be greater than or equal to the required GRF
- # version
- self.assertFalse(validate_and_add_grf_props(props, 30))
+ self.assertFalse(validate_grf_props(props, 25))
if __name__ == '__main__':
unittest.main(verbosity=2)