Merge "Workaround Legancy GSI for some Wifi firmware"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 398a006..a9093d2 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -503,6 +503,15 @@
# Remove obsolete recovery etc files
$(call add-clean-step, rm -rf $(TARGET_RECOVERY_ROOT_OUT)/etc)
+# Remove *_OUT_INTERMEDIATE_LIBRARIES
+$(call add-clean-step, rm -rf $(addsuffix /lib,\
+ $(HOST_OUT_INTERMEDIATES) $(2ND_HOST_OUT_INTERMEDIATES) \
+ $(HOST_CROSS_OUT_INTERMEDIATES) $(2ND_HOST_CROSS_OUT_INTERMEDIATES) \
+ $(TARGET_OUT_INTERMEDIATES) $(2ND_TARGET_OUT_INTERMEDIATES)))
+
+# Remove strip.sh intermediates to save space
+$(call add-clean-step, find $(OUT_DIR) \( -name "*.so.debug" -o -name "*.so.dynsyms" -o -name "*.so.funcsyms" -o -name "*.so.keep_symbols" -o -name "*.so.mini_debuginfo.xz" \) -print0 | xargs -0 rm -f)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/core/Makefile b/core/Makefile
index 23dc59a..f7f6f35 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -42,11 +42,14 @@
$(eval unique_product_copy_files_destinations += $(_dest))))
# Dump a list of overridden (and ignored) PRODUCT_COPY_FILES entries
-$(file >$(PRODUCT_OUT)/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-ifdef dist_goal
-$(file >$(DIST_DIR)/logs/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-endif
+pcf_ignored_file := $(PRODUCT_OUT)/product_copy_files_ignored.txt
+$(pcf_ignored_file): PRIVATE_IGNORED := $(sort $(product_copy_files_ignored))
+$(pcf_ignored_file):
+ echo "$(PRIVATE_IGNORED)" | tr " " "\n" >$@
+$(call dist-for-goals,droidcore,$(pcf_ignored_file):logs/$(notdir $(pcf_ignored_file)))
+
+pcf_ignored_file :=
product_copy_files_ignored :=
unique_product_copy_files_pairs :=
unique_product_copy_files_destinations :=
@@ -685,7 +688,7 @@
# for future OTA packages installed by this system. Actual product
# deliverables will be re-signed by hand. We expect this file to
# exist with the suffixes ".x509.pem" and ".pk8".
-DEFAULT_KEY_CERT_PAIR := $(DEFAULT_SYSTEM_DEV_CERTIFICATE)
+DEFAULT_KEY_CERT_PAIR := $(strip $(DEFAULT_SYSTEM_DEV_CERTIFICATE))
# Rules that need to be present for all targets, even
@@ -847,13 +850,6 @@
--os_version $(PLATFORM_VERSION) \
--os_patch_level $(PLATFORM_SECURITY_PATCH)
-# BOARD_USES_RECOVERY_AS_BOOT = true must have BOARD_BUILD_SYSTEM_ROOT_IMAGE = true.
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(error BOARD_BUILD_SYSTEM_ROOT_IMAGE must be enabled for BOARD_USES_RECOVERY_AS_BOOT.)
-endif
-endif
-
# We build recovery as boot image if BOARD_USES_RECOVERY_AS_BOOT is true.
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
@@ -1147,12 +1143,12 @@
# This rule adds to ALL_DEFAULT_INSTALLED_MODULES, so it needs to come
# before the rules that use that variable to build the image.
ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/security/otacerts.zip
-$(TARGET_OUT_ETC)/security/otacerts.zip: KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
-$(TARGET_OUT_ETC)/security/otacerts.zip: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR)) | $(ZIPTIME)
+$(TARGET_OUT_ETC)/security/otacerts.zip: PRIVATE_CERT := $(DEFAULT_KEY_CERT_PAIR).x509.pem
+$(TARGET_OUT_ETC)/security/otacerts.zip: $(SOONG_ZIP)
+$(TARGET_OUT_ETC)/security/otacerts.zip: $(DEFAULT_KEY_CERT_PAIR).x509.pem
$(hide) rm -f $@
$(hide) mkdir -p $(dir $@)
- $(hide) zip -qjX $@ $<
- $(remove-timestamps-from-package)
+ $(hide) $(SOONG_ZIP) -o $@ -C $(dir $(PRIVATE_CERT)) -f $(PRIVATE_CERT)
# Carry the public key for update_engine if it's a non-IoT target that
# uses the AB updater. We use the same key as otacerts but in RSA public key
@@ -1160,7 +1156,7 @@
ifeq ($(AB_OTA_UPDATER),true)
ifneq ($(PRODUCT_IOT),true)
ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem
-$(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR))
+$(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem: $(DEFAULT_KEY_CERT_PAIR).x509.pem
$(hide) rm -f $@
$(hide) mkdir -p $(dir $@)
$(hide) openssl x509 -pubkey -noout -in $< > $@
@@ -1552,17 +1548,14 @@
# (BOARD_USES_FULL_RECOVERY_IMAGE = true);
# b) We build a single image that contains boot and recovery both - no recovery image to install
# (BOARD_USES_RECOVERY_AS_BOOT = true);
-# c) We build the root into system image - not needing the resource file as we do bsdiff
+# c) We mount the system image as / and therefore do not have a ramdisk in boot.img
# (BOARD_BUILD_SYSTEM_ROOT_IMAGE = true).
# d) We include the recovery DTBO image within recovery - not needing the resource file as we
# do bsdiff because boot and recovery will contain a different number of entries
# (BOARD_INCLUDE_RECOVERY_DTBO = true).
-# Note that condition b) implies condition c), because of the earlier check in this file:
-# "BOARD_USES_RECOVERY_AS_BOOT = true must have BOARD_BUILD_SYSTEM_ROOT_IMAGE = true" (not vice
-# versa though).
-ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) \
- $(BOARD_INCLUDE_RECOVERY_DTBO)))
+ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_USES_RECOVERY_AS_BOOT) \
+ $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO)))
# Named '.dat' so we don't attempt to use imgdiff for patching it.
RECOVERY_RESOURCE_ZIP := $(TARGET_OUT)/etc/recovery-resource.dat
else
@@ -1928,7 +1921,7 @@
build/make/tools/releasetools/build_image.py \
$(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
$(systemimage_intermediates)/generated_system_image_info.txt \
- || ( mkdir -p $(DIST_DIR); cp $(INSTALLED_FILES_FILE) $(DIST_DIR)/installed-files-rescued.txt; \
+ || ( mkdir -p $$(DIST_DIR); cp $(INSTALLED_FILES_FILE) $$(DIST_DIR)/installed-files-rescued.txt; \
exit 1 )
endef
@@ -2036,6 +2029,7 @@
## PDK_PLATFORM_ZIP_PRODUCT_BINARIES is used to store specified files in platform.zip.
## The variable will typically be set from BoardConfig.mk.
## Files under the out dir will be rejected to prevent possible conflicts with other rules.
+ifneq (,$(BUILD_PLATFORM_ZIP))
pdk_odex_javalibs := $(strip $(foreach m,$(DEXPREOPT.MODULES.JAVA_LIBRARIES),\
$(if $(filter $(DEXPREOPT.$(m).INSTALLED),$(ALL_DEFAULT_INSTALLED_MODULES)),$(m))))
pdk_odex_apps := $(strip $(foreach m,$(DEXPREOPT.MODULES.APPS),\
@@ -2075,39 +2069,45 @@
$(INSTALLED_PLATFORM_ZIP): PRIVATE_DEX_FILES := $(pdk_classes_dex)
$(INSTALLED_PLATFORM_ZIP): PRIVATE_ODEX_CONFIG := $(pdk_odex_config_mk)
-$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_SYSTEMIMAGE_FILES) $(pdk_odex_config_mk)
+$(INSTALLED_PLATFORM_ZIP) : $(SOONG_ZIP)
+# dependencies for the other partitions are defined below after their file lists
+# are known
+$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_SYSTEMIMAGE_FILES) $(pdk_classes_dex) $(pdk_odex_config_mk)
$(call pretty,"Platform zip package: $(INSTALLED_PLATFORM_ZIP)")
- $(hide) rm -f $@
- $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
- $(TARGET_COPY_OUT_SYSTEM) \
- $(patsubst $(PRODUCT_OUT)/%, %, $(TARGET_OUT_NOTICE_FILES)) \
- $(addprefix symbols/,$(PDK_SYMBOL_FILES_LIST))
+ rm -f $@ $@.lst
+ echo "-C $(PRODUCT_OUT)" >> $@.lst
+ echo "-D $(TARGET_OUT)" >> $@.lst
+ echo "-D $(TARGET_OUT_NOTICE_FILES)" >> $@.lst
+ echo "$(addprefix -f $(TARGET_OUT_UNSTRIPPED)/,$(PDK_SYMBOL_FILES_LIST))" >> $@.lst
ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
- $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
- $(TARGET_COPY_OUT_VENDOR)
+ echo "-D $(TARGET_OUT_VENDOR)" >> $@.lst
endif
ifdef BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE
- $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
- $(TARGET_COPY_OUT_PRODUCT)
+ echo "-D $(TARGET_OUT_PRODUCT)" >> $@.lst
endif
ifdef BOARD_PRODUCT_SERVICESIMAGE_FILE_SYSTEM_TYPE
- $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
- $(TARGET_COPY_OUT_PRODUCT_SERVICES)
+ echo "-D $(TARGET_OUT_PRODUCT_SERVICES)" >> $@.lst
endif
ifdef BOARD_ODMIMAGE_FILE_SYSTEM_TYPE
- $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
- $(TARGET_COPY_OUT_ODM)
+ echo "-D $(TARGET_OUT_ODM)" >> $@.lst
endif
ifneq ($(PDK_PLATFORM_JAVA_ZIP_CONTENTS),)
- $(hide) cd $(OUT_DIR) && zip -qryX $(patsubst $(OUT_DIR)/%,%,$@) $(PDK_PLATFORM_JAVA_ZIP_CONTENTS)
+ echo "-C $(OUT_DIR)" >> $@.lst
+ for f in $(filter-out $(PRIVATE_DEX_FILES),$(addprefix -f $(OUT_DIR)/,$(PDK_PLATFORM_JAVA_ZIP_CONTENTS))); do \
+ if [ -e $$f ]; then \
+ echo "-f $$f"; \
+ fi \
+ done >> $@.lst
endif
ifneq ($(PDK_PLATFORM_ZIP_PRODUCT_BINARIES),)
- $(hide) zip -qryX $@ $(PDK_PLATFORM_ZIP_PRODUCT_BINARIES)
+ echo "-C . $(addprefix -f ,$(PDK_PLATFORM_ZIP_PRODUCT_BINARIES))" >> $@.lst
endif
@# Add dex-preopt files and config.
- $(if $(PRIVATE_DEX_FILES),$(hide) cd $(OUT_DIR) && zip -qryX $(patsubst $(OUT_DIR)/%,%,$@ $(PRIVATE_DEX_FILES)))
- $(hide) touch $(PRODUCT_OUT)/pdk.mk
- $(hide) zip -qryXj $@ $(PRIVATE_ODEX_CONFIG) $(PRODUCT_OUT)/pdk.mk
+ $(if $(PRIVATE_DEX_FILES),\
+ echo "-C $(OUT_DIR) $(addprefix -f ,$(PRIVATE_DEX_FILES))") >> $@.lst
+ touch $(PRODUCT_OUT)/pdk.mk
+ echo "-C $(PRODUCT_OUT) -f $(PRIVATE_ODEX_CONFIG) -f $(PRODUCT_OUT)/pdk.mk" >> $@.lst
+ $(SOONG_ZIP) --ignore_missing_files -o $@ @$@.lst
.PHONY: platform
platform: $(INSTALLED_PLATFORM_ZIP)
@@ -2120,6 +2120,8 @@
$(call dist-for-goals, platform platform-java, $(INSTALLED_PLATFORM_ZIP))
endif
+endif # BUILD_PLATFORM_ZIP
+
# -----------------------------------------------------------------
## boot tarball
define build-boottarball-target
@@ -2658,12 +2660,12 @@
endif
INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES := \
- $(BOARD_AVB_VBMETA_MAINLINE) \
+ $(BOARD_AVB_VBMETA_SYSTEM) \
$(BOARD_AVB_VBMETA_VENDOR)
# Not allowing the same partition to appear in multiple groups.
ifneq ($(words $(sort $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES))),$(words $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES)))
- $(error BOARD_AVB_VBMETA_MAINLINE and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
+ $(error BOARD_AVB_VBMETA_SYSTEM and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
endif
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
@@ -2676,7 +2678,7 @@
ODM_FOOTER_ARGS := BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS
# Helper function that checks and sets required build variables for an AVB chained partition.
-# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_mainline.
+# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_system.
define _check-and-set-avb-chain-args
$(eval part := $(1))
$(eval PART=$(call to-upper,$(part)))
@@ -2699,7 +2701,7 @@
--chain_partition $(part):$($(_rollback_index_location)):$(AVB_CHAIN_KEY_DIR)/$(part).avbpubkey)
# Set rollback_index via footer args for non-chained vbmeta image. Chained vbmeta image will pick up
-# the index via a separate flag (e.g. BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX).
+# the index via a separate flag (e.g. BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX).
$(if $(filter $(part),$(part:vbmeta_%=%)),\
$(eval _footer_args := $(PART)_FOOTER_ARGS) \
$(eval $($(_footer_args)) += --rollback_index $($(_rollback_index))))
@@ -2751,9 +2753,9 @@
$(eval $(call check-and-set-avb-args,recovery))
endif
-# Not using INSTALLED_VBMETA_MAINLINEIMAGE_TARGET as it won't be set yet.
-ifdef BOARD_AVB_VBMETA_MAINLINE
-$(eval $(call check-and-set-avb-args,vbmeta_mainline))
+# Not using INSTALLED_VBMETA_SYSTEMIMAGE_TARGET as it won't be set yet.
+ifdef BOARD_AVB_VBMETA_SYSTEM
+$(eval $(call check-and-set-avb-args,vbmeta_system))
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2773,12 +2775,12 @@
endif
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --padding_size 4096
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --padding_size 4096
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --padding_size 4096
ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --set_hashtree_disabled_flag
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --set_hashtree_disabled_flag
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --set_hashtree_disabled_flag
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --set_hashtree_disabled_flag
endif
@@ -2786,9 +2788,9 @@
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --rollback_index $(BOARD_AVB_ROLLBACK_INDEX)
endif
-ifdef BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += \
- --rollback_index $(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX)
+ifdef BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += \
+ --rollback_index $(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX)
endif
ifdef BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX
@@ -2822,9 +2824,9 @@
$(if $(BOARD_AVB_RECOVERY_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_RECOVERY_KEY_PATH) \
--output $(1)/recovery.avbpubkey)
- $(if $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH),\
- $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
- --output $(1)/vbmeta_mainline.avbpubkey)
+ $(if $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH),\
+ $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
+ --output $(1)/vbmeta_system.avbpubkey)
$(if $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
--output $(1)/vbmeta_vendor.avbpubkey)
@@ -2832,11 +2834,11 @@
# Builds a chained VBMeta image. This VBMeta image will contain the descriptors for the partitions
# specified in BOARD_AVB_VBMETA_<NAME>. The built VBMeta image will be included into the top-level
-# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_MAINLINE
-# := system product_services`, `vbmeta_mainline.img` will be created that includes the descriptors
-# for `system.img` and `product_services.img`. `vbmeta_mainline.img` itself will be included into
+# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_SYSTEM
+# := system product_services`, `vbmeta_system.img` will be created that includes the descriptors
+# for `system.img` and `product_services.img`. `vbmeta_system.img` itself will be included into
# `vbmeta.img` as a chained partition.
-# $(1): VBMeta image name, such as "vbmeta_mainline", "vbmeta_vendor" etc.
+# $(1): VBMeta image name, such as "vbmeta_system", "vbmeta_vendor" etc.
# $(2): Output filename.
define build-chained-vbmeta-image
$(call pretty,"Target chained vbmeta image: $@")
@@ -2848,13 +2850,13 @@
--output $@
endef
-ifdef BOARD_AVB_VBMETA_MAINLINE
-INSTALLED_VBMETA_MAINLINEIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_mainline.img
-$(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET): \
+ifdef BOARD_AVB_VBMETA_SYSTEM
+INSTALLED_VBMETA_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_system.img
+$(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET): \
$(AVBTOOL) \
- $(call images-for-partitions,$(BOARD_AVB_VBMETA_MAINLINE)) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)
- $(call build-chained-vbmeta-image,vbmeta_mainline)
+ $(call images-for-partitions,$(BOARD_AVB_VBMETA_SYSTEM)) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)
+ $(call build-chained-vbmeta-image,vbmeta_system)
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2892,9 +2894,9 @@
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET) \
+ $(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET) \
$(INSTALLED_VBMETA_VENDORIMAGE_TARGET) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
$(BOARD_AVB_KEY_PATH)
$(build-vbmetaimage-target)
@@ -2917,7 +2919,7 @@
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
# BOARD_SUPER_PARTITION_SIZE must be defined to build super image.
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
@@ -2940,18 +2942,20 @@
--metadata-size 65536 \
--metadata-slots $(if $(1),2,1) \
--device-size $(BOARD_SUPER_PARTITION_SIZE) \
- $(foreach name,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- --partition $(name)$(1):$$($(UUIDGEN) $(name)$(1)):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
- $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
- $(if $(1), --partition $(name)_b:$$($(UUIDGEN) $(name)_b):readonly:0) \
- )
+ $(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
+ --group $(group):$(BOARD_$(call to-upper,$(group))_SIZE) \
+ $(foreach name,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST), \
+ --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0):$(group) \
+ $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
+ $(if $(1), --partition $(name)_b:readonly:0:$(group)) \
+ ))
endef
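+# As an illustrative sketch (hypothetical 4 GiB super partition, a single group
+# named "example" holding system and vendor, non-A/B, images included), the
+# arguments above expand to roughly:
+#   --metadata-size 65536 --metadata-slots 1 --device-size 4294967296 \
+#   --group example:<group size> \
+#   --partition system:readonly:<system size>:example --image system=<system.img> \
+#   --partition vendor:readonly:<vendor size>:example --image vendor=<vendor.img>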
# $(1): output image path
# $(2): slot A suffix (_a or empty)
# $(3): include images or not (true or empty)
define build-superimage-target
- $(HOST_OUT_EXECUTABLES)/lpmake \
+ $(LPMAKE) \
$(call build-superimage-target-args,$(2),$(3)) \
--output $(1)
endef
@@ -2978,30 +2982,61 @@
# Do not check for apps-only build
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
-ifdef BOARD_SUPER_PARTITION_SIZE
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-droid_targets: check_android_partition_sizes
+droid_targets: check-all-partition-sizes
-.PHONY: check_android_partition_sizes
+.PHONY: check-all-partition-sizes check-all-partition-sizes-nodeps
# Add image dependencies so that generated_*_image_info.txt are written before checking.
-check_android_partition_sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+check-all-partition-sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
-check_android_partition_sizes:
- partition_size_list="$(call read-size-of-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))"; \
- sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
- if [ $$(( $${sum_sizes_expr} )) -gt $(BOARD_SUPER_PARTITION_SIZE) ]; then \
- echo 'The sum of sizes of all logical partitions is larger than BOARD_SUPER_PARTITION_SIZE.'; \
- echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $(BOARD_SUPER_PARTITION_SIZE); \
- exit 1; \
- else \
- echo 'The sum of sizes of all logical partitions is within BOARD_SUPER_PARTITION_SIZE:' \
- $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $(BOARD_SUPER_PARTITION_SIZE); \
- fi
+# $(1): human-readable max size string
+# $(2): max size expression
+# $(3): list of partition names
+define check-sum-of-partition-sizes
+ partition_size_list="$(call read-size-of-partitions,$(3))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $(2) )) ]; then \
+ echo "The sum of sizes of [$(strip $(3))] is larger than $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' "$(2)" '==' $$(( $(2) )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(3))] is within $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' "$(2)" '==' $$(( $(2) )); \
+ fi
+endef
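+# E.g. if read-size-of-partitions returns "100 200", sed rewrites the list into
+# the expression "100+200", and the shell compares $(( 100+200 )), i.e. 300,
+# against the evaluated max size expression from $(2).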
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
-endif # BOARD_SUPER_PARTITION_SIZE
+define check-all-partition-sizes-target
+ # Check sum(all partitions) <= super partition (/ 2 for A/B)
+ $(if $(BOARD_SUPER_PARTITION_SIZE),$(if $(BOARD_SUPER_PARTITION_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_SUPER_PARTITION_SIZE$(if $(filter true,$(AB_OTA_UPDATER)), / 2), \
+ $(BOARD_SUPER_PARTITION_SIZE)$(if $(filter true,$(AB_OTA_UPDATER)), / 2),$(BOARD_SUPER_PARTITION_PARTITION_LIST))))
+
+ # For each group, check sum(partitions in group) <= group size
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(BOARD_$(group)_SIZE),$(if $(BOARD_$(group)_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_$(group)_SIZE,$(BOARD_$(group)_SIZE),$(BOARD_$(group)_PARTITION_LIST)))))
+
+ # Check sum(all group sizes) <= super partition (/ 2 for A/B)
+ if [[ ! -z $(BOARD_SUPER_PARTITION_SIZE) ]]; then \
+ group_size_list="$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)),$(BOARD_$(group)_SIZE))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${group_size_list}"); \
+ max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
+ max_size_expr="$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}"; \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $${max_size_expr} )) ]; then \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is larger than BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is within BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ fi \
+ fi
+endef
+
+check-all-partition-sizes check-all-partition-sizes-nodeps:
+ $(call check-all-partition-sizes-target)
+
endif # PRODUCT_BUILD_SUPER_PARTITION
endif # TARGET_BUILD_APPS
@@ -3015,33 +3050,42 @@
# -----------------------------------------------------------------
# host tools needed to build dist and OTA packages
-build_ota_package := true
-ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
-build_ota_package := false
-endif
ifeq ($(BUILD_OS),darwin)
-build_ota_package := false
-endif
-ifneq ($(strip $(SANITIZE_TARGET)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_PRODUCT),sdk)
-build_ota_package := false
-endif
-ifneq ($(filter generic%,$(TARGET_DEVICE)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_NO_KERNEL),true)
-build_ota_package := false
-endif
-ifeq ($(recovery_fstab),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_BUILD_PDK),true)
-build_ota_package := false
+ build_ota_package := false
+ build_otatools_package := false
+else
+ # set build_ota_package, and allow opt-out below
+ build_ota_package := true
+ ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
+ build_ota_package := false
+ endif
+ ifneq ($(strip $(SANITIZE_TARGET)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_PRODUCT),sdk)
+ build_ota_package := false
+ endif
+ ifneq ($(filter generic%,$(TARGET_DEVICE)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_NO_KERNEL),true)
+ build_ota_package := false
+ endif
+ ifeq ($(recovery_fstab),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_BUILD_PDK),true)
+ build_ota_package := false
+ endif
+
+ # set build_otatools_package, and allow opt-out below
+ build_otatools_package := true
+ ifeq ($(TARGET_SKIP_OTATOOLS_PACKAGE),true)
+ build_otatools_package := false
+ endif
endif
-ifeq ($(build_ota_package),true)
+ifeq ($(build_otatools_package),true)
OTATOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \
$(HOST_OUT_EXECUTABLES)/aapt \
$(HOST_OUT_EXECUTABLES)/checkvintf \
@@ -3130,13 +3174,23 @@
OTATOOLS_DEPS := \
system/extras/ext4_utils/mke2fs.conf \
- $(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
- -name "atx_metadata.bin")) \
- $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print)) \
$(sort $(shell find build/target/product/security -type f -name "*.x509.pem" -o -name "*.pk8" -o \
- -name verity_key)) \
+ -name verity_key))
+
+ifneq (,$(wildcard device))
+OTATOOLS_DEPS += \
$(sort $(shell find device $(wildcard vendor) -type f -name "*.pk8" -o -name "verifiedboot*" -o \
-name "*.x509.pem" -o -name "oem*.prop"))
+endif
+ifneq (,$(wildcard external/avb))
+OTATOOLS_DEPS += \
+ $(sort $(shell find external/avb/test/data -type f -name "testkey_*.pem" -o \
+ -name "atx_metadata.bin"))
+endif
+ifneq (,$(wildcard system/update_engine))
+OTATOOLS_DEPS += \
+ $(sort $(shell find system/update_engine/scripts -name "*.pyc" -prune -o -type f -print))
+endif
OTATOOLS_RELEASETOOLS := \
$(sort $(shell find build/make/tools/releasetools -name "*.pyc" -prune -o -type f))
@@ -3146,20 +3200,20 @@
$(sort $(shell find external/vboot_reference/tests/devkeys -type f))
endif
-$(BUILT_OTATOOLS_PACKAGE): $(OTATOOLS) $(OTATOOLS_DEPS) $(OTATOOLS_RELEASETOOLS) | $(ACP)
+$(BUILT_OTATOOLS_PACKAGE): $(OTATOOLS) $(OTATOOLS_DEPS) $(OTATOOLS_RELEASETOOLS) $(SOONG_ZIP)
@echo "Package OTA tools: $@"
$(hide) rm -rf $@ $(zip_root)
$(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools
$(call copy-files-with-structure,$(OTATOOLS),$(HOST_OUT)/,$(zip_root))
- $(hide) $(ACP) -r -d -p build/make/tools/releasetools/* $(zip_root)/releasetools
+ $(hide) cp -r -d -p build/make/tools/releasetools/* $(zip_root)/releasetools
$(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
- $(hide) (cd $(zip_root) && zip -qryX $(abspath $@) *)
- $(hide) echo $(OTATOOLS_DEPS) | xargs zip -qryX $(abspath $@)>/dev/null || true
+ $(hide) $(SOONG_ZIP) -o $@ -C $(zip_root) -D $(zip_root) \
+ -C . $(addprefix -f ,$(OTATOOLS_DEPS))
.PHONY: otatools-package
otatools-package: $(BUILT_OTATOOLS_PACKAGE)
-endif # build_ota_package
+endif # build_otatools_package
# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
@@ -3471,16 +3525,16 @@
$(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
endif # BOARD_AVB_RECOVERY_KEY_PATH
-ifneq (,$(strip $(BOARD_AVB_VBMETA_MAINLINE)))
- $(hide) echo "avb_vbmeta_mainline=$(BOARD_AVB_VBMETA_MAINLINE)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_key_path=$(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_algorithm=$(BOARD_AVB_VBMETA_MAINLINE_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_rollback_index_location=$(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_MAINLINE
+ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
+ $(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
+endif # BOARD_AVB_VBMETA_SYSTEM
ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
@@ -3578,12 +3632,12 @@
endif
@# ROOT always contains the files for the root under normal boot.
$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- @# BOOT/RAMDISK exists only if additionally using BOARD_USES_RECOVERY_AS_BOOT.
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ @# BOOT/RAMDISK exists and contains the ramdisk for recovery if using BOARD_USES_RECOVERY_AS_BOOT.
$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
endif
-else # BOARD_BUILD_SYSTEM_ROOT_IMAGE != true
+ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+ @# BOOT/RAMDISK also exists and contains the first stage ramdisk if not using BOARD_BUILD_SYSTEM_ROOT_IMAGE.
$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
endif
ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
@@ -3601,7 +3655,7 @@
ifdef BUILT_VENDOR_MATRIX
$(hide) cp $(BUILT_VENDOR_MATRIX) $(zip_root)/META/vendor_matrix.xml
endif
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
$(hide) echo "super_size=$(BOARD_SUPER_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "lpmake=$(notdir $(LPMAKE))" >> $(zip_root)/META/misc_info.txt
$(hide) echo -n "lpmake_args=" >> $(zip_root)/META/misc_info.txt
@@ -3885,6 +3939,18 @@
odmimage: $(INSTALLED_QEMU_ODMIMAGE)
droidcore: $(INSTALLED_QEMU_ODMIMAGE)
endif
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+QEMU_VERIFIED_BOOT_PARAMS := $(PRODUCT_OUT)/VerifiedBootParams.textproto
+MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_verified_boot_params.sh
+$(QEMU_VERIFIED_BOOT_PARAMS): $(INSTALLED_QEMU_SYSTEMIMAGE) $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(SGDISK_HOST) $(AVBTOOL)
+ @echo Creating $@
+ (export SGDISK=$(SGDISK_HOST) AVBTOOL=$(AVBTOOL); $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(INSTALLED_QEMU_SYSTEMIMAGE) $(QEMU_VERIFIED_BOOT_PARAMS))
+
+
+systemimage: $(QEMU_VERIFIED_BOOT_PARAMS)
+droidcore: $(QEMU_VERIFIED_BOOT_PARAMS)
+endif
endif
# -----------------------------------------------------------------
# The emulator package
@@ -3985,6 +4051,7 @@
$(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_QEMU_SYSTEMIMAGE) \
$(INSTALLED_QEMU_VENDORIMAGE) \
+ $(QEMU_VERIFIED_BOOT_PARAMS) \
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_SDK_BUILD_PROP_TARGET) \
diff --git a/core/android_manifest.mk b/core/android_manifest.mk
index 8e8bfec..8608ca1 100644
--- a/core/android_manifest.mk
+++ b/core/android_manifest.mk
@@ -72,8 +72,15 @@
my_manifest_fixer_flags += --uses-non-sdk-api
endif
$(fixed_android_manifest): PRIVATE_MANIFEST_FIXER_FLAGS := $(my_manifest_fixer_flags)
+# These two libs are added as optional dependencies (<uses-library> with
+# android:required set to false) because they did not exist on pre-P devices,
+# but classes in them were in bootclasspath jars, etc. Making them hard
+# dependencies (android:required=true) would prevent apps from being installed
+# on such legacy devices.
+$(fixed_android_manifest): PRIVATE_OPTIONAL_SDK_LIB_NAMES := android.test.base android.test.mock
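+# As an illustration (assuming an app whose exported SDK libs include
+# android.test.base), the fixer receives --optional-uses-library for it instead
+# of --uses-library, so the fixed manifest carries an entry like
+#   <uses-library android:name="android.test.base" android:required="false" />
+# rather than a hard requirement.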
$(fixed_android_manifest): $(MANIFEST_FIXER)
$(fixed_android_manifest): $(main_android_manifest)
+ echo $(PRIVATE_OPTIONAL_SDK_LIB_NAMES) | tr ' ' '\n' > $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
@echo "Fix manifest: $@"
$(MANIFEST_FIXER) \
--minSdkVersion $(PRIVATE_MIN_SDK_VERSION) \
@@ -81,5 +88,8 @@
--raise-min-sdk-version \
$(PRIVATE_MANIFEST_FIXER_FLAGS) \
$(if $(PRIVATE_EXPORTED_SDK_LIBS_FILE),\
- $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ')) \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -v -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ') \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --optional-uses-library\ /' | tr '\n' ' ') \
+ ) \
$< $@
+ rm $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
diff --git a/core/aux_config.mk b/core/aux_config.mk
index 6a5cd63..d382ff5 100644
--- a/core/aux_config.mk
+++ b/core/aux_config.mk
@@ -47,7 +47,6 @@
$(eval AUX_OUT_INTERMEDIATES_$(1) := $(AUX_OUT_$(1))/obj) \
$(eval AUX_OUT_COMMON_INTERMEDIATES_$(1) := $(AUX_COMMON_OUT_ROOT_$(1))/obj) \
$(eval AUX_OUT_HEADERS_$(1) := $(AUX_OUT_INTERMEDIATES_$(1))/include) \
-$(eval AUX_OUT_INTERMEDIATE_LIBRARIES_$(1) := $(AUX_OUT_INTERMEDIATES_$(1))/lib) \
$(eval AUX_OUT_NOTICE_FILES_$(1) := $(AUX_OUT_INTERMEDIATES_$(1))/NOTICE_FILES) \
$(eval AUX_OUT_FAKE_$(1) := $(AUX_OUT_$(1))/fake_packages) \
$(eval AUX_OUT_GEN_$(1) := $(AUX_OUT_$(1))/gen) \
@@ -78,7 +77,6 @@
$(eval AUX_OUT_INTERMEDIATES := $(AUX_OUT_INTERMEDIATES_$(1))) \
$(eval AUX_OUT_COMMON_INTERMEDIATES := $(AUX_OUT_COMMON_INTERMEDIATES_$(1))) \
$(eval AUX_OUT_HEADERS := $(AUX_OUT_HEADERS_$(1))) \
-$(eval AUX_OUT_INTERMEDIATE_LIBRARIES := $(AUX_OUT_INTERMEDIATE_LIBRARIES_$(1))) \
$(eval AUX_OUT_NOTICE_FILES := $(AUX_OUT_NOTICE_FILES_$(1))) \
$(eval AUX_OUT_FAKE := $(AUX_OUT_FAKE_$(1))) \
$(eval AUX_OUT_GEN := $(AUX_OUT_GEN_$(1))) \
diff --git a/core/aux_executable.mk b/core/aux_executable.mk
index daf30e7..5395e61 100644
--- a/core/aux_executable.mk
+++ b/core/aux_executable.mk
@@ -80,7 +80,6 @@
$(linked_module) \
# Define PRIVATE_ variables from global vars
-$(linked_module): PRIVATE_TARGET_OUT_INTERMEDIATE_LIBRARIES := $(AUX_OUT_INTERMEDIATE_LIBRARIES)
$(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD)
ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 4db8f27..57fd818 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -295,16 +295,6 @@
LOCAL_BUILT_MODULE := $(intermediates)/$(my_built_module_stem)
-# OVERRIDE_BUILT_MODULE_PATH is only allowed to be used by the
-# internal SHARED_LIBRARIES build files.
-OVERRIDE_BUILT_MODULE_PATH := $(strip $(OVERRIDE_BUILT_MODULE_PATH))
-ifdef OVERRIDE_BUILT_MODULE_PATH
- ifneq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
- $(error $(LOCAL_PATH): Illegal use of OVERRIDE_BUILT_MODULE_PATH)
- endif
- $(eval $(call copy-one-file,$(LOCAL_BUILT_MODULE),$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem)))
-endif
-
ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
# Apk and its attachments reside in its own subdir.
ifeq ($(LOCAL_MODULE_CLASS),APPS)
@@ -343,11 +333,6 @@
.KATI_RESTAT: $(LOCAL_BUILT_MODULE).toc
# Build .toc file when using mm, mma, or make $(my_register_name)
$(my_all_targets): $(LOCAL_BUILT_MODULE).toc
-
-ifdef OVERRIDE_BUILT_MODULE_PATH
-$(eval $(call copy-one-file,$(LOCAL_BUILT_MODULE).toc,$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem).toc))
-$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem).toc: $(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem)
-endif
endif
endif
@@ -774,8 +759,6 @@
ALL_MODULES.$(my_register_name).MODULE_NAME := $(LOCAL_MODULE)
ALL_MODULES.$(my_register_name).COMPATIBILITY_SUITES := $(LOCAL_COMPATIBILITY_SUITE)
ALL_MODULES.$(my_register_name).TEST_CONFIG := $(test_config)
-ALL_MODULES.$(my_register_name).SRCS := \
- $(ALL_MODULES.$(my_register_name).SRCS) $(LOCAL_SRC_FILES)
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
diff --git a/core/binary.mk b/core/binary.mk
index a594149..07fb48a 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -46,8 +46,8 @@
my_cflags := $(LOCAL_CFLAGS)
my_conlyflags := $(LOCAL_CONLYFLAGS)
my_cppflags := $(LOCAL_CPPFLAGS)
-my_cflags_no_override := $(GLOBAL_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override := $(GLOBAL_CPPFLAGS_NO_OVERRIDE)
+my_cflags_no_override := $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
+my_cppflags_no_override := $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_ldflags := $(LOCAL_LDFLAGS)
my_ldlibs := $(LOCAL_LDLIBS)
my_asflags := $(LOCAL_ASFLAGS)
@@ -281,13 +281,18 @@
# all code is position independent, and then those warnings get promoted to
# errors.
ifneq ($(LOCAL_NO_PIC),true)
-ifneq ($($(my_prefix)OS),windows)
-ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
-my_cflags += -fPIE
-else
-my_cflags += -fPIC
-endif
-endif
+ ifneq ($($(my_prefix)OS),windows)
+ ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
+ my_cflags += -fPIE
+ ifndef BUILD_HOST_static
+ ifneq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
+ my_ldflags += -pie
+ endif
+ endif
+ else
+ my_cflags += -fPIC
+ endif
+ endif
endif
ifdef LOCAL_IS_HOST_MODULE
@@ -621,8 +626,6 @@
# actually used (although they are usually empty).
arm_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(arm_objects_mode)_CFLAGS)
normal_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(normal_objects_mode)_CFLAGS)
-arm_objects_cflags := $(call convert-to-clang-flags,$(arm_objects_cflags))
-normal_objects_cflags := $(call convert-to-clang-flags,$(normal_objects_cflags))
else
arm_objects_mode :=
@@ -1480,9 +1483,9 @@
ifneq ($(LOCAL_SDK_VERSION),)
built_shared_libraries := \
- $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
- $(addsuffix $(so_suffix), \
- $(my_shared_libraries)))
+ $(foreach lib,$(my_shared_libraries), \
+ $(call intermediates-dir-for, \
+ SHARED_LIBRARIES,$(lib),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/$(lib)$(so_suffix))
built_shared_library_deps := $(addsuffix .toc, $(built_shared_libraries))
# Add the NDK libraries to the built module dependency
@@ -1506,9 +1509,9 @@
else
built_shared_libraries := \
- $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
- $(addsuffix $(so_suffix), \
- $(installed_shared_library_module_names)))
+ $(foreach lib,$(installed_shared_library_module_names), \
+ $(call intermediates-dir-for, \
+ SHARED_LIBRARIES,$(lib),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/$(lib)$(so_suffix))
built_shared_library_deps := $(addsuffix .toc, $(built_shared_libraries))
my_system_shared_libraries_fullpath :=
endif
@@ -1556,8 +1559,6 @@
my_cflags += $(LOCAL_CLANG_CFLAGS)
my_conlyflags += $(LOCAL_CLANG_CONLYFLAGS)
my_cppflags += $(LOCAL_CLANG_CPPFLAGS)
-my_cflags_no_override += $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override += $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_asflags += $(LOCAL_CLANG_ASFLAGS)
my_ldflags += $(LOCAL_CLANG_LDFLAGS)
my_cflags += $(LOCAL_CLANG_CFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CFLAGS_$(my_32_64_bit_suffix))
@@ -1649,7 +1650,7 @@
ifneq ($(LOCAL_TIDY_CHECKS),)
my_tidy_checks := $(my_tidy_checks),$(LOCAL_TIDY_CHECKS)
endif
- my_tidy_flags += $(WITH_TIDY_FLAGS) $(LOCAL_TIDY_FLAGS)
+ my_tidy_flags := $(strip $(WITH_TIDY_FLAGS) $(LOCAL_TIDY_FLAGS))
# If tidy flags are not specified, default to checking all header files.
ifeq ($(my_tidy_flags),)
my_tidy_flags := $(call default_tidy_header_filter,$(LOCAL_PATH))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 97a74ec..07e34e1 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -43,7 +43,6 @@
LOCAL_CONLYFLAGS:=
LOCAL_COPY_HEADERS:=
LOCAL_COPY_HEADERS_TO:=
-LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES:=
LOCAL_CPP_EXTENSION:=
LOCAL_CPPFLAGS:=
LOCAL_CPP_STD:=
@@ -68,6 +67,7 @@
LOCAL_DPI_FILE_STEM:=
LOCAL_DPI_VARIANTS:=
LOCAL_DROIDDOC_ANNOTATIONS_ZIP :=
+LOCAL_DROIDDOC_API_VERSIONS_XML :=
LOCAL_DROIDDOC_ASSET_DIR:=
LOCAL_DROIDDOC_CUSTOM_ASSET_DIR:=
LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR:=
@@ -298,6 +298,7 @@
LOCAL_WARNINGS_ENABLE:=
LOCAL_WHOLE_STATIC_LIBRARIES:=
LOCAL_YACCFLAGS:=
+# TODO: deprecate, it does nothing
OVERRIDE_BUILT_MODULE_PATH:=
# arch specific variables
@@ -317,6 +318,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_ARCH):=
LOCAL_SRC_FILES_$(TARGET_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_ARCH):=
@@ -339,6 +341,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_2ND_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_$(TARGET_2ND_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_2ND_ARCH):=
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index 3ce64f9..ffb6021 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -33,7 +33,7 @@
TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT := generic
endif
-KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75
+KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75 cortex-a76
KNOWN_ARMv8_CORES += kryo denver64 exynos-m1 exynos-m2
# Many devices (incorrectly) use armv7-a-neon as the 2nd architecture variant
diff --git a/core/config.mk b/core/config.mk
index 676bd64..aeb8aee 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -48,22 +48,9 @@
# Prevent accidentally changing these variables
.KATI_READONLY := SHELL empty space comma newline pound backslash
-# this turns off the suffix rules built into make
-.SUFFIXES:
-
-# this turns off the RCS / SCCS implicit rules of GNU Make
-% : RCS/%,v
-% : RCS/%
-% : %,v
-% : s.%
-% : SCCS/s.%
-
-# If a rule fails, delete $@.
-.DELETE_ON_ERROR:
-
# Mark variables that should be coming as environment variables from soong_ui
# as readonly
-.KATI_READONLY := OUT_DIR TMPDIR BUILD_DATETIME_FILE
+.KATI_READONLY := OUT_DIR TMPDIR BUILD_DATETIME_FILE DIST_DIR
# Mark variables deprecated/obsolete
CHANGES_URL := https://android.googlesource.com/platform/build/+/master/Changes.md
@@ -112,6 +99,15 @@
TARGET_CLANG_SUPPORTED 2ND_TARGET_CLANG_SUPPORTED \
TARGET_CC 2ND_TARGET_CC \
TARGET_CXX 2ND_TARGET_CXX \
+ TARGET_TOOLCHAIN_ROOT 2ND_TARGET_TOOLCHAIN_ROOT \
+ HOST_TOOLCHAIN_ROOT 2ND_HOST_TOOLCHAIN_ROOT \
+ HOST_CROSS_TOOLCHAIN_ROOT 2ND_HOST_CROSS_TOOLCHAIN_ROOT \
+ HOST_TOOLS_PREFIX 2ND_HOST_TOOLS_PREFIX \
+ HOST_CROSS_TOOLS_PREFIX 2ND_HOST_CROSS_TOOLS_PREFIX \
+ HOST_GCC_VERSION 2ND_HOST_GCC_VERSION \
+ HOST_CROSS_GCC_VERSION 2ND_HOST_CROSS_GCC_VERSION \
+ TARGET_NDK_GCC_VERSION 2ND_TARGET_NDK_GCC_VERSION \
+ GLOBAL_CFLAGS_NO_OVERRIDE GLOBAL_CPPFLAGS_NO_OVERRIDE \
,GCC support has been removed. Use Clang instead)
# This is marked as obsolete in envsetup.mk after reading the BoardConfig.mk
@@ -144,6 +140,8 @@
# Here since this file is included by envsetup as well as during build.
include $(BUILD_SYSTEM)/math.mk
+include $(BUILD_SYSTEM)/strings.mk
+
# Various mappings to avoid hard-coding paths all over the place
include $(BUILD_SYSTEM)/pathmap.mk
@@ -716,7 +714,6 @@
DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
FAT16COPY := build/make/tools/fat16copy.py
CHECK_LINK_TYPE := build/make/tools/check_link_type.py
-UUIDGEN := build/make/tools/uuidgen.py
LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
PROGUARD := external/proguard/bin/proguard.sh
@@ -961,7 +958,6 @@
requirements := \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
$(foreach req,$(requirements),$(if $(filter false,$($(req))),\
$(error PRODUCT_USE_LOGICAL_PARTITIONS requires $(req) to be true)))
@@ -1011,16 +1007,42 @@
endif # PRODUCT_USE_DYNAMIC_PARTITION_SIZE
ifeq ($(PRODUCT_BUILD_SUPER_PARTITION),true)
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-# BOARD_SUPER_PARTITION_PARTITION_LIST: a list of the following tokens
+
+# BOARD_SUPER_PARTITION_GROUPS defines a list of "updatable groups". Each updatable group is a
+# group of partitions that share the same pool of free space.
+# For each group in BOARD_SUPER_PARTITION_GROUPS, a BOARD_{GROUP}_SIZE and a
+# BOARD_{GROUP}_PARTITION_LIST may be defined.
+# - BOARD_{GROUP}_SIZE: The maximum sum of sizes of all partitions in the group.
+#   If empty, no limit is enforced on the sum of sizes for this group.
+# - BOARD_{GROUP}_PARTITION_LIST: the list of partitions that belong to this group.
+#   If empty, no partitions belong to this group, and the sum of sizes is effectively 0.
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(eval BOARD_$(group)_SIZE ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_SIZE) \
+ $(eval BOARD_$(group)_PARTITION_LIST ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_PARTITION_LIST) \
+)
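+# A minimal sketch (hypothetical group name and sizes) of what a BoardConfig.mk
+# could set to use this mechanism:
+#   BOARD_SUPER_PARTITION_GROUPS := example_dynamic_partitions
+#   BOARD_EXAMPLE_DYNAMIC_PARTITIONS_SIZE := 4294967296
+#   BOARD_EXAMPLE_DYNAMIC_PARTITIONS_PARTITION_LIST := system vendor product
+# Here system, vendor and product would share a single 4 GiB pool of free space.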
+
+# BOARD_*_PARTITION_LIST: a list of the following tokens
valid_super_partition_list := system vendor product product_services
-ifneq (,$(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST)))
-$(error BOARD_SUPER_PARTITION_PARTITION_LIST contains invalid partition name \
- ($(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST))). \
- Valid names are $(valid_super_partition_list))
-endif
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)), \
+ $(error BOARD_$(group)_PARTITION_LIST contains invalid partition name \
+ $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)). \
+ Valid names are $(valid_super_partition_list))))
valid_super_partition_list :=
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
+
+
+# Define BOARD_SUPER_PARTITION_PARTITION_LIST, the sum of all BOARD_*_PARTITION_LIST
+ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
+$(error BOARD_SUPER_PARTITION_PARTITION_LIST should not be defined, but computed from \
+ BOARD_SUPER_PARTITION_GROUPS and BOARD_*_PARTITION_LIST)
+endif
+BOARD_SUPER_PARTITION_PARTITION_LIST := \
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(BOARD_$(group)_PARTITION_LIST))
+.KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
+
endif # PRODUCT_BUILD_SUPER_PARTITION
# ###############################################################
@@ -1146,6 +1168,7 @@
INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-light-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-dark-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-blacklist.txt
+INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-greylist.csv
# Missing optional uses-libraries so that the platform doesn't create build rules that depend on
# them. See setup_one_odex.mk.
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index e58f676..be1b124 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -212,10 +212,6 @@
my_sanitize := $(filter-out scudo,$(my_sanitize))
endif
-ifneq ($(filter scudo,$(my_sanitize)),)
- my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
-endif
-
# Undefined symbols can occur if a non-sanitized library links
# sanitized static libraries. That's OK, because the executable
# always depends on the ASan runtime library, which defines these
@@ -375,7 +371,7 @@
endif
endif
ifneq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize)),)
- ifeq ($(filter unsigned-integer-overflow signed-integer overflow integer,$(my_sanitize_diag)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize_diag)),)
ifeq ($(filter cfi,$(my_sanitize_diag)),)
ifeq ($(filter address hwaddress,$(my_sanitize)),)
my_cflags += -fsanitize-minimal-runtime
@@ -387,6 +383,18 @@
endif
endif
+# For Scudo, we opt for the minimal runtime, unless some diagnostics are enabled.
+ifneq ($(filter scudo,$(my_sanitize)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer cfi,$(my_sanitize_diag)),)
+ my_cflags += -fsanitize-minimal-runtime
+ endif
+ ifneq ($(filter -fsanitize-minimal-runtime,$(my_cflags)),)
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_MINIMAL_RUNTIME_LIBRARY)
+ else
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
+ endif
+endif
+
ifneq ($(strip $(LOCAL_SANITIZE_RECOVER)),)
recover_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_RECOVER)),
my_cflags += -fsanitize-recover=$(recover_arg)
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index 5171b8a..3590079 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -15,8 +15,7 @@
endif
ifeq ($($(my_prefix)OS),windows)
- # libc++ is not supported on mingw.
- my_cxx_stl := libstdc++
+ my_cxx_stl := libc++_static
endif
endif
else
@@ -38,9 +37,9 @@
endif
ifdef LOCAL_IS_HOST_MODULE
ifeq ($($(my_prefix)OS),windows)
- ifneq ($(filter $(my_cxx_stl),libc++ libc++_static),)
- # libc++ is not supported on mingw.
- my_cxx_stl := libstdc++
+ ifneq ($(filter $(my_cxx_stl),libc++),)
+ # only libc++_static is supported on mingw.
+ my_cxx_stl := libc++_static
endif
endif
endif
@@ -52,8 +51,9 @@
darwin_dynamic_gcclibs := -lc -lSystem
darwin_static_gcclibs := NO_STATIC_HOST_BINARIES_ON_DARWIN
windows_dynamic_gcclibs := \
- -lmsvcr110 -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt -ladvapi32 \
- -lshell32 -luser32 -lkernel32 -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt
+ -Wl,--start-group -lmingw32 -lgcc -lgcc_eh -lmoldname -lmingwex -lmsvcr110 \
+ -lmsvcrt -lpthread -ladvapi32 -lshell32 -luser32 -lkernel32 -lpsapi \
+ -Wl,--end-group
windows_static_gcclibs := NO_STATIC_HOST_BINARIES_ON_WINDOWS
my_link_type := dynamic
@@ -100,6 +100,20 @@
my_cppflags += -nostdinc++
my_ldflags += -nodefaultlibs
my_cxx_ldlibs += $($($(my_prefix)OS)_$(my_link_type)_gcclibs)
+
+ ifeq ($($(my_prefix)OS),windows)
+    # Use SjLj exceptions for 32-bit. libgcc_eh implements the SjLj
+    # exception model for 32-bit.
+ ifeq (x86,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
+ my_cppflags += -fsjlj-exceptions
+ endif
+    # Disable visibility annotations since we're using libc++ as a static
+    # library.
+ my_cppflags += -D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS
+ my_cppflags += -D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS
+ # Use Win32 threads in libc++.
+ my_cppflags += -D_LIBCPP_HAS_THREAD_API_WIN32
+ endif
else
ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
my_static_libraries += libunwind_llvm
@@ -113,11 +127,7 @@
else ifeq ($(my_cxx_stl),ndk)
# Using an NDK STL. Handled in binary.mk.
else ifeq ($(my_cxx_stl),libstdc++)
- ifndef LOCAL_IS_HOST_MODULE
- $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported for device modules)
- else ifneq ($($(my_prefix)OS),windows)
- $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported on $($(my_prefix)OS))
- endif
+ $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported)
else ifeq ($(my_cxx_stl),none)
ifdef LOCAL_IS_HOST_MODULE
my_cppflags += -nostdinc++
diff --git a/core/definitions.mk b/core/definitions.mk
index 82447c9..3538166 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -740,79 +740,6 @@
endef
###########################################################
-## Returns true if $(1) and $(2) are equal. Returns
-## the empty string if they are not equal.
-###########################################################
-define streq
-$(strip $(if $(strip $(1)),\
- $(if $(strip $(2)),\
- $(if $(filter-out __,_$(subst $(strip $(1)),,$(strip $(2)))$(subst $(strip $(2)),,$(strip $(1)))_),,true), \
- ),\
- $(if $(strip $(2)),\
- ,\
- true)\
- ))
-endef
-
-###########################################################
-## Convert "a b c" into "a:b:c"
-###########################################################
-define normalize-path-list
-$(subst $(space),:,$(strip $(1)))
-endef
-
-###########################################################
-## Convert "a b c" into "a,b,c"
-###########################################################
-define normalize-comma-list
-$(subst $(space),$(comma),$(strip $(1)))
-endef
-
-###########################################################
-## Read the word out of a colon-separated list of words.
-## This has the same behavior as the built-in function
-## $(word n,str).
-##
-## The individual words may not contain spaces.
-##
-## $(1): 1 based index
-## $(2): value of the form a:b:c...
-###########################################################
-
-define word-colon
-$(word $(1),$(subst :,$(space),$(2)))
-endef
-
-###########################################################
-## Convert "a=b c= d e = f" into "a=b c=d e=f"
-##
-## $(1): list to collapse
-## $(2): if set, separator word; usually "=", ":", or ":="
-## Defaults to "=" if not set.
-###########################################################
-
-define collapse-pairs
-$(eval _cpSEP := $(strip $(if $(2),$(2),=)))\
-$(strip $(subst $(space)$(_cpSEP)$(space),$(_cpSEP),$(strip \
- $(subst $(_cpSEP), $(_cpSEP) ,$(1)))$(space)))
-endef
-
-###########################################################
-## Given a list of pairs, if multiple pairs have the same
-## first components, keep only the first pair.
-##
-## $(1): list of pairs
-## $(2): the separator word, such as ":", "=", etc.
-define uniq-pairs-by-first-component
-$(eval _upbfc_fc_set :=)\
-$(strip $(foreach w,$(1), $(eval _first := $(word 1,$(subst $(2),$(space),$(w))))\
- $(if $(filter $(_upbfc_fc_set),$(_first)),,$(w)\
- $(eval _upbfc_fc_set += $(_first)))))\
-$(eval _upbfc_fc_set :=)\
-$(eval _first:=)
-endef
-
-###########################################################
## MODULE_TAG set operations
###########################################################
@@ -1729,7 +1656,6 @@
ifneq ($(HOST_CUSTOM_LD_COMMAND),true)
define transform-host-o-to-shared-lib-inner
$(hide) $(PRIVATE_CXX) \
- -Wl,-rpath-link=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_INTERMEDIATE_LIBRARIES) \
-Wl,-rpath,\$$ORIGIN/../$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_SHARED_LIBRARIES)) \
-Wl,-rpath,\$$ORIGIN/$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_SHARED_LIBRARIES)) \
-shared -Wl,-soname,$(notdir $@) \
@@ -1809,7 +1735,6 @@
-Wl,-dynamic-linker,$(PRIVATE_LINKER) \
-Wl,--gc-sections \
-Wl,-z,nocopyreloc \
- -Wl,-rpath-link=$(PRIVATE_TARGET_OUT_INTERMEDIATE_LIBRARIES) \
$(PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O) \
$(PRIVATE_ALL_OBJECTS) \
-Wl,--whole-archive \
@@ -1881,11 +1806,6 @@
###########################################################
## Commands for running gcc to link a host executable
###########################################################
-ifdef BUILD_HOST_static
-HOST_FPIE_FLAGS :=
-else
-HOST_FPIE_FLAGS := -pie
-endif
ifneq ($(HOST_CUSTOM_LD_COMMAND),true)
define transform-host-o-to-executable-inner
@@ -1900,7 +1820,6 @@
$(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \
$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \
$(PRIVATE_ALL_SHARED_LIBRARIES) \
- -Wl,-rpath-link=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OUT_INTERMEDIATE_LIBRARIES) \
$(foreach path,$(PRIVATE_RPATHS), \
-Wl,-rpath,\$$ORIGIN/$(path)) \
$(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),, \
@@ -2294,6 +2213,7 @@
$(hide) $(ZIP2ZIP) -j -i $< -o $(dir $@)d8_input.jar "**/*.class"
$(hide) $(DX_COMMAND) \
--output $(dir $@) \
+ $(addprefix --lib ,$(PRIVATE_D8_LIBS)) \
--min-api $(PRIVATE_MIN_SDK_VERSION) \
$(subst --main-dex-list=, --main-dex-list , \
$(filter-out --core-library --multi-dex --minimal-main-dex,$(PRIVATE_DX_FLAGS))) \
@@ -2463,7 +2383,7 @@
define run-appcompat
$(hide) \
echo "appcompat.sh output:" >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log && \
- art/tools/veridex/appcompat.sh --dex-file=$@ 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
+ PACKAGING=$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING art/tools/veridex/appcompat.sh --dex-file=$@ 2>&1 >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log
endef
appcompat-files = \
art/tools/veridex/appcompat.sh \
@@ -2490,25 +2410,21 @@
# Uncompress dex files embedded in an apk.
#
define uncompress-dexs
-$(hide) if (zipinfo $@ '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
- tmpdir=$@.tmpdir; \
- rm -rf $$tmpdir && mkdir $$tmpdir; \
- unzip -q $@ '*.dex' -d $$tmpdir && \
- zip -qd $@ '*.dex' && \
- ( cd $$tmpdir && find . -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
- rm -rf $$tmpdir; \
+ if (zipinfo $@ '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
+ $(ZIP2ZIP) -i $@ -o $@.tmp -0 "classes*.dex" && \
+ mv -f $@.tmp $@ ; \
fi
endef
-# Uncompress shared libraries embedded in an apk.
+# Uncompress shared JNI libraries embedded in an apk.
#
-define uncompress-shared-libs
-$(hide) if (zipinfo $@ $(PRIVATE_EMBEDDED_JNI_LIBS) 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
- rm -rf $(dir $@)uncompressedlibs && mkdir $(dir $@)uncompressedlibs; \
- unzip -q $@ $(PRIVATE_EMBEDDED_JNI_LIBS) -d $(dir $@)uncompressedlibs && \
- zip -qd $@ 'lib/*.so' && \
- ( cd $(dir $@)uncompressedlibs && find lib -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
- rm -rf $(dir $@)uncompressedlibs; \
+define uncompress-prebuilt-embedded-jni-libs
+ if (zipinfo $@ 'lib/*.so' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
+ $(ZIP2ZIP) -i $@ -o $@.tmp -0 'lib/**/*.so' \
+ $(if $(PRIVATE_EMBEDDED_JNI_LIBS), \
+ -x 'lib/**/*.so' \
+ $(addprefix -X ,$(PRIVATE_EMBEDDED_JNI_LIBS))) && \
+ mv -f $@.tmp $@ ; \
fi
endef
@@ -2553,7 +2469,7 @@
endef
define copy-and-uncompress-dexs
-$(2): $(1) $(ZIPALIGN)
+$(2): $(1) $(ZIPALIGN) $(ZIP2ZIP)
@echo "Uncompress dexs in: $$@"
$$(copy-file-to-target)
$$(uncompress-dexs)
@@ -2568,7 +2484,8 @@
$(eval _cmf_tuple := $(subst :, ,$(f))) \
$(eval _cmf_src := $(word 1,$(_cmf_tuple))) \
$(eval _cmf_dest := $(word 2,$(_cmf_tuple))) \
- $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest))) \
+ $(if $(filter-out $(_cmf_src), $(_cmf_dest)), \
+ $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest)))) \
$(_cmf_dest)))
endef
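The new guard uses $(filter-out), which returns its second argument minus the words matching its first; the result is empty exactly when source and destination coincide, so the circular self-copy rule is skipped. Quick sketch:

    $(info [$(filter-out out/f.txt,out/f.txt)])  # prints [] - no rule emitted
    $(info [$(filter-out out/f.txt,out/g.txt)])  # prints [out/g.txt] - rule emitted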
@@ -2791,12 +2708,20 @@
--write-greylist $(3) \
--write-greylist 26,28:$(4)
+$(5): $(1) $(CLASS2GREYLIST) $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST)
+ $(CLASS2GREYLIST) --public-api-list $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST) $(1) \
+ --write-metadata-csv $(5)
+
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): $(2) $(3) $(4)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_WHITELIST_INPUTS := $$(PRIVATE_WHITELIST_INPUTS) $(2)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_GREYLIST_INPUTS := $$(PRIVATE_GREYLIST_INPUTS) $(3)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_DARKGREYLIST_INPUTS := $$(PRIVATE_DARKGREYLIST_INPUTS) $(4)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): $(5)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): \
+ PRIVATE_METADATA_INPUTS := $$(PRIVATE_METADATA_INPUTS) $(5)
+
endif
endef
@@ -2819,7 +2744,7 @@
$(2): OUTPUT_DIR := $(dir $(call hiddenapi-soong-output-dex,$(2)))
$(2): OUTPUT_JAR := $(dir $(call hiddenapi-soong-output-dex,$(2)))classes.jar
$(2): $(1) $(call hiddenapi-soong-output-dex,$(2)) | $(SOONG_ZIP) $(MERGE_ZIPS)
- $(SOONG_ZIP) -o $${OUTPUT_JAR} -C $${OUTPUT_DIR} -D $${OUTPUT_DIR}
+ $(SOONG_ZIP) -o $${OUTPUT_JAR} -C $${OUTPUT_DIR} -f "$${OUTPUT_DIR}/classes*.dex"
$(MERGE_ZIPS) -D -zipToNotStrip $${OUTPUT_JAR} -stripFile "classes*.dex" $(2) $${OUTPUT_JAR} $(1)
endef
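The switch from -D to -f means soong_zip now packs only the hidden-API-processed classes*.dex files (the glob resolved against the -C root) instead of recursively zipping the whole intermediate directory; merge_zips then splices those entries into the original jar in place of the old dex files. A sketch of the expanded commands (paths illustrative):

    soong_zip -o out/hiddenapi/classes.jar -C out/hiddenapi -f "out/hiddenapi/classes*.dex"
    merge_zips -D -zipToNotStrip out/hiddenapi/classes.jar -stripFile "classes*.dex" \
        out/module.dex.jar out/hiddenapi/classes.jar in/module.jar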
@@ -3241,7 +3166,7 @@
##
## $(1): path to validate
define try-validate-path-is-subdir
-$(strip
+$(strip \
$(if $(filter /%,$(1)),
$(1) starts with a slash
)
@@ -3451,35 +3376,6 @@
endef
###########################################################
-## Convert to lower case without requiring a shell, which isn't cacheable.
-##
-## $(1): string
-###########################################################
-to-lower=$(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$1))))))))))))))))))))))))))
-
-###########################################################
-## Convert to upper case without requiring a shell, which isn't cacheable.
-##
-## $(1): string
-###########################################################
-to-upper=$(subst a,A,$(subst b,B,$(subst c,C,$(subst d,D,$(subst e,E,$(subst f,F,$(subst g,G,$(subst h,H,$(subst i,I,$(subst j,J,$(subst k,K,$(subst l,L,$(subst m,M,$(subst n,N,$(subst o,O,$(subst p,P,$(subst q,Q,$(subst r,R,$(subst s,S,$(subst t,T,$(subst u,U,$(subst v,V,$(subst w,W,$(subst x,X,$(subst y,Y,$(subst z,Z,$1))))))))))))))))))))))))))
-
-# Sanity-check to-lower and to-upper
-lower := abcdefghijklmnopqrstuvwxyz-_
-upper := ABCDEFGHIJKLMNOPQRSTUVWXYZ-_
-
-ifneq ($(lower),$(call to-lower,$(upper)))
- $(error to-lower sanity check failure)
-endif
-
-ifneq ($(upper),$(call to-upper,$(lower)))
- $(error to-upper sanity check failure)
-endif
-
-lower :=
-upper :=
-
-###########################################################
## Verify module name meets character requirements:
## a-z A-Z 0-9
## _.+-,@~
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index aa24c20..1527047 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -159,7 +159,7 @@
$(boot_profile_jars_zip): $(all_boot_jars) $(SOONG_ZIP)
echo "Create boot profiles package: $@"
rm -f $@
- $(SOONG_ZIP) -o $@ -C $(PRODUCT_OUT) $(PRIVATE_JARS)
+ $(SOONG_ZIP) -o $@ -C $(PRODUCT_OUT) $(addprefix -f ,$(PRIVATE_JARS))
droidcore: $(boot_profile_jars_zip)
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 504cc57..698034c 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -7,14 +7,11 @@
# Set USE_DEX2OAT_DEBUG to false for only building non-debug versions.
ifeq ($(USE_DEX2OAT_DEBUG),false)
DEX2OAT := $(HOST_OUT_EXECUTABLES)/dex2oat$(HOST_EXECUTABLE_SUFFIX)
-PATCHOAT := $(HOST_OUT_EXECUTABLES)/patchoat$(HOST_EXECUTABLE_SUFFIX)
else
DEX2OAT := $(HOST_OUT_EXECUTABLES)/dex2oatd$(HOST_EXECUTABLE_SUFFIX)
-PATCHOAT := $(HOST_OUT_EXECUTABLES)/patchoatd$(HOST_EXECUTABLE_SUFFIX)
endif
DEX2OAT_DEPENDENCY += $(DEX2OAT)
-PATCHOAT_DEPENDENCY += $(PATCHOAT)
# Use the first preloaded-classes file in PRODUCT_COPY_FILES.
PRELOADED_CLASSES := $(call word-colon,1,$(firstword \
@@ -85,8 +82,8 @@
# is converted into boot.art (to match the legacy assumption that boot.art
# exists), and the rest are converted to boot-<name>.art.
# In addition, each .art file has an associated .oat file.
-LIBART_TARGET_BOOT_ART_EXTRA_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).art boot-$(jar).art.rel boot-$(jar).oat)
-LIBART_TARGET_BOOT_ART_EXTRA_FILES += boot.art.rel boot.oat
+LIBART_TARGET_BOOT_ART_EXTRA_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).art boot-$(jar).oat)
+LIBART_TARGET_BOOT_ART_EXTRA_FILES += boot.oat
LIBART_TARGET_BOOT_ART_VDEX_FILES := $(foreach jar,$(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)),boot-$(jar).vdex)
LIBART_TARGET_BOOT_ART_VDEX_FILES += boot.vdex
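$(wordlist 2,999,...) drops the first boot jar, which is the one that produces the plain boot.art/boot.oat pair; every remaining jar gets boot-<name>.art/.oat, and the .art.rel entries disappear along with patchoat. Worked expansion (jar list illustrative):

    LIBART_TARGET_BOOT_JARS := core-oj core-libart conscrypt
    # $(wordlist 2,999,$(LIBART_TARGET_BOOT_JARS)) -> core-libart conscrypt
    # extra files: boot-core-libart.art boot-core-libart.oat
    #              boot-conscrypt.art boot-conscrypt.oat boot.oat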
@@ -202,7 +199,6 @@
--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --runtime-arg -Xnorelocate --compile-pic \
--no-generate-debug-info --generate-build-id \
--abort-on-hard-verifier-error \
--force-determinism \
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index 5a68738..14955f0 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -85,16 +85,14 @@
$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_BOOT_IMAGE_FLAGS := $(my_boot_image_flags)
$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_2ND_ARCH_VAR_PREFIX := $(my_2nd_arch_prefix)
-$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME): PRIVATE_IMAGE_LOCATION := $($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)
# Use dex2oat debug version for better error reporting
-$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) : $(LIBART_TARGET_BOOT_DEX_FILES) $(PRELOADED_CLASSES) $(DIRTY_IMAGE_OBJECTS) $(DEX2OAT_DEPENDENCY) $(PATCHOAT_DEPENDENCY) $(my_out_boot_image_profile_location)
+$($(my_2nd_arch_prefix)DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) : $(LIBART_TARGET_BOOT_DEX_FILES) $(PRELOADED_CLASSES) $(DIRTY_IMAGE_OBJECTS) $(DEX2OAT_DEPENDENCY) $(my_out_boot_image_profile_location)
@echo "target dex2oat: $@"
@mkdir -p $(dir $@)
@mkdir -p $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))
- @rm -f $(dir $@)/*.art $(dir $@)/*.oat $(dir $@)/*.art.rel
+ @rm -f $(dir $@)/*.art $(dir $@)/*.oat
@rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.art
@rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.oat
- @rm -f $(dir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)LIBART_TARGET_BOOT_OAT_UNSTRIPPED))/*.art.rel
$(hide) $(DEX2OAT_BOOT_IMAGE_LOG_TAGS) $(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
$(PRIVATE_BOOT_IMAGE_FLAGS) \
@@ -110,16 +108,10 @@
--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$(PRODUCT_OUT)/system \
- --runtime-arg -Xnorelocate --compile-pic \
- --multi-image --no-inline-from=core-oj.jar \
+ --no-inline-from=core-oj.jar \
--abort-on-hard-verifier-error \
--abort-on-soft-verifier-error \
$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS) \
- || ( echo "$(DEX2OAT_FAILURE_MESSAGE)" ; false ) && \
- $(DEX2OAT_BOOT_IMAGE_LOG_TAGS) ANDROID_ROOT=$(PRODUCT_OUT)/system ANDROID_DATA=$(dir $@) $(PATCHOAT) \
- --input-image-location=$(PRIVATE_IMAGE_LOCATION) \
- --output-image-relocation-directory=$(dir $@) \
- --instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
- --base-offset-delta=0x10000000
+ || ( echo "$(DEX2OAT_FAILURE_MESSAGE)" ; false )
endif
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 54f8ccc..cd78eda 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -7,87 +7,95 @@
# privileged apps
LOCAL_UNCOMPRESS_DEX := false
ifneq (true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS))
-ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
- LOCAL_UNCOMPRESS_DEX := true
-else
+ ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
+ LOCAL_UNCOMPRESS_DEX := true
+ endif
+
ifneq (,$(filter $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES), $(LOCAL_MODULE)))
LOCAL_UNCOMPRESS_DEX := true
- endif # PRODUCT_LOADED_BY_PRIVILEGED_MODULES
-endif # LOCAL_PRIVILEGED_MODULE
+ endif
endif # DONT_UNCOMPRESS_PRIV_APPS_DEXS
# Setting LOCAL_DEX_PREOPT based on WITH_DEXPREOPT, LOCAL_DEX_PREOPT, etc
LOCAL_DEX_PREOPT := $(strip $(LOCAL_DEX_PREOPT))
-ifneq (true,$(WITH_DEXPREOPT))
- LOCAL_DEX_PREOPT :=
-else # WITH_DEXPREOPT=true
- ifeq (,$(TARGET_BUILD_APPS)) # TARGET_BUILD_APPS empty
- ifndef LOCAL_DEX_PREOPT # LOCAL_DEX_PREOPT undefined
- ifneq ($(filter $(TARGET_OUT)/%,$(my_module_path)),) # Installed to system.img.
- ifeq (,$(LOCAL_APK_LIBRARIES)) # LOCAL_APK_LIBRARIES empty
- # If we have product-specific config for this module?
- ifeq (disable,$(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG))
- LOCAL_DEX_PREOPT := false
- else
- LOCAL_DEX_PREOPT := $(DEX_PREOPT_DEFAULT)
- endif
- else # LOCAL_APK_LIBRARIES not empty
- LOCAL_DEX_PREOPT := nostripping
- endif # LOCAL_APK_LIBRARIES not empty
- else
- # Default to nostripping for non system preopt (enables preopt).
- # Don't strip in case the oat/vdex version in system ROM doesn't match the one in other
- # partitions. It needs to be able to fall back to the APK for that case.
- # Also only enable preopt for non tests.
- ifeq (,$(filter $(LOCAL_MODULE_TAGS),tests))
- LOCAL_DEX_PREOPT := nostripping
- endif
- endif # Installed to system.img.
- endif # LOCAL_DEX_PREOPT undefined
- endif # TARGET_BUILD_APPS empty
-endif # WITH_DEXPREOPT=true
+ifndef LOCAL_DEX_PREOPT # LOCAL_DEX_PREOPT undefined
+ LOCAL_DEX_PREOPT := $(DEX_PREOPT_DEFAULT)
+
+ ifeq ($(filter $(TARGET_OUT)/%,$(my_module_path)),) # Not installed to system.img.
+ # Default to nostripping for non system preopt (enables preopt).
+ # Don't strip in case the oat/vdex version in system ROM doesn't match the one in other
+ # partitions. It needs to be able to fall back to the APK for that case.
+ LOCAL_DEX_PREOPT := nostripping
+ endif
+
+ ifneq (,$(LOCAL_APK_LIBRARIES)) # LOCAL_APK_LIBRARIES not empty
+ LOCAL_DEX_PREOPT := nostripping
+ endif
+endif
+
ifeq (false,$(LOCAL_DEX_PREOPT))
LOCAL_DEX_PREOPT :=
endif
+
+# Only enable preopt for non tests.
+ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# Disable preopt if product-specific config for this module requests it.
+ifeq (disable,$(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# Disable preopt for TARGET_BUILD_APPS
+ifneq (,$(TARGET_BUILD_APPS))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# Disable preopt if not WITH_DEXPREOPT
+ifneq (true,$(WITH_DEXPREOPT))
+ LOCAL_DEX_PREOPT :=
+endif
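The rewrite above flattens the old nested conditionals into one defaulting block followed by independent disable stanzas; since make evaluates assignments in order, any stanza that fires simply clears the value set earlier. The pattern in miniature:

    FLAG := default
    ifneq (,$(DISABLE_A))
      FLAG :=
    endif
    ifneq (,$(DISABLE_B))
      FLAG :=
    endif
    # FLAG survives only if no disable condition held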
+
ifdef LOCAL_UNINSTALLABLE_MODULE
-LOCAL_DEX_PREOPT :=
+ LOCAL_DEX_PREOPT :=
endif
+
ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR))) # contains no java code
-LOCAL_DEX_PREOPT :=
+ LOCAL_DEX_PREOPT :=
endif
+
# Skip if WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY=true and the module is not in the boot class path.
# Also preopt system server jars since selinux prevents system server from loading anything from
# /data. If we don't do this, they will need to be extracted, which is not favorable for RAM usage
# or performance. If my_preopt_for_extracted_apk is true, we ignore the only preopt boot image
# options.
ifneq (true,$(my_preopt_for_extracted_apk))
-ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
-ifeq ($(filter $(PRODUCT_SYSTEM_SERVER_JARS) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
-LOCAL_DEX_PREOPT :=
-endif
-endif
+ ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+ ifeq ($(filter $(PRODUCT_SYSTEM_SERVER_JARS) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
+ LOCAL_DEX_PREOPT :=
+ endif
+ endif
endif
ifeq ($(LOCAL_DEX_PREOPT),true)
+ # Don't strip with dexes we explicitly uncompress (dexopt will not store the dex code).
+ ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
+ LOCAL_DEX_PREOPT := nostripping
+ endif # LOCAL_UNCOMPRESS_DEX
-# Don't strip with dexes we explicitly uncompress (dexopt will not store the dex code).
-ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
-LOCAL_DEX_PREOPT := nostripping
-endif # LOCAL_UNCOMPRESS_DEX
+ # system_other isn't there for an OTA, so don't strip
+ # if module is on system, and odex is on system_other.
+ ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
+ ifneq ($(call install-on-system-other, $(my_module_path)),)
+ LOCAL_DEX_PREOPT := nostripping
+ endif # install-on-system-other
+ endif # BOARD_USES_SYSTEM_OTHER_ODEX
-# system_other isn't there for an OTA, so don't strip
-# if module is on system, and odex is on system_other.
-ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
-ifneq ($(call install-on-system-other, $(my_module_path)),)
-LOCAL_DEX_PREOPT := nostripping
-endif # install-on-system-other
-endif # BOARD_USES_SYSTEM_OTHER_ODEX
-
-# We also don't strip if all dexs are uncompressed (dexopt will not store the dex code),
-# but that requires to inspect the source file, which is too early at this point (as we
-# don't know if the source file will actually be used).
-# See dexpreopt-remove-classes.dex.
-
+ # We also don't strip if all dexes are uncompressed (dexopt will not store the dex code),
+ # but that requires inspecting the source file, which is too early at this point (as we
+ # don't know if the source file will actually be used).
+ # See dexpreopt-remove-classes.dex.
endif # LOCAL_DEX_PREOPT
built_odex :=
@@ -101,64 +109,64 @@
built_installed_art :=
my_process_profile :=
my_profile_is_text_listing :=
+my_generate_dm :=
ifeq (false,$(WITH_DEX_PREOPT_GENERATE_PROFILE))
-LOCAL_DEX_PREOPT_GENERATE_PROFILE := false
+ LOCAL_DEX_PREOPT_GENERATE_PROFILE := false
endif
ifndef LOCAL_DEX_PREOPT_GENERATE_PROFILE
+ # If LOCAL_DEX_PREOPT_GENERATE_PROFILE is not defined, default it based on the existence of the
+ # profile class listing. TODO: Use product specific directory here.
+ my_classes_directory := $(PRODUCT_DEX_PREOPT_PROFILE_DIR)
+ LOCAL_DEX_PREOPT_PROFILE := $(my_classes_directory)/$(LOCAL_MODULE).prof
-
-# If LOCAL_DEX_PREOPT_GENERATE_PROFILE is not defined, default it based on the existence of the
-# profile class listing. TODO: Use product specific directory here.
-my_classes_directory := $(PRODUCT_DEX_PREOPT_PROFILE_DIR)
-LOCAL_DEX_PREOPT_PROFILE := $(my_classes_directory)/$(LOCAL_MODULE).prof
-
-ifneq (,$(wildcard $(LOCAL_DEX_PREOPT_PROFILE)))
-my_process_profile := true
-my_profile_is_text_listing := false
-endif
+ ifneq (,$(wildcard $(LOCAL_DEX_PREOPT_PROFILE)))
+ my_process_profile := true
+ my_profile_is_text_listing := false
+ endif
else
-my_process_profile := $(LOCAL_DEX_PREOPT_GENERATE_PROFILE)
-my_profile_is_text_listing := true
-LOCAL_DEX_PREOPT_PROFILE := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+ my_process_profile := $(LOCAL_DEX_PREOPT_GENERATE_PROFILE)
+ my_profile_is_text_listing := true
+ LOCAL_DEX_PREOPT_PROFILE := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
endif
ifeq (true,$(my_process_profile))
-ifeq (,$(LOCAL_DEX_PREOPT_APP_IMAGE))
-LOCAL_DEX_PREOPT_APP_IMAGE := true
-endif
+ ifeq (,$(LOCAL_DEX_PREOPT_APP_IMAGE))
+ LOCAL_DEX_PREOPT_APP_IMAGE := true
+ endif
-ifndef LOCAL_DEX_PREOPT_PROFILE
-$(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE))
-endif
-ifeq (,$(dex_preopt_profile_src_file))
-$(call pretty-error, Internal error: dex_preopt_profile_src_file must be set)
-endif
-my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
-my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
-# Remove compressed APK extension.
-my_dex_location := $(patsubst %.gz,%,$(my_dex_location))
-$(my_built_profile): PRIVATE_BUILT_MODULE := $(dex_preopt_profile_src_file)
-$(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
-$(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE)
-$(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE)
-$(my_built_profile): $(PROFMAN)
-$(my_built_profile): $(dex_preopt_profile_src_file)
-ifeq (true,$(my_profile_is_text_listing))
-# The profile is a test listing of classes (used for framework jars).
-# We need to generate the actual binary profile before being able to compile.
+ ifndef LOCAL_DEX_PREOPT_PROFILE
+ $(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE))
+ endif
+ ifeq (,$(dex_preopt_profile_src_file))
+ $(call pretty-error, Internal error: dex_preopt_profile_src_file must be set)
+ endif
+ my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
+ my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
+ # Remove compressed APK extension.
+ my_dex_location := $(patsubst %.gz,%,$(my_dex_location))
+ $(my_built_profile): PRIVATE_BUILT_MODULE := $(dex_preopt_profile_src_file)
+ $(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
+ $(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE)
+ $(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE)
+ $(my_built_profile): $(PROFMAN)
+ $(my_built_profile): $(dex_preopt_profile_src_file)
+ ifeq (true,$(my_profile_is_text_listing))
+ # The profile is a text listing of classes (used for framework jars).
+ # We need to generate the actual binary profile before being able to compile.
+ $(my_built_profile):
$(hide) mkdir -p $(dir $@)
ANDROID_LOG_TAGS="*:e" $(PROFMAN) \
--create-profile-from=$(PRIVATE_SOURCE_CLASSES) \
--apk=$(PRIVATE_BUILT_MODULE) \
--dex-location=$(PRIVATE_DEX_LOCATION) \
--reference-profile-file=$@
-else
-# The profile is binary profile (used for apps). Run it through profman to
-# ensure the profile keys match the apk.
-$(my_built_profile):
+ else
+ # The profile is binary profile (used for apps). Run it through profman to
+ # ensure the profile keys match the apk.
+ $(my_built_profile):
$(hide) mkdir -p $(dir $@)
touch $@
ANDROID_LOG_TAGS="*:i" $(PROFMAN) \
@@ -168,245 +176,240 @@
--dex-location=$(PRIVATE_DEX_LOCATION) \
--reference-profile-file=$@ \
|| echo "Profile out of date for $(PRIVATE_BUILT_MODULE)"
-endif
+ endif
-my_profile_is_text_listing :=
-dex_preopt_profile_src_file :=
+ my_profile_is_text_listing :=
+ dex_preopt_profile_src_file :=
-# Remove compressed APK extension.
-my_installed_profile := $(patsubst %.gz,%,$(LOCAL_INSTALLED_MODULE)).prof
+ # Remove compressed APK extension.
+ my_installed_profile := $(patsubst %.gz,%,$(LOCAL_INSTALLED_MODULE)).prof
-# my_installed_profile := $(LOCAL_INSTALLED_MODULE).prof
-$(eval $(call copy-one-file,$(my_built_profile),$(my_installed_profile)))
-build_installed_profile:=$(my_built_profile):$(my_installed_profile)
+ # my_installed_profile := $(LOCAL_INSTALLED_MODULE).prof
+ $(eval $(call copy-one-file,$(my_built_profile),$(my_installed_profile)))
+ build_installed_profile:=$(my_built_profile):$(my_installed_profile)
else
-build_installed_profile:=
-my_installed_profile :=
+ build_installed_profile:=
+ my_installed_profile :=
endif
ifdef LOCAL_DEX_PREOPT
-dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
+ dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
-# Filter org.apache.http.legacy.boot.
-ifeq ($(dexpreopt_boot_jar_module),org.apache.http.legacy.boot)
-dexpreopt_boot_jar_module :=
-endif
+ ifdef dexpreopt_boot_jar_module
+ # For libart, the boot jars' odex files are replaced by $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE).
+ # We use this installed_odex trick to get boot.art installed.
+ installed_odex := $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
+ # Append the odex for the 2nd arch if we have one.
+ installed_odex += $($(TARGET_2ND_ARCH_VAR_PREFIX)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
+ else # boot jar
+ ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
-ifdef dexpreopt_boot_jar_module
-# For libart, the boot jars' odex files are replaced by $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE).
-# We use this installed_odex trick to get boot.art installed.
-installed_odex := $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
-# Append the odex for the 2nd arch if we have one.
-installed_odex += $($(TARGET_2ND_ARCH_VAR_PREFIX)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
-else # boot jar
-ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
+ my_module_multilib := $(LOCAL_MULTILIB)
+ # If the module is not an SDK library and it's a system server jar, only preopt the primary arch.
+ my_filtered_lib_name := $(patsubst %.impl,%,$(LOCAL_MODULE))
+ ifeq (,$(filter $(JAVA_SDK_LIBRARIES),$(my_filtered_lib_name)))
+ # For a Java library, by default we build odex for both 1st arch and 2nd arch.
+ # But it can be overridden with "LOCAL_MULTILIB := first".
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ # For system server jars, we build for only "first".
+ my_module_multilib := first
+ endif
+ endif
-my_module_multilib := $(LOCAL_MULTILIB)
-# If the module is not an SDK library and it's a system server jar, only preopt the primary arch.
-my_filtered_lib_name := $(patsubst %.impl,%,$(LOCAL_MODULE))
-ifeq (,$(filter $(JAVA_SDK_LIBRARIES),$(my_filtered_lib_name)))
-# For a Java library, by default we build odex for both 1st arch and 2nd arch.
-# But it can be overridden with "LOCAL_MULTILIB := first".
-ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
-# For system server jars, we build for only "first".
-my_module_multilib := first
-endif
-endif
+ # Only preopt primary arch for translated arch since there is only an image there.
+ ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
+ my_module_multilib := first
+ endif
-# Only preopt primary arch for translated arch since there is only an image there.
-ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
-my_module_multilib := first
-endif
+ # #################################################
+ # Odex for the 1st arch
+ my_2nd_arch_prefix :=
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ # #################################################
+ # Odex for the 2nd arch
+ ifdef TARGET_2ND_ARCH
+ ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
+ ifneq (first,$(my_module_multilib))
+ my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ endif # my_module_multilib is not first.
+ endif # TARGET_TRANSLATE_2ND_ARCH not true
+ endif # TARGET_2ND_ARCH
+ # #################################################
+ else # must be APPS
+ # The preferred arch
+ my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+ # Save the module multilib since setup_one_odex modifies it.
+ saved_my_module_multilib := $(my_module_multilib)
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ my_module_multilib := $(saved_my_module_multilib)
+ ifdef TARGET_2ND_ARCH
+ ifeq ($(my_module_multilib),both)
+ # The non-preferred arch
+ my_2nd_arch_prefix := $(if $(LOCAL_2ND_ARCH_VAR_PREFIX),,$(TARGET_2ND_ARCH_VAR_PREFIX))
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ endif # LOCAL_MULTILIB is both
+ endif # TARGET_2ND_ARCH
+ endif # LOCAL_MODULE_CLASS
+ endif # boot jar
-# #################################################
-# Odex for the 1st arch
-my_2nd_arch_prefix :=
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-# #################################################
-# Odex for the 2nd arch
-ifdef TARGET_2ND_ARCH
-ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
-ifneq (first,$(my_module_multilib))
-my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-endif # my_module_multilib is not first.
-endif # TARGET_TRANSLATE_2ND_ARCH not true
-endif # TARGET_2ND_ARCH
-# #################################################
-else # must be APPS
-# The preferred arch
-my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
-# Save the module multilib since setup_one_odex modifies it.
-saved_my_module_multilib := $(my_module_multilib)
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-my_module_multilib := $(saved_my_module_multilib)
-ifdef TARGET_2ND_ARCH
-ifeq ($(my_module_multilib),both)
-# The non-preferred arch
-my_2nd_arch_prefix := $(if $(LOCAL_2ND_ARCH_VAR_PREFIX),,$(TARGET_2ND_ARCH_VAR_PREFIX))
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-endif # LOCAL_MULTILIB is both
-endif # TARGET_2ND_ARCH
-endif # LOCAL_MODULE_CLASS
-endif # boot jar
+ built_odex := $(strip $(built_odex))
+ built_vdex := $(strip $(built_vdex))
+ built_art := $(strip $(built_art))
+ installed_odex := $(strip $(installed_odex))
+ installed_vdex := $(strip $(installed_vdex))
+ installed_art := $(strip $(installed_art))
-built_odex := $(strip $(built_odex))
-built_vdex := $(strip $(built_vdex))
-built_art := $(strip $(built_art))
-installed_odex := $(strip $(installed_odex))
-installed_vdex := $(strip $(installed_vdex))
-installed_art := $(strip $(installed_art))
-
-ifdef built_odex
-ifeq (true,$(my_process_profile))
-$(built_odex): $(my_built_profile)
-$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
-else
-$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
-endif
-
-ifndef LOCAL_DEX_PREOPT_FLAGS
-LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
-ifndef LOCAL_DEX_PREOPT_FLAGS
-LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
-endif
-endif
-
-my_system_server_compiler_filter := $(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER)
-ifeq (,$(my_system_server_compiler_filter))
-my_system_server_compiler_filter := speed
-endif
-
-my_default_compiler_filter := $(PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER)
-ifeq (,$(my_default_compiler_filter))
-# If no default compiler filter is specified, default to 'quicken' to save on storage.
-my_default_compiler_filter := quicken
-endif
-
-ifeq (,$(filter --compiler-filter=%, $(LOCAL_DEX_PREOPT_FLAGS)))
- ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
- # Jars of system server, use the product option if it is set, speed otherwise.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_system_server_compiler_filter)
- else
- ifneq (,$(filter $(PRODUCT_DEXPREOPT_SPEED_APPS) $(PRODUCT_SYSTEM_SERVER_APPS),$(LOCAL_MODULE)))
- # Apps loaded into system server, and apps the product default to being compiled with the
- # 'speed' compiler filter.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed
+ ifdef built_odex
+ ifeq (true,$(my_process_profile))
+ $(built_odex): $(my_built_profile)
+ $(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
else
- ifeq (true,$(my_process_profile))
- # For non system server jars, use speed-profile when we have a profile.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed-profile
- else
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_default_compiler_filter)
+ $(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
+ endif
+
+ ifndef LOCAL_DEX_PREOPT_FLAGS
+ LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
+ ifndef LOCAL_DEX_PREOPT_FLAGS
+ LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
endif
endif
- endif
-endif
-my_generate_dm := $(PRODUCT_DEX_PREOPT_GENERATE_DM_FILES)
-ifeq (,$(filter $(LOCAL_DEX_PREOPT_FLAGS),--compiler-filter=verify))
-# Generating DM files only makes sense for verify, avoid doing for non verify compiler filter APKs.
-my_generate_dm := false
-endif
+ my_system_server_compiler_filter := $(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER)
+ ifeq (,$(my_system_server_compiler_filter))
+ my_system_server_compiler_filter := speed
+ endif
-# No reason to use a dm file if the dex is already uncompressed.
-ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
-my_generate_dm := false
-endif
+ my_default_compiler_filter := $(PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER)
+ ifeq (,$(my_default_compiler_filter))
+ # If no default compiler filter is specified, default to 'quicken' to save on storage.
+ my_default_compiler_filter := quicken
+ endif
-ifeq (true,$(my_generate_dm))
-LOCAL_DEX_PREOPT_FLAGS += --copy-dex-files=false
-LOCAL_DEX_PREOPT := nostripping
-my_built_dm := $(dir $(LOCAL_BUILT_MODULE))generated.dm
-my_installed_dm := $(patsubst %.apk,%,$(LOCAL_INSTALLED_MODULE)).dm
-my_copied_vdex := $(dir $(LOCAL_BUILT_MODULE))primary.vdex
-$(eval $(call copy-one-file,$(built_vdex),$(my_copied_vdex)))
-$(my_built_dm): PRIVATE_INPUT_VDEX := $(my_copied_vdex)
-$(my_built_dm): $(my_copied_vdex) $(ZIPTIME)
+ ifeq (,$(filter --compiler-filter=%, $(LOCAL_DEX_PREOPT_FLAGS)))
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ # Jars of system server, use the product option if it is set, speed otherwise.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_system_server_compiler_filter)
+ else
+ ifneq (,$(filter $(PRODUCT_DEXPREOPT_SPEED_APPS) $(PRODUCT_SYSTEM_SERVER_APPS),$(LOCAL_MODULE)))
+ # Apps loaded into system server, and apps the product defaults to compiling with the
+ # 'speed' compiler filter.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed
+ else
+ ifeq (true,$(my_process_profile))
+ # For non system server jars, use speed-profile when we have a profile.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed-profile
+ else
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_default_compiler_filter)
+ endif
+ endif
+ endif
+ endif
+
+ my_generate_dm := $(PRODUCT_DEX_PREOPT_GENERATE_DM_FILES)
+ ifeq (,$(filter $(LOCAL_DEX_PREOPT_FLAGS),--compiler-filter=verify))
+ # Generating DM files only makes sense for verify; avoid doing so for non-verify compiler filter APKs.
+ my_generate_dm := false
+ endif
+
+ # No reason to use a dm file if the dex is already uncompressed.
+ ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
+ my_generate_dm := false
+ endif
+
+ ifeq (true,$(my_generate_dm))
+ LOCAL_DEX_PREOPT_FLAGS += --copy-dex-files=false
+ LOCAL_DEX_PREOPT := nostripping
+ my_built_dm := $(dir $(LOCAL_BUILT_MODULE))generated.dm
+ my_installed_dm := $(patsubst %.apk,%,$(LOCAL_INSTALLED_MODULE)).dm
+ my_copied_vdex := $(dir $(LOCAL_BUILT_MODULE))primary.vdex
+ $(eval $(call copy-one-file,$(built_vdex),$(my_copied_vdex)))
+ $(my_built_dm): PRIVATE_INPUT_VDEX := $(my_copied_vdex)
+ $(my_built_dm): $(my_copied_vdex) $(ZIPTIME)
$(hide) mkdir -p $(dir $@)
$(hide) rm -f $@
$(hide) zip -qD -j -X -9 $@ $(PRIVATE_INPUT_VDEX)
$(ZIPTIME) $@
-$(eval $(call copy-one-file,$(my_built_dm),$(my_installed_dm)))
-endif
+ $(eval $(call copy-one-file,$(my_built_dm),$(my_installed_dm)))
+ endif
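The installed path comes from $(patsubst %.apk,%,...), which strips the .apk suffix so the dex-metadata archive lands next to the APK under the same stem. Worked expansion (path illustrative):

    # $(patsubst %.apk,%,out/target/product/x/system/app/Foo/Foo.apk).dm
    #   -> out/target/product/x/system/app/Foo/Foo.dm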
-# By default, emit debug info.
-my_dexpreopt_debug_info := true
-# If the global setting suppresses mini-debug-info, disable it.
-ifeq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
- my_dexpreopt_debug_info := false
-endif
-
-# PRODUCT_SYSTEM_SERVER_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
-# PRODUCT_OTHER_JAVA_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
-ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
- ifeq (true,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ # By default, emit debug info.
my_dexpreopt_debug_info := true
- else ifeq (false,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
- my_dexpreopt_debug_info := false
+ # If the global setting suppresses mini-debug-info, disable it.
+ ifeq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+
+ # PRODUCT_SYSTEM_SERVER_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
+ # PRODUCT_OTHER_JAVA_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ ifeq (true,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ my_dexpreopt_debug_info := true
+ else ifeq (false,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+ else
+ ifeq (true,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
+ my_dexpreopt_debug_info := true
+ else ifeq (false,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+ endif
+
+ # Never enable on eng.
+ ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
+ my_dexpreopt_debug_info := false
+ endif
+
+ # Add dex2oat flag for debug-info/no-debug-info.
+ ifeq (true,$(my_dexpreopt_debug_info))
+ LOCAL_DEX_PREOPT_FLAGS += --generate-mini-debug-info
+ else ifeq (false,$(my_dexpreopt_debug_info))
+ LOCAL_DEX_PREOPT_FLAGS += --no-generate-mini-debug-info
+ endif
+
+ # Set the compiler reason to 'prebuilt' to identify the oat files produced
+ # during the build, as opposed to compiled on the device.
+ LOCAL_DEX_PREOPT_FLAGS += --compilation-reason=prebuilt
+
+ $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
+ $(built_vdex): $(built_odex)
+ $(built_art): $(built_odex)
endif
-else
- ifeq (true,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
- my_dexpreopt_debug_info := true
- else ifeq (false,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
- my_dexpreopt_debug_info := false
+
+ ifneq (true,$(my_generate_dm))
+ # Add the installed_odex to the list of installed files for this module if we aren't generating a
+ # dm file.
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
+
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
+
+ # Make sure to install the .odex and .vdex when you run "make <module_name>"
+ $(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
+ else
+ ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_dm)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(my_built_dm) $(my_installed_dm)
+
+ # Make sure to install the .dm when you run "make <module_name>"
+ $(my_all_targets): $(my_installed_dm)
endif
-endif
-# Never enable on eng.
-ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
-my_dexpreopt_debug_info := false
-endif
-
-# Add dex2oat flag for debug-info/no-debug-info.
-ifeq (true,$(my_dexpreopt_debug_info))
- LOCAL_DEX_PREOPT_FLAGS += --generate-mini-debug-info
-else ifeq (false,$(my_dexpreopt_debug_info))
- LOCAL_DEX_PREOPT_FLAGS += --no-generate-mini-debug-info
-endif
-
-# Set the compiler reason to 'prebuilt' to identify the oat files produced
-# during the build, as opposed to compiled on the device.
-LOCAL_DEX_PREOPT_FLAGS += --compilation-reason=prebuilt
-
-$(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
-$(built_vdex): $(built_odex)
-$(built_art): $(built_odex)
-endif
-
-ifneq (true,$(my_generate_dm))
- # Add the installed_odex to the list of installed files for this module if we aren't generating a
- # dm file.
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
-
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
-
- # Make sure to install the .odex and .vdex when you run "make <module_name>"
- $(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
-else
- ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_dm)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(my_built_dm) $(my_installed_dm)
-
- # Make sure to install the .dm when you run "make <module_name>"
- $(my_all_targets): $(installed_dm)
-endif
-
-# Record dex-preopt config.
-DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
-DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
-DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
-DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
-DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
-DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
-DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS) := $(sort \
- $(DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS)) $(LOCAL_MODULE))
+ # Record dex-preopt config.
+ DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
+ DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
+ DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
+ DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
+ DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
+ DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
+ DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
+ DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
+ DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS) := $(sort \
+ $(DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS)) $(LOCAL_MODULE))
endif # LOCAL_DEX_PREOPT
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 8ffbc19..f5babb6 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -183,62 +183,26 @@
TARGET_COPY_OUT_DATA := data
TARGET_COPY_OUT_ASAN := $(TARGET_COPY_OUT_DATA)/asan
TARGET_COPY_OUT_OEM := oem
-TARGET_COPY_OUT_ODM := odm
-TARGET_COPY_OUT_PRODUCT := product
-TARGET_COPY_OUT_PRODUCT_SERVICES := product_services
TARGET_COPY_OUT_RAMDISK := ramdisk
TARGET_COPY_OUT_ROOT := root
TARGET_COPY_OUT_RECOVERY := recovery
+# The directories used for optional partitions depend on the BoardConfig, so
+# they're defined to placeholder values here and swapped after reading the
+# BoardConfig, to be either the partition dir, or a subdir within 'system'.
+_vendor_path_placeholder := ||VENDOR-PATH-PH||
+_product_path_placeholder := ||PRODUCT-PATH-PH||
+_product_services_path_placeholder := ||PRODUCT_SERVICES-PATH-PH||
+_odm_path_placeholder := ||ODM-PATH-PH||
+TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
+TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
+TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_services_path_placeholder)
+TARGET_COPY_OUT_ODM := $(_odm_path_placeholder)
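As the deleted comments below spell out, a device opts into a dedicated partition by assigning the variable in its BoardConfig.mk, and the build substitutes the placeholder with the real value after BoardConfig.mk has been read. Illustrative BoardConfig lines:

    # device BoardConfig.mk
    TARGET_COPY_OUT_VENDOR := vendor          # build a separate vendor.img
    # TARGET_COPY_OUT_VENDOR := system/vendor # or keep vendor files in system.img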
# Returns the non-sanitized version of the path provided in $1.
define get_non_asan_path
$(patsubst $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/%,$(PRODUCT_OUT)/%,$1)
endef
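With TARGET_COPY_OUT_ASAN equal to data/asan, get_non_asan_path is a plain $(patsubst) prefix strip. Worked call (library path illustrative):

    # $(call get_non_asan_path,$(PRODUCT_OUT)/data/asan/system/lib64/libc.so)
    #   -> $(PRODUCT_OUT)/system/lib64/libc.so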
-###########################################
-# Define TARGET_COPY_OUT_VENDOR to a placeholder, for at this point
-# we don't know if the device wants to build a separate vendor.img
-# or just build vendor stuff into system.img.
-# A device can set up TARGET_COPY_OUT_VENDOR to "vendor" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_vendor_path_placeholder := ||VENDOR-PATH-PH||
-TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_PRODUCT to a placeholder, for at this point
-# we don't know if the device wants to build a separate product.img
-# or just build product stuff into system.img.
-# A device can set up TARGET_COPY_OUT_PRODUCT to "product" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_product_path_placeholder := ||PRODUCT-PATH-PH||
-TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_PRODUCT_SERVICES to a placeholder, for at this point
-# we don't know if the device wants to build a separate product_services.img
-# or just build product stuff into system.img.
-# A device can set up TARGET_COPY_OUT_PRODUCT_SERVICES to "product_services" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_product_services_path_placeholder := ||PRODUCT_SERVICES-PATH-PH||
-TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_services_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_ODM to a placeholder, for at this point
-# we don't know if the device wants to build a separate odm.img
-# or just build odm stuff into vendor.img.
-# A device can set up TARGET_COPY_OUT_ODM to "odm" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_odm_path_placeholder := ||ODM-PATH-PH||
-TARGET_COPY_OUT_ODM := $(_odm_path_placeholder)
-###########################################
-
#################################################################
# Set up minimal BOOTCLASSPATH list of jars to build/execute
# java code with dalvikvm/art.
@@ -446,9 +410,11 @@
TARGET_VENDOR_TEST_SUFFIX :=
endif
+ifeq (,$(TARGET_BUILD_APPS))
ifdef PRODUCT_EXTRA_VNDK_VERSIONS
$(foreach v,$(PRODUCT_EXTRA_VNDK_VERSIONS),$(call check_vndk_version,$(v)))
endif
+endif
# Ensure that BOARD_SYSTEMSDK_VERSIONS are all within PLATFORM_SYSTEMSDK_VERSIONS
_unsupported_systemsdk_versions := $(filter-out $(PLATFORM_SYSTEMSDK_VERSIONS),$(BOARD_SYSTEMSDK_VERSIONS))
@@ -543,13 +509,11 @@
HOST_CROSS_OUT_TESTCASES
HOST_OUT_INTERMEDIATES := $(HOST_OUT)/obj
-HOST_OUT_INTERMEDIATE_LIBRARIES := $(HOST_OUT_INTERMEDIATES)/lib
HOST_OUT_NOTICE_FILES := $(HOST_OUT_INTERMEDIATES)/NOTICE_FILES
HOST_OUT_COMMON_INTERMEDIATES := $(HOST_COMMON_OUT_ROOT)/obj
HOST_OUT_FAKE := $(HOST_OUT)/fake_packages
.KATI_READONLY := \
HOST_OUT_INTERMEDIATES \
- HOST_OUT_INTERMEDIATE_LIBRARIES \
HOST_OUT_NOTICE_FILES \
HOST_OUT_COMMON_INTERMEDIATES \
HOST_OUT_FAKE
@@ -558,11 +522,9 @@
include $(BUILD_SYSTEM)/aux_config.mk
HOST_CROSS_OUT_INTERMEDIATES := $(HOST_CROSS_OUT)/obj
-HOST_CROSS_OUT_INTERMEDIATE_LIBRARIES := $(HOST_CROSS_OUT_INTERMEDIATES)/lib
HOST_CROSS_OUT_NOTICE_FILES := $(HOST_CROSS_OUT_INTERMEDIATES)/NOTICE_FILES
.KATI_READONLY := \
HOST_CROSS_OUT_INTERMEDIATES \
- HOST_CROSS_OUT_INTERMEDIATE_LIBRARIES \
HOST_CROSS_OUT_NOTICE_FILES
HOST_OUT_GEN := $(HOST_OUT)/gen
@@ -581,7 +543,6 @@
HOST_2ND_ARCH_VAR_PREFIX := 2ND_
HOST_2ND_ARCH_MODULE_SUFFIX := _32
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATES := $(HOST_OUT)/obj32
-$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES := $($(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATES)/lib
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT)/lib
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES := $(HOST_OUT_EXECUTABLES)
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT_JAVA_LIBRARIES)
@@ -591,7 +552,6 @@
HOST_2ND_ARCH_VAR_PREFIX \
HOST_2ND_ARCH_MODULE_SUFFIX \
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATES \
- $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES \
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES \
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES \
$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES \
@@ -607,7 +567,6 @@
HOST_CROSS_2ND_ARCH_VAR_PREFIX := 2ND_
HOST_CROSS_2ND_ARCH_MODULE_SUFFIX := _64
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_INTERMEDIATES := $(HOST_CROSS_OUT)/obj64
-$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_INTERMEDIATE_LIBRARIES := $($(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_INTERMEDIATES)/lib
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_SHARED_LIBRARIES := $(HOST_CROSS_OUT)/lib64
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_EXECUTABLES := $(HOST_CROSS_OUT_EXECUTABLES)
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_NATIVE_TESTS := $(HOST_CROSS_OUT)/nativetest64
@@ -615,7 +574,6 @@
HOST_CROSS_2ND_ARCH_VAR_PREFIX \
HOST_CROSS_2ND_ARCH_MODULE_SUFFIX \
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_INTERMEDIATES \
- $(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_INTERMEDIATE_LIBRARIES \
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_SHARED_LIBRARIES \
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_EXECUTABLES \
$(HOST_CROSS_2ND_ARCH_VAR_PREFIX)HOST_CROSS_OUT_NATIVE_TESTS
@@ -626,8 +584,7 @@
TARGET_OUT_INTERMEDIATES := $(PRODUCT_OUT)/obj
endif
TARGET_OUT_HEADERS := $(TARGET_OUT_INTERMEDIATES)/include
-TARGET_OUT_INTERMEDIATE_LIBRARIES := $(TARGET_OUT_INTERMEDIATES)/lib
-.KATI_READONLY := TARGET_OUT_INTERMEDIATES TARGET_OUT_HEADERS TARGET_OUT_INTERMEDIATE_LIBRARIES
+.KATI_READONLY := TARGET_OUT_INTERMEDIATES TARGET_OUT_HEADERS
ifneq ($(filter address,$(SANITIZE_TARGET)),)
TARGET_OUT_COMMON_INTERMEDIATES := $(TARGET_COMMON_OUT_ROOT)/obj_asan
@@ -716,7 +673,6 @@
else
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES := $(PRODUCT_OUT)/obj_$(TARGET_2ND_ARCH)
endif
-$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES := $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES)/lib
ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES := $(target_out_shared_libraries_base)/lib/$(TARGET_2ND_ARCH)
else
@@ -729,7 +685,6 @@
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_TESTCASES := $(TARGET_OUT_TESTCASES)
.KATI_READONLY := \
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES \
- $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES \
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES \
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_RENDERSCRIPT_BITCODE \
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_EXECUTABLES \
@@ -801,11 +756,11 @@
TARGET_OUT_VENDOR := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)
.KATI_READONLY := TARGET_OUT_VENDOR
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
else
target_out_vendor_app_base := $(TARGET_OUT_VENDOR)
endif
@@ -884,11 +839,11 @@
TARGET_OUT_ODM := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ODM)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
else
target_out_odm_app_base := $(TARGET_OUT_ODM)
endif
@@ -940,11 +895,11 @@
TARGET_OUT_PRODUCT_EXECUTABLES := $(TARGET_OUT_PRODUCT)/bin
.KATI_READONLY := TARGET_OUT_PRODUCT
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
else
target_out_product_app_base := $(TARGET_OUT_PRODUCT)
endif
@@ -986,11 +941,11 @@
TARGET_OUT_PRODUCT_SERVICES := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
else
target_out_product_services_app_base := $(TARGET_OUT_PRODUCT_SERVICES)
endif
@@ -1081,11 +1036,6 @@
PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES
.KATI_READONLY := COMMON_MODULE_CLASSES PER_ARCH_MODULE_CLASSES
-ifeq (,$(strip $(DIST_DIR)))
- DIST_DIR := $(OUT_DIR)/dist
-endif
-.KATI_READONLY := DIST_DIR
-
ifeq ($(CALLED_FROM_SETUP),true)
PRINT_BUILD_CONFIG ?= true
endif
diff --git a/core/executable.mk b/core/executable.mk
index f1b2462..e71ff33 100644
--- a/core/executable.mk
+++ b/core/executable.mk
@@ -12,6 +12,8 @@
my_skip_this_target := true
else ifeq (false, $(LOCAL_CLANG))
my_skip_this_target := true
+ else ifeq (never, $(LOCAL_SANITIZE))
+ my_skip_this_target := true
endif
endif
@@ -79,7 +81,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# non-preferred arch is supported
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index 4a62fbf..70b2ea8 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -47,13 +47,13 @@
my_target_crtbegin_static_o :=
my_target_crtend_o :=
else ifdef LOCAL_USE_VNDK
-my_target_crtbegin_dynamic_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_dynamic.vendor.o
-my_target_crtbegin_static_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_static.vendor.o
-my_target_crtend_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_android.vendor.o
+my_target_crtbegin_dynamic_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_dynamic.vendor)
+my_target_crtbegin_static_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_static.vendor)
+my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android.vendor)
else
-my_target_crtbegin_dynamic_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_dynamic.o
-my_target_crtbegin_static_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_static.o
-my_target_crtend_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_android.o
+my_target_crtbegin_dynamic_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_dynamic)
+my_target_crtbegin_static_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_static)
+my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android)
endif
ifneq ($(LOCAL_SDK_VERSION),)
my_target_crtbegin_dynamic_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_dynamic.o)
@@ -65,7 +65,6 @@
$(linked_module): PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O := $(my_target_crtbegin_dynamic_o)
$(linked_module): PRIVATE_TARGET_CRTBEGIN_STATIC_O := $(my_target_crtbegin_static_o)
$(linked_module): PRIVATE_TARGET_CRTEND_O := $(my_target_crtend_o)
-$(linked_module): PRIVATE_TARGET_OUT_INTERMEDIATE_LIBRARIES := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)
$(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD)
ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
diff --git a/core/header_library.mk b/core/header_library.mk
index 5144679..3f2730e 100644
--- a/core/header_library.mk
+++ b/core/header_library.mk
@@ -25,7 +25,6 @@
ifeq ($(my_module_arch_supported),true)
# Build for 2ND_ARCH
- OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -43,7 +42,6 @@
ifeq ($(my_module_arch_supported),true)
# Build for 2ND_ARCH
- OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -56,7 +54,6 @@
ifeq ($(my_module_arch_supported),true)
# Build for HOST_CROSS_2ND_ARCH
- OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/host_executable.mk b/core/host_executable.mk
index 1480c2c..a2111a1 100644
--- a/core/host_executable.mk
+++ b/core/host_executable.mk
@@ -11,10 +11,6 @@
endif
endif
-ifeq ($(LOCAL_NO_FPIE),)
-LOCAL_LDFLAGS += $(HOST_FPIE_FLAGS)
-endif
-
ifeq ($(my_module_multilib),both)
ifneq ($(LOCAL_MODULE_CLASS),NATIVE_TESTS)
ifeq ($(LOCAL_MODULE_PATH_32)$(LOCAL_MODULE_STEM_32),)
@@ -40,7 +36,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for HOST_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -56,7 +51,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for Windows
-OVERRIDE_BUILT_MODULE_PATH :=
# we don't want others using the cross compiled version
saved_LOCAL_BUILT_MODULE := $(LOCAL_BUILT_MODULE)
saved_LOCAL_INSTALLED_MODULE := $(LOCAL_INSTALLED_MODULE)
@@ -65,10 +59,6 @@
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
-ifeq ($(LOCAL_NO_FPIE),)
-LOCAL_LDFLAGS += $(HOST_CROSS_FPIE_FLAGS)
-endif
-
include $(BUILD_SYSTEM)/host_executable_internal.mk
LOCAL_LDFLAGS := $(saved_LOCAL_LDFLAGS)
LOCAL_BUILT_MODULE := $(saved_LOCAL_BUILT_MODULE)
@@ -79,7 +69,6 @@
LOCAL_2ND_ARCH_VAR_PREFIX := $(HOST_CROSS_2ND_ARCH_VAR_PREFIX)
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
-OVERRIDE_BUILT_MODULE_PATH :=
# we don't want others using the cross compiled version
saved_LOCAL_BUILT_MODULE := $(LOCAL_BUILT_MODULE)
saved_LOCAL_INSTALLED_MODULE := $(LOCAL_INSTALLED_MODULE)
@@ -88,10 +77,6 @@
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
-ifeq ($(LOCAL_NO_FPIE),)
-LOCAL_LDFLAGS += $(HOST_CROSS_FPIE_FLAGS)
-endif
-
include $(BUILD_SYSTEM)/host_executable_internal.mk
LOCAL_LDFLAGS := $(saved_LOCAL_LDFLAGS)
LOCAL_BUILT_MODULE := $(saved_LOCAL_BUILT_MODULE)
diff --git a/core/host_shared_library.mk b/core/host_shared_library.mk
index 5da7913..e9b3dad 100644
--- a/core/host_shared_library.mk
+++ b/core/host_shared_library.mk
@@ -23,7 +23,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for HOST_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -39,7 +38,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for Windows
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_MODULE_SUFFIX :=
# We don't want makefiles using the cross-compiled host tool
@@ -56,7 +54,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for HOST_CROSS_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_MODULE_SUFFIX :=
# We don't want makefiles using the cross-compiled host tool
diff --git a/core/host_shared_library_internal.mk b/core/host_shared_library_internal.mk
index 0a3b317..da20874 100644
--- a/core/host_shared_library_internal.mk
+++ b/core/host_shared_library_internal.mk
@@ -13,9 +13,6 @@
ifeq ($(strip $(LOCAL_MODULE_SUFFIX)),)
LOCAL_MODULE_SUFFIX := $($(my_prefix)SHLIB_SUFFIX)
endif
-ifneq ($(strip $(OVERRIDE_BUILT_MODULE_PATH)),)
-$(error $(LOCAL_PATH): Illegal use of OVERRIDE_BUILT_MODULE_PATH)
-endif
ifneq ($(strip $(LOCAL_MODULE_STEM)$(LOCAL_BUILT_MODULE_STEM)),)
$(error $(LOCAL_PATH): Cannot set module stem for a library)
endif
@@ -34,10 +31,6 @@
ifndef skip_build_from_source
-# Put the built modules of all shared libraries in a common directory
-# to simplify the link line.
-OVERRIDE_BUILT_MODULE_PATH := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)
-
include $(BUILD_SYSTEM)/binary.mk
my_host_libprofile_rt := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBPROFILE_RT)
diff --git a/core/host_static_library.mk b/core/host_static_library.mk
index aa0421e..71f4fd9 100644
--- a/core/host_static_library.mk
+++ b/core/host_static_library.mk
@@ -23,7 +23,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for HOST_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -39,7 +38,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for Windows
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -52,7 +50,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# Build for HOST_CROSS_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/install_jni_libs_internal.mk b/core/install_jni_libs_internal.mk
index a99d88a..e786691 100644
--- a/core/install_jni_libs_internal.mk
+++ b/core/install_jni_libs_internal.mk
@@ -13,9 +13,8 @@
#
my_jni_shared_libraries := \
- $(addprefix $($(my_2nd_arch_prefix)TARGET_OUT_INTERMEDIATE_LIBRARIES)/, \
- $(addsuffix .so, \
- $(LOCAL_JNI_SHARED_LIBRARIES)))
+ $(foreach lib,$(LOCAL_JNI_SHARED_LIBRARIES), \
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(lib),,,$(my_2nd_arch_prefix))/$(lib).so)
# App-specific lib path.
my_app_lib_path := $(dir $(LOCAL_INSTALLED_MODULE))lib/$(TARGET_$(my_2nd_arch_prefix)ARCH)
@@ -52,7 +51,9 @@
my_shared_library_path := $(call get_non_asan_path,\
$($(my_2nd_arch_prefix)TARGET_OUT$(partition_tag)_SHARED_LIBRARIES))
# Do not use an order-only dependency, because we want to rebuild the image if a JNI library is updated.
-$(LOCAL_INSTALLED_MODULE) : $(addprefix $(my_shared_library_path)/, $(my_jni_filenames))
+my_installed_library := $(addprefix $(my_shared_library_path)/, $(my_jni_filenames))
+$(LOCAL_INSTALLED_MODULE) : $(my_installed_library)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(my_installed_library)
# Create symlink in the app specific lib path
# Skip creating this symlink when running the second part of a target sanitization build.
@@ -97,7 +98,9 @@
$(foreach lib, $(my_prebuilt_jni_libs), \
$(eval $(call copy-one-file, $(lib), $(my_app_lib_path)/$(notdir $(lib)))))
-$(LOCAL_INSTALLED_MODULE) : $(addprefix $(my_app_lib_path)/, $(notdir $(my_prebuilt_jni_libs)))
+my_installed_library := $(addprefix $(my_app_lib_path)/, $(notdir $(my_prebuilt_jni_libs)))
+$(LOCAL_INSTALLED_MODULE) : $(my_installed_library)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(my_installed_library)
endif # my_embed_jni
endif # inner my_prebuilt_jni_libs
endif # outer my_prebuilt_jni_libs
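
The hunks above replace the old shared staging directory (TARGET_OUT_INTERMEDIATE_LIBRARIES) with per-module intermediate directories. A minimal sketch of the call shape, assuming a hypothetical module libfoo (argument layout per core/definitions.mk: class, name, host, common, 2nd-arch prefix):

libfoo_so := $(call intermediates-dir-for,SHARED_LIBRARIES,libfoo,,,$(my_2nd_arch_prefix))/libfoo.so
# For the primary arch this typically resolves to something like (assumption):
#   $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libfoo_intermediates/libfoo.so
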
diff --git a/core/java.mk b/core/java.mk
index cf9bf56..30571b7 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -77,6 +77,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
ifeq ($(LOCAL_MODULE_CLASS)$(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),APPS)
# If this is an apk without any Java code (e.g. framework-res), we should skip compiling Java.
@@ -170,6 +171,7 @@
$(filter %.java,$(LOCAL_GENERATED_SOURCES))
java_intermediate_sources := $(addprefix $(TARGET_OUT_COMMON_INTERMEDIATES)/, $(filter %.java,$(LOCAL_INTERMEDIATE_SOURCES)))
all_java_sources := $(java_sources) $(java_intermediate_sources)
+ALL_MODULES.$(my_register_name).SRCS := $(ALL_MODULES.$(my_register_name).SRCS) $(all_java_sources)
include $(BUILD_SYSTEM)/java_common.mk
@@ -494,6 +496,8 @@
$(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD)
$(transform-jar-to-dex-r8)
else # !LOCAL_PROGUARD_ENABLED
+ $(built_dex_intermediate): PRIVATE_D8_LIBS := $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
+ $(built_dex_intermediate): $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
$(built_dex_intermediate): $(full_classes_pre_proguard_jar) $(DX) $(ZIP2ZIP)
$(transform-classes.jar-to-dex)
endif
@@ -504,8 +508,8 @@
# dex later on. The difference is academic currently, as we don't proguard any
# bootclasspath code at the moment. If we were to do that, we should add keep
# rules for all members with the @UnsupportedAppUsage annotation.
- $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
- LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt)
+ $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
+ LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt) $(hiddenapi_greylist_metadata_csv)
$(eval $(call hiddenapi-copy-dex-files,$(built_dex_intermediate),$(built_dex_hiddenapi)))
built_dex_copy_from := $(built_dex_hiddenapi)
else # !is_boot_jar
diff --git a/core/java_renderscript.mk b/core/java_renderscript.mk
index cf75910..406d679 100644
--- a/core/java_renderscript.mk
+++ b/core/java_renderscript.mk
@@ -107,7 +107,7 @@
# Prevent these from showing up on the device
# One exception is librsjni.so, which is needed for
# both native path and compat path.
-rs_jni_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/librsjni.so
+rs_jni_lib := $(call intermediates-dir-for,SHARED_LIBRARIES,librsjni)/librsjni.so
LOCAL_JNI_SHARED_LIBRARIES += librsjni
ifneq (,$(TARGET_BUILD_APPS)$(FORCE_BUILD_RS_COMPAT))
@@ -118,13 +118,13 @@
$(rs_generated_src_jar): .KATI_IMPLICIT_OUTPUTS += $(rs_generated_bc)
-rs_support_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/libRSSupport.so
+rs_support_lib := $(call intermediates-dir-for,SHARED_LIBRARIES,libRSSupport)/libRSSupport.so
LOCAL_JNI_SHARED_LIBRARIES += libRSSupport
rs_support_io_lib :=
# check if the target api level support USAGE_IO
ifeq ($(filter $(RSCOMPAT_NO_USAGEIO_API_LEVELS),$(renderscript_target_api)),)
-rs_support_io_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/libRSSupportIO.so
+rs_support_io_lib := $(call intermediates-dir-for,SHARED_LIBRARIES,libRSSupportIO)/libRSSupportIO.so
LOCAL_JNI_SHARED_LIBRARIES += libRSSupportIO
endif
diff --git a/core/main.mk b/core/main.mk
index 54d7c23..ecb8c0d 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -317,8 +317,6 @@
ADDITIONAL_DEFAULT_PROPERTIES += ro.debuggable=1
# Enable Dalvik lock contention logging.
ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.lockprof.threshold=500
- # Include the debugging/testing OTA keys in this build.
- INCLUDE_TEST_OTA_KEYS := true
else # !enable_target_debugging
# Target is less debuggable and adbd is off by default
ADDITIONAL_DEFAULT_PROPERTIES += ro.debuggable=0
@@ -1014,6 +1012,18 @@
product_FILES := $(call product-installed-files, $(INTERNAL_PRODUCT))
# Verify the artifact path requirements made by included products.
+
+ # Fakes don't get installed, and host files are irrelevant.
+ static_whitelist_patterns := $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%
+ # RROs become REQUIRED by the source module, but are always placed on the vendor partition.
+ static_whitelist_patterns += %__auto_generated_rro.apk
+ ifeq (true,$(BOARD_USES_SYSTEM_OTHER_ODEX))
+ # Allow system_other odex space optimization.
+ static_whitelist_patterns += \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.odex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.vdex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.art
+ endif
all_offending_files :=
$(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
$(eval requirements := $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENTS)) \
@@ -1022,10 +1032,7 @@
$(eval path_patterns := $(call resolve-product-relative-paths,$(requirements),%)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval files := $(call product-installed-files, $(makefile))) \
- $(eval files := $(filter-out $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%,$(files))) \
- $(eval # RROs become REQUIRED by the source module, but are always placed on the vendor partition.) \
- $(eval files := $(filter-out %__auto_generated_rro.apk,$(files))) \
- $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns),$(files))) \
+ $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns) $(static_whitelist_patterns),$(files))) \
$(call maybe-print-list-and-error,$(offending_files),$(makefile) produces files outside its artifact path requirement.) \
$(eval unused_whitelist := $(filter-out $(files),$(whitelist_patterns))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(makefile) includes redundant whitelist entries in its artifact path requirement.) \
@@ -1036,9 +1043,13 @@
$(eval whitelist := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval offending_files := $(filter-out $(whitelist_patterns),$(files_in_requirement))) \
- $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS),\
- $(call maybe-print-list-and-error,$(offending_files),$(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement.) \
- $(eval unused_whitelist := $(filter-out $(extra_files),$(whitelist_patterns))) \
+ $(eval enforcement := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
+ $(if $(enforcement),\
+ $(call maybe-print-list-and-error,$(offending_files),\
+ $(INTERNAL_PRODUCT) produces files inside $(makefile)'s artifact path requirement. \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT)) \
+ $(eval unused_whitelist := $(if $(filter true strict,$(enforcement)),\
+ $(foreach p,$(whitelist_patterns),$(if $(filter $(p),$(extra_files)),,$(p))))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(INTERNAL_PRODUCT) includes redundant artifact path requirement whitelist entries.) \
) \
)
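
The refactored block distinguishes enforcement levels: any non-empty PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS triggers the offending-files check, while only the values true or strict also flag redundant whitelist entries. A hedged sketch of a product opting in (paths and hint text hypothetical):

PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := strict
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST := root/init.rc
PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT := see device/acme/README for the waiver process
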
@@ -1053,12 +1064,6 @@
product_FILES :=
endif
-ifeq (0,1)
- $(info product_FILES for $(TARGET_DEVICE) ($(INTERNAL_PRODUCT)):)
- $(foreach p,$(product_FILES),$(info : $(p)))
- $(error done)
-endif
-
# TODO: Remove the 3 places in the tree that use ALL_DEFAULT_INSTALLED_MODULES
# and get rid of it from this list.
modules_to_install := $(sort \
@@ -1445,6 +1450,12 @@
$(dump-products)
@echo Successfully dumped products
+.PHONY: dump-files
+dump-files:
+ $(info product_FILES for $(TARGET_DEVICE) ($(INTERNAL_PRODUCT)):)
+ $(foreach p,$(product_FILES),$(info : $(p)))
+ @echo Successfully dumped product file list
+
.PHONY: nothing
nothing:
@echo Successfully read the makefiles.
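
The debug listing that previously required editing the dead ifeq (0,1) block in main.mk is now an ordinary phony goal. Assuming a lunched shell, something like:

m dump-files                      # or: make dump-files
m dump-files | grep vendor/      # e.g. narrow the $(info) output to one partition
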
diff --git a/core/multi_prebuilt.mk b/core/multi_prebuilt.mk
index 77c57ab..c97d481 100644
--- a/core/multi_prebuilt.mk
+++ b/core/multi_prebuilt.mk
@@ -38,7 +38,6 @@
# $(2): IS_HOST_MODULE
# $(3): MODULE_CLASS
# $(4): MODULE_TAGS
-# $(5): OVERRIDE_BUILT_MODULE_PATH
# $(6): UNINSTALLABLE_MODULE
# $(7): BUILT_MODULE_STEM
# $(8): LOCAL_STRIP_MODULE
@@ -56,7 +55,6 @@
$(eval LOCAL_IS_HOST_MODULE := $(2)) \
$(eval LOCAL_MODULE_CLASS := $(3)) \
$(eval LOCAL_MODULE_TAGS := $(4)) \
- $(eval OVERRIDE_BUILT_MODULE_PATH := $(5)) \
$(eval LOCAL_UNINSTALLABLE_MODULE := $(6)) \
$(eval tw := $(subst :, ,$(strip $(t)))) \
$(if $(word 3,$(tw)),$(error $(LOCAL_PATH): Bad prebuilt filename '$(t)')) \
@@ -98,7 +96,7 @@
$(prebuilt_is_host), \
SHARED_LIBRARIES, \
$(prebuilt_module_tags), \
- $($(if $(prebuilt_is_host),HOST,TARGET)_OUT_INTERMEDIATE_LIBRARIES), \
+ , \
, \
, \
$(prebuilt_strip_module))
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 42539f6..84d1c2c 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -620,11 +620,18 @@
ifdef LOCAL_COMPRESSED_MODULE
$(LOCAL_BUILT_MODULE) : $(MINIGZIP)
endif
+ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
+$(LOCAL_BUILT_MODULE) : $(ZIP2ZIP)
+endif
+ifneq ($(BUILD_PLATFORM_ZIP),)
+$(LOCAL_BUILT_MODULE) : .KATI_IMPLICIT_OUTPUTS := $(dir $(LOCAL_BUILT_MODULE))package.dex.apk
+endif
+$(LOCAL_BUILT_MODULE):
@echo "target Package: $(PRIVATE_MODULE) ($@)"
rm -rf $@.parts
mkdir -p $@.parts
ifeq ($(LOCAL_USE_AAPT2),true)
- cp -f $< $@.parts/apk.zip
+ cp -f $(PRIVATE_RES_PACKAGE) $@.parts/apk.zip
else # ! LOCAL_USE_AAPT2
$(call create-assets-package,$@.parts/apk.zip)
endif # LOCAL_USE_AAPT2
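
.KATI_IMPLICIT_OUTPUTS is a kati extension (not GNU make) declaring extra files produced by the same rule, so the generated ninja build tracks both outputs of one action. A minimal sketch under that assumption, with hypothetical file names:

# One action, two declared outputs:
$(PRODUCT_OUT)/foo.apk: .KATI_IMPLICIT_OUTPUTS := $(PRODUCT_OUT)/foo.dex.apk
# Any rule that builds foo.apk is then also recorded as producing foo.dex.apk,
# so ninja re-runs it when either file is missing or stale.
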
@@ -681,6 +688,9 @@
## Rule to build the odex file
ifdef LOCAL_DEX_PREOPT
$(built_odex): PRIVATE_DEX_FILE := $(built_dex)
+ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
+$(built_odex): $(ZIP2ZIP)
+endif
# Use pattern rule - we may have multiple built odex files.
$(built_odex) : $(dir $(LOCAL_BUILT_MODULE))% : $(built_dex)
$(hide) mkdir -p $(dir $@) && rm -f $@
diff --git a/core/pdk_config.mk b/core/pdk_config.mk
index b3cbb9c..b2c9e9e 100644
--- a/core/pdk_config.mk
+++ b/core/pdk_config.mk
@@ -174,11 +174,15 @@
# files under $(PRODUCT_OUT)/symbols to help debugging.
# Source not included to PDK due to dependency issue, so provide symbols instead.
- # We may not be building all of them.
- # The platform.zip just silently ignores the nonexistent ones.
- PDK_SYMBOL_FILES_LIST := \
- system/bin/app_process32 \
- system/bin/app_process64
+ PDK_SYMBOL_FILES_LIST :=
+ ifeq ($(TARGET_IS_64_BIT),true)
+ PDK_SYMBOL_FILES_LIST += system/bin/app_process64
+ ifdef TARGET_2ND_ARCH
+ PDK_SYMBOL_FILES_LIST += system/bin/app_process32
+ endif
+ else
+ PDK_SYMBOL_FILES_LIST += system/bin/app_process32
+ endif
ifneq (,$(PDK_FUSION_PLATFORM_ZIP)$(PDK_FUSION_PLATFORM_DIR))
# symbols should be explicitly pulled for fusion build
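
The rewritten list only names app_process binaries the target actually builds. Spot-checking the conditionals above:

#   TARGET_IS_64_BIT=true with TARGET_2ND_ARCH set:
#     PDK_SYMBOL_FILES_LIST := system/bin/app_process64 system/bin/app_process32
#   64-bit-only (no TARGET_2ND_ARCH):
#     PDK_SYMBOL_FILES_LIST := system/bin/app_process64
#   32-bit target:
#     PDK_SYMBOL_FILES_LIST := system/bin/app_process32
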
diff --git a/core/prebuilt.mk b/core/prebuilt.mk
index 839e14f..fb08625 100644
--- a/core/prebuilt.mk
+++ b/core/prebuilt.mk
@@ -47,7 +47,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# secondary arch is supported
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -66,7 +65,6 @@
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
# host cross compilation is supported
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
@@ -81,7 +79,6 @@
LOCAL_HOST_PREFIX := $(my_prefix)
include $(BUILD_SYSTEM)/module_arch_supported.mk
ifeq ($(my_module_arch_supported),true)
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index 4d1aebc..809c572 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -44,18 +44,6 @@
$(LOCAL_STRIP_MODULE))
ifeq (SHARED_LIBRARIES,$(LOCAL_MODULE_CLASS))
- # LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES indicates that this prebuilt should be
- # installed to the common directory of libraries. This is needed for the NDK
- # shared libraries built by soong, as we build many different versions of each
- # library (one for each API level). Since they all have the same basename,
- # they'd clobber each other (as well as any platform libraries by the same
- # name).
- ifneq ($(LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES),false)
- # Put the built targets of all shared libraries in a common directory
- # to simplify the link line.
- OVERRIDE_BUILT_MODULE_PATH := \
- $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)
- endif
ifeq ($(LOCAL_IS_HOST_MODULE)$(my_strip_module),)
# Strip but not try to add debuglink
my_strip_module := no_debuglink
@@ -195,14 +183,6 @@
endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
-
-# We also need the LOCAL_BUILT_MODULE dependency,
-# since we use -rpath-link which points to the built module's path.
-my_built_shared_libraries := \
- $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
- $(addsuffix $($(my_prefix)SHLIB_SUFFIX), \
- $(my_shared_libraries)))
-$(LOCAL_BUILT_MODULE) : $(my_built_shared_libraries)
endif
endif
@@ -355,13 +335,14 @@
# For PRESIGNED apks we must uncompress every .so file:
# even if the .so file isn't for the current TARGET_ARCH,
# we can't strip the file.
-embedded_prebuilt_jni_libs := 'lib/*.so'
+embedded_prebuilt_jni_libs :=
endif
ifndef embedded_prebuilt_jni_libs
# No LOCAL_PREBUILT_JNI_LIBS, uncompress all.
-embedded_prebuilt_jni_libs := 'lib/*.so'
+embedded_prebuilt_jni_libs :=
endif
$(built_module): PRIVATE_EMBEDDED_JNI_LIBS := $(embedded_prebuilt_jni_libs)
+$(built_module): $(ZIP2ZIP)
ifdef LOCAL_COMPRESSED_MODULE
$(built_module) : $(MINIGZIP)
@@ -372,9 +353,12 @@
$(LOCAL_BUILT_MODULE): PRIVATE_INSTALLED_MODULE := $(LOCAL_INSTALLED_MODULE)
endif
+ifneq ($(BUILD_PLATFORM_ZIP),)
+$(built_module) : .KATI_IMPLICIT_OUTPUTS := $(dir $(LOCAL_BUILT_MODULE))package.dex.apk
+endif
$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(SIGNAPK_JAR)
$(transform-prebuilt-to-target)
- $(uncompress-shared-libs)
+ $(uncompress-prebuilt-embedded-jni-libs)
ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
$(uncompress-dexs)
endif # LOCAL_UNCOMPRESS_DEX
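
uncompress-prebuilt-embedded-jni-libs (like uncompress-dexs above it) rewrites the apk with zip2zip so that matching entries are stored rather than deflated, which is why $(ZIP2ZIP) becomes an explicit prerequisite. A sketch of the recipe pattern, mirroring uncompress-dexs in core/definitions.mk (-0 <glob> marks entries to store; exact flags assumed):

  mv -f $@ $@.tmp
  $(ZIP2ZIP) -i $@.tmp -o $@ -0 "classes*.dex"
  rm -f $@.tmp
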
diff --git a/core/product.mk b/core/product.mk
index 8c8246e..f9f8d60 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -205,10 +205,10 @@
PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE \
PRODUCT_USE_LOGICAL_PARTITIONS \
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS \
+ PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT \
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION \
define dump-product
@@ -408,7 +408,7 @@
BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE \
BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE \
BOARD_SUPER_PARTITION_SIZE \
- BOARD_SUPER_PARTITION_PARTITION_LIST \
+ BOARD_SUPER_PARTITION_GROUPS \
#
# Mark the variables in _product_stash_var_list as readonly
diff --git a/core/product_config.mk b/core/product_config.mk
index 7cbea91..27af09e 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -525,10 +525,6 @@
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_SUPER_PARTITION)),\
$(PRODUCT_USE_LOGICAL_PARTITIONS))
.KATI_READONLY := PRODUCT_BUILD_SUPER_PARTITION
-PRODUCT_USE_FASTBOOTD := $(or \
- $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_FASTBOOTD)),\
- $(PRODUCT_USE_LOGICAL_PARTITIONS))
-.KATI_READONLY := PRODUCT_USE_FASTBOOTD
# List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
# installed on /system directory by default.
diff --git a/core/shared_library.mk b/core/shared_library.mk
index a15b1a6..2832c17 100644
--- a/core/shared_library.mk
+++ b/core/shared_library.mk
@@ -36,7 +36,6 @@
ifeq ($(my_module_arch_supported),true)
# Build for TARGET_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index ab887e0..41e6a95 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -13,9 +13,6 @@
ifeq ($(strip $(LOCAL_MODULE_SUFFIX)),)
LOCAL_MODULE_SUFFIX := $(TARGET_SHLIB_SUFFIX)
endif
-ifneq ($(strip $(OVERRIDE_BUILT_MODULE_PATH)),)
-$(error $(LOCAL_PATH): Illegal use of OVERRIDE_BUILT_MODULE_PATH)
-endif
ifneq ($(strip $(LOCAL_MODULE_STEM)$(LOCAL_BUILT_MODULE_STEM)$(LOCAL_MODULE_STEM_32)$(LOCAL_MODULE_STEM_64)),)
$(error $(LOCAL_PATH): Cannot set module stem for a library)
endif
@@ -34,10 +31,6 @@
ifndef skip_build_from_source
-# Put the built targets of all shared libraries in a common directory
-# to simplify the link line.
-OVERRIDE_BUILT_MODULE_PATH := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)
-
include $(BUILD_SYSTEM)/dynamic_binary.mk
# Define PRIVATE_ variables from global vars
@@ -51,11 +44,11 @@
my_target_crtbegin_so_o :=
my_target_crtend_so_o :=
else ifdef LOCAL_USE_VNDK
-my_target_crtbegin_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_so.vendor.o
-my_target_crtend_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_so.vendor.o
+my_target_crtbegin_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_so.vendor)
+my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so.vendor)
else
-my_target_crtbegin_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_so.o
-my_target_crtend_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_so.o
+my_target_crtbegin_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_so)
+my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so)
endif
ifneq ($(LOCAL_SDK_VERSION),)
my_target_crtbegin_so_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_so.o)
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index 68916c0..d34f367 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -62,6 +62,10 @@
# defines built_odex along with rule to install odex
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+ifneq ($(BUILD_PLATFORM_ZIP),)
+ $(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(dir $(LOCAL_BUILT_MODULE))package.dex.apk))
+endif
+
ifdef LOCAL_DEX_PREOPT
$(built_odex): $(LOCAL_SOONG_DEX_JAR)
$(call dexpreopt-one-file,$<,$@)
@@ -70,6 +74,25 @@
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(LOCAL_BUILT_MODULE)))
endif
+# Embedded JNI libs will already have been handled by Soong.
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH)
+ my_2nd_arch_prefix :=
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+endif
+ifdef TARGET_2ND_ARCH
+ ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH)
+ my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+ endif
+endif
+LOCAL_JNI_SHARED_LIBRARIES :=
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+my_2nd_arch_prefix :=
PACKAGES := $(PACKAGES) $(LOCAL_MODULE)
ifdef LOCAL_CERTIFICATE
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 9aa6fb7..f213563 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -51,21 +51,6 @@
endif
endif
-ifeq (SHARED_LIBRARIES,$(LOCAL_MODULE_CLASS))
- # LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES indicates that this prebuilt should be
- # installed to the common directory of libraries. This is needed for the NDK
- # shared libraries built by soong, as we build many different versions of each
- # library (one for each API level). Since they all have the same basename,
- # they'd clobber each other (as well as any platform libraries by the same
- # name).
- ifneq ($(LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES),false)
- # Put the built targets of all shared libraries in a common directory
- # to simplify the link line.
- OVERRIDE_BUILT_MODULE_PATH := \
- $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)
- endif
-endif
-
#######################################
include $(BUILD_SYSTEM)/base_rules.mk
#######################################
@@ -87,12 +72,6 @@
$(eval $(call copy-one-file,$(LOCAL_SOONG_TOC),$(LOCAL_BUILT_MODULE).toc))
$(call add-dependency,$(LOCAL_BUILT_MODULE).toc,$(LOCAL_BUILT_MODULE))
$(my_all_targets): $(LOCAL_BUILT_MODULE).toc
-
- ifdef OVERRIDE_BUILT_MODULE_PATH
- $(eval $(call copy-one-file,$(LOCAL_BUILT_MODULE).toc,$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem).toc))
- $(call add-dependency,$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem).toc,$(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem))
- $(my_all_targets): $(OVERRIDE_BUILT_MODULE_PATH)/$(my_built_module_stem).toc
- endif
endif
SOONG_ALREADY_CONV := $(SOONG_ALREADY_CONV) $(LOCAL_MODULE)
@@ -125,14 +104,6 @@
endif
$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
$(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
-
- # We also need the LOCAL_BUILT_MODULE dependency,
- # since we use -rpath-link which points to the built module's path.
- my_built_shared_libraries := \
- $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
- $(addsuffix $($(my_prefix)SHLIB_SUFFIX), \
- $(my_shared_libraries)))
- $(LOCAL_BUILT_MODULE) : $(my_built_shared_libraries)
endif
endif
@@ -204,6 +175,8 @@
$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP) --strip-all $(LOCAL_INSTALLED_MODULE)
endif
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+
endif # !skip_module
skip_module :=
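
The added edge means a Soong-exported prebuilt is refreshed when anything listed in LOCAL_ADDITIONAL_DEPENDENCIES changes, matching what binary.mk already does for source modules. In hand-written Android.mk modules the variable is typically used like this (paths hypothetical):

LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk $(LOCAL_PATH)/exports.map
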
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 998e23d..e61aad0 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -115,6 +115,7 @@
$(call add_json_str, BtConfigIncludeDir, $(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR))
$(call add_json_bool, Device_uses_hwc2, $(filter true,$(TARGET_USES_HWC2)))
$(call add_json_list, DeviceKernelHeaders, $(TARGET_PROJECT_SYSTEM_INCLUDES))
+$(call add_json_bool, DevicePrefer32BitApps, $(filter true,$(TARGET_PREFER_32_BIT_APPS)))
$(call add_json_bool, DevicePrefer32BitExecutables, $(filter true,$(TARGET_PREFER_32_BIT_EXECUTABLES)))
$(call add_json_str, DeviceVndkVersion, $(BOARD_VNDK_VERSION))
$(call add_json_str, Platform_vndk_version, $(PLATFORM_VNDK_VERSION))
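
Each add_json_* call appends one entry to the product configuration JSON handed to Soong (out/soong/soong.variables); the new line above lands there roughly as (sketch):

#   "DevicePrefer32BitApps": true,
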
diff --git a/core/soong_droiddoc_prebuilt.mk b/core/soong_droiddoc_prebuilt.mk
index e2ec88c..08df019 100644
--- a/core/soong_droiddoc_prebuilt.mk
+++ b/core/soong_droiddoc_prebuilt.mk
@@ -24,6 +24,10 @@
$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_ANNOTATIONS_ZIP),$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/$(LOCAL_MODULE)_annotations.zip))
endif
+ifdef LOCAL_DROIDDOC_API_VERSIONS_XML
+$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_API_VERSIONS_XML),$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/$(LOCAL_MODULE)_generated-api-versions.xml))
+endif
+
ifdef LOCAL_DROIDDOC_JDIFF_DOC_ZIP
$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_JDIFF_DOC_ZIP),$(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip))
$(call dist-for-goals,docs,$(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip)
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index 18a09fb..20bfc66 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -22,6 +22,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_jar)))
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_pre_proguard_jar)))
@@ -79,7 +80,7 @@
# We use full_classes_jar here, which is the post-proguard jar (on the basis that we also
# have a full_classes_pre_proguard_jar). This is consistent with the equivalent code in
# java.mk.
- $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
+ $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
$(eval $(call hiddenapi-copy-soong-jar,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
else # !is_boot_jar
$(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
diff --git a/core/static_library.mk b/core/static_library.mk
index 25e5279..8002e5c 100644
--- a/core/static_library.mk
+++ b/core/static_library.mk
@@ -21,7 +21,6 @@
ifeq ($(my_module_arch_supported),true)
# Build for TARGET_2ND_ARCH
-OVERRIDE_BUILT_MODULE_PATH :=
LOCAL_BUILT_MODULE :=
LOCAL_INSTALLED_MODULE :=
LOCAL_INTERMEDIATE_TARGETS :=
diff --git a/core/strings.mk b/core/strings.mk
new file mode 100644
index 0000000..ce6d6fb
--- /dev/null
+++ b/core/strings.mk
@@ -0,0 +1,117 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+###########################################################
+## Convert to lower case without requiring a shell, which isn't cacheable.
+##
+## $(1): string
+###########################################################
+to-lower=$(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$1))))))))))))))))))))))))))
+
+###########################################################
+## Convert to upper case without requiring a shell, which isn't cacheable.
+##
+## $(1): string
+###########################################################
+to-upper=$(subst a,A,$(subst b,B,$(subst c,C,$(subst d,D,$(subst e,E,$(subst f,F,$(subst g,G,$(subst h,H,$(subst i,I,$(subst j,J,$(subst k,K,$(subst l,L,$(subst m,M,$(subst n,N,$(subst o,O,$(subst p,P,$(subst q,Q,$(subst r,R,$(subst s,S,$(subst t,T,$(subst u,U,$(subst v,V,$(subst w,W,$(subst x,X,$(subst y,Y,$(subst z,Z,$1))))))))))))))))))))))))))
+
+# Sanity-check to-lower and to-upper
+lower := abcdefghijklmnopqrstuvwxyz-_
+upper := ABCDEFGHIJKLMNOPQRSTUVWXYZ-_
+
+ifneq ($(lower),$(call to-lower,$(upper)))
+ $(error to-lower sanity check failure)
+endif
+
+ifneq ($(upper),$(call to-upper,$(lower)))
+ $(error to-upper sanity check failure)
+endif
+
+lower :=
+upper :=
+
+###########################################################
+## Returns true if $(1) and $(2) are equal. Returns
+## the empty string if they are not equal.
+###########################################################
+define streq
+$(strip $(if $(strip $(1)),\
+ $(if $(strip $(2)),\
+ $(if $(filter-out __,_$(subst $(strip $(1)),,$(strip $(2)))$(subst $(strip $(2)),,$(strip $(1)))_),,true), \
+ ),\
+ $(if $(strip $(2)),\
+ ,\
+ true)\
+ ))
+endef
+
+###########################################################
+## Convert "a b c" into "a:b:c"
+###########################################################
+define normalize-path-list
+$(subst $(space),:,$(strip $(1)))
+endef
+
+###########################################################
+## Convert "a b c" into "a,b,c"
+###########################################################
+define normalize-comma-list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+###########################################################
+## Read the word out of a colon-separated list of words.
+## This has the same behavior as the built-in function
+## $(word n,str).
+##
+## The individual words may not contain spaces.
+##
+## $(1): 1 based index
+## $(2): value of the form a:b:c...
+###########################################################
+
+define word-colon
+$(word $(1),$(subst :,$(space),$(2)))
+endef
+
+###########################################################
+## Convert "a=b c= d e = f" into "a=b c=d e=f"
+##
+## $(1): list to collapse
+## $(2): if set, separator word; usually "=", ":", or ":="
+## Defaults to "=" if not set.
+###########################################################
+
+define collapse-pairs
+$(eval _cpSEP := $(strip $(if $(2),$(2),=)))\
+$(strip $(subst $(space)$(_cpSEP)$(space),$(_cpSEP),$(strip \
+ $(subst $(_cpSEP), $(_cpSEP) ,$(1)))$(space)))
+endef
+
+###########################################################
+## Given a list of pairs, if multiple pairs have the same
+## first components, keep only the first pair.
+##
+## $(1): list of pairs
+## $(2): the separator word, such as ":", "=", etc.
+define uniq-pairs-by-first-component
+$(eval _upbfc_fc_set :=)\
+$(strip $(foreach w,$(1), $(eval _first := $(word 1,$(subst $(2),$(space),$(w))))\
+ $(if $(filter $(_upbfc_fc_set),$(_first)),,$(w)\
+ $(eval _upbfc_fc_set += $(_first)))))\
+$(eval _upbfc_fc_set :=)\
+$(eval _first:=)
+endef
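
Spot checks for the new helpers; everything expands at parse time with no shell involved (expected results shown as comments):

$(info $(call to-upper,pie))                                 # PIE
$(info $(call streq,foo,foo))                                # true
$(info $(call streq,foo,bar))                                # (empty)
$(info $(call word-colon,2,a:b:c))                           # b
$(info $(call normalize-path-list,a b c))                    # a:b:c
$(info $(call collapse-pairs,a=b c= d e = f))                # a=b c=d e=f
$(info $(call uniq-pairs-by-first-component,a:b a:c x:y,:))  # a:b x:y
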
diff --git a/core/tasks/check_emu_boot.mk b/core/tasks/check_emu_boot.mk
deleted file mode 100644
index 4870677..0000000
--- a/core/tasks/check_emu_boot.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-check_emu_boot0 := $(DIST_DIR)/$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)-emulator-boot-test-result.txt
-$(check_emu_boot0) : PRIVATE_PREFIX := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)
-$(check_emu_boot0) : PRIVATE_EMULATOR_BOOT_TEST_SH := device/generic/goldfish/tools/emulator_boot_test.sh
-$(check_emu_boot0) : PRIVATE_BOOT_COMPLETE_STRING := "emulator: INFO: boot completed"
-$(check_emu_boot0) : PRIVATE_BOOT_FAIL_STRING := "emulator: ERROR: fail to boot after"
-$(check_emu_boot0) : PRIVATE_SUCCESS_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-SUCCESS.txt
-$(check_emu_boot0) : PRIVATE_FAIL_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-FAIL.txt
-$(check_emu_boot0) : $(INSTALLED_QEMU_SYSTEMIMAGE) $(INSTALLED_QEMU_VENDORIMAGE) \
- $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(PRODUCT_OUT)/userdata.img) \
- $(PRODUCT_OUT)/ramdisk.img device/generic/goldfish/tools/emulator_boot_test.sh
- @mkdir -p $(dir $(check_emu_boot0))
- $(hide) rm -f $(check_emu_boot0)
- $(hide) rm -f $(PRIVATE_SUCCESS_FILE)
- $(hide) rm -f $(PRIVATE_FAIL_FILE)
- (export ANDROID_PRODUCT_OUT=$$(cd $(PRODUCT_OUT);pwd);\
- export ANDROID_BUILD_TOP=$$(pwd);\
- $(PRIVATE_EMULATOR_BOOT_TEST_SH) > $(check_emu_boot0))
- (if grep -q $(PRIVATE_BOOT_COMPLETE_STRING) $(check_emu_boot0);\
- then echo boot_succeeded > $(PRIVATE_SUCCESS_FILE); fi)
- (if grep -q $(PRIVATE_BOOT_FAIL_STRING) $(check_emu_boot0);\
- then echo boot_failed > $(PRIVATE_FAIL_FILE); fi)
-.PHONY: check_emu_boot
-check_emu_boot: $(check_emu_boot0)
diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk
index fdbf6c9..acbe9be 100644
--- a/core/tasks/collect_gpl_sources.mk
+++ b/core/tasks/collect_gpl_sources.mk
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ifdef dist_goal
-
# The rule below doesn't have dependencies on the files that it copies,
-# so manually generate directly into the DIST_DIR directory that is always
-# wiped between dist builds.
-gpl_source_tgz := $(DIST_DIR)/gpl_source.tgz
+# so manually generate into a PACKAGING intermediate dir, which is wiped
+# in installclean between incremental builds on build servers.
+gpl_source_tgz := $(call intermediates-dir-for,PACKAGING,gpl_source)/gpl_source.tgz
# FORCE since we can't know whether any of the sources changed
$(gpl_source_tgz): PRIVATE_PATHS := $(sort $(patsubst %/, %, $(dir $(ALL_GPL_MODULE_LICENSE_FILES))))
@@ -26,8 +24,4 @@
$(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS)
# Dist the tgz only if we are doing a full build
-ifeq (,$(TARGET_BUILD_APPS))
-droidcore: $(gpl_source_tgz)
-endif
-
-endif # dist_goal
+$(call dist-for-goals,droidcore,$(gpl_source_tgz))
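
dist-for-goals (defined in core/distdir.mk) registers a copy into $(DIST_DIR) that only fires when the named goal is built together with dist, so the tarball no longer needs the dist_goal guard. The second argument also accepts a src:dst pair to rename the artifact, e.g. (destination name hypothetical):

$(call dist-for-goals,droidcore,$(gpl_source_tgz):logs/gpl_source.tgz)
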
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 3b4a98f..9eb3ab3 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -28,3 +28,4 @@
files: $(MODULE_INFO_JSON)
endif
+$(call dist-for-goals, general-tests, $(MODULE_INFO_JSON))
diff --git a/core/tasks/owners.mk b/core/tasks/owners.mk
new file mode 100644
index 0000000..6f32aaf
--- /dev/null
+++ b/core/tasks/owners.mk
@@ -0,0 +1,33 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Create an artifact that includes the OWNERS files in the source tree.
+
+.PHONY: owners
+
+intermediates := $(call intermediates-dir-for,PACKAGING,owners)
+owners_zip := $(intermediates)/owners.zip
+owners_list := $(OUT_DIR)/.module_paths/OWNERS.list
+owners := $(file <$(owners_list))
+$(owners_zip) : PRIVATE_owners := $(subst $(newline),\n,$(owners))
+
+$(owners_zip) : $(owners) $(SOONG_ZIP)
+ @echo "Building artifact to include OWNERS files."
+ rm -rf $@
+ echo -e "$(PRIVATE_owners)" > $@.list
+ $(SOONG_ZIP) -o $@ -C . -l $@.list
+ rm -f $@.list
+
+owners : $(owners_zip)
+
+$(call dist-for-goals, general-tests, $(owners_zip))
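
$(file <$(owners_list)) uses GNU Make 4.x $(file ...) read syntax: the whole list is slurped at parse time instead of shelling out per build. A minimal sketch under that assumption (file name hypothetical):

names := $(file <$(OUT_DIR)/.module_paths/names.list)
$(foreach n,$(names),$(info found $(n)))
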
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 29abf48..93fea4e 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ifndef ONE_SHOT_MAKEFILE
+
.PHONY: sdk_addon
# If they didn't define PRODUCT_SDK_ADDON_NAME, then we won't define
@@ -68,9 +70,16 @@
$(addon_dir_img):$(INSTALLED_QEMU_VENDORIMAGE):images/$(TARGET_CPU_ABI)/vendor.img \
$(addon_dir_img):$(BUILT_RAMDISK_TARGET):images/$(TARGET_CPU_ABI)/ramdisk.img \
$(addon_dir_img):$(PRODUCT_OUT)/system/build.prop:images/$(TARGET_CPU_ABI)/build.prop \
+ $(addon_dir_img):device/generic/goldfish/data/etc/userdata.img:images/$(TARGET_CPU_ABI)/userdata.img \
$(addon_dir_img):$(target_notice_file_txt):images/$(TARGET_CPU_ABI)/NOTICE.txt \
$(addon_dir_img):$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SDK_ADDON_SYS_IMG_SOURCE_PROP):images/source.properties
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+files_to_copy += \
+ $(addon_dir_img):$(QEMU_VERIFIED_BOOT_PARAMS):images/$(TARGET_CPU_ABI)/VerifiedBootParams.textproto
+endif
+
# Generate rules to copy the requested files
$(foreach cf,$(files_to_copy), \
$(eval _root := $(call word-colon,1,$(cf))) \
@@ -141,3 +150,5 @@
$(error Trying to build sdk_addon, but product '$(INTERNAL_PRODUCT)' does not define one)
endif
endif # addon_name
+
+endif # !ONE_SHOT_MAKEFILE
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index e3cf13d..42a3bea 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -249,7 +249,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2018-08-05
+ PLATFORM_SECURITY_PATCH := 2018-09-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/envsetup.sh b/envsetup.sh
index 5894144..a4d950e 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -585,7 +585,13 @@
local choices=($(TARGET_BUILD_APPS= LUNCH_MENU_CHOICES="${LUNCH_MENU_CHOICES[@]}" get_build_var COMMON_LUNCH_CHOICES))
if [ $answer -le ${#choices[@]} ]
then
- selection=${choices[$(($answer-1))]}
+ # Arrays in zsh are indexed from 1 instead of 0.
+ if [ -n "$ZSH_VERSION" ]
+ then
+ selection=${choices[$(($answer))]}
+ else
+ selection=${choices[$(($answer-1))]}
+ fi
fi
else
selection=$answer
@@ -1568,9 +1574,12 @@
}
# Zsh needs bashcompinit called to support bash-style completion.
-function add_zsh_completion() {
- autoload -U compinit && compinit
- autoload -U bashcompinit && bashcompinit
+function enable_zsh_completion() {
+ # Don't override user's options if bash-style completion is already enabled.
+ if ! declare -f complete >/dev/null; then
+ autoload -U compinit && compinit
+ autoload -U bashcompinit && bashcompinit
+ fi
}
function validate_current_shell() {
@@ -1581,13 +1590,32 @@
;;
*zsh*)
function check_type() { type "$1"; }
- add_zsh_completion ;;
+ enable_zsh_completion ;;
*)
echo -e "WARNING: Only bash and zsh are supported.\nUse of other shell would lead to erroneous results."
;;
esac
}
+function acloud()
+{
+ # Let's use the built version over the prebuilt.
+ local built_acloud=${ANDROID_HOST_OUT}/bin/acloud
+ if [ -f $built_acloud ]; then
+ $built_acloud "$@"
+ return $?
+ fi
+
+ local host_os_arch=$(get_build_var HOST_PREBUILT_TAG)
+ case $host_os_arch in
+ linux-x86) "$(gettop)"/prebuilts/asuite/acloud/linux-x86/acloud "$@"
+ ;;
+ *)
+ echo "acloud is not supported on your host arch: $host_os_arch"
+ ;;
+ esac
+}
+
# Execute the contents of any vendorsetup.sh files we can find.
function source_vendorsetup() {
for dir in device vendor product; do
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index 1df981b..fe47626 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -10,10 +10,8 @@
# we explicitly specify this below (even though it's the current default).
TARGET_USERIMAGES_SPARSE_EXT_DISABLED := false
-# Enable dynamic system image size and reserved 128MB in it.
-# Currently the reserve size includes verified boot metadata.
-# TODO: adjust to a smaller value if the reserved size is only for file system.
-BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE := 134217728
+# Enable dynamic system image size and reserve 64MB in it.
+BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE := 67108864
# Android Verified Boot (AVB):
# 1) Sets BOARD_AVB_ENABLE to sign the GSI image.
diff --git a/target/board/generic/device.mk b/target/board/generic/device.mk
index a75bd07..0a32415 100644
--- a/target/board/generic/device.mk
+++ b/target/board/generic/device.mk
@@ -14,17 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs.xml \
- hardware/libhardware_legacy/audio/audio_policy.conf:system/etc/audio_policy.conf
-
# NFC:
# Provide a default libnfc-nci.conf file for devices that do not have one in
# vendor/etc, because the AOSP system image (of aosp_$arch products) is going to
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 13d5bd6..1b6429c 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -23,7 +23,7 @@
TARGET_2ND_CPU_ABI := armeabi-v7a
TARGET_2ND_CPU_ABI2 := armeabi
-ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk,$(MAKECMDGOALS)),)
+ifneq ($(TARGET_BUILD_APPS)$(filter cts vts sdk,$(MAKECMDGOALS)),)
# DO NOT USE
# DO NOT USE
#
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 8bd6a8b..2004624 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:$(TARGET_COPY_OUT_VENDOR)/etc/media_codecs.xml
-
# NFC:
# Provide a default libnfc-nci.conf file for devices that do not have one in
# vendor/etc, because the AOSP system image (of aosp_$arch products) is going to
diff --git a/target/board/generic_x86/device.mk b/target/board/generic_x86/device.mk
index fa2d472..0a32415 100644
--- a/target/board/generic_x86/device.mk
+++ b/target/board/generic_x86/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:system/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:system/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:system/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:system/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:system/etc/media_codecs.xml
-
# NFC:
# Provide a default libnfc-nci.conf file for devices that do not have one in
# vendor/etc, because the AOSP system image (of aosp_$arch products) is going to
@@ -32,7 +22,3 @@
# NFC configuration file should be in vendor/etc, instead of system/etc
PRODUCT_COPY_FILES += \
device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
-
-PRODUCT_PACKAGES := \
- audio.primary.goldfish \
- vibrator.goldfish
diff --git a/target/board/generic_x86_64/device.mk b/target/board/generic_x86_64/device.mk
index fa2d472..0a32415 100755
--- a/target/board/generic_x86_64/device.mk
+++ b/target/board/generic_x86_64/device.mk
@@ -14,16 +14,6 @@
# limitations under the License.
#
-# This is a build configuration for the product aspects that
-# are specific to the emulator.
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/camera/media_profiles.xml:system/etc/media_profiles.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_audio.xml:system/etc/media_codecs_google_audio.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_telephony.xml:system/etc/media_codecs_google_telephony.xml \
- frameworks/av/media/libstagefright/data/media_codecs_google_video.xml:system/etc/media_codecs_google_video.xml \
- device/generic/goldfish/camera/media_codecs.xml:system/etc/media_codecs.xml
-
# NFC:
# Provide a default libnfc-nci.conf file for devices that do not have one in
# vendor/etc, because the AOSP system image (of aosp_$arch products) is going to
@@ -32,7 +22,3 @@
# NFC configuration file should be in vendor/etc, instead of system/etc
PRODUCT_COPY_FILES += \
device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
-
-PRODUCT_PACKAGES := \
- audio.primary.goldfish \
- vibrator.goldfish
diff --git a/target/board/generic_x86_arm/BoardConfig.mk b/target/board/generic_x86_arm/BoardConfig.mk
index d1e4884..8e70b25 100644
--- a/target/board/generic_x86_arm/BoardConfig.mk
+++ b/target/board/generic_x86_arm/BoardConfig.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2016 The Android Open Source Project
+# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,11 +13,7 @@
# limitations under the License.
#
-# Configuration for generic_x86 + arm libraries needed by binary translation.
-
-# The generic product target doesn't have any hardware-specific pieces.
-TARGET_NO_BOOTLOADER := true
-TARGET_NO_KERNEL := true
+# x86 emulator specific definitions
TARGET_CPU_ABI := x86
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
@@ -28,39 +24,27 @@
TARGET_2ND_ARCH_VARIANT := armv7-a
TARGET_2ND_CPU_VARIANT := generic
-# Tell the build system this isn't a typical 64bit+32bit multilib configuration.
+TARGET_CPU_ABI_LIST := x86 armeabi-v7a armeabi
TARGET_TRANSLATE_2ND_ARCH := true
BUILD_BROKEN_DUP_RULES := true
-# no hardware camera
-USE_CAMERA_STUB := true
-# Enable dex-preoptimization to speed up the first boot sequence
-# of an SDK AVD. Note that this operation only works on Linux for now
-ifeq ($(HOST_OS),linux)
- ifeq ($(WITH_DEXPREOPT),)
- WITH_DEXPREOPT := true
- WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY := false
- endif
-endif
+include build/make/target/board/BoardConfigEmuCommon.mk
+include build/make/target/board/BoardConfigGsiCommon.mk
-TARGET_USES_HWC2 := true
-NUM_FRAMEBUFFER_SURFACE_BUFFERS := 3
+# Resize to 4G to accommodate ASAN and CTS
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-# Build OpenGLES emulation host and guest libraries
-BUILD_EMULATOR_OPENGL := true
+BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-# Build and enable the OpenGL ES View renderer. When running on the emulator,
-# the GLES renderer disables itself if host GL acceleration isn't available.
-USE_OPENGL_RENDERER := true
-
-TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1879048192 # 1.75 GB
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
-BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
-BOARD_FLASH_BLOCK_SIZE := 512
-TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
+# Wifi.
+BOARD_WLAN_DEVICE := emulator
+BOARD_HOSTAPD_DRIVER := NL80211
+BOARD_WPA_SUPPLICANT_DRIVER := NL80211
+BOARD_HOSTAPD_PRIVATE_LIB := lib_driver_cmd_simulated
+BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
+WPA_SUPPLICANT_VERSION := VER_0_8_X
+WIFI_DRIVER_FW_PATH_PARAM := "/dev/null"
+WIFI_DRIVER_FW_PATH_STA := "/dev/null"
+WIFI_DRIVER_FW_PATH_AP := "/dev/null"
diff --git a/target/board/generic_x86_arm/README.txt b/target/board/generic_x86_arm/README.txt
new file mode 100644
index 0000000..05f7ca2
--- /dev/null
+++ b/target/board/generic_x86_arm/README.txt
@@ -0,0 +1,10 @@
+The "generic_x86_arm" product defines a non-hardware-specific IA target
+without a kernel or bootloader.
+
+It can be used to build the entire user-level system, and
+will work with the IA version of the emulator,
+
+It is not a product "base class"; no other products inherit
+from it or use it in any way.
+
+Third party arm to x86 translator has to be installed as well
diff --git a/target/board/generic_x86_arm/device.mk b/target/board/generic_x86_arm/device.mk
new file mode 100644
index 0000000..0a32415
--- /dev/null
+++ b/target/board/generic_x86_arm/device.mk
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# NFC:
+# Provide a default libnfc-nci.conf file for devices that do not have one in
+# vendor/etc, because the AOSP system image (of aosp_$arch products) is going to
+# be used as the GSI.
+# We may need to remove the following for devices newly launched in P, since the
+# NFC configuration file should be in vendor/etc instead of system/etc.
+PRODUCT_COPY_FILES += \
+ device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
diff --git a/target/board/generic_x86_arm/system.prop b/target/board/generic_x86_arm/system.prop
new file mode 100644
index 0000000..64829f3
--- /dev/null
+++ b/target/board/generic_x86_arm/system.prop
@@ -0,0 +1,5 @@
+#
+# system.prop for generic sdk
+#
+
+rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/board/treble_common.mk b/target/board/treble_common.mk
index 1869000..9413a75 100644
--- a/target/board/treble_common.mk
+++ b/target/board/treble_common.mk
@@ -35,10 +35,8 @@
TARGET_USERIMAGES_USE_F2FS := true
TARGET_USERIMAGES_SPARSE_EXT_DISABLED := false
-# Enable dynamic system image size and reserved 128MB in it.
-# Currently the reserve size includes verified boot metadata.
-# TODO: adjust to a smaller value if the reserved size is only for file system.
-BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE := 134217728
+# Enable dynamic system image size and reserve 64MB in it.
+BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE := 67108864
# Generic AOSP image always requires separate vendor.img
TARGET_COPY_OUT_VENDOR := vendor
diff --git a/target/product/aosp_arm.mk b/target/product/aosp_arm.mk
index f0752a8..795f8aa 100644
--- a/target/product/aosp_arm.mk
+++ b/target/product/aosp_arm.mk
@@ -38,4 +38,7 @@
# Needed by devices newly launched on P to pass VtsTrebleSysProp on GSI
PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
PRODUCT_NAME := aosp_arm
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index ab23111..f3f3c5a 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -54,6 +54,9 @@
# Needed by devices newly launched on P to pass VtsTrebleSysProp on GSI
PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
PRODUCT_NAME := aosp_arm64
PRODUCT_DEVICE := generic_arm64
PRODUCT_BRAND := Android
diff --git a/target/product/aosp_x86.mk b/target/product/aosp_x86.mk
index 9d1b14b..e3167af 100644
--- a/target/product/aosp_x86.mk
+++ b/target/product/aosp_x86.mk
@@ -38,4 +38,7 @@
# Needed by devices newly launched on P to pass VtsTrebleSysProp on GSI
PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
PRODUCT_NAME := aosp_x86
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index b38c417..222adaa 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -54,6 +54,9 @@
# Needed by devices newly launched on P to pass VtsTrebleSysProp on GSI
PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
+
ifdef NET_ETH0_STARTONBOOT
PRODUCT_PROPERTY_OVERRIDES += net.eth0.startonboot=1
endif
diff --git a/target/product/aosp_x86_arm.mk b/target/product/aosp_x86_arm.mk
index 19f57e8..b921c97 100644
--- a/target/product/aosp_x86_arm.mk
+++ b/target/product/aosp_x86_arm.mk
@@ -17,27 +17,32 @@
# aosp_x86 with arm libraries needed by binary translation.
+# The system image of aosp_x86-userdebug is a GSI for devices with:
+# - 32-bit x86 user space
+# - a 64-bit binder interface
+# - system-as-root
+# - VNDK enforcement
+# - compatible property override enabled
+
+-include device/generic/goldfish/x86-vendor.mk
+
include $(SRC_TARGET_DIR)/product/full_x86.mk
-# arm libraries. This is the list of shared libraries included in the NDK.
-# Their dependency libraries will be automatically pulled in.
+# Enable dynamic partition size
+PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
+
+# Enable A/B update
+AB_OTA_UPDATER := true
+AB_OTA_PARTITIONS := system
PRODUCT_PACKAGES += \
- libandroid_arm \
- libaaudio_arm \
- libc_arm \
- libdl_arm \
- libEGL_arm \
- libGLESv1_CM_arm \
- libGLESv2_arm \
- libGLESv3_arm \
- libjnigraphics_arm \
- liblog_arm \
- libm_arm \
- libmediandk_arm \
- libOpenMAXAL_arm \
- libstdc++_arm \
- libOpenSLES_arm \
- libz_arm \
+ update_engine \
+ update_verifier
+
+# Needed by devices newly launched on P to pass VtsTrebleSysProp on GSI
+PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
PRODUCT_NAME := aosp_x86_arm
PRODUCT_DEVICE := generic_x86_arm
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 6cc6561..a3c9ac7 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -74,6 +74,8 @@
fsck_msdos \
fs_config_files_system \
fs_config_dirs_system \
+ heapprofd \
+ heapprofd_client \
gatekeeperd \
healthd \
hid \
@@ -85,11 +87,12 @@
incidentd \
incident_helper \
incident_report \
- init \
init.environ.rc \
init.rc \
+ init_system \
input \
installd \
+ iorapd \
ip \
ip6tables \
iptables \
@@ -211,6 +214,7 @@
pppd \
privapp-permissions-platform.xml \
racoon \
+ recovery-persist \
resize2fs \
run-as \
schedtest \
@@ -304,6 +308,7 @@
# Packages included only for eng or userdebug builds, previously debug tagged
PRODUCT_PACKAGES_DEBUG := \
adb_keys \
+ apex_debug_key \
iotop \
logpersist.start \
perfprofd \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 1b25f27..9bb45d1 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -34,6 +34,7 @@
fs_config_dirs_nonsystem \
gralloc.default \
group \
+ init_vendor \
libbundlewrapper \
libclearkeycasplugin \
libdownmix \
diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk
index af4097d..ee59090 100644
--- a/target/product/full_base_telephony.mk
+++ b/target/product/full_base_telephony.mk
@@ -28,4 +28,5 @@
frameworks/native/data/etc/handheld_core_hardware.xml:$(TARGET_COPY_OUT_VENDOR)/etc/permissions/handheld_core_hardware.xml
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_vendor.mk)
diff --git a/target/product/generic.mk b/target/product/generic.mk
index cc856f4..7a9732d 100644
--- a/target/product/generic.mk
+++ b/target/product/generic.mk
@@ -18,7 +18,8 @@
# It includes the base Android platform.
$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_no_telephony.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_vendor.mk)
# Overrides
PRODUCT_BRAND := generic
diff --git a/target/product/mainline_arm64.mk b/target/product/mainline_arm64.mk
index 92954db..4e511e1 100644
--- a/target/product/mainline_arm64.mk
+++ b/target/product/mainline_arm64.mk
@@ -16,7 +16,8 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/mainline_system.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/base_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_vendor.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_vendor.mk)
PRODUCT_NAME := mainline_arm64
PRODUCT_DEVICE := generic_arm64
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 3581c4a..ed6dcc9 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -14,29 +14,69 @@
# limitations under the License.
#
-# This makefile is the basis of a generic system image for a handheld
-# device with no telephony.
+# This makefile is the basis of a generic system image for a handheld device.
$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system.mk)
-# OTA support.
+# Shared java libs
+PRODUCT_PACKAGES += \
+ com.android.nfc_extras \
+
+# Applications
+PRODUCT_PACKAGES += \
+ DMService \
+ LiveWallpapersPicker \
+ PartnerBookmarksProvider \
+ PresencePolling \
+ RcsService \
+ SafetyRegulatoryInfo \
+ Stk \
+
+# OTA support
PRODUCT_PACKAGES += \
update_engine \
update_verifier \
+# Wrapped net utils for /vendor access.
+PRODUCT_PACKAGES += \
+ netutils-wrapper-1.0 \
+
+# Charger images
+PRODUCT_PACKAGES += \
+ charger_res_images \
+
+# system_other support
+PRODUCT_PACKAGES += \
+ cppreopts.sh \
+ otapreopt_script \
+
+# Bluetooth libraries
+PRODUCT_PACKAGES += \
+ audio.a2dp.default \
+ audio.hearing_aid.default \
+
+PRODUCT_PACKAGES_DEBUG += \
+ avbctl \
+ bootctl \
+ tinyplay \
+ tinycap \
+ tinymix \
+ tinypcminfo \
+ update_engine_client \
+
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
PRODUCT_NAME := mainline_system
PRODUCT_BRAND := generic
-PRODUCT_SHIPPING_API_LEVEL := 28
_base_mk_whitelist :=
_my_whitelist := $(_base_mk_whitelist)
-# Both /system and / are in system.img when PRODUCT_SHIPPING_API_LEVEL>=28.
+# For mainline, system.img should be mounted at /, so we include ROOT here.
_my_paths := \
- $(TARGET_COPY_OUT_ROOT) \
- $(TARGET_COPY_OUT_SYSTEM) \
+ $(TARGET_COPY_OUT_ROOT)/ \
+ $(TARGET_COPY_OUT_SYSTEM)/ \
$(call require-artifacts-in-path, $(_my_paths), $(_my_whitelist))
diff --git a/target/product/security/Android.mk b/target/product/security/Android.mk
index 4142ea9..73ebd75 100644
--- a/target/product/security/Android.mk
+++ b/target/product/security/Android.mk
@@ -12,6 +12,19 @@
include $(BUILD_PREBUILT)
#######################################
+# apex_debug_key for eng/userdebug
+ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
+ include $(CLEAR_VARS)
+
+ LOCAL_MODULE := apex_debug_key
+ LOCAL_SRC_FILES := $(LOCAL_MODULE)
+ LOCAL_MODULE_CLASS := ETC
+ LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/security/apex
+
+ include $(BUILD_PREBUILT)
+endif
+
+#######################################
# adb key, if configured via PRODUCT_ADB_KEYS
ifdef PRODUCT_ADB_KEYS
ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
diff --git a/target/product/security/apex_debug_key b/target/product/security/apex_debug_key
new file mode 100644
index 0000000..28bc8f7
--- /dev/null
+++ b/target/product/security/apex_debug_key
Binary files differ
diff --git a/target/product/security/apex_debug_key.pem b/target/product/security/apex_debug_key.pem
new file mode 100644
index 0000000..bd56778
--- /dev/null
+++ b/target/product/security/apex_debug_key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAt4iSfTF+e2khGQf0bUzTMwWFsgaiQbwQB3cvyBlE9XekFXUt
+GdOEhC2J0p+930UoF6gjjRRrgGF+8K5iV1m3oEbB3qGz6UUOurvVkt4tq96e/Q5a
+ogCOZEuWHjZfs2tQUVNJJtptIp9+0cM768vdf+qnK2JNFIhBqSY0FhjVljKevMcM
+w2tWFRZnKPQ3JoRnWqi5CIauQtBcWRFKIApyf41uHGMjpQRd8aTGeLXBRTi/yD73
+HltuKwSF2SXpj1F+9j4stqskQvipjQnid/Wb+nN3CNgyrGuRrtGvz71WWYcK3DLM
+jvGLOl06QrN6a7ZfLUN4qQjJ6Is5SLTSw/sfFE7Fpcbg6/Geh+jSvChuo6EUtzoX
+Qu42HsVXhrJLQ9/AVTWNmGc9IDr4PMtDiQc4FN8MOpUtR6V/zwrZFoeR3PHl9Z7v
+uTxLIcQLIott0mAjPhbNgbFBs5HP1Z8TfFcyZWpShlx+aM1V2mzYQ7sgsWjFKMSQ
+wIUk/YZ9QK/H5WKjC5M0yxueCU0ocvWFaAZ4RyS/r/SUyQpvyNXNwUsdp1a8sNxp
+LP9U7FG64C+T791yoQJ0sKVbts5SEu/Tojw6miYbH6Fspdo2xxfCbrv6SAbkjlct
+afOnEepgTlHet0G+y0N7OZRJ9WRGyLJNgGjmmDy9XSYGAykwwe4Fv348D0cCAwEA
+AQKCAgBuFra/78NNpXbb++CK+20oCqTyb3Y+dd8rizuXDElH8Fb1JA9EkZLIckRc
+mcMbvPDal9mTU29UV6b8Ga4VdVRnCGpb76TqRKkcK3Vlnm3IzUWSx1xoFmtTD9/h
+CX6IMdPApHOZoaWbAg7hJfm4a9XWV9ukc1eG/GBeZPMTWhwr9vsugztNsQG2rnR8
+pVi7eupAADrVOWwn2bG7H1rWM04Q4rXswy7rWd48BzmhyGxA6FRpehNjGzbPCOx8
+n3gkpp7Ad/T8MVYT8fJKDmbQy/ue1EnPfVeQAwok0dRiiNDV7OH/yVzYVVzNSoSa
+4+uH1qHqlbE3u3TZT0GyMfzG38f4scsbvG/AhH1fuPsy4QcWyLlMV6KUnk3KPc3Q
+yOeRR82qndQMTYQ5/PFiilk7cNbTU0OBjuNpu/t1LIE2J2gGZ5Jw+g2NGtM/xsgC
+jOahpRYvZB8fZ/bSjirwwmSSU+v0ZoPDHtt75R/QxqwPG2jai8kaGr7GEXWJfrfv
+CktMnb6LoCyNiiiZSMUgdDHOQEkVNmt9fxiVaxsaIL4BygropwlD4WbuyRMevfYz
+EffvvmaqC24zJi8WzDszCNLgP/piNhXDyxZX+KaQXj0Do/tzWBBkO0OO6mVGOkX2
+6dadXfhOIggWO8K2lKCUKwWMO9LaKwSwZ4gzcc1a+U9rpE8kUQKCAQEA8lBGLzOL
+Ht8+d13SY+NdPbL6qGvoqsKd5BfIhaNbH04Cp2zQs2TWySxmV47df03pGUpQOCKn
+tFRxoczUrf1gfFDCCC95+A/crls8QJHG+MScTBH5U8Q0s9ReUo/0xaa55u77x5uS
+0fAtdnOdqP8/pf1fSXUJvyLW85LWdkge1c7jk7I5MnWVO2Ak9/GkuRgITSSgVdBa
+kr8nU1BCzDY0gOTWo5J1+NqqVH2eYfEI621iD4SAE3n2JrCC4K/Nt2enEJwup2TR
+ym15g9nClicUQP5Y67eDfqTZu1d0I0Ezl1tL8UPxcLI+ucN4V6KL8RvqTVMnGX/R
+s1FwkPVMQ6dKaQKCAQEAweZeggcSFukr+tTbnzDAHxg4YqiR+30wo7i8NadGu6W/
+EiAdcCdmZYMI9KKc+B/N3cuFqBnaSd7VM7XvINdwZRanRj56Ya8LvQMi0S9YPiRn
+T4TXC3EeewN5+SSO0Dkw83tW1PLqgSINy5ijBs5lGoIYMCC+GSA2DuRBiPpcfhqJ
+kmC9uFQvrsge8CC8Sb1wHCr0Wz34qhPoTff6ZV8wm11Jkb5+tT7PMS5Ft0sEBsxV
+R1JFtLNs0k/YpMb4/OrZFZZSIFCTUVPvHQ1/5BwumVnolBC4LORCaSk1xUOydU9h
+bZd4qzIpFteGLGGRT6nEWC1YejLAvcFHVJiKs1F2LwKCAQEAzgnwA8bCLvgIt5rx
+gLod2I7NkFRhPIHLm92VRf0HSHEe1Jo0Q7Yk5F56j00NjmgDItwLpg/hpfZ/wOLY
+nTFrz4kj0636+jESprcxXn4WQAV+GTjXVqDpZ1fW9EEwEriYLoNbV/kzOIwPPD9G
++iJATrZJRb7dEMdhGy/qaB0fCxKmdDoBZKSSxjAUfzfbpv+GX4IbS5ykx07+81q1
+0crtjgQHdoLdCUN1ve4qtIEt4nHaBfPWq7jy0ycXwlH6jE74wajsCq4xrPy1bKXH
+TcHg+PrNRXF/wDoQYboVKL0ST0r0IixxqjAGIhLRy0KN1/CypBlmj8od12oSW1AZ
+DxW6sQKCAQEAtIMW8M5MVO/2dam8XFMySMBvncl5PjuqEIFnFjwIaaFAZEtpnIPR
+nCeFKtpIb+aL7TQP1hNbWPIOYfm6CUUH6dRRHeAEZvRjZS+KNlxxNkkFtM3itVA2
+JCd0YjFakxbrL4FfsRgEoPtnBGexPiDflvIOOqAA2btXGD3/lNofSXbDJHbTqMsX
+KQw9YSfYon2t5UtH+bmTyiKGXi/B+KXJxpnuZ7SEmY9DrHF7jcxUj0+jBKbfJf70
+DEcxVRW3rx2jw6kSA+t/enM9ZDqxGVfzOeit0UpPa9uEyAoJeQAxH20rMq+VMyub
+fRxgWOjsMtHFbKGqgPjG3uEU2vi4B4CLGQKCAQEA2Mr5f2AXPR8jca1+Id+CxZpU
+bgMML7gW31L4lGX9Teo9z+zSdN7sIwqe42Zla1N9wda8p5ribnJxwRdxcPL8bid5
+LLlls4xXD/jQCQCFL90X59Tm6VD6tm1VyCjL44nRwAqP4vJObSB5rTqJYtkfVmnp
+KERF5P0i5yv4Oox0ZOsThou9jtyl1dS50Td0Urhp4LhPdmpDPUq25K1sDDfnGFm6
+IcMPkVznRPUoKQCG9DSQcQqttkSV9Po+qfLa3aHtdndfe88Gd9uom8bsAMTZAfSZ
+D4YhqBHSLWrxvtQ8GxkaPITJv7hocwssdFRUj5/UJKJBgUXPBXEXh+fxlDaGQQ==
+-----END RSA PRIVATE KEY-----
diff --git a/target/product/telephony.mk b/target/product/telephony_system.mk
similarity index 78%
rename from target/product/telephony.mk
rename to target/product/telephony_system.mk
index 38a8caa..0b1e8a2 100644
--- a/target/product/telephony.mk
+++ b/target/product/telephony_system.mk
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007 The Android Open Source Project
+# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,16 +14,16 @@
# limitations under the License.
#
-# This is the list of product-level settings that are specific
-# to products that have telephony hardware.
+# This is the list of modules that are specific to products that have telephony
+# hardware, and are installed on the system partition.
PRODUCT_PACKAGES := \
+ ANS \
CarrierConfig \
CarrierDefaultApp \
Dialer \
CallLogBackup \
CellBroadcastReceiver \
EmergencyInfo \
- rild
PRODUCT_COPY_FILES := \
diff --git a/target/product/telephony.mk b/target/product/telephony_vendor.mk
similarity index 67%
copy from target/product/telephony.mk
copy to target/product/telephony_vendor.mk
index 38a8caa..bddd383 100644
--- a/target/product/telephony.mk
+++ b/target/product/telephony_vendor.mk
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007 The Android Open Source Project
+# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,16 +14,10 @@
# limitations under the License.
#
-# This is the list of product-level settings that are specific
-# to products that have telephony hardware.
+# This is the list of modules that are specific to products that have telephony
+# hardware, and are installed outside the system partition.
PRODUCT_PACKAGES := \
- CarrierConfig \
- CarrierDefaultApp \
- Dialer \
- CallLogBackup \
- CellBroadcastReceiver \
- EmergencyInfo \
- rild
+ rild \
PRODUCT_COPY_FILES := \
diff --git a/target/product/treble_common.mk b/target/product/treble_common.mk
index 6b0ef2f..d3cce76 100644
--- a/target/product/treble_common.mk
+++ b/target/product/treble_common.mk
@@ -21,7 +21,8 @@
# Generic system image inherits from AOSP with telephony
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_vendor.mk)
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
@@ -54,5 +55,5 @@
PRODUCT_PACKAGES += \
ld.config.vndk_lite.txt
-# Support addtional O-MR1 vendor interface
-PRODUCT_EXTRA_VNDK_VERSIONS := 27
+# Support additional O-MR1 and P vendor interfaces
+PRODUCT_EXTRA_VNDK_VERSIONS := 27 28
diff --git a/target/product/vndk/current.txt b/target/product/vndk/current.txt
index d70fde6d..7d8409b 100644
--- a/target/product/vndk/current.txt
+++ b/target/product/vndk/current.txt
@@ -29,6 +29,7 @@
VNDK-SP: libbacktrace.so
VNDK-SP: libbase.so
VNDK-SP: libbcinfo.so
+VNDK-SP: libbinderthreadstate.so
VNDK-SP: libblas.so
VNDK-SP: libc++.so
VNDK-SP: libcompiler_rt.so
@@ -50,6 +51,7 @@
VNDK-core: android.frameworks.schedulerservice@1.0.so
VNDK-core: android.frameworks.sensorservice@1.0.so
VNDK-core: android.frameworks.vr.composer@1.0.so
+VNDK-core: android.hardware.atrace@1.0.so
VNDK-core: android.hardware.audio.common-util.so
VNDK-core: android.hardware.audio.common@2.0.so
VNDK-core: android.hardware.audio.common@2.0-util.so
@@ -97,7 +99,7 @@
VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
VNDK-core: android.hardware.graphics.composer@2.1.so
VNDK-core: android.hardware.graphics.composer@2.2.so
-VNDK-core: android.hardware.health.filesystem@1.0.so
+VNDK-core: android.hardware.health.storage@1.0.so
VNDK-core: android.hardware.health@1.0.so
VNDK-core: android.hardware.health@2.0.so
VNDK-core: android.hardware.ir@1.0.so
@@ -110,6 +112,7 @@
VNDK-core: android.hardware.memtrack@1.0.so
VNDK-core: android.hardware.neuralnetworks@1.0.so
VNDK-core: android.hardware.neuralnetworks@1.1.so
+VNDK-core: android.hardware.neuralnetworks@1.2.so
VNDK-core: android.hardware.nfc@1.0.so
VNDK-core: android.hardware.nfc@1.1.so
VNDK-core: android.hardware.oemlock@1.0.so
@@ -151,6 +154,7 @@
VNDK-core: android.hardware.wifi@1.2.so
VNDK-core: android.hidl.allocator@1.0.so
VNDK-core: android.hidl.memory.block@1.0.so
+VNDK-core: android.hidl.safe_union@1.0.so
VNDK-core: android.hidl.token@1.0.so
VNDK-core: android.hidl.token@1.0-utils.so
VNDK-core: android.system.net.netd@1.0.so
@@ -249,6 +253,7 @@
VNDK-core: libyuv.so
VNDK-core: libziparchive.so
VNDK-private: libbacktrace.so
+VNDK-private: libbinderthreadstate.so
VNDK-private: libblas.so
VNDK-private: libcompiler_rt.so
VNDK-private: libft2.so
diff --git a/tools/atree/files.cpp b/tools/atree/files.cpp
index d5c8a97..b90f8b3 100644
--- a/tools/atree/files.cpp
+++ b/tools/atree/files.cpp
@@ -81,7 +81,7 @@
state = TEXT;
break;
}
- // otherwise fall-through to TEXT case
+ [[fallthrough]];
case TEXT:
if (state != IN_QUOTE && isspace(*p)) {
if (q != p) {
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index e9419fe..1e8677c 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -46,10 +46,10 @@
from __future__ import print_function
import datetime
+import logging
import os
import shlex
import shutil
-import subprocess
import sys
import uuid
import zipfile
@@ -63,8 +63,9 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-OPTIONS = common.OPTIONS
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
OPTIONS.replace_updated_files_list = []
@@ -72,9 +73,6 @@
OPTIONS.replace_verity_private_key = False
OPTIONS.is_signing = False
-# Partitions that should have their care_map added to META/care_map.txt.
-PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services',
- 'odm')
# Use a fixed timestamp (01/01/2009 00:00:00 UTC) for files when packaging
# images. (b/24377993, b/80600931)
FIXED_FILE_TIMESTAMP = int((
@@ -111,16 +109,16 @@
(which, care_map_ranges): care_map_ranges is the raw string of the care_map
RangeSet.
"""
- assert which in PARTITIONS_WITH_CARE_MAP
+ assert which in common.PARTITIONS_WITH_CARE_MAP
simg = sparse_img.SparseImage(imgname)
care_map_ranges = simg.care_map
- key = which + "_adjusted_partition_size"
- adjusted_blocks = OPTIONS.info_dict.get(key)
- if adjusted_blocks:
- assert adjusted_blocks > 0, "blocks should be positive for " + which
- care_map_ranges = care_map_ranges.intersect(rangelib.RangeSet(
- "0-%d" % (adjusted_blocks,)))
+ key = which + "_image_blocks"
+ image_blocks = OPTIONS.info_dict.get(key)
+ if image_blocks:
+ assert image_blocks > 0, "blocks for {} must be positive".format(which)
+ care_map_ranges = care_map_ranges.intersect(
+ rangelib.RangeSet("0-{}".format(image_blocks)))
return [which, care_map_ranges.to_string_raw()]
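The renamed "<which>_image_blocks" key records how many blocks of the built image actually carry filesystem data, so GetCareMap() can clip the care map to that range. A minimal sketch of the clipping idea, using a plain set of block indices as a stand-in for rangelib.RangeSet (names below are illustrative, not the releasetools API):

    # Keep only blocks within the usable image range 0..image_blocks
    # (inclusive), mimicking the RangeSet.intersect() call above.
    def clip_care_map(care_map_blocks, image_blocks):
        assert image_blocks > 0, "blocks must be positive"
        return {b for b in care_map_blocks if b <= image_blocks}

    # Blocks past the limit (e.g. verity metadata) drop out of the care map.
    print(sorted(clip_care_map({0, 1, 2, 25599, 25600}, 25599)))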
@@ -131,7 +129,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
if os.path.exists(img.input_name):
- print("system.img already exists; no need to rebuild...")
+ logger.info("system.img already exists; no need to rebuild...")
return img.input_name
def output_sink(fn, data):
@@ -146,7 +144,7 @@
common.ZipWrite(output_zip, ofile.name, arc_name)
if OPTIONS.rebuild_recovery:
- print("Building new recovery patch")
+ logger.info("Building new recovery patch")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
@@ -163,7 +161,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
if os.path.exists(img.input_name):
- print("system_other.img already exists; no need to rebuild...")
+ logger.info("system_other.img already exists; no need to rebuild...")
return
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
@@ -175,7 +173,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
if os.path.exists(img.input_name):
- print("vendor.img already exists; no need to rebuild...")
+ logger.info("vendor.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
@@ -190,7 +188,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
if os.path.exists(img.input_name):
- print("product.img already exists; no need to rebuild...")
+ logger.info("product.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -208,7 +206,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
"product_services.img")
if os.path.exists(img.input_name):
- print("product_services.img already exists; no need to rebuild...")
+ logger.info("product_services.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -224,7 +222,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img")
if os.path.exists(img.input_name):
- print("odm.img already exists; no need to rebuild...")
+ logger.info("odm.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -243,7 +241,7 @@
"""
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
if os.path.exists(img.input_name):
- print("dtbo.img already exists; no need to rebuild...")
+ logger.info("dtbo.img already exists; no need to rebuild...")
return img.input_name
dtbo_prebuilt_path = os.path.join(
@@ -262,17 +260,18 @@
args = OPTIONS.info_dict.get("avb_dtbo_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, \
- "avbtool add_hash_footer of %s failed" % (img.name,)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to call 'avbtool add_hash_footer' for {}:\n{}".format(
+ img.name, output)
img.Write()
return img.name
def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
- print("creating " + what + ".img...")
+ logger.info("creating " + what + ".img...")
image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"]
@@ -311,26 +310,25 @@
hash_seed = "hash_seed-" + uuid_seed
image_props["hash_seed"] = str(uuid.uuid5(uuid.NAMESPACE_URL, hash_seed))
- succ = build_image.BuildImage(os.path.join(input_dir, what.upper()),
- image_props, output_file.name)
- assert succ, "build " + what + ".img image failed"
+ build_image.BuildImage(
+ os.path.join(input_dir, what.upper()), image_props, output_file.name)
output_file.Write()
if block_list:
block_list.Write()
- # Set the 'adjusted_partition_size' that excludes the verity blocks of the
- # given image. When avb is enabled, this size is the max image size returned
- # by the avb tool.
+ # Set the '_image_blocks' that excludes the verity metadata blocks of the
+ # given image. When AVB is enabled, this size is the max image size returned
+ # by the AVB tool.
is_verity_partition = "verity_block_device" in image_props
verity_supported = (image_props.get("verity") == "true" or
image_props.get("avb_enable") == "true")
is_avb_enable = image_props.get("avb_hashtree_enable") == "true"
if verity_supported and (is_verity_partition or is_avb_enable):
- adjusted_blocks_value = image_props.get("partition_size")
- if adjusted_blocks_value:
- adjusted_blocks_key = what + "_adjusted_partition_size"
- info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
+ image_size = image_props.get("image_size")
+ if image_size:
+ image_blocks_key = what + "_image_blocks"
+ info_dict[image_blocks_key] = int(image_size) // 4096 - 1
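Worked example of the stored value: the RangeSet text form "0-N" is inclusive, so the key appears to hold the index of the last usable 4 KiB block rather than a block count, hence the trailing "- 1":

    image_size = 100 * 1024 * 1024              # 100 MiB image
    image_blocks = int(image_size) // 4096 - 1  # blocks 0..25599
    print(image_blocks)                         # 25599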
def AddUserdata(output_zip):
@@ -344,7 +342,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
if os.path.exists(img.input_name):
- print("userdata.img already exists; no need to rebuild...")
+ logger.info("userdata.img already exists; no need to rebuild...")
return
# Skip userdata.img if no size.
@@ -352,7 +350,7 @@
if not image_props.get("partition_size"):
return
- print("creating userdata.img...")
+ logger.info("creating userdata.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -364,8 +362,7 @@
fstab = OPTIONS.info_dict["fstab"]
if fstab:
image_props["fs_type"] = fstab["/data"].fs_type
- succ = build_image.BuildImage(user_dir, image_props, img.name)
- assert succ, "build userdata.img image failed"
+ build_image.BuildImage(user_dir, image_props, img.name)
common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
img.Write()
@@ -404,7 +401,7 @@
partitions: A dict that's keyed by partition names with image paths as
values. Only valid partition names are accepted, as listed in
common.AVB_PARTITIONS.
- name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_mainline'.
+ name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
needed_partitions: Partitions whose descriptors should be included into the
generated VBMeta image.
@@ -416,7 +413,7 @@
img = OutputFile(
output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name))
if os.path.exists(img.input_name):
- print("{}.img already exists; not rebuilding...".format(name))
+ logger.info("%s.img already exists; not rebuilding...", name)
return img.input_name
avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -456,9 +453,9 @@
assert found, 'Failed to find {}'.format(image_path)
cmd.extend(split_args)
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"avbtool make_vbmeta_image failed:\n{}".format(stdoutdata)
img.Write()
@@ -486,9 +483,9 @@
if args:
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"bpttool make_table failed:\n{}".format(stdoutdata)
img.Write()
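Call sites like the two above can drop their subprocess arguments because common.Run() is assumed here to capture combined stdout/stderr by default. A rough sketch of that wrapper behavior (the real helper lives in common.py and is not part of this change):

    import subprocess

    def run(cmd, verbose=True, **kwargs):
        if verbose:
            print("Running:", " ".join(cmd))
        # Capture combined output by default, as the new call sites expect.
        kwargs.setdefault("stdout", subprocess.PIPE)
        kwargs.setdefault("stderr", subprocess.STDOUT)
        return subprocess.Popen(cmd, **kwargs)

    proc = run(["echo", "hello"])
    output, _ = proc.communicate()
    assert proc.returncode == 0, "command failed:\n{}".format(output)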
@@ -500,7 +497,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
if os.path.exists(img.input_name):
- print("cache.img already exists; no need to rebuild...")
+ logger.info("cache.img already exists; no need to rebuild...")
return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -508,7 +505,7 @@
if "fs_type" not in image_props:
return
- print("creating cache.img...")
+ logger.info("creating cache.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -517,8 +514,7 @@
fstab = OPTIONS.info_dict["fstab"]
if fstab:
image_props["fs_type"] = fstab["/cache"].fs_type
- succ = build_image.BuildImage(user_dir, image_props, img.name)
- assert succ, "build cache.img image failed"
+ build_image.BuildImage(user_dir, image_props, img.name)
common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
img.Write()
@@ -556,19 +552,19 @@
assert available, "Failed to find " + img_name
-def AddCareMapTxtForAbOta(output_zip, ab_partitions, image_paths):
- """Generates and adds care_map.txt for system and vendor partitions.
+def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
+ """Generates and adds care_map.pb for a/b partition that has care_map.
Args:
output_zip: The output zip file (needs to be already open), or None to
- write care_map.txt to OPTIONS.input_tmp/.
+ write care_map.pb to OPTIONS.input_tmp/.
ab_partitions: The list of A/B partitions.
image_paths: A map from the partition name to the image path.
"""
care_map_list = []
for partition in ab_partitions:
partition = partition.strip()
- if partition not in PARTITIONS_WITH_CARE_MAP:
+ if partition not in common.PARTITIONS_WITH_CARE_MAP:
continue
verity_block_device = "{}_verity_block_device".format(partition)
@@ -579,6 +575,20 @@
assert os.path.exists(image_path)
care_map_list += GetCareMap(partition, image_path)
+ # Adds the fingerprint field to the care_map.
+ build_props = OPTIONS.info_dict.get(partition + ".build.prop", {})
+ prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+ "ro.{}.build.thumbprint".format(partition)]
+
+ present_props = [x for x in prop_name_list if x in build_props]
+ if not present_props:
+ logger.warning("fingerprint is not present for partition %s", partition)
+ property_id, fingerprint = "unknown", "unknown"
+ else:
+ property_id = present_props[0]
+ fingerprint = build_props[property_id]
+ care_map_list += [property_id, fingerprint]
+
if not care_map_list:
return
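With the fingerprint fields added, each qualifying partition now contributes four entries to the flattened list handed to care_map_generator. An illustrative dump of that intermediate text (the fingerprint value below is made up):

    care_map_list = [
        "system", "0-25599",            # partition name + raw RangeSet
        "ro.system.build.fingerprint",  # property id, or "unknown"
        "Android/aosp_arm64/generic_arm64:9/PQ1A/eng:userdebug/test-keys",
    ]
    print("\n".join(care_map_list))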
@@ -589,16 +599,14 @@
with open(temp_care_map_text, 'w') as text_file:
text_file.write('\n'.join(care_map_list))
- temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".txt")
- care_map_gen_cmd = (["care_map_generator", temp_care_map_text, temp_care_map])
- p = common.Run(care_map_gen_cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
- assert p.returncode == 0, "Failed to generate the care_map proto message."
- if OPTIONS.verbose:
- print(output.rstrip())
+ temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
+ care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+ proc = common.Run(care_map_gen_cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to generate the care_map proto message:\n{}".format(output)
- care_map_path = "META/care_map.txt"
+ care_map_path = "META/care_map.pb"
if output_zip and care_map_path not in output_zip.namelist():
common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
else:
@@ -626,7 +634,7 @@
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
if os.path.exists(prebuilt_path):
- print("%s already exists, no need to overwrite..." % (img_name,))
+ logger.info("%s already exists, no need to overwrite...", img_name)
continue
img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -647,9 +655,9 @@
cmd += shlex.split(OPTIONS.info_dict.get('lpmake_args').strip())
cmd += ['--output', img.name]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"lpmake tool failed:\n{}".format(stdoutdata)
img.Write()
@@ -658,7 +666,7 @@
def ReplaceUpdatedFiles(zip_filename, files_list):
"""Updates all the ZIP entries listed in files_list.
- For now the list includes META/care_map.txt, and the related files under
+ For now the list includes META/care_map.pb, and the related files under
SYSTEM/ after rebuilding recovery.
"""
common.ZipDelete(zip_filename, files_list)
@@ -691,7 +699,7 @@
if not OPTIONS.add_missing:
if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
- print("target_files appears to already contain images.")
+ logger.warning("target_files appears to already contain images.")
sys.exit(1)
OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
@@ -741,7 +749,7 @@
partitions = dict()
def banner(s):
- print("\n\n++++ " + s + " ++++\n\n")
+ logger.info("\n\n++++ " + s + " ++++\n\n")
banner("boot")
# common.GetBootableImage() returns the image directly if present.
@@ -825,15 +833,15 @@
# chained VBMeta image plus the chained VBMeta images themselves.
vbmeta_partitions = common.AVB_PARTITIONS[:]
- vbmeta_mainline = OPTIONS.info_dict.get("avb_vbmeta_mainline", "").strip()
- if vbmeta_mainline:
- banner("vbmeta_mainline")
+ vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip()
+ if vbmeta_system:
+ banner("vbmeta_system")
AddVBMeta(
- output_zip, partitions, "vbmeta_mainline", vbmeta_mainline.split())
+ output_zip, partitions, "vbmeta_system", vbmeta_system.split())
vbmeta_partitions = [
item for item in vbmeta_partitions
- if item not in vbmeta_mainline.split()]
- vbmeta_partitions.append("vbmeta_mainline")
+ if item not in vbmeta_system.split()]
+ vbmeta_partitions.append("vbmeta_system")
vbmeta_vendor = OPTIONS.info_dict.get("avb_vbmeta_vendor", "").strip()
if vbmeta_vendor:
@@ -863,9 +871,9 @@
# ready under IMAGES/ or RADIO/.
CheckAbOtaImages(output_zip, ab_partitions)
- # Generate care_map.txt for system and vendor partitions (if present), then
- # write this file to target_files package.
- AddCareMapTxtForAbOta(output_zip, ab_partitions, partitions)
+ # Generate care_map.pb for ab_partitions, then write this file to
+ # target_files package.
+ AddCareMapForAbOta(output_zip, ab_partitions, partitions)
# Radio images that need to be packed into IMAGES/, and product-img.zip.
pack_radioimages_txt = os.path.join(
@@ -905,20 +913,21 @@
"is_signing"],
extra_option_handler=option_handler)
-
if len(args) != 1:
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
AddImagesToTargetFiles(args[0])
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
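The print-to-logging migration in this file follows the stock Python pattern sketched below: a module-level logger plus logger.exception(), which records the traceback that the old print() call dropped. (common.InitLogging() is releasetools-specific; logging.basicConfig() stands in for it here.)

    import logging

    logger = logging.getLogger(__name__)

    def main():
        logging.basicConfig(level=logging.INFO)
        try:
            raise RuntimeError("external tool failed")
        except RuntimeError:
            # Unlike the old print(), this also emits the full traceback.
            logger.exception("\n ERROR:\n")

    main()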
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index c7d93d3..2d20e23 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -19,11 +19,11 @@
import functools
import heapq
import itertools
+import logging
import multiprocessing
import os
import os.path
import re
-import subprocess
import sys
import threading
from collections import deque, OrderedDict
@@ -32,9 +32,10 @@
import common
from rangelib import RangeSet
-
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+logger = logging.getLogger(__name__)
+
def compute_patch(srcfile, tgtfile, imgdiff=False):
patchfile = common.MakeTempFile(prefix='patch-')
@@ -44,11 +45,10 @@
# Don't dump the bsdiff/imgdiff commands, which are not useful for the case
# here, since they contain temp filenames only.
- p = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
+ proc = common.Run(cmd, verbose=False)
+ output, _ = proc.communicate()
- if p.returncode != 0:
+ if proc.returncode != 0:
raise ValueError(output)
with open(patchfile, 'rb') as f:
@@ -307,8 +307,8 @@
"""Prints a report of the collected imgdiff stats."""
def print_header(header, separator):
- print(header)
- print(separator * len(header) + '\n')
+ logger.info(header)
+ logger.info(separator * len(header) + '\n')
print_header(' Imgdiff Stats Report ', '=')
for key in self.REASONS:
@@ -317,7 +317,7 @@
values = self.stats[key]
section_header = ' {} (count: {}) '.format(key, len(values))
print_header(section_header, '-')
- print(''.join([' {}\n'.format(name) for name in values]))
+ logger.info(''.join([' {}\n'.format(name) for name in values]))
class BlockImageDiff(object):
@@ -485,7 +485,7 @@
self.WriteTransfers(prefix)
# Report the imgdiff stats.
- if common.OPTIONS.verbose and not self.disable_imgdiff:
+ if not self.disable_imgdiff:
self.imgdiff_stats.Report()
def WriteTransfers(self, prefix):
@@ -649,6 +649,14 @@
self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
+ if self.tgt.hashtree_info:
+ out.append("compute_hash_tree {} {} {} {} {}\n".format(
+ self.tgt.hashtree_info.hashtree_range.to_string_raw(),
+ self.tgt.hashtree_info.filesystem_range.to_string_raw(),
+ self.tgt.hashtree_info.hash_algorithm,
+ self.tgt.hashtree_info.salt,
+ self.tgt.hashtree_info.root_hash))
+
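For reference, the appended command renders as one transfer-list line; every value below is a placeholder (the real ones come from self.tgt.hashtree_info):

    line = "compute_hash_tree {} {} {} {} {}\n".format(
        "25600-25855",  # hashtree_range, raw RangeSet text
        "0-25599",      # filesystem_range
        "sha256",       # hash_algorithm
        "aee087a5",     # salt (hex, truncated placeholder)
        "deadbeef")     # root_hash (placeholder)
    print(line)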
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
@@ -687,16 +695,17 @@
OPTIONS = common.OPTIONS
if OPTIONS.cache_size is not None:
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), "
- "limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n",
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed)
else:
- print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: <unknown>\n",
+ max_stashed_blocks, self._max_stashed_size)
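Worked example of the reported percentage, assuming a 512 MiB cache partition and a stash_threshold of 0.8 (both numbers are illustrative):

    cache_size = 512 * 1024 * 1024
    stash_threshold = 0.8
    max_allowed = cache_size * stash_threshold      # ~409.6 MiB
    max_stashed_size = 300 * 1024 * 1024
    print("%.2f%%" % (max_stashed_size * 100.0 / max_allowed))  # 73.24%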
def ReviseStashSize(self):
- print("Revising stash size...")
+ logger.info("Revising stash size...")
stash_map = {}
# Create the map between a stash and its def/use points. For example, for a
@@ -741,7 +750,7 @@
# that will use this stash and replace the command with "new".
use_cmd = stash_map[stash_raw_id][2]
replaced_cmds.append(use_cmd)
- print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+ logger.info("%10d %9s %s", sr.size(), "explicit", use_cmd)
else:
# Update the stashes map.
if sh in stashes:
@@ -757,7 +766,7 @@
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
replaced_cmds.append(xf)
- print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+ logger.info("%10d %9s %s", xf.src_ranges.size(), "implicit", xf)
# Replace the commands in replaced_cmds with "new"s.
for cmd in replaced_cmds:
@@ -783,28 +792,29 @@
stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize
- print(" Total %d blocks (%d bytes) are packed as new blocks due to "
- "insufficient cache size." % (new_blocks, num_of_bytes))
+ logger.info(
+ " Total %d blocks (%d bytes) are packed as new blocks due to "
+ "insufficient cache size.", new_blocks, num_of_bytes)
return new_blocks
def ComputePatches(self, prefix):
- print("Reticulating splines...")
+ logger.info("Reticulating splines...")
diff_queue = []
patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f:
for index, xf in enumerate(self.transfers):
if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
- str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "new":
self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style,
- xf.tgt_name, str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "diff":
# We can't compare src and tgt directly because they may have
@@ -822,11 +832,12 @@
xf.patch = None
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
if xf.src_ranges != xf.tgt_ranges:
- print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
- tgt_size, tgt_size, 100.0, xf.style,
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s (from %s)", tgt_size, tgt_size,
+ 100.0, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"),
- str(xf.tgt_ranges), str(xf.src_ranges)))
+ str(xf.tgt_ranges), str(xf.src_ranges))
else:
if xf.patch:
# We have already generated the patch with imgdiff, while
@@ -845,9 +856,9 @@
if diff_queue:
if self.threads > 1:
- print("Computing patches (using %d threads)..." % (self.threads,))
+ logger.info("Computing patches (using %d threads)...", self.threads)
else:
- print("Computing patches...")
+ logger.info("Computing patches...")
diff_total = len(diff_queue)
patches = [None] * diff_total
@@ -869,13 +880,6 @@
xf_index, imgdiff, patch_index = diff_queue.pop()
xf = self.transfers[xf_index]
- if sys.stdout.isatty():
- diff_left = len(diff_queue)
- progress = (diff_total - diff_left) * 100 / diff_total
- # '\033[K' is to clear to EOL.
- print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
- sys.stdout.flush()
-
patch = xf.patch
if not patch:
src_ranges = xf.src_ranges
@@ -913,13 +917,10 @@
while threads:
threads.pop().join()
- if sys.stdout.isatty():
- print('\n')
-
if error_messages:
- print('ERROR:')
- print('\n'.join(error_messages))
- print('\n\n\n')
+ logger.error('ERROR:')
+ logger.error('\n'.join(error_messages))
+ logger.error('\n\n\n')
sys.exit(1)
else:
patches = []
@@ -933,14 +934,13 @@
offset += xf.patch_len
patch_fd.write(patch)
- if common.OPTIONS.verbose:
- tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
- xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
- xf.style,
- xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")"),
- xf.tgt_ranges, xf.src_ranges))
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s %s", xf.patch_len, tgt_size,
+ xf.patch_len * 100.0 / tgt_size, xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ xf.tgt_ranges, xf.src_ranges)
def AssertSha1Good(self):
"""Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -988,13 +988,19 @@
assert touched[i] == 0
touched[i] = 1
+ if self.tgt.hashtree_info:
+ for s, e in self.tgt.hashtree_info.hashtree_range:
+ for i in range(s, e):
+ assert touched[i] == 0
+ touched[i] = 1
+
# Check that we've written every target block.
for s, e in self.tgt.care_map:
for i in range(s, e):
assert touched[i] == 1
def ImproveVertexSequence(self):
- print("Improving vertex order...")
+ logger.info("Improving vertex order...")
# At this point our digraph is acyclic; we reversed any edges that
# were backwards in the heuristically-generated sequence. The
@@ -1046,7 +1052,7 @@
blocks will be written to the same stash slot in WriteTransfers().
"""
- print("Reversing backward edges...")
+ logger.info("Reversing backward edges...")
in_order = 0
out_of_order = 0
stash_raw_id = 0
@@ -1078,15 +1084,15 @@
xf.goes_after[u] = None # value doesn't matter
u.goes_before[xf] = None
- print((" %d/%d dependencies (%.2f%%) were violated; "
- "%d source blocks stashed.") %
- (out_of_order, in_order + out_of_order,
- (out_of_order * 100.0 / (in_order + out_of_order))
- if (in_order + out_of_order) else 0.0,
- stash_size))
+ logger.info(
+ " %d/%d dependencies (%.2f%%) were violated; %d source blocks "
+ "stashed.", out_of_order, in_order + out_of_order,
+ (out_of_order * 100.0 / (in_order + out_of_order)) if (
+ in_order + out_of_order) else 0.0,
+ stash_size)
def FindVertexSequence(self):
- print("Finding vertex sequence...")
+ logger.info("Finding vertex sequence...")
# This is based on "A Fast & Effective Heuristic for the Feedback
# Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of
@@ -1199,7 +1205,7 @@
self.transfers = new_transfers
def GenerateDigraph(self):
- print("Generating digraph...")
+ logger.info("Generating digraph...")
# Each item of source_ranges will be:
# - None, if that block is not used as a source,
@@ -1365,9 +1371,9 @@
if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size
- print('%10d %10d (%6.2f%%) %s' % (
- tgt_skipped.size(), tgt_size,
- tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+ logger.info(
+ '%10d %10d (%6.2f%%) %s', tgt_skipped.size(), tgt_size,
+ tgt_skipped.size() * 100.0 / tgt_size, tgt_name)
AddSplitTransfers(
"%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,),
@@ -1481,9 +1487,9 @@
"--block-limit={}".format(max_blocks_per_transfer),
"--split-info=" + patch_info_file,
src_file, tgt_file, patch_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- imgdiff_output, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ imgdiff_output, _ = proc.communicate()
+ assert proc.returncode == 0, \
"Failed to create imgdiff patch between {} and {}:\n{}".format(
src_name, tgt_name, imgdiff_output)
@@ -1508,7 +1514,7 @@
split_src_ranges,
patch_content))
- print("Finding transfers...")
+ logger.info("Finding transfers...")
large_apks = []
split_large_apks = []
@@ -1533,6 +1539,9 @@
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
continue
+ elif tgt_fn == "__HASHTREE":
+ continue
+
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
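A toy dispatch mirroring the new special case in FindTransfers(): blocks filed under "__HASHTREE" are neither diffed nor copied, since the updater rebuilds them on-device via the compute_hash_tree command added above (the shapes below are simplified, not the real transfer machinery):

    def classify(tgt_fn, src_file_map):
        if tgt_fn == "__ZERO":
            return "zero"       # written as zero blocks
        if tgt_fn == "__HASHTREE":
            return "skip"       # regenerated by compute_hash_tree
        if tgt_fn in src_file_map:
            return "diff"       # exact pathname match in the source
        return "new"

    print(classify("__HASHTREE", {}))  # skip
    print(classify("system/lib64/libc.so", {"system/lib64/libc.so": None}))  # diff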
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 1904085..4a013c2 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -26,321 +26,51 @@
from __future__ import print_function
+import logging
import os
import os.path
import re
-import shlex
import shutil
-import subprocess
import sys
import common
-import sparse_img
+import verity_utils
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-
-FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
-BLOCK_SIZE = 4096
+BLOCK_SIZE = common.BLOCK_SIZE
BYTES_IN_MB = 1024 * 1024
-def RunCommand(cmd, verbose=None, env=None):
- """Echo and run the given command.
+class BuildImageError(Exception):
+ """An Exception raised during image building."""
- Args:
- cmd: the command represented as a list of strings.
- verbose: show commands being executed.
- env: a dictionary of additional environment variables.
- Returns:
- A tuple of the output and the exit code.
- """
- env_copy = None
- if env is not None:
- env_copy = os.environ.copy()
- env_copy.update(env)
- if verbose is None:
- verbose = OPTIONS.verbose
- if verbose:
- print("Running: " + " ".join(cmd))
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
- env=env_copy)
- output, _ = p.communicate()
-
- if verbose:
- print(output.rstrip())
- return (output, p.returncode)
-
-
-def GetVerityFECSize(partition_size):
- cmd = ["fec", "-s", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- return False, 0
- return True, int(output)
-
-
-def GetVerityTreeSize(partition_size):
- cmd = ["build_verity_tree", "-s", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- return False, 0
- return True, int(output)
-
-
-def GetVerityMetadataSize(partition_size):
- cmd = ["build_verity_metadata.py", "size", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- return False, 0
- return True, int(output)
-
-
-def GetVeritySize(partition_size, fec_supported):
- success, verity_tree_size = GetVerityTreeSize(partition_size)
- if not success:
- return 0
- success, verity_metadata_size = GetVerityMetadataSize(partition_size)
- if not success:
- return 0
- verity_size = verity_tree_size + verity_metadata_size
- if fec_supported:
- success, fec_size = GetVerityFECSize(partition_size + verity_size)
- if not success:
- return 0
- return verity_size + fec_size
- return verity_size
+ def __init__(self, message):
+ Exception.__init__(self, message)
def GetDiskUsage(path):
- """Return number of bytes that "path" occupies on host.
+ """Returns the number of bytes that "path" occupies on host.
Args:
path: The directory or file to calculate size on
+
Returns:
- True and the number of bytes if successful,
- False and 0 otherwise.
+ The number of bytes.
+
+ Raises:
+ BuildImageError: On error.
"""
- env = {"POSIXLY_CORRECT": "1"}
+ env_copy = os.environ.copy()
+ env_copy["POSIXLY_CORRECT"] = "1"
cmd = ["du", "-s", path]
- output, exit_code = RunCommand(cmd, verbose=False, env=env)
- if exit_code != 0:
- return False, 0
- # POSIX du returns number of blocks with block size 512
- return True, int(output.split()[0]) * 512
-
-
-def GetSimgSize(image_file):
- simg = sparse_img.SparseImage(image_file, build_map=False)
- return simg.blocksize * simg.total_blocks
-
-
-def ZeroPadSimg(image_file, pad_size):
- blocks = pad_size // BLOCK_SIZE
- print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
- simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
- simg.AppendFillChunk(0, blocks)
-
-
-def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
- """Calculates max image size for a given partition size.
-
- Args:
- avbtool: String with path to avbtool.
- footer_type: 'hash' or 'hashtree' for generating footer.
- partition_size: The size of the partition in question.
- additional_args: Additional arguments to pass to 'avbtool
- add_hashtree_image'.
- Returns:
- The maximum image size or 0 if an error occurred.
- """
- cmd = [avbtool, "add_%s_footer" % footer_type,
- "--partition_size", partition_size, "--calc_max_image_size"]
- cmd.extend(shlex.split(additional_args))
-
- (output, exit_code) = RunCommand(cmd)
- if exit_code != 0:
- return 0
- else:
- return int(output)
-
-
-def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
- partition_name, key_path, algorithm, salt,
- additional_args):
- """Adds dm-verity hashtree and AVB metadata to an image.
-
- Args:
- image_path: Path to image to modify.
- avbtool: String with path to avbtool.
- footer_type: 'hash' or 'hashtree' for generating footer.
- partition_size: The size of the partition in question.
- partition_name: The name of the partition - will be embedded in metadata.
- key_path: Path to key to use or None.
- algorithm: Name of algorithm to use or None.
- salt: The salt to use (a hexadecimal string) or None.
- additional_args: Additional arguments to pass to 'avbtool
- add_hashtree_image'.
-
- Returns:
- True if the operation succeeded.
- """
- cmd = [avbtool, "add_%s_footer" % footer_type,
- "--partition_size", partition_size,
- "--partition_name", partition_name,
- "--image", image_path]
-
- if key_path and algorithm:
- cmd.extend(["--key", key_path, "--algorithm", algorithm])
- if salt:
- cmd.extend(["--salt", salt])
-
- cmd.extend(shlex.split(additional_args))
-
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- print("Failed to add AVB footer! Error: %s" % output)
- return False
- return True
-
-
-def AdjustPartitionSizeForVerity(partition_size, fec_supported):
- """Modifies the provided partition size to account for the verity metadata.
-
- This information is used to size the created image appropriately.
-
- Args:
- partition_size: the size of the partition to be verified.
-
- Returns:
- A tuple of the size of the partition adjusted for verity metadata, and
- the size of verity metadata.
- """
- key = "%d %d" % (partition_size, fec_supported)
- if key in AdjustPartitionSizeForVerity.results:
- return AdjustPartitionSizeForVerity.results[key]
-
- hi = partition_size
- if hi % BLOCK_SIZE != 0:
- hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
-
- # verity tree and fec sizes depend on the partition size, which
- # means this estimate is always going to be unnecessarily small
- verity_size = GetVeritySize(hi, fec_supported)
- lo = partition_size - verity_size
- result = lo
-
- # do a binary search for the optimal size
- while lo < hi:
- i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
- v = GetVeritySize(i, fec_supported)
- if i + v <= partition_size:
- if result < i:
- result = i
- verity_size = v
- lo = i + BLOCK_SIZE
- else:
- hi = i
-
- if OPTIONS.verbose:
- print("Adjusted partition size for verity, partition_size: {},"
- " verity_size: {}".format(result, verity_size))
- AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
- return (result, verity_size)
-
-
-AdjustPartitionSizeForVerity.results = {}
-
-
-def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
- padding_size):
- cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
- verity_path, verity_fec_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- print("Could not build FEC data! Error: %s" % output)
- return False
- return True
-
-
-def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
- cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
- verity_image_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- print("Could not build verity tree! Error: %s" % output)
- return False
- root, salt = output.split()
- prop_dict["verity_root_hash"] = root
- prop_dict["verity_salt"] = salt
- return True
-
-
-def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
- block_device, signer_path, key, signer_args,
- verity_disable):
- cmd = ["build_verity_metadata.py", "build", str(image_size),
- verity_metadata_path, root_hash, salt, block_device, signer_path, key]
- if signer_args:
- cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
- if verity_disable:
- cmd.append("--verity_disable")
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- print("Could not build verity metadata! Error: %s" % output)
- return False
- return True
-
-
-def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
- """Appends the unsparse image to the given sparse image.
-
- Args:
- sparse_image_path: the path to the (sparse) image
- unsparse_image_path: the path to the (unsparse) image
- Returns:
- True on success, False on failure.
- """
- cmd = ["append2simg", sparse_image_path, unsparse_image_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- print("%s: %s" % (error_message, output))
- return False
- return True
-
-
-def Append(target, file_to_append, error_message):
- """Appends file_to_append to target."""
try:
- with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
- for line in input_file:
- out_file.write(line)
- except IOError:
- print(error_message)
- return False
- return True
-
-
-def BuildVerifiedImage(data_image_path, verity_image_path,
- verity_metadata_path, verity_fec_path,
- padding_size, fec_supported):
- if not Append(verity_image_path, verity_metadata_path,
- "Could not append verity metadata!"):
- return False
-
- if fec_supported:
- # build FEC for the entire partition, including metadata
- if not BuildVerityFEC(data_image_path, verity_image_path,
- verity_fec_path, padding_size):
- return False
-
- if not Append(verity_image_path, verity_fec_path, "Could not append FEC!"):
- return False
-
- if not Append2Simg(data_image_path, verity_image_path,
- "Could not append verity data!"):
- return False
- return True
+ output = common.RunAndCheckOutput(cmd, verbose=False, env=env_copy)
+ except common.ExternalError as e:
+ raise BuildImageError("Failed to get disk usage:\n{}".format(e))
+ # POSIX du returns number of blocks with block size 512
+ return int(output.split()[0]) * 512
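A self-contained equivalent of the rewritten helper, with subprocess standing in for common.RunAndCheckOutput():

    import os
    import subprocess

    def get_disk_usage(path):
        env = os.environ.copy()
        env["POSIXLY_CORRECT"] = "1"
        output = subprocess.check_output(["du", "-s", path], env=env)
        # POSIX du reports usage in 512-byte blocks.
        return int(output.split()[0]) * 512

    print(get_disk_usage("."))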
def UnsparseImage(sparse_image_path, replace=True):
@@ -351,81 +81,21 @@
if replace:
os.unlink(unsparse_image_path)
else:
- return True, unsparse_image_path
+ return unsparse_image_path
inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
- (inflate_output, exit_code) = RunCommand(inflate_command)
- if exit_code != 0:
- print("Error: '%s' failed with exit code %d:\n%s" % (
- inflate_command, exit_code, inflate_output))
+ try:
+ common.RunAndCheckOutput(inflate_command)
+ except:
os.remove(unsparse_image_path)
- return False, None
- return True, unsparse_image_path
-
-
-def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
- """Creates an image that is verifiable using dm-verity.
-
- Args:
- out_file: the location to write the verifiable image at
- prop_dict: a dictionary of properties required for image creation and
- verification
- Returns:
- True on success, False otherwise.
- """
- # get properties
- image_size = int(prop_dict["partition_size"])
- block_dev = prop_dict["verity_block_device"]
- signer_key = prop_dict["verity_key"] + ".pk8"
- if OPTIONS.verity_signer_path is not None:
- signer_path = OPTIONS.verity_signer_path
- else:
- signer_path = prop_dict["verity_signer_cmd"]
- signer_args = OPTIONS.verity_signer_args
-
- # make a tempdir
- tempdir_name = common.MakeTempDir(suffix="_verity_images")
-
- # get partial image paths
- verity_image_path = os.path.join(tempdir_name, "verity.img")
- verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
- verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
-
- # build the verity tree and get the root hash and salt
- if not BuildVerityTree(out_file, verity_image_path, prop_dict):
- return False
-
- # build the metadata blocks
- root_hash = prop_dict["verity_root_hash"]
- salt = prop_dict["verity_salt"]
- verity_disable = "verity_disable" in prop_dict
- if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
- block_dev, signer_path, signer_key, signer_args,
- verity_disable):
- return False
-
- # build the full verified image
- target_size = int(prop_dict["original_partition_size"])
- verity_size = int(prop_dict["verity_size"])
-
- padding_size = target_size - image_size - verity_size
- assert padding_size >= 0
-
- if not BuildVerifiedImage(out_file,
- verity_image_path,
- verity_metadata_path,
- verity_fec_path,
- padding_size,
- fec_supported):
- return False
-
- return True
+ raise
+ return unsparse_image_path
def ConvertBlockMapToBaseFs(block_map_file):
base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
- (_, exit_code) = RunCommand(convert_command)
- return base_fs_file if exit_code == 0 else None
+ common.RunAndCheckOutput(convert_command)
+ return base_fs_file
def SetUpInDirAndFsConfig(origin_in, prop_dict):
@@ -489,11 +159,9 @@
ext4fs_output: The output string from mke2fs command.
prop_dict: The property dict.
- Returns:
- The check result.
-
Raises:
AssertionError: On invalid input.
+ BuildImageError: On check failure.
"""
assert ext4fs_output is not None
assert prop_dict.get('fs_type', '').startswith('ext4')
@@ -511,12 +179,11 @@
adjusted_blocks = total_blocks - headroom_blocks
if used_blocks > adjusted_blocks:
mount_point = prop_dict["mount_point"]
- print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
- "headroom: %d blocks, available: %d blocks)" % (
- mount_point, total_blocks, used_blocks, headroom_blocks,
- adjusted_blocks))
- return False
- return True
+ raise BuildImageError(
+ "Error: Not enough room on {} (total: {} blocks, used: {} blocks, "
+ "headroom: {} blocks, available: {} blocks)".format(
+ mount_point, total_blocks, used_blocks, headroom_blocks,
+ adjusted_blocks))
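Worked example of the headroom arithmetic above (values illustrative): with 4 KiB blocks, a 2 MiB headroom reserves 512 blocks that used_blocks may not consume:

    total_blocks = 25600                       # 100 MiB partition
    headroom_blocks = 2 * 1024 * 1024 // 4096  # 512
    adjusted_blocks = total_blocks - headroom_blocks
    used_blocks = 24576
    if used_blocks > adjusted_blocks:
        raise ValueError("Not enough room")    # CheckHeadroom raises BuildImageError
    print("ok: {} blocks to spare".format(adjusted_blocks - used_blocks))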
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
@@ -532,8 +199,8 @@
under system/core/libcutils) reads device specific FS config files from
there.
- Returns:
- True iff the image is built successfully.
+ Raises:
+ BuildImageError: On build image failures.
"""
in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
@@ -549,52 +216,54 @@
verity_supported = prop_dict.get("verity") == "true"
verity_fec_supported = prop_dict.get("verity_fec") == "true"
+ avb_footer_type = None
+ if prop_dict.get("avb_hash_enable") == "true":
+ avb_footer_type = "hash"
+ elif prop_dict.get("avb_hashtree_enable") == "true":
+ avb_footer_type = "hashtree"
+
+ if avb_footer_type:
+ avbtool = prop_dict.get("avb_avbtool")
+ avb_signing_args = prop_dict.get(
+ "avb_add_" + avb_footer_type + "_footer_args")
+
if (prop_dict.get("use_dynamic_partition_size") == "true" and
"partition_size" not in prop_dict):
- # if partition_size is not defined, use output of `du' + reserved_size
- success, size = GetDiskUsage(in_dir)
- if not success:
- return False
- if OPTIONS.verbose:
- print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB))
+ # If partition_size is not defined, use output of `du' + reserved_size.
+ size = GetDiskUsage(in_dir)
+ logger.info(
+ "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
size += int(prop_dict.get("partition_reserved_size", 0))
# Round this up to a multiple of 4K so that avbtool works
size = common.RoundUpTo4K(size)
+ # Adjust partition_size to add more space for AVB footer, to prevent
+ # it from consuming partition_reserved_size.
+ if avb_footer_type:
+ size = verity_utils.AVBCalcMinPartitionSize(
+ size,
+ lambda x: verity_utils.AVBCalcMaxImageSize(
+ avbtool, avb_footer_type, x, avb_signing_args))
prop_dict["partition_size"] = str(size)
- if OPTIONS.verbose:
- print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
+ logger.info(
+ "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
- # Adjust the partition size to make room for the hashes if this is to be
- # verified.
+ prop_dict["image_size"] = prop_dict["partition_size"]
+
+ # Adjust the image size to make room for the hashes if this is to be verified.
if verity_supported and is_verity_partition:
partition_size = int(prop_dict.get("partition_size"))
- (adjusted_size, verity_size) = AdjustPartitionSizeForVerity(
+ image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
partition_size, verity_fec_supported)
- if not adjusted_size:
- return False
- prop_dict["partition_size"] = str(adjusted_size)
- prop_dict["original_partition_size"] = str(partition_size)
+ prop_dict["image_size"] = str(image_size)
prop_dict["verity_size"] = str(verity_size)
- # Adjust partition size for AVB hash footer or AVB hashtree footer.
- avb_footer_type = ''
- if prop_dict.get("avb_hash_enable") == "true":
- avb_footer_type = 'hash'
- elif prop_dict.get("avb_hashtree_enable") == "true":
- avb_footer_type = 'hashtree'
-
+ # Adjust the image size for AVB hash footer or AVB hashtree footer.
if avb_footer_type:
- avbtool = prop_dict["avb_avbtool"]
partition_size = prop_dict["partition_size"]
# avb_add_hash_footer_args or avb_add_hashtree_footer_args.
- additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
- max_image_size = AVBCalcMaxImageSize(avbtool, avb_footer_type,
- partition_size, additional_args)
- if max_image_size <= 0:
- print("AVBCalcMaxImageSize is <= 0: %d" % max_image_size)
- return False
- prop_dict["partition_size"] = str(max_image_size)
- prop_dict["original_partition_size"] = partition_size
+ max_image_size = verity_utils.AVBCalcMaxImageSize(
+ avbtool, avb_footer_type, partition_size, avb_signing_args)
+ prop_dict["image_size"] = str(max_image_size)
if fs_type.startswith("ext"):
build_command = [prop_dict["ext_mkuserimg"]]
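
The sizing flow above as a hedged, self-contained sketch: derive the byte size from `du' plus the reserve, round up to a 4K multiple (the same arithmetic as common.RoundUpTo4K), then let the verity_utils helpers grow the partition so the AVB footer cannot eat into the reserve. The byte counts are illustrative only:

    def round_up_to_4k(value):
        # Same arithmetic as common.RoundUpTo4K.
        rounded_up = value + 4095
        return rounded_up - (rounded_up % 4096)

    tree_size = 200 * 1024 * 1024   # illustrative `du' result for in_dir
    reserved = 16 * 1024 * 1024     # illustrative partition_reserved_size
    size = round_up_to_4k(tree_size + reserved)
    # With an AVB footer enabled, AVBCalcMinPartitionSize picks the smallest
    # partition_size whose AVBCalcMaxImageSize(...) is still >= size, and that
    # max becomes image_size; without AVB, image_size == partition_size.
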
@@ -603,7 +272,7 @@
run_e2fsck = True
build_command.extend([in_dir, out_file, fs_type,
prop_dict["mount_point"]])
- build_command.append(prop_dict["partition_size"])
+ build_command.append(prop_dict["image_size"])
if "journal_size" in prop_dict:
build_command.extend(["-j", prop_dict["journal_size"]])
if "timestamp" in prop_dict:
@@ -616,8 +285,6 @@
build_command.extend(["-B", prop_dict["block_list"]])
if "base_fs_file" in prop_dict:
base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
- if base_fs_file is None:
- return False
build_command.extend(["-d", base_fs_file])
build_command.extend(["-L", prop_dict["mount_point"]])
if "extfs_inode_count" in prop_dict:
@@ -662,7 +329,7 @@
build_command.extend(["-a"])
elif fs_type.startswith("f2fs"):
build_command = ["mkf2fsuserimg.sh"]
- build_command.extend([out_file, prop_dict["partition_size"]])
+ build_command.extend([out_file, prop_dict["image_size"]])
if fs_config:
build_command.extend(["-C", fs_config])
build_command.extend(["-f", in_dir])
@@ -675,92 +342,79 @@
build_command.extend(["-T", str(prop_dict["timestamp"])])
build_command.extend(["-L", prop_dict["mount_point"]])
else:
- print("Error: unknown filesystem type '%s'" % (fs_type))
- return False
+ raise BuildImageError(
+ "Error: unknown filesystem type: {}".format(fs_type))
- (mkfs_output, exit_code) = RunCommand(build_command)
- if exit_code != 0:
- print("Error: '%s' failed with exit code %d:\n%s" % (
- build_command, exit_code, mkfs_output))
- success, du = GetDiskUsage(in_dir)
- du_str = ("%d bytes (%d MB)" % (du, du // BYTES_IN_MB)
- ) if success else "unknown"
+ try:
+ mkfs_output = common.RunAndCheckOutput(build_command)
+ except:
+ try:
+ du = GetDiskUsage(in_dir)
+ du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
+ # Suppress any errors from GetDiskUsage() to avoid hiding the real errors
+ # from common.RunAndCheckOutput().
+ except Exception: # pylint: disable=broad-except
+ logger.exception("Failed to compute disk usage with du")
+ du_str = "unknown"
print(
"Out of space? The tree size of {} is {}, with reserved space of {} "
"bytes ({} MB).".format(
in_dir, du_str,
int(prop_dict.get("partition_reserved_size", 0)),
int(prop_dict.get("partition_reserved_size", 0)) // BYTES_IN_MB))
- if "original_partition_size" in prop_dict:
- print(
- "The max size for filsystem files is {} bytes ({} MB), out of a "
- "total image size of {} bytes ({} MB).".format(
- int(prop_dict["partition_size"]),
- int(prop_dict["partition_size"]) // BYTES_IN_MB,
- int(prop_dict["original_partition_size"]),
- int(prop_dict["original_partition_size"]) // BYTES_IN_MB))
- else:
- print("The max image size is {} bytes ({} MB).".format(
- int(prop_dict["partition_size"]),
- int(prop_dict["partition_size"]) // BYTES_IN_MB))
- return False
+ print(
+ "The max image size for filsystem files is {} bytes ({} MB), out of a "
+ "total partition size of {} bytes ({} MB).".format(
+ int(prop_dict["image_size"]),
+ int(prop_dict["image_size"]) // BYTES_IN_MB,
+ int(prop_dict["partition_size"]),
+ int(prop_dict["partition_size"]) // BYTES_IN_MB))
+ raise
# Check if there's enough headroom space available for ext4 image.
if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
- if not CheckHeadroom(mkfs_output, prop_dict):
- return False
+ CheckHeadroom(mkfs_output, prop_dict)
if not fs_spans_partition:
mount_point = prop_dict.get("mount_point")
- partition_size = int(prop_dict.get("partition_size"))
- image_size = GetSimgSize(out_file)
- if image_size > partition_size:
- print("Error: %s image size of %d is larger than partition size of "
- "%d" % (mount_point, image_size, partition_size))
- return False
+ image_size = int(prop_dict["image_size"])
+ sparse_image_size = verity_utils.GetSimgSize(out_file)
+ if sparse_image_size > image_size:
+ raise BuildImageError(
+ "Error: {} image size of {} is larger than partition size of "
+ "{}".format(mount_point, sparse_image_size, image_size))
if verity_supported and is_verity_partition:
- ZeroPadSimg(out_file, partition_size - image_size)
+ verity_utils.ZeroPadSimg(out_file, image_size - sparse_image_size)
# Create the verified image if this is to be verified.
if verity_supported and is_verity_partition:
- if not MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict):
- return False
+ verity_utils.MakeVerityEnabledImage(
+ out_file, verity_fec_supported, prop_dict)
# Add AVB HASH or HASHTREE footer (metadata).
if avb_footer_type:
- avbtool = prop_dict["avb_avbtool"]
- original_partition_size = prop_dict["original_partition_size"]
+ partition_size = prop_dict["partition_size"]
partition_name = prop_dict["partition_name"]
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
salt = prop_dict.get("avb_salt")
- # avb_add_hash_footer_args or avb_add_hashtree_footer_args
- additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
- if not AVBAddFooter(out_file, avbtool, avb_footer_type,
- original_partition_size, partition_name, key_path,
- algorithm, salt, additional_args):
- return False
+ verity_utils.AVBAddFooter(
+ out_file, avbtool, avb_footer_type, partition_size, partition_name,
+ key_path, algorithm, salt, avb_signing_args)
if run_e2fsck and prop_dict.get("skip_fsck") != "true":
- success, unsparse_image = UnsparseImage(out_file, replace=False)
- if not success:
- return False
+ unsparse_image = UnsparseImage(out_file, replace=False)
# Run e2fsck on the inflated image file
e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
# TODO(b/112062612): work around e2fsck failure with SANITIZE_HOST=address
- env4e2fsck = {"ASAN_OPTIONS": "detect_odr_violation=0"}
- (e2fsck_output, exit_code) = RunCommand(e2fsck_command, env=env4e2fsck)
-
- os.remove(unsparse_image)
-
- if exit_code != 0:
- print("Error: '%s' failed with exit code %d:\n%s" % (
- e2fsck_command, exit_code, e2fsck_output))
- return False
-
- return True
+ env4e2fsck = os.environ.copy()
+ env4e2fsck["ASAN_OPTIONS"] = "detect_odr_violation=0"
+ try:
+ common.RunAndCheckOutput(e2fsck_command, env=env4e2fsck)
+ finally:
+ os.remove(unsparse_image)
def ImagePropFromGlobalDict(glob_dict, mount_point):
@@ -988,23 +642,18 @@
return True
return False
- if "original_partition_size" in image_prop:
- size_property = "original_partition_size"
- else:
- size_property = "partition_size"
-
if mount_point == "system":
- copy_prop(size_property, "system_size")
+ copy_prop("partition_size", "system_size")
elif mount_point == "system_other":
- copy_prop(size_property, "system_size")
+ copy_prop("partition_size", "system_size")
elif mount_point == "vendor":
- copy_prop(size_property, "vendor_size")
+ copy_prop("partition_size", "vendor_size")
elif mount_point == "odm":
- copy_prop(size_property, "odm_size")
+ copy_prop("partition_size", "odm_size")
elif mount_point == "product":
- copy_prop(size_property, "product_size")
+ copy_prop("partition_size", "product_size")
elif mount_point == "product_services":
- copy_prop(size_property, "product_services_size")
+ copy_prop("partition_size", "product_services_size")
return d
@@ -1018,6 +667,8 @@
print(__doc__)
sys.exit(1)
+ common.InitLogging()
+
in_dir = argv[0]
glob_dict_file = argv[1]
out_file = argv[2]
@@ -1051,20 +702,22 @@
elif image_filename == "product_services.img":
mount_point = "product_services"
else:
- print("error: unknown image file name ", image_filename, file=sys.stderr)
+ logger.error("Unknown image file name %s", image_filename)
sys.exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
- if not BuildImage(in_dir, image_properties, out_file, target_out):
- print("error: failed to build %s from %s" % (out_file, in_dir),
- file=sys.stderr)
- sys.exit(1)
+ try:
+ BuildImage(in_dir, image_properties, out_file, target_out)
+ except:
+ logger.error("Failed to build %s from %s", out_file, in_dir)
+ raise
if prop_file_out:
glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
SaveGlobalDict(prop_file_out, glob_dict_out)
+
if __name__ == '__main__':
try:
main(sys.argv[1:])
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 3cac90a..7d3424b 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -21,17 +21,18 @@
from __future__ import print_function
import argparse
+import logging
import re
import subprocess
import sys
-import tempfile
import zipfile
-
from hashlib import sha1
from hashlib import sha256
import common
+logger = logging.getLogger(__name__)
+
def CertUsesSha256(cert):
"""Check if the cert uses SHA-256 hashing algorithm."""
@@ -165,11 +166,11 @@
cmd = ['delta_generator',
'--in_file=' + payload_file,
'--public_key=' + pubkey]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
- 'Failed to verify payload with delta_generator: %s\n%s' % (package,
- stdoutdata)
+ 'Failed to verify payload with delta_generator: {}\n{}'.format(
+ package, stdoutdata)
common.ZipClose(package_zip)
# Verified successfully upon reaching here.
@@ -182,6 +183,8 @@
parser.add_argument('package', help='The OTA package to be verified.')
args = parser.parse_args()
+ common.InitLogging()
+
VerifyPackage(args.certificate, args.package)
VerifyAbOtaPayload(args.certificate, args.package)
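
Every script touched by this change follows the same logging pattern: a module-level logger plus a single InitLogging() call once arguments are parsed. A minimal sketch, assuming the releasetools modules are on the import path:

    import logging

    import common

    logger = logging.getLogger(__name__)

    def main():
        common.InitLogging()
        logger.warning("shown at the default WARNING level")
        logger.info("shown only in verbose mode (level raised to INFO)")
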
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index ee2c6f4..fe63458 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -20,6 +20,9 @@
import getpass
import gzip
import imp
+import json
+import logging
+import logging.config
import os
import platform
import re
@@ -37,6 +40,9 @@
import blockimgdiff
import sparse_img
+logger = logging.getLogger(__name__)
+
+
class Options(object):
def __init__(self):
platform_search_path = {
@@ -72,15 +78,20 @@
OPTIONS = Options()
+# The block size that's used across the releasetools scripts.
+BLOCK_SIZE = 4096
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
-
# The partitions allowed to be signed by AVB (Android verified boot 2.0).
AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'product',
'product_services', 'dtbo', 'odm')
+# Partitions that should have their care_map added to META/care_map.pb
+PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services',
+ 'odm')
+
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
@@ -115,19 +126,98 @@
pass
-def Run(args, verbose=None, **kwargs):
- """Create and return a subprocess.Popen object.
+def InitLogging():
+ DEFAULT_LOGGING_CONFIG = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'format':
+ '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
+ 'datefmt': '%Y-%m-%d %H:%M:%S',
+ },
+ },
+ 'handlers': {
+ 'default': {
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'standard',
+ },
+ },
+ 'loggers': {
+ '': {
+ 'handlers': ['default'],
+ 'level': 'WARNING',
+ 'propagate': True,
+ }
+ }
+ }
+ env_config = os.getenv('LOGGING_CONFIG')
+ if env_config:
+ with open(env_config) as f:
+ config = json.load(f)
+ else:
+ config = DEFAULT_LOGGING_CONFIG
- Caller can specify if the command line should be printed. The global
- OPTIONS.verbose will be used if not specified.
+ # Increase the logging level for verbose mode.
+ if OPTIONS.verbose:
+ config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
+ config['loggers']['']['level'] = 'INFO'
+
+ logging.config.dictConfig(config)
+
+
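
The LOGGING_CONFIG environment variable read above names a JSON file in logging.config.dictConfig format. A hedged sketch of supplying one before invoking a releasetools script; the config keys mirror the default above:

    import json
    import os
    import tempfile

    config = {
        "version": 1,
        "disable_existing_loggers": False,
        "handlers": {"default": {"class": "logging.StreamHandler"}},
        "loggers": {"": {"handlers": ["default"], "level": "DEBUG"}},
    }
    fd, path = tempfile.mkstemp(suffix=".json")
    with os.fdopen(fd, "w") as f:
        json.dump(config, f)
    os.environ["LOGGING_CONFIG"] = path  # picked up by common.InitLogging()
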
+def Run(args, verbose=None, **kwargs):
+ """Creates and returns a subprocess.Popen object.
+
+ Args:
+ args: The command represented as a list of strings.
+ verbose: Whether the commands should be shown. Defaults to the global
+ verbosity if unspecified.
+ kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
+ stdin, etc. stdout and stderr will default to subprocess.PIPE and
+ subprocess.STDOUT respectively unless the caller specifies any of them.
+
+ Returns:
+ A subprocess.Popen object.
"""
- if verbose is None:
- verbose = OPTIONS.verbose
- if verbose:
- print(" running: ", " ".join(args))
+ if 'stdout' not in kwargs and 'stderr' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ kwargs['stderr'] = subprocess.STDOUT
+ # Don't log anything if the caller explicitly says so.
+ if verbose != False:
+ logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs)
+def RunAndCheckOutput(args, verbose=None, **kwargs):
+ """Runs the given command and returns the output.
+
+ Args:
+ args: The command represented as a list of strings.
+ verbose: Whether the commands should be shown. Defaults to the global
+ verbosity if unspecified.
+ kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
+ stdin, etc. stdout and stderr will default to subprocess.PIPE and
+ subprocess.STDOUT respectively unless the caller specifies any of them.
+
+ Returns:
+ The output string.
+
+ Raises:
+ ExternalError: On non-zero exit from the command.
+ """
+ proc = Run(args, verbose=verbose, **kwargs)
+ output, _ = proc.communicate()
+ # Don't log anything if the caller explicitly says so.
+ if verbose != False:
+ logger.info("%s", output.rstrip())
+ if proc.returncode != 0:
+ raise ExternalError(
+ "Failed to run command '{}' (exit code {}):\n{}".format(
+ args, proc.returncode, output))
+ return output
+
+
def RoundUpTo4K(value):
rounded_up = value + 4095
return rounded_up - (rounded_up % 4096)
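
With RunAndCheckOutput, the old Popen/communicate/returncode dance collapses to one call; callers that can recover catch common.ExternalError. Usage sketch:

    import common

    output = common.RunAndCheckOutput(["ls", "-l"])  # combined stdout+stderr

    try:
        common.RunAndCheckOutput(["false"])
    except common.ExternalError as e:
        # The message carries the command, exit code and its output.
        print(e)
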
@@ -230,8 +320,8 @@
if os.path.exists(system_base_fs_file):
d["system_base_fs_file"] = system_base_fs_file
else:
- print("Warning: failed to find system base fs file: %s" % (
- system_base_fs_file,))
+ logger.warning(
+ "Failed to find system base fs file: %s", system_base_fs_file)
del d["system_base_fs_file"]
if "vendor_base_fs_file" in d:
@@ -240,8 +330,8 @@
if os.path.exists(vendor_base_fs_file):
d["vendor_base_fs_file"] = vendor_base_fs_file
else:
- print("Warning: failed to find vendor base fs file: %s" % (
- vendor_base_fs_file,))
+ logger.warning(
+ "Failed to find vendor base fs file: %s", vendor_base_fs_file)
del d["vendor_base_fs_file"]
def makeint(key):
@@ -290,8 +380,12 @@
else:
d["fstab"] = None
- d["build.prop"] = LoadBuildProp(read_helper, 'SYSTEM/build.prop')
- d["vendor.build.prop"] = LoadBuildProp(read_helper, 'VENDOR/build.prop')
+ # Tries to load the build props for all partitions with care_map, including
+ # system and vendor.
+ for partition in PARTITIONS_WITH_CARE_MAP:
+ d["{}.build.prop".format(partition)] = LoadBuildProp(
+ read_helper, "{}/build.prop".format(partition.upper()))
+ d["build.prop"] = d["system.build.prop"]
# Set up the salt (based on fingerprint or thumbprint) that will be used when
# adding AVB footer.
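
Downstream consumers (see the care_map tests later in this change) read a fingerprint from these per-partition dicts, falling back to the thumbprint and then to "unknown". A hedged sketch of that lookup; the real logic lives in AddCareMapForAbOta:

    def partition_fingerprint(info_dict, partition):
        props = info_dict.get("{}.build.prop".format(partition)) or {}
        return (props.get("ro.{}.build.fingerprint".format(partition)) or
                props.get("ro.{}.build.thumbprint".format(partition)) or
                "unknown")
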
@@ -313,7 +407,7 @@
try:
data = read_helper(prop_file)
except KeyError:
- print("Warning: could not read %s" % (prop_file,))
+ logger.warning("Failed to read %s", prop_file)
data = ""
return LoadDictionaryFromLines(data.split("\n"))
@@ -343,7 +437,7 @@
try:
data = read_helper(recovery_fstab_path)
except KeyError:
- print("Warning: could not find {}".format(recovery_fstab_path))
+ logger.warning("Failed to find %s", recovery_fstab_path)
data = ""
assert fstab_version == 2
@@ -396,7 +490,7 @@
def DumpInfoDict(d):
for k, v in sorted(d.items()):
- print("%-25s = (%s) %s" % (k, type(v).__name__, v))
+ logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
def AppendAVBSigningArgs(cmd, partition):
@@ -425,21 +519,13 @@
Returns:
A string of form "partition:rollback_index_location:key" that can be used to
build or verify vbmeta image.
-
- Raises:
- AssertionError: When it fails to extract the public key with avbtool.
"""
if key is None:
key = info_dict["avb_" + partition + "_key_path"]
avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
pubkey_path = MakeTempFile(prefix="avb-", suffix=".pubkey")
- proc = Run(
- [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = proc.communicate()
- assert proc.returncode == 0, \
- "Failed to extract pubkey for {}:\n{}".format(
- partition, stdoutdata)
+ RunAndCheckOutput(
+ [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path])
rollback_index_location = info_dict[
"avb_" + partition + "_rollback_index_location"]
@@ -542,9 +628,7 @@
fn = os.path.join(sourcedir, "recovery_dtbo")
cmd.extend(["--recovery_dtbo", fn])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)
+ RunAndCheckOutput(cmd)
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
@@ -559,9 +643,7 @@
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "boot_signer of %s image failed" % path
+ RunAndCheckOutput(cmd)
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot"):
@@ -579,9 +661,7 @@
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "vboot_signer of %s image failed" % path
+ RunAndCheckOutput(cmd)
# Clean up the temp files.
img_unsigned.close()
@@ -598,10 +678,7 @@
args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
- partition_name,)
+ RunAndCheckOutput(cmd)
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -623,15 +700,15 @@
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
- print("building image from target_files %s..." % (tree_subdir,))
+ logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None:
info_dict = OPTIONS.info_dict
@@ -673,12 +750,7 @@
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.extend(pattern)
- p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- if p.returncode != 0:
- raise ExternalError(
- "Failed to unzip input target-files \"{}\":\n{}".format(
- filename, stdoutdata))
+ RunAndCheckOutput(cmd)
tmp = MakeTempDir(prefix="targetfiles-")
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
@@ -692,7 +764,8 @@
return tmp
-def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
+ hashtree_info_generator=None):
"""Returns a SparseImage object suitable for passing to BlockImageDiff.
This function loads the specified sparse image from the given path, and
@@ -705,7 +778,8 @@
tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive.
allow_shared_blocks: Whether having shared blocks is allowed.
-
+ hashtree_info_generator: If present, generates the hashtree_info for this
+ sparse image.
Returns:
A SparseImage object, with file_map info loaded.
"""
@@ -723,8 +797,9 @@
# unconditionally. Note that they are still part of care_map. (Bug: 20939131)
clobbered_blocks = "0"
- image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
- allow_shared_blocks=allow_shared_blocks)
+ image = sparse_img.SparseImage(
+ path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
+ hashtree_info_generator=hashtree_info_generator)
# block.map may contain less blocks, because mke2fs may skip allocating blocks
# if they contain all zeros. We can't reconstruct such a file from its block
@@ -914,15 +989,14 @@
key + OPTIONS.private_key_suffix,
input_name, output_name])
- p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ proc = Run(cmd, stdin=subprocess.PIPE)
if password is not None:
password += "\n"
- stdoutdata, _ = p.communicate(password)
- if p.returncode != 0:
+ stdoutdata, _ = proc.communicate(password)
+ if proc.returncode != 0:
raise ExternalError(
"Failed to run signapk.jar: return code {}:\n{}".format(
- p.returncode, stdoutdata))
+ proc.returncode, stdoutdata))
def CheckSize(data, target, info_dict):
@@ -970,9 +1044,9 @@
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
- print("\n WARNING: %s\n" % (msg,))
- elif OPTIONS.verbose:
- print(" ", msg)
+ logger.warning("\n WARNING: %s\n", msg)
+ else:
+ logger.info(" %s", msg)
def ReadApkCerts(tf_zip):
@@ -1255,8 +1329,7 @@
first_line = i + 4
f.close()
- p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
- _, _ = p.communicate()
+ RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
return self.ReadFile()
@@ -1272,13 +1345,13 @@
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
- print("failed to parse password file: ", line)
+ logger.warning("Failed to parse password file: %s", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
- print("error reading password file: ", str(e))
+ logger.exception("Error reading password file:")
return result
@@ -1384,10 +1457,7 @@
if isinstance(entries, basestring):
entries = [entries]
cmd = ["zip", "-d", zip_filename] + entries
- proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = proc.communicate()
- assert proc.returncode == 0, "Failed to delete %s:\n%s" % (entries,
- stdoutdata)
+ RunAndCheckOutput(cmd)
def ZipClose(zip_file):
@@ -1425,10 +1495,10 @@
if x == ".py":
f = b
info = imp.find_module(f, [d])
- print("loaded device-specific extensions from", path)
+ logger.info("loaded device-specific extensions from %s", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
- print("unable to load device-specific module; assuming none")
+ logger.info("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
@@ -1570,7 +1640,7 @@
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
- print("WARNING: diff command timed out")
+ logger.warning("diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
@@ -1578,8 +1648,7 @@
th.join()
if p.returncode != 0:
- print("WARNING: failure running %s:\n%s\n" % (
- diff_program, "".join(err)))
+ logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
self.patch = None
return None, None, None
diff = ptemp.read()
@@ -1603,7 +1672,7 @@
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
- print(len(diffs), "diffs to compute")
+ logger.info("%d diffs to compute", len(diffs))
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
@@ -1629,14 +1698,14 @@
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
- print(
- "patching failed! %s" % (name,))
+ logger.error("patching failed! %40s", name)
else:
- print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
- dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
+ logger.info(
+ "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+ tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
- except Exception as e:
- print(e)
+ except Exception:
+ logger.exception("Failed to compute diff from worker")
raise
# start worker threads; wait for them all to finish.
@@ -1848,11 +1917,7 @@
'--output={}.new.dat.br'.format(self.path),
'{}.new.dat'.format(self.path)]
print("Compressing {}.new.dat with brotli".format(self.partition))
- p = Run(brotli_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- 'Failed to compress {}.new.dat with brotli:\n{}'.format(
- self.partition, stdoutdata)
+ RunAndCheckOutput(brotli_cmd)
new_data_name = '{}.new.dat.br'.format(self.partition)
ZipWrite(output_zip,
@@ -2063,6 +2128,6 @@
# in the L release.
sh_location = "bin/install-recovery.sh"
- print("putting script in", sh_location)
+ logger.info("putting script in %s", sh_location)
output_sink(sh_location, sh)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 01ff149..0156b72 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -28,6 +28,7 @@
from __future__ import print_function
+import logging
import os
import shutil
import sys
@@ -39,6 +40,7 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
@@ -72,6 +74,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
CopyInfo(output_zip)
@@ -90,11 +94,11 @@
common.ZipWrite(output_zip, os.path.join(images_path, image), image)
finally:
- print("cleaning up...")
+ logger.info("cleaning up...")
common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
@@ -102,5 +106,5 @@
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ logger.exception("\n ERROR:\n")
sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 7c6007e..725b355 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -16,24 +16,27 @@
from __future__ import print_function
+import logging
+import os
import sys
+import common
+
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-import os
-import common
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-def main(argv):
- # def option_handler(o, a):
- # return False
+def main(argv):
args = common.ParseOptions(argv, __doc__)
input_dir, output_dir = args
+ common.InitLogging()
+
OPTIONS.info_dict = common.LoadInfoDict(input_dir)
recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index eaba4df..2264655 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -164,23 +164,25 @@
from __future__ import print_function
+import logging
import multiprocessing
import os.path
import shlex
import shutil
import struct
-import subprocess
import sys
import tempfile
import zipfile
import common
import edify_generator
+import verity_utils
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
OPTIONS.package_key = None
@@ -392,12 +394,7 @@
cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
-
- get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- stdoutdata, _ = get_signing_key.communicate()
- assert get_signing_key.returncode == 0, \
- "Failed to get signing key: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd, verbose=False)
self.signer = "openssl"
self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
@@ -410,10 +407,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = signing.communicate()
- assert signing.returncode == 0, \
- "Failed to sign the input file: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
return out_file
@@ -431,8 +425,6 @@
Args:
secondary: Whether it's generating a secondary payload (default: False).
"""
- # The place where the output from the subprocess should go.
- self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
self.payload_file = None
self.payload_properties = None
self.secondary = secondary
@@ -457,10 +449,7 @@
if source_file is not None:
cmd.extend(["--source_image", source_file])
cmd.extend(additional_args)
- p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- "brillo_update_payload generate failed: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -484,9 +473,7 @@
"--signature_size", "256",
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload hash failed"
+ common.RunAndCheckOutput(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -501,9 +488,7 @@
"--signature_size", "256",
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload sign failed"
+ common.RunAndCheckOutput(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -511,9 +496,7 @@
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload properties failed"
+ common.RunAndCheckOutput(cmd)
if self.secondary:
with open(properties_file, "a") as f:
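
Taken together, the numbered steps above drive brillo_update_payload through generate, hash, sign and properties, all via RunAndCheckOutput now. A hedged sketch of the end-to-end call sequence, assuming the surrounding Payload methods keep their names and the usual WriteToZip step not shown in this hunk:

    def build_signed_payload(target_file, output_zip):
        payload = Payload()
        payload.Generate(target_file)   # brillo_update_payload generate
        payload.Sign(PayloadSigner())   # hash -> sign -> re-embed -> properties
        payload.WriteToZip(output_zip)
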
@@ -595,11 +578,11 @@
OPTIONS.input_tmp, "RECOVERY")
common.ZipWriteStr(
output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
- print("two-step package: using %s in stage 1/3" % (
- recovery_two_step_img_name,))
+ logger.info(
+ "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
else:
- print("two-step package: using recovery.img in stage 1/3")
+ logger.info("two-step package: using recovery.img in stage 1/3")
# The "recovery.img" entry has been written into package earlier.
script.WriteRawImage("/boot", "recovery.img")
@@ -1167,7 +1150,8 @@
'payload_properties.txt',
)
self.optional = (
- # care_map.txt is available only if dm-verity is enabled.
+ # care_map is available only if dm-verity is enabled.
+ 'care_map.pb',
'care_map.txt',
# compatibility.zip is available only if target supports Treble.
'compatibility.zip',
@@ -1362,8 +1346,8 @@
target_api_version = target_info["recovery_api_version"]
source_api_version = source_info["recovery_api_version"]
if source_api_version == 0:
- print("WARNING: generating edify script for a source that "
- "can't install it.")
+ logger.warning(
+ "Generating edify script for a source that can't install it.")
script = edify_generator.EdifyGenerator(
source_api_version, target_info, fstab=source_info["fstab"])
@@ -1410,8 +1394,12 @@
target_info.get('ext4_share_dup_blocks') == "true")
system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
allow_shared_blocks)
+
+ hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+ "system", 4096, target_info)
system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
- allow_shared_blocks)
+ allow_shared_blocks,
+ hashtree_info_generator)
blockimgdiff_version = max(
int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -1438,8 +1426,11 @@
raise RuntimeError("can't generate incremental that adds /vendor")
vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
allow_shared_blocks)
- vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
- allow_shared_blocks)
+ hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+ "vendor", 4096, target_info)
+ vendor_tgt = common.GetSparseImage(
+ "vendor", OPTIONS.target_tmp, target_zip, allow_shared_blocks,
+ hashtree_info_generator)
# Check first block of vendor partition for remount R/W only if
# disk type is ext4
@@ -1534,8 +1525,9 @@
else:
include_full_boot = False
- print("boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d)))
+ logger.info(
+ "boot target: %d source: %d diff: %d", target_boot.size,
+ source_boot.size, len(d))
common.ZipWriteStr(output_zip, "boot.img.p", d)
@@ -1585,19 +1577,19 @@
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print("writing full boot image (forced by two-step mode)")
+ logger.info("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step:
if updating_boot:
if include_full_boot:
- print("boot image changed; including full.")
+ logger.info("boot image changed; including full.")
script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img")
else:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
- print("boot image changed; including patch.")
+ logger.info("boot image changed; including patch.")
script.Print("Patching boot image...")
script.ShowProgress(0.1, 10)
script.PatchPartition(
@@ -1607,7 +1599,7 @@
boot_type, boot_device, source_boot.size, source_boot.sha1),
'boot.img.p')
else:
- print("boot image unchanged; skipping.")
+ logger.info("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
@@ -1786,16 +1778,19 @@
target_zip = zipfile.ZipFile(target_file, "r")
if (target_info.get("verity") == "true" or
target_info.get("avb_enable") == "true"):
- care_map_path = "META/care_map.txt"
- namelist = target_zip.namelist()
- if care_map_path in namelist:
- care_map_data = target_zip.read(care_map_path)
- # In order to support streaming, care_map.txt needs to be packed as
+ care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
+ "META/" + x in target_zip.namelist()]
+
+ # Adds care_map if either the protobuf format or the plain text one exists.
+ if care_map_list:
+ care_map_name = care_map_list[0]
+ care_map_data = target_zip.read("META/" + care_map_name)
+ # In order to support streaming, care_map needs to be packed as
# ZIP_STORED.
- common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+ common.ZipWriteStr(output_zip, care_map_name, care_map_data,
compress_type=zipfile.ZIP_STORED)
else:
- print("Warning: cannot find care map file in target_file package")
+ logger.warning("Cannot find care map file in target_file package")
AddCompatibilityArchiveIfTrebleEnabled(
target_zip, output_zip, target_info, source_info)
@@ -1911,6 +1906,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
if OPTIONS.downgrade:
# We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full
@@ -1931,9 +1928,8 @@
with zipfile.ZipFile(args[0], 'r') as input_zip:
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- if OPTIONS.verbose:
- print("--- target info ---")
- common.DumpInfoDict(OPTIONS.info_dict)
+ logger.info("--- target info ---")
+ common.DumpInfoDict(OPTIONS.info_dict)
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
@@ -1941,9 +1937,8 @@
with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
- if OPTIONS.verbose:
- print("--- source info ---")
- common.DumpInfoDict(OPTIONS.source_info_dict)
+ logger.info("--- source info ---")
+ common.DumpInfoDict(OPTIONS.source_info_dict)
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1967,7 +1962,7 @@
output_file=args[1],
source_file=OPTIONS.incremental_source)
- print("done.")
+ logger.info("done.")
return
# Sanity check the loaded info dicts first.
@@ -1978,7 +1973,7 @@
# Non-A/B OTAs rely on /cache partition to store temporary files.
cache_size = OPTIONS.info_dict.get("cache_size")
if cache_size is None:
- print("--- can't determine the cache partition size ---")
+ logger.warning("--- can't determine the cache partition size ---")
OPTIONS.cache_size = cache_size
if OPTIONS.extra_script is not None:
@@ -1987,7 +1982,7 @@
if OPTIONS.extracted_input is not None:
OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- print("unzipping target target-files...")
+ logger.info("unzipping target target-files...")
OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
OPTIONS.target_tmp = OPTIONS.input_tmp
@@ -1999,7 +1994,7 @@
if OPTIONS.device_specific is None:
from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
if os.path.exists(from_input):
- print("(using device-specific extensions from target_files)")
+ logger.info("(using device-specific extensions from target_files)")
OPTIONS.device_specific = from_input
else:
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
@@ -2016,7 +2011,7 @@
# Generate an incremental OTA.
else:
- print("unzipping source target-files...")
+ logger.info("unzipping source target-files...")
OPTIONS.source_tmp = common.UnzipTemp(
OPTIONS.incremental_source, UNZIP_PATTERN)
with zipfile.ZipFile(args[0], 'r') as input_zip, \
@@ -2032,15 +2027,15 @@
target_files_diff.recursiveDiff(
'', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index cb0c268..d35e9e8 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -395,7 +395,7 @@
pass
# Skip the care_map as we will regenerate the system/vendor images.
- elif filename == "META/care_map.txt":
+ elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
pass
# A non-APK file; copy it verbatim.
@@ -597,8 +597,7 @@
if p.returncode != 0:
raise common.ExternalError("failed to run dumpkeys")
- if (misc_info.get("system_root_image") == "true" and
- misc_info.get("recovery_as_boot") == "true"):
+ if misc_info.get("recovery_as_boot") == "true":
recovery_keys_location = "BOOT/RAMDISK/res/keys"
else:
recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 083da7a..5ebb1f0 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -13,6 +13,7 @@
# limitations under the License.
import bisect
+import logging
import os
import struct
import threading
@@ -20,6 +21,8 @@
import rangelib
+logger = logging.getLogger(__name__)
+
class SparseImage(object):
"""Wraps a sparse image file into an image object.
@@ -33,7 +36,8 @@
"""
def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
- mode="rb", build_map=True, allow_shared_blocks=False):
+ mode="rb", build_map=True, allow_shared_blocks=False,
+ hashtree_info_generator=None):
self.simg_f = f = open(simg_fn, mode)
header_bin = f.read(28)
@@ -60,10 +64,13 @@
raise ValueError("Chunk header size was expected to be 12, but is %u." %
(chunk_hdr_sz,))
- print("Total of %u %u-byte output blocks in %u input chunks."
- % (total_blks, blk_sz, total_chunks))
+ logger.info(
+ "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
+ blk_sz, total_chunks)
if not build_map:
+ assert not hashtree_info_generator, \
+ "Cannot generate the hashtree info without building the offset map."
return
pos = 0 # in blocks
@@ -102,8 +109,18 @@
if data_sz != 0:
raise ValueError("Don't care chunk input size is non-zero (%u)" %
(data_sz))
- else:
- pos += chunk_sz
+ # Fills the don't care data ranges with zeros.
+ # TODO(xunchang) pass the care_map to hashtree info generator.
+ if hashtree_info_generator:
+ fill_data = '\x00' * 4
+ # In order to compute verity hashtree on device, we need to write
+ # zeros explicitly to the don't care ranges. Because these ranges may
+ # contain non-zero data from the previous build.
+ care_data.append(pos)
+ care_data.append(pos + chunk_sz)
+ offset_map.append((pos, chunk_sz, None, fill_data))
+
+ pos += chunk_sz
elif chunk_type == 0xCAC4:
raise ValueError("CRC32 chunks are not supported")
@@ -128,6 +145,10 @@
extended = extended.intersect(all_blocks).subtract(self.care_map)
self.extended = extended
+ self.hashtree_info = None
+ if hashtree_info_generator:
+ self.hashtree_info = hashtree_info_generator.Generate(self)
+
if file_map_fn:
self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
allow_shared_blocks)
@@ -246,6 +267,8 @@
remaining = remaining.subtract(ranges)
remaining = remaining.subtract(clobbered_blocks)
+ if self.hashtree_info:
+ remaining = remaining.subtract(self.hashtree_info.hashtree_range)
# For all the remaining blocks in the care_map (ie, those that
# aren't part of the data for any file nor part of the clobbered_blocks),
@@ -308,6 +331,8 @@
out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
if clobbered_blocks:
out["__COPY"] = clobbered_blocks
+ if self.hashtree_info:
+ out["__HASHTREE"] = self.hashtree_info.hashtree_range
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 834e989..ad22b72 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -16,14 +16,12 @@
import os
import os.path
-import subprocess
-import unittest
import zipfile
import common
import test_utils
from add_img_to_target_files import (
- AddCareMapTxtForAbOta, AddPackRadioImages, AppendVBMetaArgsForPartition,
+ AddCareMapForAbOta, AddPackRadioImages, AppendVBMetaArgsForPartition,
CheckAbOtaImages, GetCareMap)
from rangelib import RangeSet
@@ -31,23 +29,22 @@
OPTIONS = common.OPTIONS
-class AddImagesToTargetFilesTest(unittest.TestCase):
+class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
OPTIONS.input_tmp = common.MakeTempDir()
- def tearDown(self):
- common.Cleanup()
-
def _verifyCareMap(self, expected, file_name):
- """Parses the care_map proto; and checks the content in plain text."""
+ """Parses the care_map.pb; and checks the content in plain text."""
text_file = common.MakeTempFile(prefix="caremap-", suffix=".txt")
# Calls an external binary to convert the proto message.
cmd = ["care_map_generator", "--parse_proto", file_name, text_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- output, _ = p.communicate()
- self.assertEqual(0, p.returncode)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ self.assertEqual(
+ 0, proc.returncode,
+ "Failed to run care_map_generator:\n{}".format(output))
with open(text_file, 'r') as verify_fp:
plain_text = verify_fp.read()
@@ -139,11 +136,18 @@
images + ['baz'])
@staticmethod
- def _test_AddCareMapTxtForAbOta():
- """Helper function to set up the test for test_AddCareMapTxtForAbOta()."""
+ def _test_AddCareMapForAbOta():
+ """Helper function to set up the test for test_AddCareMapForAbOta()."""
OPTIONS.info_dict = {
- 'system_verity_block_device' : '/dev/block/system',
- 'vendor_verity_block_device' : '/dev/block/vendor',
+ 'system_verity_block_device': '/dev/block/system',
+ 'vendor_verity_block_device': '/dev/block/vendor',
+ 'system.build.prop': {
+ 'ro.system.build.fingerprint':
+ 'google/sailfish/12345:user/dev-keys',
+ },
+ 'vendor.build.prop': {
+ 'ro.vendor.build.fingerprint': 'google/sailfish/678:user/dev-keys',
+ }
}
# Prepare the META/ folder.
@@ -164,101 +168,172 @@
}
return image_paths
- def test_AddCareMapTxtForAbOta(self):
- image_paths = self._test_AddCareMapTxtForAbOta()
+ def test_AddCareMapForAbOta(self):
+ image_paths = self._test_AddCareMapForAbOta()
- AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+ AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
- care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
- expected = ['system', RangeSet("0-5 10-15").to_string_raw(), 'vendor',
- RangeSet("0-9").to_string_raw()]
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.fingerprint",
+ "google/sailfish/12345:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.fingerprint",
+ "google/sailfish/678:user/dev-keys"]
self._verifyCareMap(expected, care_map_file)
- def test_AddCareMapTxtForAbOta_withNonCareMapPartitions(self):
+ def test_AddCareMapForAbOta_withNonCareMapPartitions(self):
"""Partitions without care_map should be ignored."""
- image_paths = self._test_AddCareMapTxtForAbOta()
+ image_paths = self._test_AddCareMapForAbOta()
- AddCareMapTxtForAbOta(
+ AddCareMapForAbOta(
None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
- care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
- expected = ['system', RangeSet("0-5 10-15").to_string_raw(), 'vendor',
- RangeSet("0-9").to_string_raw()]
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.fingerprint",
+ "google/sailfish/12345:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.fingerprint",
+ "google/sailfish/678:user/dev-keys"]
self._verifyCareMap(expected, care_map_file)
- def test_AddCareMapTxtForAbOta_withAvb(self):
+ def test_AddCareMapForAbOta_withAvb(self):
"""Tests the case for device using AVB."""
- image_paths = self._test_AddCareMapTxtForAbOta()
+ image_paths = self._test_AddCareMapForAbOta()
OPTIONS.info_dict = {
'avb_system_hashtree_enable' : 'true',
'avb_vendor_hashtree_enable' : 'true',
+ 'system.build.prop': {
+ 'ro.system.build.fingerprint':
+ 'google/sailfish/12345:user/dev-keys',
+ },
+ 'vendor.build.prop': {
+ 'ro.vendor.build.fingerprint': 'google/sailfish/678:user/dev-keys',
+ }
}
- AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+ AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
- care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
- expected = ['system', RangeSet("0-5 10-15").to_string_raw(), 'vendor',
- RangeSet("0-9").to_string_raw()]
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.fingerprint",
+ "google/sailfish/12345:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.fingerprint",
+ "google/sailfish/678:user/dev-keys"]
self._verifyCareMap(expected, care_map_file)
- def test_AddCareMapTxtForAbOta_verityNotEnabled(self):
- """No care_map.txt should be generated if verity not enabled."""
- image_paths = self._test_AddCareMapTxtForAbOta()
- OPTIONS.info_dict = {}
- AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)
+ def test_AddCareMapForAbOta_noFingerprint(self):
+ """Tests the case for partitions without fingerprint."""
+ image_paths = self._test_AddCareMapForAbOta()
+ OPTIONS.info_dict = {
+ 'system_verity_block_device': '/dev/block/system',
+ 'vendor_verity_block_device': '/dev/block/vendor',
+ }
- care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
+ AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
+ "unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
+ "unknown"]
+
+ self._verifyCareMap(expected, care_map_file)
+
+ def test_AddCareMapForAbOta_withThumbprint(self):
+ """Tests the case for partitions with thumbprint."""
+ image_paths = self._test_AddCareMapForAbOta()
+ OPTIONS.info_dict = {
+ 'system_verity_block_device': '/dev/block/system',
+ 'vendor_verity_block_device': '/dev/block/vendor',
+ 'system.build.prop': {
+ 'ro.system.build.thumbprint': 'google/sailfish/123:user/dev-keys',
+ },
+ 'vendor.build.prop' : {
+ 'ro.vendor.build.thumbprint': 'google/sailfish/456:user/dev-keys',
+ }
+ }
+
+ AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.thumbprint",
+ "google/sailfish/123:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.thumbprint",
+ "google/sailfish/456:user/dev-keys"]
+
+ self._verifyCareMap(expected, care_map_file)
+
+ def test_AddCareMapForAbOta_verityNotEnabled(self):
+ """No care_map.pb should be generated if verity not enabled."""
+ image_paths = self._test_AddCareMapForAbOta()
+ OPTIONS.info_dict = {}
+ AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
self.assertFalse(os.path.exists(care_map_file))
- def test_AddCareMapTxtForAbOta_missingImageFile(self):
+ def test_AddCareMapForAbOta_missingImageFile(self):
"""Missing image file should be considered fatal."""
- image_paths = self._test_AddCareMapTxtForAbOta()
+ image_paths = self._test_AddCareMapForAbOta()
image_paths['vendor'] = ''
- self.assertRaises(AssertionError, AddCareMapTxtForAbOta, None,
+ self.assertRaises(AssertionError, AddCareMapForAbOta, None,
['system', 'vendor'], image_paths)
- def test_AddCareMapTxtForAbOta_zipOutput(self):
+ def test_AddCareMapForAbOta_zipOutput(self):
"""Tests the case with ZIP output."""
- image_paths = self._test_AddCareMapTxtForAbOta()
+ image_paths = self._test_AddCareMapForAbOta()
output_file = common.MakeTempFile(suffix='.zip')
with zipfile.ZipFile(output_file, 'w') as output_zip:
- AddCareMapTxtForAbOta(output_zip, ['system', 'vendor'], image_paths)
+ AddCareMapForAbOta(output_zip, ['system', 'vendor'], image_paths)
- care_map_name = "META/care_map.txt"
+ care_map_name = "META/care_map.pb"
temp_dir = common.MakeTempDir()
with zipfile.ZipFile(output_file, 'r') as verify_zip:
self.assertTrue(care_map_name in verify_zip.namelist())
verify_zip.extract(care_map_name, path=temp_dir)
- expected = ['system', RangeSet("0-5 10-15").to_string_raw(), 'vendor',
- RangeSet("0-9").to_string_raw()]
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.fingerprint",
+ "google/sailfish/12345:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.fingerprint",
+ "google/sailfish/678:user/dev-keys"]
self._verifyCareMap(expected, os.path.join(temp_dir, care_map_name))
- def test_AddCareMapTxtForAbOta_zipOutput_careMapEntryExists(self):
+ def test_AddCareMapForAbOta_zipOutput_careMapEntryExists(self):
"""Tests the case with ZIP output which already has care_map entry."""
- image_paths = self._test_AddCareMapTxtForAbOta()
+ image_paths = self._test_AddCareMapForAbOta()
output_file = common.MakeTempFile(suffix='.zip')
with zipfile.ZipFile(output_file, 'w') as output_zip:
- # Create an existing META/care_map.txt entry.
- common.ZipWriteStr(output_zip, 'META/care_map.txt', 'dummy care_map.txt')
+ # Create an existing META/care_map.pb entry.
+ common.ZipWriteStr(output_zip, 'META/care_map.pb',
+ 'dummy care_map.pb')
- # Request to add META/care_map.txt again.
- AddCareMapTxtForAbOta(output_zip, ['system', 'vendor'], image_paths)
+ # Request to add META/care_map.pb again.
+ AddCareMapForAbOta(output_zip, ['system', 'vendor'], image_paths)
# The one under OPTIONS.input_tmp must have been replaced.
- care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
- expected = ['system', RangeSet("0-5 10-15").to_string_raw(), 'vendor',
- RangeSet("0-9").to_string_raw()]
+ care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+ expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+ "ro.system.build.fingerprint",
+ "google/sailfish/12345:user/dev-keys",
+ 'vendor', RangeSet("0-9").to_string_raw(),
+ "ro.vendor.build.fingerprint",
+ "google/sailfish/678:user/dev-keys"]
self._verifyCareMap(expected, care_map_file)
# The existing entry should be scheduled to be replaced.
- self.assertIn('META/care_map.txt', OPTIONS.replace_updated_files_list)
+ self.assertIn('META/care_map.pb', OPTIONS.replace_updated_files_list)
def test_AppendVBMetaArgsForPartition(self):
OPTIONS.info_dict = {}
@@ -291,7 +366,7 @@
(0xCAC3, 4),
(0xCAC1, 6)])
OPTIONS.info_dict = {
- 'system_adjusted_partition_size' : 12,
+ 'system_image_blocks' : 12,
}
name, care_map = GetCareMap('system', sparse_image)
self.assertEqual('system', name)
@@ -306,6 +381,6 @@
(0xCAC3, 4),
(0xCAC1, 6)])
OPTIONS.info_dict = {
- 'system_adjusted_partition_size' : -12,
+ 'system_image_blocks' : -12,
}
self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image)
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 124b4d5..857026e 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -14,17 +14,14 @@
# limitations under the License.
#
-from __future__ import print_function
-
-import unittest
-
import common
-from blockimgdiff import (BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats,
- Transfer)
+from blockimgdiff import (
+ BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats, Transfer)
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class HealpItemTest(unittest.TestCase):
+class HeapItemTest(ReleaseToolsTestCase):
class Item(object):
def __init__(self, score):
@@ -54,7 +51,7 @@
self.assertFalse(item)
-class BlockImageDiffTest(unittest.TestCase):
+class BlockImageDiffTest(ReleaseToolsTestCase):
def test_GenerateDigraphOrder(self):
"""Make sure GenerateDigraph preserves the order.
@@ -245,7 +242,7 @@
block_image_diff.imgdiff_stats.stats)
-class ImgdiffStatsTest(unittest.TestCase):
+class ImgdiffStatsTest(ReleaseToolsTestCase):
def test_Log(self):
imgdiff_stats = ImgdiffStats()
diff --git a/tools/releasetools/test_build_image.py b/tools/releasetools/test_build_image.py
index 40a7c85..634c6b1 100644
--- a/tools/releasetools/test_build_image.py
+++ b/tools/releasetools/test_build_image.py
@@ -16,21 +16,19 @@
import filecmp
import os.path
-import unittest
import common
-from build_image import CheckHeadroom, RunCommand, SetUpInDirAndFsConfig
+from build_image import (
+ BuildImageError, CheckHeadroom, SetUpInDirAndFsConfig)
+from test_utils import ReleaseToolsTestCase
-class BuildImageTest(unittest.TestCase):
+class BuildImageTest(ReleaseToolsTestCase):
# Available: 1000 blocks.
EXT4FS_OUTPUT = (
"Created filesystem with 2777/129024 inodes and 515099/516099 blocks")
- def tearDown(self):
- common.Cleanup()
-
def test_CheckHeadroom_SizeUnderLimit(self):
# Required headroom: 1000 blocks.
prop_dict = {
@@ -38,7 +36,7 @@
'partition_headroom' : '4096000',
'mount_point' : 'system',
}
- self.assertTrue(CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict))
+ CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict)
def test_CheckHeadroom_InsufficientHeadroom(self):
# Required headroom: 1001 blocks.
@@ -47,7 +45,8 @@
'partition_headroom' : '4100096',
'mount_point' : 'system',
}
- self.assertFalse(CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict))
+ self.assertRaises(
+ BuildImageError, CheckHeadroom, self.EXT4FS_OUTPUT, prop_dict)
def test_CheckHeadroom_WrongFsType(self):
prop_dict = {
@@ -79,22 +78,23 @@
output_image = common.MakeTempFile(suffix='.img')
command = ['mkuserimg_mke2fs', input_dir, output_image, 'ext4',
'/system', '409600', '-j', '0']
- ext4fs_output, exit_code = RunCommand(command)
- self.assertEqual(0, exit_code)
+ proc = common.Run(command)
+ ext4fs_output, _ = proc.communicate()
+ self.assertEqual(0, proc.returncode)
prop_dict = {
'fs_type' : 'ext4',
'partition_headroom' : '40960',
'mount_point' : 'system',
}
- self.assertTrue(CheckHeadroom(ext4fs_output, prop_dict))
+ CheckHeadroom(ext4fs_output, prop_dict)
prop_dict = {
'fs_type' : 'ext4',
'partition_headroom' : '413696',
'mount_point' : 'system',
}
- self.assertFalse(CheckHeadroom(ext4fs_output, prop_dict))
+ self.assertRaises(BuildImageError, CheckHeadroom, ext4fs_output, prop_dict)
def test_SetUpInDirAndFsConfig_SystemRootImageTrue_NonSystem(self):
prop_dict = {
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 5179900..c99049a 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -19,7 +19,6 @@
import subprocess
import tempfile
import time
-import unittest
import zipfile
from hashlib import sha1
@@ -44,7 +43,8 @@
yield '\0' * (step_size - block_size)
-class CommonZipTest(unittest.TestCase):
+class CommonZipTest(test_utils.ReleaseToolsTestCase):
+
def _verify(self, zip_file, zip_file_name, arcname, expected_hash,
test_file_name=None, expected_stat=None, expected_mode=0o644,
expected_compress_type=zipfile.ZIP_STORED):
@@ -334,8 +334,8 @@
self.assertFalse('Test2' in entries)
self.assertTrue('Test3' in entries)
- self.assertRaises(AssertionError, common.ZipDelete, zip_file.name,
- 'Test2')
+ self.assertRaises(
+ common.ExternalError, common.ZipDelete, zip_file.name, 'Test2')
with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
entries = check_zip.namelist()
self.assertTrue('Test1' in entries)
@@ -359,7 +359,7 @@
os.remove(zip_file.name)
-class CommonApkUtilsTest(unittest.TestCase):
+class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase):
"""Tests the APK utils related functions."""
APKCERTS_TXT1 = (
@@ -407,9 +407,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _write_apkcerts_txt(apkcerts_txt, additional=None):
if additional is None:
@@ -523,14 +520,11 @@
{})
-class CommonUtilsTest(unittest.TestCase):
+class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_GetSparseImage_emptyBlockMapFile(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
with zipfile.ZipFile(target_files, 'w') as target_files_zip:
@@ -782,7 +776,8 @@
'avb_system_rollback_index_location': 2,
}
self.assertRaises(
- AssertionError, common.GetAvbChainedPartitionArg, 'system', info_dict)
+ common.ExternalError, common.GetAvbChainedPartitionArg, 'system',
+ info_dict)
INFO_DICT_DEFAULT = {
'recovery_api_version': 3,
@@ -934,7 +929,7 @@
AssertionError, common.LoadInfoDict, target_files_zip, True)
-class InstallRecoveryScriptFormatTest(unittest.TestCase):
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
Its format should match between common.py and validate_target_files.py.
@@ -993,6 +988,3 @@
recovery_image, boot_image, self._info)
validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
self._info)
-
- def tearDown(self):
- common.Cleanup()
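These suites now expect common.ExternalError (rather than AssertionError) when an external tool misbehaves. A minimal sketch of that pattern, assuming only that ExternalError is an Exception subclass; the helper below is illustrative, not the releasetools implementation:

    import subprocess

    class ExternalError(RuntimeError):
        """Raised when an external tool exits with a non-zero status."""

    def run_and_check_output(cmd):
        # Runs cmd and returns its combined output; raises ExternalError
        # (instead of asserting) when the tool fails.
        proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, _ = proc.communicate()
        if proc.returncode != 0:
            raise ExternalError(
                'Failed to run {}:\n{}'.format(cmd, output))
        return output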
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 8416af7..44703db 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -17,8 +17,6 @@
import copy
import os
import os.path
-import subprocess
-import unittest
import zipfile
import common
@@ -105,7 +103,7 @@
self.script.append(('AssertSomeThumbprint',) + args)
-class BuildInfoTest(unittest.TestCase):
+class BuildInfoTest(test_utils.ReleaseToolsTestCase):
TEST_INFO_DICT = {
'build.prop' : {
@@ -353,10 +351,7 @@
script_writer.script)
-class LoadOemDictsTest(unittest.TestCase):
-
- def tearDown(self):
- common.Cleanup()
+class LoadOemDictsTest(test_utils.ReleaseToolsTestCase):
def test_NoneDict(self):
self.assertIsNone(_LoadOemDicts(None))
@@ -389,7 +384,7 @@
self.assertEqual('{}'.format(i), oem_dict['ro.build.index'])
-class OtaFromTargetFilesTest(unittest.TestCase):
+class OtaFromTargetFilesTest(test_utils.ReleaseToolsTestCase):
TEST_TARGET_INFO_DICT = {
'build.prop' : {
@@ -431,9 +426,6 @@
common.OPTIONS.search_path = test_utils.get_search_path()
self.assertIsNotNone(common.OPTIONS.search_path)
- def tearDown(self):
- common.Cleanup()
-
def test_GetPackageMetadata_abOta_full(self):
target_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
target_info_dict['ab_update'] = 'true'
@@ -721,14 +713,11 @@
)
-class PropertyFilesTest(unittest.TestCase):
+class PropertyFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
common.OPTIONS.no_signing = False
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def construct_zip_package(entries):
zip_file = common.MakeTempFile(suffix='.zip')
@@ -889,6 +878,7 @@
property_files.required)
self.assertEqual(
(
+ 'care_map.pb',
'care_map.txt',
'compatibility.zip',
),
@@ -984,6 +974,7 @@
property_files.required)
self.assertEqual(
(
+ 'care_map.pb',
'care_map.txt',
'compatibility.zip',
),
@@ -1022,11 +1013,11 @@
'--signature_size', str(self.SIGNATURE_SIZE),
'--metadata_hash_file', metadata_sig_file,
'--payload_hash_file', payload_sig_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
- 'Failed to run brillo_update_payload: {}'.format(stdoutdata))
+ 'Failed to run brillo_update_payload:\n{}'.format(stdoutdata))
signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
@@ -1150,7 +1141,7 @@
property_files.Verify(zip_fp, raw_metadata)
-class PayloadSignerTest(unittest.TestCase):
+class PayloadSignerTest(test_utils.ReleaseToolsTestCase):
SIGFILE = 'sigfile.bin'
SIGNED_SIGFILE = 'signed-sigfile.bin'
@@ -1166,9 +1157,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
def _assertFilesEqual(self, file1, file2):
with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:
self.assertEqual(fp1.read(), fp2.read())
@@ -1229,7 +1217,7 @@
self._assertFilesEqual(verify_file, signed_file)
-class PayloadTest(unittest.TestCase):
+class PayloadTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
@@ -1243,9 +1231,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
@@ -1283,7 +1268,7 @@
target_file = construct_target_files()
common.ZipDelete(target_file, 'IMAGES/vendor.img')
payload = Payload()
- self.assertRaises(AssertionError, payload.Generate, target_file)
+ self.assertRaises(common.ExternalError, payload.Generate, target_file)
def test_Sign_full(self):
payload = self._create_payload_full()
@@ -1331,7 +1316,7 @@
payload = self._create_payload_full()
payload_signer = PayloadSigner()
payload_signer.signer_args.append('bad-option')
- self.assertRaises(AssertionError, payload.Sign, payload_signer)
+ self.assertRaises(common.ExternalError, payload.Sign, payload_signer)
def test_WriteToZip(self):
payload = self._create_payload_full()
diff --git a/tools/releasetools/test_rangelib.py b/tools/releasetools/test_rangelib.py
index e181187..1251e11 100644
--- a/tools/releasetools/test_rangelib.py
+++ b/tools/releasetools/test_rangelib.py
@@ -14,11 +14,11 @@
# limitations under the License.
#
-import unittest
-
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class RangeSetTest(unittest.TestCase):
+
+class RangeSetTest(ReleaseToolsTestCase):
def test_union(self):
self.assertEqual(RangeSet("10-19 30-34").union(RangeSet("18-29")),
@@ -129,8 +129,8 @@
self.assertEqual(
RangeSet.parse_raw(RangeSet("0-9").to_string_raw()),
RangeSet("0-9"))
- self.assertEqual(RangeSet.parse_raw(
- RangeSet("2-10 12").to_string_raw()),
+ self.assertEqual(
+ RangeSet.parse_raw(RangeSet("2-10 12").to_string_raw()),
RangeSet("2-10 12"))
self.assertEqual(
RangeSet.parse_raw(RangeSet("11 2-10 12 1 0").to_string_raw()),
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index ac1b567..18762ee 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -14,11 +14,8 @@
# limitations under the License.
#
-from __future__ import print_function
-
import base64
import os.path
-import unittest
import zipfile
import common
@@ -28,7 +25,7 @@
ReplaceVerityKeyId, RewriteProps)
-class SignTargetFilesApksTest(unittest.TestCase):
+class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
MAC_PERMISSIONS_XML = """<?xml version="1.0" encoding="iso-8859-1"?>
<policy>
@@ -39,9 +36,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_EditTags(self):
self.assertEqual(EditTags('dev-keys'), ('release-keys'))
self.assertEqual(EditTags('test-keys'), ('release-keys'))
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index a15ff5b..edb3d41 100644
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -18,12 +18,18 @@
Utils for running unittests.
"""
+import logging
import os
import os.path
import struct
+import sys
+import unittest
import common
+# Some test runners don't like output on stderr.
+logging.basicConfig(stream=sys.stdout)
+
def get_testdata_dir():
"""Returns the testdata dir, in relative to the script dir."""
@@ -110,3 +116,10 @@
fp.write(os.urandom(data_size))
return sparse_image
+
+
+class ReleaseToolsTestCase(unittest.TestCase):
+ """A common base class for all the releasetools unittests."""
+
+ def tearDown(self):
+ common.Cleanup()
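Each suite previously repeated a tearDown() calling common.Cleanup(); with the base class above they inherit it, so anything registered through common.MakeTempFile()/MakeTempDir() is removed after every test. A minimal usage sketch (FooTest and its assertion are hypothetical):

    import common
    from test_utils import ReleaseToolsTestCase

    class FooTest(ReleaseToolsTestCase):

        def test_WritesTempFile(self):
            # The inherited tearDown() cleans this up via common.Cleanup().
            temp_file = common.MakeTempFile(suffix='.txt')
            with open(temp_file, 'w') as fp:
                fp.write('hello')
            with open(temp_file) as fp:
                self.assertEqual('hello', fp.read())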
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index 3ba89a1..d778d11 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -16,35 +16,28 @@
"""Unittests for validate_target_files.py."""
-from __future__ import print_function
-
import os
import os.path
import shutil
-import subprocess
-import unittest
-import build_image
import common
import test_utils
+import verity_utils
from validate_target_files import ValidateVerifiedBootImages
-class ValidateTargetFilesTest(unittest.TestCase):
+class ValidateTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def _generate_boot_image(self, output_file):
kernel = common.MakeTempFile(prefix='kernel-')
with open(kernel, 'wb') as kernel_fp:
kernel_fp.write(os.urandom(10))
cmd = ['mkbootimg', '--kernel', kernel, '-o', output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -53,7 +46,7 @@
cmd = ['boot_signer', '/boot', output_file,
os.path.join(self.testdata_dir, 'testkey.pk8'),
os.path.join(self.testdata_dir, 'testkey.x509.pem'), output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -116,14 +109,14 @@
def _generate_system_image(self, output_file):
verity_fec = True
partition_size = 1024 * 1024
- adjusted_size, verity_size = build_image.AdjustPartitionSizeForVerity(
+ image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
partition_size, verity_fec)
# Use an empty root directory.
system_root = common.MakeTempDir()
cmd = ['mkuserimg_mke2fs', '-s', system_root, output_file, 'ext4',
- '/system', str(adjusted_size), '-j', '0']
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ '/system', str(image_size), '-j', '0']
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -132,15 +125,14 @@
# Append the verity metadata.
prop_dict = {
- 'original_partition_size' : str(partition_size),
- 'partition_size' : str(adjusted_size),
+ 'partition_size' : str(partition_size),
+ 'image_size' : str(image_size),
'verity_block_device' : '/dev/block/system',
'verity_key' : os.path.join(self.testdata_dir, 'testkey'),
'verity_signer_cmd' : 'verity_signer',
'verity_size' : str(verity_size),
}
- self.assertTrue(
- build_image.MakeVerityEnabledImage(output_file, verity_fec, prop_dict))
+ verity_utils.MakeVerityEnabledImage(output_file, verity_fec, prop_dict)
def test_ValidateVerifiedBootImages_systemImage(self):
input_tmp = common.MakeTempDir()
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
new file mode 100644
index 0000000..0988d8e
--- /dev/null
+++ b/tools/releasetools/test_verity_utils.py
@@ -0,0 +1,222 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unittests for verity_utils.py."""
+
+import math
+import os.path
+import random
+
+import common
+import sparse_img
+from rangelib import RangeSet
+from test_utils import get_testdata_dir, ReleaseToolsTestCase
+from verity_utils import (
+ AdjustPartitionSizeForVerity, AVBCalcMinPartitionSize, BLOCK_SIZE,
+ CreateHashtreeInfoGenerator, HashtreeInfo, MakeVerityEnabledImage,
+ VerifiedBootVersion1HashtreeInfoGenerator)
+
+
+class VerifiedBootVersion1HashtreeInfoGeneratorTest(ReleaseToolsTestCase):
+
+ def setUp(self):
+ self.testdata_dir = get_testdata_dir()
+
+ self.partition_size = 1024 * 1024
+ self.prop_dict = {
+ 'verity': 'true',
+ 'verity_fec': 'true',
+ 'system_verity_block_device': '/dev/block/system',
+ 'system_size': self.partition_size
+ }
+
+ self.hash_algorithm = "sha256"
+ self.fixed_salt = \
+ "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
+ self.expected_root_hash = \
+ "0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d"
+
+ def _create_simg(self, raw_data):
+ output_file = common.MakeTempFile()
+ raw_image = common.MakeTempFile()
+ with open(raw_image, 'wb') as f:
+ f.write(raw_data)
+
+ cmd = ["img2simg", raw_image, output_file, '4096']
+ p = common.Run(cmd)
+ p.communicate()
+ self.assertEqual(0, p.returncode)
+
+ return output_file
+
+ def _generate_image(self):
+ partition_size = 1024 * 1024
+ adjusted_size, verity_size = AdjustPartitionSizeForVerity(
+ partition_size, True)
+
+ raw_image = ""
+ for i in range(adjusted_size):
+ raw_image += str(i % 10)
+
+ output_file = self._create_simg(raw_image)
+
+ # Append the verity metadata.
+ prop_dict = {
+ 'partition_size': str(partition_size),
+ 'image_size': str(adjusted_size),
+ 'verity_block_device': '/dev/block/system',
+ 'verity_key': os.path.join(self.testdata_dir, 'testkey'),
+ 'verity_signer_cmd': 'verity_signer',
+ 'verity_size': str(verity_size),
+ }
+ MakeVerityEnabledImage(output_file, True, prop_dict)
+
+ return output_file
+
+ def test_CreateHashtreeInfoGenerator(self):
+ image_file = sparse_img.SparseImage(self._generate_image())
+
+ generator = CreateHashtreeInfoGenerator(
+ 'system', 4096, self.prop_dict)
+ self.assertEqual(
+ VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
+ self.assertEqual(self.partition_size, generator.partition_size)
+ self.assertTrue(generator.fec_supported)
+
+ def test_DecomposeSparseImage(self):
+ image_file = sparse_img.SparseImage(self._generate_image())
+
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
+ self.partition_size, 4096, True)
+ generator.DecomposeSparseImage(image_file)
+ self.assertEqual(991232, generator.filesystem_size)
+ self.assertEqual(12288, generator.hashtree_size)
+ self.assertEqual(32768, generator.metadata_size)
+
+ def test_ParseHashtreeMetadata(self):
+ image_file = sparse_img.SparseImage(self._generate_image())
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
+ self.partition_size, 4096, True)
+ generator.DecomposeSparseImage(image_file)
+
+ # pylint: disable=protected-access
+ generator._ParseHashtreeMetadata()
+
+ self.assertEqual(
+ self.hash_algorithm, generator.hashtree_info.hash_algorithm)
+ self.assertEqual(self.fixed_salt, generator.hashtree_info.salt)
+ self.assertEqual(self.expected_root_hash, generator.hashtree_info.root_hash)
+
+ def test_ValidateHashtree_smoke(self):
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
+ self.partition_size, 4096, True)
+ generator.image = sparse_img.SparseImage(self._generate_image())
+
+ generator.hashtree_info = info = HashtreeInfo()
+ info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
+ info.hashtree_range = RangeSet(
+ data=[991232 / 4096, (991232 + 12288) / 4096])
+ info.hash_algorithm = self.hash_algorithm
+ info.salt = self.fixed_salt
+ info.root_hash = self.expected_root_hash
+
+ self.assertTrue(generator.ValidateHashtree())
+
+ def test_ValidateHashtree_failure(self):
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
+ self.partition_size, 4096, True)
+ generator.image = sparse_img.SparseImage(self._generate_image())
+
+ generator.hashtree_info = info = HashtreeInfo()
+ info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
+ info.hashtree_range = RangeSet(
+ data=[991232 / 4096, (991232 + 12288) / 4096])
+ info.hash_algorithm = self.hash_algorithm
+ info.salt = self.fixed_salt
+ info.root_hash = "a" + self.expected_root_hash[1:]
+
+ self.assertFalse(generator.ValidateHashtree())
+
+ def test_Generate(self):
+ image_file = sparse_img.SparseImage(self._generate_image())
+ generator = CreateHashtreeInfoGenerator('system', 4096, self.prop_dict)
+ info = generator.Generate(image_file)
+
+ self.assertEqual(RangeSet(data=[0, 991232 / 4096]), info.filesystem_range)
+ self.assertEqual(RangeSet(data=[991232 / 4096, (991232 + 12288) / 4096]),
+ info.hashtree_range)
+ self.assertEqual(self.hash_algorithm, info.hash_algorithm)
+ self.assertEqual(self.fixed_salt, info.salt)
+ self.assertEqual(self.expected_root_hash, info.root_hash)
+
+
+class VerityUtilsTest(ReleaseToolsTestCase):
+
+ def setUp(self):
+ # To test AVBCalcMinPartitionSize(), use image sizes from 200MB to 2GB.
+ # - 51200 = 200 * 1024 * 1024 / 4096
+ # - 524288 = 2 * 1024 * 1024 * 1024 / 4096
+ self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
+ for offset in range(BLOCK_SIZE)]
+
+ def test_AVBCalcMinPartitionSize_LinearFooterSize(self):
+ """Tests with footer size which is linear to partition size."""
+ for image_size in self._image_sizes:
+ for ratio in 0.95, 0.56, 0.22:
+ expected_size = common.RoundUpTo4K(int(math.ceil(image_size / ratio)))
+ self.assertEqual(
+ expected_size,
+ AVBCalcMinPartitionSize(
+ image_size, lambda x, ratio=ratio: int(x * ratio)))
+
+ def test_AVBCalcMinPartitionSize_SlowerGrowthFooterSize(self):
+ """Tests with footer size which grows slower than partition size."""
+
+ def _SizeCalculator(partition_size):
+ """Footer size is the power of 0.95 of partition size."""
+ # Minus footer size to return max image size.
+ return partition_size - int(math.pow(partition_size, 0.95))
+
+ for image_size in self._image_sizes:
+ min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+ # Checks min_partition_size can accommodate image_size.
+ self.assertGreaterEqual(
+ _SizeCalculator(min_partition_size),
+ image_size)
+ # Checks that min_partition_size (rounded to BLOCK_SIZE) is the minimum.
+ self.assertLess(
+ _SizeCalculator(min_partition_size - BLOCK_SIZE),
+ image_size)
+
+ def test_AVBCalcMinPartitionSize_FasterGrowthFooterSize(self):
+ """Tests with footer size which grows faster than partition size."""
+
+ def _SizeCalculator(partition_size):
+ """Max image size is the power of 0.95 of partition size."""
+ # Max image size grows less than partition size, which means
+ # footer size grows faster than partition size.
+ return int(math.pow(partition_size, 0.95))
+
+ for image_size in self._image_sizes:
+ min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+ # Checks min_partition_size can accommodate image_size.
+ self.assertGreaterEqual(
+ _SizeCalculator(min_partition_size),
+ image_size)
+ # Checks that min_partition_size (rounded to BLOCK_SIZE) is the minimum.
+ self.assertLess(
+ _SizeCalculator(min_partition_size - BLOCK_SIZE),
+ image_size)
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 09f800f..1cc4a60 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -35,7 +35,6 @@
import logging
import os.path
import re
-import subprocess
import zipfile
import common
@@ -256,7 +255,7 @@
continue
cmd = ['boot_signer', '-verify', image_path, '-certificate', verity_key]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with boot_signer:\n{}'.format(image, stdoutdata)
@@ -299,7 +298,7 @@
continue
cmd = ['verity_verifier', image_path, '-mincrypt', verity_key_mincrypt]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
@@ -328,7 +327,7 @@
partition, info_dict, options[key_name])
cmd.extend(["--expected_chain_partition", chained_partition_arg])
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
new file mode 100644
index 0000000..00af296
--- /dev/null
+++ b/tools/releasetools/verity_utils.py
@@ -0,0 +1,548 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import logging
+import os.path
+import shlex
+import struct
+
+import common
+import sparse_img
+from rangelib import RangeSet
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+BLOCK_SIZE = common.BLOCK_SIZE
+FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
+
+
+class BuildVerityImageError(Exception):
+ """An Exception raised during verity image building."""
+
+ def __init__(self, message):
+ Exception.__init__(self, message)
+
+
+def GetVerityFECSize(partition_size):
+ cmd = ["fec", "-s", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVerityTreeSize(partition_size):
+ cmd = ["build_verity_tree", "-s", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVerityMetadataSize(partition_size):
+ cmd = ["build_verity_metadata.py", "size", str(partition_size)]
+ output = common.RunAndCheckOutput(cmd, verbose=False)
+ return int(output)
+
+
+def GetVeritySize(partition_size, fec_supported):
+ verity_tree_size = GetVerityTreeSize(partition_size)
+ verity_metadata_size = GetVerityMetadataSize(partition_size)
+ verity_size = verity_tree_size + verity_metadata_size
+ if fec_supported:
+ fec_size = GetVerityFECSize(partition_size + verity_size)
+ return verity_size + fec_size
+ return verity_size
+
+
+def GetSimgSize(image_file):
+ simg = sparse_img.SparseImage(image_file, build_map=False)
+ return simg.blocksize * simg.total_blocks
+
+
+def ZeroPadSimg(image_file, pad_size):
+ blocks = pad_size // BLOCK_SIZE
+ logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
+ simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
+ simg.AppendFillChunk(0, blocks)
+
+
+def AdjustPartitionSizeForVerity(partition_size, fec_supported):
+ """Modifies the provided partition size to account for the verity metadata.
+
+ This information is used to size the created image appropriately.
+
+ Args:
+ partition_size: the size of the partition to be verified.
+ fec_supported: True if forward error correction (FEC) is enabled.
+
+ Returns:
+ A tuple of the size of the partition adjusted for verity metadata, and
+ the size of verity metadata.
+ """
+ key = "%d %d" % (partition_size, fec_supported)
+ if key in AdjustPartitionSizeForVerity.results:
+ return AdjustPartitionSizeForVerity.results[key]
+
+ hi = partition_size
+ if hi % BLOCK_SIZE != 0:
+ hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
+
+ # The verity tree and FEC sizes depend on the partition size, which
+ # means this estimate is always going to be unnecessarily small.
+ verity_size = GetVeritySize(hi, fec_supported)
+ lo = partition_size - verity_size
+ result = lo
+
+ # Do a binary search for the optimal size.
+ while lo < hi:
+ i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+ v = GetVeritySize(i, fec_supported)
+ if i + v <= partition_size:
+ if result < i:
+ result = i
+ verity_size = v
+ lo = i + BLOCK_SIZE
+ else:
+ hi = i
+
+ logger.info(
+ "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
+ result, verity_size)
+ AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
+ return (result, verity_size)
+
+
+AdjustPartitionSizeForVerity.results = {}
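The search above maximizes the image size i subject to i + verity_size(i) <= partition_size, stepping in whole blocks. A self-contained toy with a made-up, monotonic verity-size model (the ~1% overhead is purely illustrative) demonstrates the same invariants:

    BLOCK = 4096

    def mock_verity_size(image_size):
        # Made-up model: ~1% overhead, rounded up to a whole block.
        overhead = image_size // 100 + 1
        return (overhead + BLOCK - 1) // BLOCK * BLOCK

    def max_image_size(partition_size):
        lo, hi, result = 0, partition_size // BLOCK * BLOCK, 0
        while lo < hi:
            i = ((lo + hi) // (2 * BLOCK)) * BLOCK
            if i + mock_verity_size(i) <= partition_size:
                result = max(result, i)
                lo = i + BLOCK
            else:
                hi = i
        return result

    size = max_image_size(1024 * 1024)
    assert size + mock_verity_size(size) <= 1024 * 1024                  # fits
    assert size + BLOCK + mock_verity_size(size + BLOCK) > 1024 * 1024  # maximal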
+
+
+def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
+ padding_size):
+ cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+ verity_path, verity_fec_path]
+ common.RunAndCheckOutput(cmd)
+
+
+def BuildVerityTree(sparse_image_path, verity_image_path):
+ cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+ verity_image_path]
+ output = common.RunAndCheckOutput(cmd)
+ root, salt = output.split()
+ return root, salt
+
+
+def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
+ block_device, signer_path, key, signer_args,
+ verity_disable):
+ cmd = ["build_verity_metadata.py", "build", str(image_size),
+ verity_metadata_path, root_hash, salt, block_device, signer_path, key]
+ if signer_args:
+ cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+ if verity_disable:
+ cmd.append("--verity_disable")
+ common.RunAndCheckOutput(cmd)
+
+
+def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
+ """Appends the unsparse image to the given sparse image.
+
+ Args:
+ sparse_image_path: the path to the (sparse) image
+ unsparse_image_path: the path to the (unsparse) image
+ error_message: the message to include in the raised exception
+
+ Raises:
+ BuildVerityImageError: On error.
+ """
+ cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+ try:
+ common.RunAndCheckOutput(cmd)
+ except Exception:
+ raise BuildVerityImageError(error_message)
+
+
+def Append(target, file_to_append, error_message):
+ """Appends file_to_append to target.
+
+ Raises:
+ BuildVerityImageError: On error.
+ """
+ try:
+ with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
+ for line in input_file:
+ out_file.write(line)
+ except IOError:
+ raise BuildVerityImageError(error_message)
+
+
+def BuildVerifiedImage(data_image_path, verity_image_path,
+ verity_metadata_path, verity_fec_path,
+ padding_size, fec_supported):
+ Append(
+ verity_image_path, verity_metadata_path,
+ "Could not append verity metadata!")
+
+ if fec_supported:
+ # Build FEC for the entire partition, including metadata.
+ BuildVerityFEC(
+ data_image_path, verity_image_path, verity_fec_path, padding_size)
+ Append(verity_image_path, verity_fec_path, "Could not append FEC!")
+
+ Append2Simg(
+ data_image_path, verity_image_path, "Could not append verity data!")
+
+
+def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
+ """Creates an image that is verifiable using dm-verity.
+
+ Args:
+ out_file: the location to write the verifiable image at
+ fec_supported: True if forward error correction (FEC) data should be added
+ prop_dict: a dictionary of properties required for image creation and
+ verification
+
+ Raises:
+ AssertionError: On invalid partition sizes.
+ """
+ # get properties
+ image_size = int(prop_dict["image_size"])
+ block_dev = prop_dict["verity_block_device"]
+ signer_key = prop_dict["verity_key"] + ".pk8"
+ if OPTIONS.verity_signer_path is not None:
+ signer_path = OPTIONS.verity_signer_path
+ else:
+ signer_path = prop_dict["verity_signer_cmd"]
+ signer_args = OPTIONS.verity_signer_args
+
+ tempdir_name = common.MakeTempDir(suffix="_verity_images")
+
+ # Get partial image paths.
+ verity_image_path = os.path.join(tempdir_name, "verity.img")
+ verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
+ verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
+
+ # Build the verity tree and get the root hash and salt.
+ root_hash, salt = BuildVerityTree(out_file, verity_image_path)
+
+ # Build the metadata blocks.
+ verity_disable = "verity_disable" in prop_dict
+ BuildVerityMetadata(
+ image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path,
+ signer_key, signer_args, verity_disable)
+
+ # Build the full verified image.
+ partition_size = int(prop_dict["partition_size"])
+ verity_size = int(prop_dict["verity_size"])
+
+ padding_size = partition_size - image_size - verity_size
+ assert padding_size >= 0
+
+ BuildVerifiedImage(
+ out_file, verity_image_path, verity_metadata_path, verity_fec_path,
+ padding_size, fec_supported)
+
+
+def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
+ """Calculates max image size for a given partition size.
+
+ Args:
+ avbtool: String with path to avbtool.
+ footer_type: 'hash' or 'hashtree' for generating footer.
+ partition_size: The size of the partition in question.
+ additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+ or "avbtool add_hashtree_footer".
+
+ Returns:
+ The maximum image size.
+
+ Raises:
+ BuildVerityImageError: On invalid image size.
+ """
+ cmd = [avbtool, "add_%s_footer" % footer_type,
+ "--partition_size", str(partition_size), "--calc_max_image_size"]
+ cmd.extend(shlex.split(additional_args))
+
+ output = common.RunAndCheckOutput(cmd)
+ image_size = int(output)
+ if image_size <= 0:
+ raise BuildVerityImageError(
+ "Invalid max image size: {}".format(output))
+ return image_size
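For instance, assuming avbtool is on the PATH, the hashtree variant could be queried as below; the partition size and extra arguments are illustrative:

    # max_size = AVBCalcMaxImageSize(
    #     'avbtool', 'hashtree', 1024 * 1024, '--hash_algorithm sha256')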
+
+
+def AVBCalcMinPartitionSize(image_size, size_calculator):
+ """Calculates min partition size for a given image size.
+
+ Args:
+ image_size: The size of the image in question.
+ size_calculator: The function to calculate max image size
+ for a given partition size.
+
+ Returns:
+ The minimum partition size required to accommodate the image size.
+ """
+ # Use image size as partition size to approximate final partition size.
+ image_ratio = size_calculator(image_size) / float(image_size)
+
+ # Prepare a binary search for the optimal partition size.
+ lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
+
+ # Ensure lo is small enough: max_image_size should <= image_size.
+ delta = BLOCK_SIZE
+ max_image_size = size_calculator(lo)
+ while max_image_size > image_size:
+ image_ratio = max_image_size / float(lo)
+ lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
+ delta *= 2
+ max_image_size = size_calculator(lo)
+
+ hi = lo + BLOCK_SIZE
+
+ # Ensure hi is large enough: max_image_size should >= image_size.
+ delta = BLOCK_SIZE
+ max_image_size = size_calculator(hi)
+ while max_image_size < image_size:
+ image_ratio = max_image_size / float(hi)
+ hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
+ delta *= 2
+ max_image_size = size_calculator(hi)
+
+ partition_size = hi
+
+ # Start to binary search.
+ while lo < hi:
+ mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+ max_image_size = size_calculator(mid)
+ if max_image_size >= image_size: # if mid can accommodate image_size
+ if mid < partition_size: # if a smaller partition size is found
+ partition_size = mid
+ hi = mid
+ else:
+ lo = mid + BLOCK_SIZE
+
+ logger.info(
+ "AVBCalcMinPartitionSize(%d): partition_size: %d.",
+ image_size, partition_size)
+
+ return partition_size
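A usage sketch mirroring the unit tests above, with a toy calculator that reserves a flat 5% for the footer (the ratio is just an example):

    def calc(partition_size):
        # Toy model: the footer takes a flat 5% of the partition.
        return int(partition_size * 0.95)

    part = AVBCalcMinPartitionSize(1000 * BLOCK_SIZE, calc)
    assert calc(part) >= 1000 * BLOCK_SIZE              # accommodates the image
    assert calc(part - BLOCK_SIZE) < 1000 * BLOCK_SIZE  # and is minimal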
+
+
+def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
+ partition_name, key_path, algorithm, salt,
+ additional_args):
+ """Adds dm-verity hashtree and AVB metadata to an image.
+
+ Args:
+ image_path: Path to image to modify.
+ avbtool: String with path to avbtool.
+ footer_type: 'hash' or 'hashtree' for generating footer.
+ partition_size: The size of the partition in question.
+ partition_name: The name of the partition - will be embedded in metadata.
+ key_path: Path to key to use or None.
+ algorithm: Name of algorithm to use or None.
+ salt: The salt to use (a hexadecimal string) or None.
+ additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+ or "avbtool add_hashtree_footer".
+ """
+ cmd = [avbtool, "add_%s_footer" % footer_type,
+ "--partition_size", partition_size,
+ "--partition_name", partition_name,
+ "--image", image_path]
+
+ if key_path and algorithm:
+ cmd.extend(["--key", key_path, "--algorithm", algorithm])
+ if salt:
+ cmd.extend(["--salt", salt])
+
+ cmd.extend(shlex.split(additional_args))
+
+ common.RunAndCheckOutput(cmd)
+
+
+class HashtreeInfoGenerationError(Exception):
+ """An Exception raised during hashtree info generation."""
+
+ def __init__(self, message):
+ Exception.__init__(self, message)
+
+
+class HashtreeInfo(object):
+ def __init__(self):
+ self.hashtree_range = None
+ self.filesystem_range = None
+ self.hash_algorithm = None
+ self.salt = None
+ self.root_hash = None
+
+
+def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
+ generator = None
+ if (info_dict.get("verity") == "true" and
+ info_dict.get("{}_verity_block_device".format(partition_name))):
+ partition_size = info_dict["{}_size".format(partition_name)]
+ fec_supported = info_dict.get("verity_fec") == "true"
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
+ partition_size, block_size, fec_supported)
+
+ return generator
+
+
+class HashtreeInfoGenerator(object):
+ def Generate(self, image):
+ raise NotImplementedError
+
+ def DecomposeSparseImage(self, image):
+ raise NotImplementedError
+
+ def ValidateHashtree(self):
+ raise NotImplementedError
+
+
+class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
+ """A class that parses the metadata of hashtree for a given partition."""
+
+ def __init__(self, partition_size, block_size, fec_supported):
+ """Initialize VerityTreeInfo with the sparse image and input property.
+
+ Arguments:
+ partition_size: The whole size in bytes of a partition, including the
+ filesystem size, padding size, and verity size.
+ block_size: Expected size in bytes of each block for the sparse image.
+ fec_supported: True if the verity section contains fec data.
+ """
+
+ self.block_size = block_size
+ self.partition_size = partition_size
+ self.fec_supported = fec_supported
+
+ self.image = None
+ self.filesystem_size = None
+ self.hashtree_size = None
+ self.metadata_size = None
+
+ self.hashtree_info = HashtreeInfo()
+
+ def DecomposeSparseImage(self, image):
+ """Calculate the verity size based on the size of the input image.
+
+ The structure of a verity-enabled image is known to be [filesystem,
+ verity_hashtree, verity_metadata, fec_data], so we can calculate the
+ size and offset of each section.
+ """
+
+ self.image = image
+ assert self.block_size == image.blocksize
+ assert self.partition_size == image.total_blocks * self.block_size, \
+ "partition size {} doesn't match with the calculated image size." \
+ " total_blocks: {}".format(self.partition_size, image.total_blocks)
+
+ adjusted_size, _ = AdjustPartitionSizeForVerity(
+ self.partition_size, self.fec_supported)
+ assert adjusted_size % self.block_size == 0
+
+ verity_tree_size = GetVerityTreeSize(adjusted_size)
+ assert verity_tree_size % self.block_size == 0
+
+ metadata_size = GetVerityMetadataSize(adjusted_size)
+ assert metadata_size % self.block_size == 0
+
+ self.filesystem_size = adjusted_size
+ self.hashtree_size = verity_tree_size
+ self.metadata_size = metadata_size
+
+ self.hashtree_info.filesystem_range = RangeSet(
+ data=[0, adjusted_size / self.block_size])
+ self.hashtree_info.hashtree_range = RangeSet(
+ data=[adjusted_size / self.block_size,
+ (adjusted_size + verity_tree_size) / self.block_size])
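Plugging in the 1 MiB fixture from test_verity_utils.py above (FEC enabled, 4096-byte blocks) gives a concrete decomposition; the remainder after the metadata is left for FEC data and padding:

    # partition_size        = 1048576  (1024 * 1024)
    # filesystem (adjusted) =  991232  -> blocks 0..241
    # verity hashtree       =   12288  -> blocks 242..244
    # verity metadata       =   32768
    # FEC data + padding    =   12288  (= 1048576 - 991232 - 12288 - 32768)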
+
+ def _ParseHashtreeMetadata(self):
+ """Parses the hash_algorithm, root_hash, salt from the metadata block."""
+
+ metadata_start = self.filesystem_size + self.hashtree_size
+ metadata_range = RangeSet(
+ data=[metadata_start / self.block_size,
+ (metadata_start + self.metadata_size) / self.block_size])
+ meta_data = ''.join(self.image.ReadRangeSet(metadata_range))
+
+ # More info about the metadata structure available in:
+ # system/extras/verity/build_verity_metadata.py
+ META_HEADER_SIZE = 268
+ header_bin = meta_data[0:META_HEADER_SIZE]
+ header = struct.unpack("II256sI", header_bin)
+
+ # header: magic_number, version, signature, table_len
+ assert header[0] == 0xb001b001, header[0]
+ table_len = header[3]
+ verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
+ table_entries = verity_table.rstrip().split()
+
+ # Expected verity table format: "1 block_device block_device block_size
+ # block_size data_blocks data_blocks hash_algorithm root_hash salt"
+ assert len(table_entries) == 10, "Unexpected verity table entry count {}".format(
+ len(table_entries))
+ assert (int(table_entries[3]) == self.block_size and
+ int(table_entries[4]) == self.block_size)
+ assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
+ int(table_entries[6]) * self.block_size == self.filesystem_size)
+
+ self.hashtree_info.hash_algorithm = table_entries[7]
+ self.hashtree_info.root_hash = table_entries[8]
+ self.hashtree_info.salt = table_entries[9]
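The header layout can be exercised without a real image. A minimal sketch that packs and re-parses a synthetic metadata block (the device paths, root hash, and salt below are placeholders):

    import struct

    META_HEADER_SIZE = 268  # 4 + 4 + 256 + 4 bytes, per "II256sI"
    MAGIC = 0xb001b001

    table = b"1 /dev/blk /dev/blk 4096 4096 242 242 sha256 roothash salt"
    header = struct.pack("II256sI", MAGIC, 0, b"\x00" * 256, len(table))
    meta_data = header + table

    magic, _version, _signature, table_len = struct.unpack(
        "II256sI", meta_data[:META_HEADER_SIZE])
    assert magic == MAGIC
    entries = meta_data[META_HEADER_SIZE:META_HEADER_SIZE + table_len].split()
    assert entries[7] == b"sha256"    # hash algorithm
    assert entries[8] == b"roothash"  # root hash
    assert entries[9] == b"salt"      # salt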
+
+ def ValidateHashtree(self):
+ """Checks that we can reconstruct the verity hash tree."""
+
+ # Writes the filesystem section to a temp file, then calls the
+ # build_verity_tree executable to construct the hash tree.
+ adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
+ with open(adjusted_partition, "wb") as fd:
+ self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
+
+ generated_verity_tree = common.MakeTempFile(prefix="verity")
+ root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
+
+ # The salt should always be identical, as we use a fixed value.
+ assert salt == self.hashtree_info.salt, \
+ "Calculated salt {} doesn't match the one in metadata {}".format(
+ salt, self.hashtree_info.salt)
+
+ if root_hash != self.hashtree_info.root_hash:
+ logger.warning(
+ "Calculated root hash %s doesn't match the one in metadata %s",
+ root_hash, self.hashtree_info.root_hash)
+ return False
+
+ # Reads the generated hash tree and checks if it has the exact same bytes
+ # as the one in the sparse image.
+ with open(generated_verity_tree, "rb") as fd:
+ return fd.read() == ''.join(self.image.ReadRangeSet(
+ self.hashtree_info.hashtree_range))
+
+ def Generate(self, image):
+ """Parses and validates the hashtree info in a sparse image.
+
+ Returns:
+ hashtree_info: The information needed to reconstruct the hashtree.
+
+ Raises:
+ HashtreeInfoGenerationError: If we fail to generate the exact bytes of
+ the hashtree.
+ """
+
+ self.DecomposeSparseImage(image)
+ self._ParseHashtreeMetadata()
+
+ if not self.ValidateHashtree():
+ raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
+
+ return self.hashtree_info
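Putting it together, callers (such as the unit tests above) drive the generator roughly as follows; 'system.img' is a placeholder path to a verity-enabled sparse image:

    import sparse_img
    from verity_utils import CreateHashtreeInfoGenerator

    prop_dict = {
        'verity': 'true',
        'verity_fec': 'true',
        'system_verity_block_device': '/dev/block/system',
        'system_size': 1024 * 1024,
    }
    image = sparse_img.SparseImage('system.img')
    generator = CreateHashtreeInfoGenerator('system', 4096, prop_dict)
    # Generate() decomposes the image, parses the metadata, and validates;
    # it raises HashtreeInfoGenerationError if the tree cannot be rebuilt.
    info = generator.Generate(image)
    print(info.hash_algorithm, info.root_hash, info.salt)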
diff --git a/tools/uuidgen.py b/tools/uuidgen.py
deleted file mode 100755
index d3091a7..0000000
--- a/tools/uuidgen.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import sys
-import uuid
-
-def uuidgen(name):
- return uuid.uuid5(uuid.uuid5(uuid.NAMESPACE_URL, "android.com"), name)
-
-if __name__ == "__main__":
- if len(sys.argv) < 2:
- print("Usage: uuidgen.py <name>")
- sys.exit(1)
- name = sys.argv[1]
- print(uuidgen(name))
diff --git a/tools/warn.py b/tools/warn.py
index bcde64a..c710164 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -505,6 +505,11 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: This class\'s name looks like a Type Parameter.',
+ 'patterns': [r".*: warning: \[ClassNamedLikeTypeParameter\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Field name is CONSTANT_CASE, but field is not static and final',
'patterns': [r".*: warning: \[ConstantField\] .+"]},
{'category': 'java',
@@ -515,6 +520,11 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to ExpectedException',
+ 'patterns': [r".*: warning: \[ExpectedExceptionRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: This field is only assigned during initialization; consider making it final',
'patterns': [r".*: warning: \[FieldCanBeFinal\] .+"]},
{'category': 'java',
@@ -525,7 +535,12 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
- r'Java: Use Java\'s utility functional interfaces instead of Function\u003cA, B> for primitive types.',
+ 'Java: Refactors uses of the JSR 305 @Immutable to Error Prone\'s annotation',
+ 'patterns': [r".*: warning: \[ImmutableRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
+ 'Java: Use Java\'s utility functional interfaces instead of Function\u003cA, B> for primitive types.',
'patterns': [r".*: warning: \[LambdaFunctionalInterface\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -560,7 +575,7 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
- 'Java: Non-standard parameter comment; prefer `/*paramName=*/ arg`',
+ 'Java: Non-standard parameter comment; prefer `/* paramName= */ arg`',
'patterns': [r".*: warning: \[ParameterComment\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -600,17 +615,27 @@
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to @Test(expected=...)',
+ 'patterns': [r".*: warning: \[TestExceptionRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Unchecked exceptions do not need to be declared in the method signature.',
'patterns': [r".*: warning: \[ThrowsUncheckedException\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
'description':
+ 'Java: Prefer assertThrows to try/fail',
+ 'patterns': [r".*: warning: \[TryFailRefactoring\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.LOW,
+ 'description':
'Java: Type parameters must be a single letter with an optional numeric suffix, or an UpperCamelCase name followed by the letter \'T\'.',
'patterns': [r".*: warning: \[TypeParameterNaming\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
'description':
- 'Java: Constructors and methods with the same name should appear sequentially with no other code in between',
+ 'Java: Constructors and methods with the same name should appear sequentially with no other code in between. Please re-order or re-name methods.',
'patterns': [r".*: warning: \[UngroupedOverloads\] .+"]},
{'category': 'java',
'severity': Severity.LOW,
@@ -640,11 +665,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This method passes a pair of parameters through to String.format, but the enclosing method wasn\'t annotated @FormatMethod. Doing so gives compile-time rather than run-time protection against malformed format strings.',
+ 'patterns': [r".*: warning: \[AnnotateFormatMethod\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Annotations should be positioned after Javadocs, but before modifiers.',
+ 'patterns': [r".*: warning: \[AnnotationPosition\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Arguments are in the wrong order or could be commented for clarity.',
'patterns': [r".*: warning: \[ArgumentSelectionDefectChecker\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Arrays do not override equals() or hashCode, so comparisons will be done on reference equality only. If neither deduplication nor lookup are needed, consider using a List instead. Otherwise, use IdentityHashMap/Set, a Map from a library that handles object arrays, or an Iterable/List of pairs.',
+ 'patterns': [r".*: warning: \[ArrayAsKeyOfSetOrMap\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Arguments are swapped in assertEquals-like call',
'patterns': [r".*: warning: \[AssertEqualsArgumentOrderChecker\] .+"]},
{'category': 'java',
@@ -655,7 +695,7 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: The lambda passed to assertThows should contain exactly one statement',
+ 'Java: The lambda passed to assertThrows should contain exactly one statement',
'patterns': [r".*: warning: \[AssertThrowsMultipleStatements\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -670,6 +710,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Make toString(), hashCode() and equals() final in AutoValue classes, so it is clear to readers that AutoValue is not overriding them',
+ 'patterns': [r".*: warning: \[AutoValueFinalMethods\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Classes that implement Annotation must override equals and hashCode. Consider using AutoAnnotation instead of implementing Annotation by hand.',
'patterns': [r".*: warning: \[BadAnnotationImplementation\] .+"]},
{'category': 'java',
@@ -680,7 +725,22 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: BigDecimal(double) and BigDecimal.valueOf(double) may lose precision, prefer BigDecimal(String) or BigDecimal(long)',
+ 'Java: Importing nested classes/static methods/static fields with commonly-used names can make code harder to read, because it may not be clear from the context exactly which type is being referred to. Qualifying the name with that of the containing class can make the code clearer.',
+ 'patterns': [r".*: warning: \[BadImport\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: instanceof used in a way that is equivalent to a null check.',
+ 'patterns': [r".*: warning: \[BadInstanceof\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: BigDecimal#equals has surprising behavior: it also compares scale.',
+ 'patterns': [r".*: warning: \[BigDecimalEquals\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: new BigDecimal(double) loses precision in this case.',
'patterns': [r".*: warning: \[BigDecimalLiteralDouble\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -735,6 +795,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Providing Closeable resources makes their lifecycle unclear',
+ 'patterns': [r".*: warning: \[CloseableProvides\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: The type of the array parameter of Collection.toArray needs to be compatible with the array type',
'patterns': [r".*: warning: \[CollectionToArraySafeParameter\] .+"]},
{'category': 'java',
@@ -770,6 +835,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Avoid deprecated Thread methods; read the method\'s javadoc for details.',
+ 'patterns': [r".*: warning: \[DeprecatedThreadMethods\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Prefer collection factory methods or builders to the double-brace initialization pattern.',
'patterns': [r".*: warning: \[DoubleBraceInitialization\] .+"]},
{'category': 'java',
@@ -785,6 +855,16 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: equals() implementation may throw NullPointerException when given null',
+ 'patterns': [r".*: warning: \[EqualsBrokenForNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Overriding Object#equals in a non-final class by using getClass rather than instanceof breaks substitutability of subclasses.',
+ 'patterns': [r".*: warning: \[EqualsGetClass\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Classes that override equals should also override hashCode.',
'patterns': [r".*: warning: \[EqualsHashCode\] .+"]},
{'category': 'java',
@@ -795,11 +875,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: The contract of #equals states that it should return false for incompatible types, while this implementation may throw ClassCastException.',
+ 'patterns': [r".*: warning: \[EqualsUnsafeCast\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Implementing #equals by just comparing hashCodes is fragile. Hashes collide frequently, and this will lead to false positives in #equals.',
+ 'patterns': [r".*: warning: \[EqualsUsingHashCode\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Calls to ExpectedException#expect should always be followed by exactly one statement.',
'patterns': [r".*: warning: \[ExpectedExceptionChecker\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: When only using JUnit Assert\'s static methods, you should import statically instead of extending.',
+ 'patterns': [r".*: warning: \[ExtendingJUnitAssert\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Switch case may fall through',
'patterns': [r".*: warning: \[FallThrough\] .+"]},
{'category': 'java',
@@ -815,6 +910,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This fuzzy equality check is using a tolerance less than the gap to the next number. You may want a less restrictive tolerance, or to assert equality.',
+ 'patterns': [r".*: warning: \[FloatingPointAssertionWithinEpsilon\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Floating point literal loses precision',
'patterns': [r".*: warning: \[FloatingPointLiteralPrecision\] .+"]},
{'category': 'java',
@@ -875,6 +975,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Including fields in hashCode which are not compared in equals violates the contract of hashCode.',
+ 'patterns': [r".*: warning: \[InconsistentHashCode\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: The ordering of parameters in overloaded methods should be as consistent as possible (when viewed from left to right)',
'patterns': [r".*: warning: \[InconsistentOverloads\] .+"]},
{'category': 'java',
@@ -905,6 +1010,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: This @param tag doesn\'t refer to a parameter of the method.',
+ 'patterns': [r".*: warning: \[InvalidParam\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: This tag is invalid.',
+ 'patterns': [r".*: warning: \[InvalidTag\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: The documented method doesn\'t actually throw this checked exception.',
+ 'patterns': [r".*: warning: \[InvalidThrows\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Class should not implement both `Iterable` and `Iterator`',
'patterns': [r".*: warning: \[IterableAndIterator\] .+"]},
{'category': 'java',
@@ -935,11 +1055,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Calls to Lock#lock should be immediately followed by a try block which releases the lock.',
+ 'patterns': [r".*: warning: \[LockNotBeforeTry\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Assignment where a boolean expression was expected; use == if this assignment wasn\'t expected or add parentheses for clarity.',
'patterns': [r".*: warning: \[LogicalAssignment\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Math.abs does not always give a positive result. Please consider other methods for positive random numbers.',
+ 'patterns': [r".*: warning: \[MathAbsoluteRandom\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Switches on enum types should either handle all values, or have a default case.',
'patterns': [r".*: warning: \[MissingCasesInEnumSwitch\] .+"]},
{'category': 'java',
@@ -960,6 +1090,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: A collection or proto builder was created, but its values were never accessed.',
+ 'patterns': [r".*: warning: \[ModifiedButNotUsed\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Modifying a collection while iterating over it in a loop may cause a ConcurrentModificationException to be thrown.',
'patterns': [r".*: warning: \[ModifyCollectionInEnhancedForLoop\] .+"]},
{'category': 'java',
@@ -990,6 +1125,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Instead of returning a functional type, return the actual type that the returned function would return and use lambdas at use site.',
+ 'patterns': [r".*: warning: \[NoFunctionalReturnType\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: This update of a volatile variable is non-atomic',
'patterns': [r".*: warning: \[NonAtomicVolatileUpdate\] .+"]},
{'category': 'java',
@@ -1010,6 +1150,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Dereference of possibly-null value',
+ 'patterns': [r".*: warning: \[NullableDereference\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: @Nullable should not be used for primitive types since they cannot be null',
'patterns': [r".*: warning: \[NullablePrimitive\] .+"]},
{'category': 'java',
@@ -1025,6 +1170,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Objects.hashCode(Object o) should not be passed a primitive value',
+ 'patterns': [r".*: warning: \[ObjectsHashCodePrimitive\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Use grouping parenthesis to make the operator precedence explicit',
'patterns': [r".*: warning: \[OperatorPrecedence\] .+"]},
{'category': 'java',
@@ -1070,8 +1220,13 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: Protobuf fields cannot be null, so this check is redundant',
- 'patterns': [r".*: warning: \[ProtoFieldPreconditionsCheckNotNull\] .+"]},
+ 'Java: A field on a protocol buffer was set twice in the same chained expression.',
+ 'patterns': [r".*: warning: \[ProtoRedundantSet\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Protos should not be used as a key to a map, in a set, or in a contains method on a descendant of a collection. Protos have non-deterministic ordering and proto equality is deep, which is a performance issue.',
+ 'patterns': [r".*: warning: \[ProtosAsKeyOfSetOrMap\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
@@ -1110,7 +1265,12 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- r'Java: Prefer the short-circuiting boolean operators \u0026\u0026 and || to \u0026 and |.',
+ 'Java: Void methods should not have a @return tag.',
+ 'patterns': [r".*: warning: \[ReturnFromVoid\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Prefer the short-circuiting boolean operators \u0026\u0026 and || to \u0026 and |.',
'patterns': [r".*: warning: \[ShortCircuitBoolean\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
@@ -1140,11 +1300,21 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: SWIG-generated code that can\'t call a C++ destructor will leak memory',
+ 'patterns': [r".*: warning: \[SwigMemoryLeak\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Synchronizing on non-final fields is not safe: if the field is ever updated, different threads may end up locking on different objects.',
'patterns': [r".*: warning: \[SynchronizeOnNonFinalField\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Code that contains System.exit() is untestable.',
+ 'patterns': [r".*: warning: \[SystemExitOutsideMain\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Using @Test(expected=...) is discouraged, since the test will pass if *any* statement in the test method throws the expected exception',
'patterns': [r".*: warning: \[TestExceptionChecker\] .+"]},
{'category': 'java',
@@ -1160,11 +1330,26 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Relying on the thread scheduler is discouraged; see Effective Java Item 72 (2nd edition) / 84 (3rd edition).',
+ 'patterns': [r".*: warning: \[ThreadPriorityCheck\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Three-letter time zone identifiers are deprecated, may be ambiguous, and might not do what you intend; the full IANA time zone ID should be used instead.',
'patterns': [r".*: warning: \[ThreeLetterTimeZoneID\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: An implementation of Object.toString() should never return null.',
+ 'patterns': [r".*: warning: \[ToStringReturnsNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: The actual and expected values appear to be swapped, which results in poor assertion failure messages. The actual value should come first.',
+ 'patterns': [r".*: warning: \[TruthAssertExpected\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Truth Library assert is called on a constant.',
'patterns': [r".*: warning: \[TruthConstantAsserts\] .+"]},
{'category': 'java',
@@ -1175,6 +1360,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Type parameter declaration shadows another named type',
+ 'patterns': [r".*: warning: \[TypeNameShadowing\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Type parameter declaration overrides another type parameter already declared',
'patterns': [r".*: warning: \[TypeParameterShadowing\] .+"]},
{'category': 'java',
@@ -1190,21 +1380,46 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
- 'Java: Switch handles all enum values; an explicit default case is unnecessary and defeats error checking for non-exhaustive switches.',
+ 'Java: Collection, Iterable, Multimap, and Queue do not have well-defined equals behavior',
+ 'patterns': [r".*: warning: \[UndefinedEquals\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: Switch handles all enum values: an explicit default case is unnecessary and defeats error checking for non-exhaustive switches.',
'patterns': [r".*: warning: \[UnnecessaryDefaultInEnumSwitch\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Unnecessary use of grouping parentheses',
+ 'patterns': [r".*: warning: \[UnnecessaryParentheses\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Finalizer may run before native code finishes execution',
'patterns': [r".*: warning: \[UnsafeFinalization\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Prefer `asSubclass` instead of casting the result of `newInstance`, to detect classes of incorrect type before invoking their constructors. This way, if the class is of the incorrect type, it will throw an exception before invoking its constructor.',
+ 'patterns': [r".*: warning: \[UnsafeReflectiveConstructionCast\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Unsynchronized method overrides a synchronized method.',
'patterns': [r".*: warning: \[UnsynchronizedOverridesSynchronized\] .+"]},
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: Unused.',
+ 'patterns': [r".*: warning: \[Unused\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
+ 'Java: This catch block catches an exception and re-throws another, but swallows the caught exception rather than setting it as a cause. This can make debugging harder.',
+ 'patterns': [r".*: warning: \[UnusedException\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Java assert is used in test. For testing purposes Assert.* matchers should be used.',
'patterns': [r".*: warning: \[UseCorrectAssertInTests\] .+"]},
{'category': 'java',
@@ -1215,6 +1430,11 @@
{'category': 'java',
'severity': Severity.MEDIUM,
'description':
+ 'Java: variableName and type with the same name would refer to the static field instead of the class',
+ 'patterns': [r".*: warning: \[VariableNameSameAsType\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.MEDIUM,
+ 'description':
'Java: Because of spurious wakeups, Object.wait() and Condition.await() must always be called in a loop',
'patterns': [r".*: warning: \[WaitNotInLoop\] .+"]},
{'category': 'java',
@@ -1230,6 +1450,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Use of class, field, or method that is not compatible with legacy Android devices',
+ 'patterns': [r".*: warning: \[AndroidJdkLibsChecker\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Reference equality used to compare arrays',
'patterns': [r".*: warning: \[ArrayEquals\] .+"]},
{'category': 'java',
@@ -1310,11 +1535,16 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- r'Java: Implementing \'Comparable\u003cT>\' where T is not compatible with the implementing class.',
+ 'Java: Implementing \'Comparable\u003cT>\' where T is not compatible with the implementing class.',
'patterns': [r".*: warning: \[ComparableType\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: this == null is always false, this != null is always true',
+ 'patterns': [r".*: warning: \[ComparingThisWithNull\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: This comparison method violates the contract',
'patterns': [r".*: warning: \[ComparisonContractViolated\] .+"]},
{'category': 'java',
@@ -1395,6 +1625,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Comparing different pairs of fields/getters in an equals implementation is probably a mistake.',
+ 'patterns': [r".*: warning: \[EqualsWrongThing\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Method annotated @ForOverride must be protected or package-private and only invoked from declaring class, or from an override of the method',
'patterns': [r".*: warning: \[ForOverride\] .+"]},
{'category': 'java',
@@ -1510,6 +1745,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Members shouldn\'t be annotated with @Inject if constructor is already annotated @Inject',
+ 'patterns': [r".*: warning: \[InjectOnMemberAndConstructor\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Scope annotation on an interface or abstract class is not allowed',
'patterns': [r".*: warning: \[InjectScopeAnnotationOnInterfaceOrAbstractClass\] .+"]},
{'category': 'java',
@@ -1550,7 +1790,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- r'Java: Path implements Iterable\u003cPath>; prefer Collection\u003cPath> for clarity',
+ 'Java: Path implements Iterable\u003cPath>; prefer Collection\u003cPath> for clarity',
'patterns': [r".*: warning: \[IterablePathParameter\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1590,6 +1830,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Use of class, field, or method that is not compatible with JDK 7',
+ 'patterns': [r".*: warning: \[Java7ApiChecker\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Abstract and default methods are not injectable with javax.inject.Inject',
'patterns': [r".*: warning: \[JavaxInjectOnAbstractMethod\] .+"]},
{'category': 'java',
@@ -1620,6 +1865,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: Math.round(Integer) results in truncation',
+ 'patterns': [r".*: warning: \[MathRoundIntLong\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Certain resources in `android.R.string` have names that do not match their content',
'patterns': [r".*: warning: \[MislabeledAndroidString\] .+"]},
{'category': 'java',
@@ -1630,6 +1880,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: A terminating method call is required for a test helper to have any effect.',
+ 'patterns': [r".*: warning: \[MissingTestCall\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Use of "YYYY" (week year) in a date pattern without "ww" (week in year). You probably meant to use "yyyy" (year) instead.',
'patterns': [r".*: warning: \[MisusedWeekYear\] .+"]},
{'category': 'java',
@@ -1735,7 +1990,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- 'Java: Using ::equals as an incompatible Predicate; the predicate will always return false',
+ 'Java: Using ::equals or ::isInstance as an incompatible Predicate; the predicate will always return false',
'patterns': [r".*: warning: \[PredicateIncompatibleType\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1745,7 +2000,7 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
- 'Java: Protobuf fields cannot be null',
+ 'Java: Protobuf fields cannot be null.',
'patterns': [r".*: warning: \[ProtoFieldNullComparison\] .+"]},
{'category': 'java',
'severity': Severity.HIGH,
@@ -1835,6 +2090,11 @@
{'category': 'java',
'severity': Severity.HIGH,
'description':
+ 'Java: String.substring(0) returns the original String',
+ 'patterns': [r".*: warning: \[SubstringOfZero\] .+"]},
+ {'category': 'java',
+ 'severity': Severity.HIGH,
+ 'description':
'Java: Suppressing "deprecated" is probably a typo for "deprecation"',
'patterns': [r".*: warning: \[SuppressWarningsDeprecated\] .+"]},
{'category': 'java',
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index af04a34..eea1749 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -111,7 +111,7 @@
status = pZout->add(pZin, pEntry, padding, &pNewEntry);
}
- if (status != NO_ERROR)
+ if (status != OK)
return 1;
bias += padding;
//printf(" added '%s' at %ld (pad=%d)\n",
@@ -146,13 +146,13 @@
return 1;
}
- if (zin.open(inFileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zin.open(inFileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' as zip archive\n", inFileName);
return 1;
}
if (zout.open(outFileName,
ZipFile::kOpenReadWrite|ZipFile::kOpenCreate|ZipFile::kOpenTruncate)
- != NO_ERROR)
+ != OK)
{
fprintf(stderr, "Unable to open '%s' as zip archive\n", outFileName);
return 1;
@@ -178,7 +178,7 @@
if (verbose)
printf("Verifying alignment of %s (%d)...\n", fileName, alignment);
- if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' for verification\n", fileName);
return 1;
}
diff --git a/tools/zipalign/ZipEntry.cpp b/tools/zipalign/ZipEntry.cpp
index c3c833e..810d74a 100644
--- a/tools/zipalign/ZipEntry.cpp
+++ b/tools/zipalign/ZipEntry.cpp
@@ -48,7 +48,7 @@
/* read the CDE */
result = mCDE.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mCDE.read failed\n");
return result;
}
@@ -64,7 +64,7 @@
}
result = mLFH.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mLFH.read failed\n");
return result;
}
@@ -103,7 +103,7 @@
* can defer worrying about that to when we're extracting data.
*/
- return NO_ERROR;
+ return OK;
}
/*
@@ -189,7 +189,7 @@
mLFH.mExtraFieldLength+1);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -225,7 +225,7 @@
mLFH.mExtraFieldLength = padding;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -403,7 +403,7 @@
*/
status_t ZipEntry::LocalFileHeader::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kLFHLen];
assert(mFileName == NULL);
@@ -499,7 +499,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -537,7 +537,7 @@
*/
status_t ZipEntry::CentralDirEntry::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kCDELen];
/* no re-use */
@@ -669,7 +669,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 9e44956..63fb962 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -120,7 +120,7 @@
* have a need for empty zip files.)
*/
mNeedCDRewrite = true;
- result = NO_ERROR;
+ result = OK;
}
if (flags & kOpenReadOnly)
@@ -205,7 +205,7 @@
*/
status_t ZipFile::readCentralDir(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t* buf = NULL;
off_t fileLength, seekStart;
long readAmount;
@@ -267,7 +267,7 @@
/* extract eocd values */
result = mEOCD.readBuf(buf + i, readAmount - i);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("Failure reading %ld bytes of EOCD values", readAmount - i);
goto bail;
}
@@ -311,7 +311,7 @@
ZipEntry* pEntry = new ZipEntry;
result = pEntry->initFromCDE(mZipFp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("initFromCDE failed\n");
delete pEntry;
goto bail;
@@ -361,7 +361,7 @@
const char* storageName, int compressionMethod, ZipEntry** ppEntry)
{
ZipEntry* pEntry = NULL;
- status_t result = NO_ERROR;
+ status_t result = OK;
long lfhPosn, startPosn, endPosn, uncompressedLen;
FILE* inputFp = NULL;
uint32_t crc;
@@ -415,7 +415,7 @@
if (compressionMethod == ZipEntry::kCompressDeflated) {
bool failed = false;
result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("compression failed, storing\n");
failed = true;
} else {
@@ -447,7 +447,7 @@
} else {
result = copyDataToFp(mZipFp, data, size, &crc);
}
- if (result != NO_ERROR) {
+ if (result != OK) {
// don't need to truncate; happens in CDE rewrite
ALOGD("failed copying data in\n");
goto bail;
@@ -535,11 +535,11 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
if (padding != 0) {
result = pEntry->addPadding(padding);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
}
@@ -574,7 +574,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -603,7 +603,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -642,7 +642,7 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
/*
@@ -682,7 +682,7 @@
}
long startPosn = ftell(mZipFp);
uint32_t crc;
- if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != NO_ERROR) {
+ if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != OK) {
ALOGW("recompress of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
free(buf);
@@ -699,7 +699,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -738,7 +738,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -773,7 +773,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -793,7 +793,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -837,7 +837,7 @@
length -= readSize;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -849,7 +849,7 @@
status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
const void* data, size_t size, uint32_t* pCRC32)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
const size_t kBufSize = 1024 * 1024;
uint8_t* inBuf = NULL;
uint8_t* outBuf = NULL;
@@ -933,7 +933,7 @@
/* mark entry as deleted, and mark archive as dirty */
pEntry->setDeleted();
mNeedCDRewrite = true;
- return NO_ERROR;
+ return OK;
}
/*
@@ -944,19 +944,19 @@
*/
status_t ZipFile::flush(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
long eocdPosn;
int i, count;
if (mReadOnly)
return INVALID_OPERATION;
if (!mNeedCDRewrite)
- return NO_ERROR;
+ return OK;
assert(mZipFp != NULL);
result = crunchArchive();
- if (result != NO_ERROR)
+ if (result != OK)
return result;
if (fseek(mZipFp, mEOCD.mCentralDirOffset, SEEK_SET) != 0)
@@ -986,7 +986,7 @@
/* should we clear the "newly added" flag in all entries now? */
mNeedCDRewrite = false;
- return NO_ERROR;
+ return OK;
}
/*
@@ -997,7 +997,7 @@
*/
status_t ZipFile::crunchArchive(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
int i, count;
long delCount, adjust;
@@ -1065,7 +1065,7 @@
// pEntry->getFileName(), adjust);
result = filemove(mZipFp, pEntry->getLFHOffset() - adjust,
pEntry->getLFHOffset(), span);
- if (result != NO_ERROR) {
+ if (result != OK) {
/* this is why you use a temp file */
ALOGE("error during crunch - archive is toast\n");
return result;
@@ -1097,7 +1097,7 @@
status_t ZipFile::filemove(FILE* fp, off_t dst, off_t src, size_t n)
{
if (dst == src || n <= 0)
- return NO_ERROR;
+ return OK;
uint8_t readBuf[32768];
@@ -1140,7 +1140,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -1355,7 +1355,7 @@
memcpy(mComment, buf + kEOCDLen, mCommentLen);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -1382,7 +1382,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*