Merge "Build: update warn.py for Errorprone 2.3.2"
diff --git a/core/Makefile b/core/Makefile
index 2df67e9..f7f6f35 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -42,11 +42,14 @@
$(eval unique_product_copy_files_destinations += $(_dest))))
# Dump a list of overridden (and ignored) PRODUCT_COPY_FILES entries
-$(file >$(PRODUCT_OUT)/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-ifdef dist_goal
-$(file >$(DIST_DIR)/logs/product_copy_files_ignored.txt,$(subst $(space),$(newline),$(strip $(product_copy_files_ignored))))
-endif
+pcf_ignored_file := $(PRODUCT_OUT)/product_copy_files_ignored.txt
+$(pcf_ignored_file): PRIVATE_IGNORED := $(sort $(product_copy_files_ignored))
+$(pcf_ignored_file):
+ echo "$(PRIVATE_IGNORED)" | tr " " "\n" >$@
+$(call dist-for-goals,droidcore,$(pcf_ignored_file):logs/$(notdir $(pcf_ignored_file)))
+
+pcf_ignored_file :=
product_copy_files_ignored :=
unique_product_copy_files_pairs :=
unique_product_copy_files_destinations :=
@@ -1918,7 +1921,7 @@
build/make/tools/releasetools/build_image.py \
$(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
$(systemimage_intermediates)/generated_system_image_info.txt \
- || ( mkdir -p $(DIST_DIR); cp $(INSTALLED_FILES_FILE) $(DIST_DIR)/installed-files-rescued.txt; \
+ || ( mkdir -p $$(DIST_DIR); cp $(INSTALLED_FILES_FILE) $$(DIST_DIR)/installed-files-rescued.txt; \
exit 1 )
endef
@@ -2657,12 +2660,12 @@
endif
INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES := \
- $(BOARD_AVB_VBMETA_MAINLINE) \
+ $(BOARD_AVB_VBMETA_SYSTEM) \
$(BOARD_AVB_VBMETA_VENDOR)
# Not allowing the same partition to appear in multiple groups.
ifneq ($(words $(sort $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES))),$(words $(INTERNAL_AVB_PARTITIONS_IN_CHAINED_VBMETA_IMAGES)))
- $(error BOARD_AVB_VBMETA_MAINLINE and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
+ $(error BOARD_AVB_VBMETA_SYSTEM and BOARD_AVB_VBMETA_VENDOR cannot have duplicates)
endif
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
@@ -2675,7 +2678,7 @@
ODM_FOOTER_ARGS := BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS
# Helper function that checks and sets required build variables for an AVB chained partition.
-# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_mainline.
+# $(1): the partition to enable AVB chain, e.g., boot or system or vbmeta_system.
define _check-and-set-avb-chain-args
$(eval part := $(1))
$(eval PART=$(call to-upper,$(part)))
@@ -2698,7 +2701,7 @@
--chain_partition $(part):$($(_rollback_index_location)):$(AVB_CHAIN_KEY_DIR)/$(part).avbpubkey)
# Set rollback_index via footer args for non-chained vbmeta image. Chained vbmeta image will pick up
-# the index via a separate flag (e.g. BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX).
+# the index via a separate flag (e.g. BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX).
$(if $(filter $(part),$(part:vbmeta_%=%)),\
$(eval _footer_args := $(PART)_FOOTER_ARGS) \
$(eval $($(_footer_args)) += --rollback_index $($(_rollback_index))))
@@ -2750,9 +2753,9 @@
$(eval $(call check-and-set-avb-args,recovery))
endif
-# Not using INSTALLED_VBMETA_MAINLINEIMAGE_TARGET as it won't be set yet.
-ifdef BOARD_AVB_VBMETA_MAINLINE
-$(eval $(call check-and-set-avb-args,vbmeta_mainline))
+# Not using INSTALLED_VBMETA_SYSTEMIMAGE_TARGET as it won't be set yet.
+ifdef BOARD_AVB_VBMETA_SYSTEM
+$(eval $(call check-and-set-avb-args,vbmeta_system))
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2772,12 +2775,12 @@
endif
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --padding_size 4096
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --padding_size 4096
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --padding_size 4096
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --padding_size 4096
ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --set_hashtree_disabled_flag
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += --set_hashtree_disabled_flag
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += --set_hashtree_disabled_flag
BOARD_AVB_MAKE_VBMETA_VENDOR_IMAGE_ARGS += --set_hashtree_disabled_flag
endif
@@ -2785,9 +2788,9 @@
BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS += --rollback_index $(BOARD_AVB_ROLLBACK_INDEX)
endif
-ifdef BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX
-BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS += \
- --rollback_index $(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX)
+ifdef BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX
+BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS += \
+ --rollback_index $(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX)
endif
ifdef BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX
@@ -2821,9 +2824,9 @@
$(if $(BOARD_AVB_RECOVERY_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_RECOVERY_KEY_PATH) \
--output $(1)/recovery.avbpubkey)
- $(if $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH),\
- $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
- --output $(1)/vbmeta_mainline.avbpubkey)
+ $(if $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH),\
+ $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
+ --output $(1)/vbmeta_system.avbpubkey)
$(if $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
--output $(1)/vbmeta_vendor.avbpubkey)
@@ -2831,11 +2834,11 @@
# Builds a chained VBMeta image. This VBMeta image will contain the descriptors for the partitions
# specified in BOARD_AVB_VBMETA_<NAME>. The built VBMeta image will be included into the top-level
-# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_MAINLINE
-# := system product_services`, `vbmeta_mainline.img` will be created that includes the descriptors
-# for `system.img` and `product_services.img`. `vbmeta_mainline.img` itself will be included into
+# vbmeta image as a chained partition. For example, if a target defines `BOARD_AVB_VBMETA_SYSTEM
+# := system product_services`, `vbmeta_system.img` will be created that includes the descriptors
+# for `system.img` and `product_services.img`. `vbmeta_system.img` itself will be included into
# `vbmeta.img` as a chained partition.
-# $(1): VBMeta image name, such as "vbmeta_mainline", "vbmeta_vendor" etc.
+# $(1): VBMeta image name, such as "vbmeta_system", "vbmeta_vendor" etc.
# $(2): Output filename.
define build-chained-vbmeta-image
$(call pretty,"Target chained vbmeta image: $@")
@@ -2847,13 +2850,13 @@
--output $@
endef
-ifdef BOARD_AVB_VBMETA_MAINLINE
-INSTALLED_VBMETA_MAINLINEIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_mainline.img
-$(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET): \
+ifdef BOARD_AVB_VBMETA_SYSTEM
+INSTALLED_VBMETA_SYSTEMIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta_system.img
+$(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET): \
$(AVBTOOL) \
- $(call images-for-partitions,$(BOARD_AVB_VBMETA_MAINLINE)) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)
- $(call build-chained-vbmeta-image,vbmeta_mainline)
+ $(call images-for-partitions,$(BOARD_AVB_VBMETA_SYSTEM)) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)
+ $(call build-chained-vbmeta-image,vbmeta_system)
endif
ifdef BOARD_AVB_VBMETA_VENDOR
@@ -2891,9 +2894,9 @@
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_VBMETA_MAINLINEIMAGE_TARGET) \
+ $(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET) \
$(INSTALLED_VBMETA_VENDORIMAGE_TARGET) \
- $(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH) \
+ $(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH) \
$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
$(BOARD_AVB_KEY_PATH)
$(build-vbmetaimage-target)
@@ -2939,18 +2942,20 @@
--metadata-size 65536 \
--metadata-slots $(if $(1),2,1) \
--device-size $(BOARD_SUPER_PARTITION_SIZE) \
- $(foreach name,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
- $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
- $(if $(1), --partition $(name)_b:readonly:0) \
- )
+ $(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
+ --group $(group):$(BOARD_$(call to-upper,$(group))_SIZE) \
+ $(foreach name,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST), \
+ --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0):$(group) \
+ $(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
+ $(if $(1), --partition $(name)_b:readonly:0:$(group)) \
+ ))
endef
# $(1): output image path
# $(2): slot A suffix (_a or empty)
# $(3): include images or not (true or empty)
define build-superimage-target
- $(HOST_OUT_EXECUTABLES)/lpmake \
+ $(LPMAKE) \
$(call build-superimage-target-args,$(2),$(3)) \
--output $(1)
endef
@@ -3520,16 +3525,16 @@
$(hide) echo "avb_recovery_algorithm=$(BOARD_AVB_RECOVERY_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
endif # BOARD_AVB_RECOVERY_KEY_PATH
-ifneq (,$(strip $(BOARD_AVB_VBMETA_MAINLINE)))
- $(hide) echo "avb_vbmeta_mainline=$(BOARD_AVB_VBMETA_MAINLINE)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_key_path=$(BOARD_AVB_VBMETA_MAINLINE_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_algorithm=$(BOARD_AVB_VBMETA_MAINLINE_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_mainline_rollback_index_location=$(BOARD_AVB_VBMETA_MAINLINE_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
-endif # BOARD_AVB_VBMETA_MAINLINE
+ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
+ $(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_key_path=$(BOARD_AVB_VBMETA_SYSTEM_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_algorithm=$(BOARD_AVB_VBMETA_SYSTEM_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_system_rollback_index_location=$(BOARD_AVB_VBMETA_SYSTEM_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
+endif # BOARD_AVB_VBMETA_SYSTEM
ifneq (,$(strip $(BOARD_AVB_VBMETA_VENDOR)))
$(hide) echo "avb_vbmeta_vendor=$(BOARD_AVB_VBMETA_VENDOR)" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_MAINLINE_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "avb_vbmeta_vendor_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_key_path=$(BOARD_AVB_VBMETA_VENDOR_KEY_PATH)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_algorithm=$(BOARD_AVB_VBMETA_VENDOR_ALGORITHM)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "avb_vbmeta_vendor_rollback_index_location=$(BOARD_AVB_VBMETA_VENDOR_ROLLBACK_INDEX_LOCATION)" >> $(zip_root)/META/misc_info.txt
@@ -3940,7 +3945,7 @@
MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH := device/generic/goldfish/tools/mk_verified_boot_params.sh
$(QEMU_VERIFIED_BOOT_PARAMS): $(INSTALLED_QEMU_SYSTEMIMAGE) $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(SGDISK_HOST) $(AVBTOOL)
@echo Creating $@
- (export SGDISK=$(SGDISK_HOST) AVBTOOL=$(AVBTOOL); $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_SYSTEMIMAGE_TARGET) $(INSTALLED_QEMU_SYSTEMIMAGE) $(QEMU_VERIFIED_BOOT_PARAMS))
+ (export SGDISK=$(SGDISK_HOST) AVBTOOL=$(AVBTOOL); $(MK_VERIFIED_BOOT_KERNEL_CMDLINE_SH) $(INSTALLED_VBMETAIMAGE_TARGET) $(INSTALLED_QEMU_SYSTEMIMAGE) $(QEMU_VERIFIED_BOOT_PARAMS))
systemimage: $(QEMU_VERIFIED_BOOT_PARAMS)
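
Note on the lpmake hunk above: build-superimage-target-args now iterates BOARD_SUPER_PARTITION_GROUPS, emitting one --group <name>:<size> flag per group and tagging every --partition entry with its group. A minimal Python sketch of that argument assembly, with hypothetical inputs standing in for the Make variables (the real rule also passes --device-size as shown above):

    # Sketch only: 'groups' maps group name -> (group size, {partition: (size, image)}).
    def super_image_args(groups, slot_suffix="", include_images=False):
        args = ["--metadata-size", "65536",
                "--metadata-slots", "2" if slot_suffix else "1"]
        for group, (group_size, partitions) in groups.items():
            args += ["--group", "%s:%d" % (group, group_size)]
            for name, (size, image) in partitions.items():
                args += ["--partition", "%s%s:readonly:%d:%s" %
                         (name, slot_suffix, size if include_images else 0, group)]
                if include_images:
                    args += ["--image", "%s%s=%s" % (name, slot_suffix, image)]
                if slot_suffix:
                    # A/B: reserve an empty _b slot in the same group.
                    args += ["--partition", "%s_b:readonly:0:%s" % (name, group)]
        return args
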
diff --git a/core/android_manifest.mk b/core/android_manifest.mk
index 8e8bfec..8608ca1 100644
--- a/core/android_manifest.mk
+++ b/core/android_manifest.mk
@@ -72,8 +72,15 @@
my_manifest_fixer_flags += --uses-non-sdk-api
endif
$(fixed_android_manifest): PRIVATE_MANIFEST_FIXER_FLAGS := $(my_manifest_fixer_flags)
+# These two libs are added as optional dependencies (<uses-library> with
+# android:required set to false). This is because they didn't exist on pre-P
+# devices, but classes in them were in bootclasspath jars, etc. So making them
+# hard dependencies (android:required=true) would prevent apps from being
+# installed on such legacy devices.
+$(fixed_android_manifest): PRIVATE_OPTIONAL_SDK_LIB_NAMES := android.test.base android.test.mock
$(fixed_android_manifest): $(MANIFEST_FIXER)
$(fixed_android_manifest): $(main_android_manifest)
+ echo $(PRIVATE_OPTIONAL_SDK_LIB_NAMES) | tr ' ' '\n' > $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
@echo "Fix manifest: $@"
$(MANIFEST_FIXER) \
--minSdkVersion $(PRIVATE_MIN_SDK_VERSION) \
@@ -81,5 +88,8 @@
--raise-min-sdk-version \
$(PRIVATE_MANIFEST_FIXER_FLAGS) \
$(if $(PRIVATE_EXPORTED_SDK_LIBS_FILE),\
- $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ')) \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -v -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --uses-library\ /' | tr '\n' ' ') \
+ $$(cat $(PRIVATE_EXPORTED_SDK_LIBS_FILE) | grep -f $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional | sort -u | sed -e 's/^/\ --optional-uses-library\ /' | tr '\n' ' ') \
+ ) \
$< $@
+ rm $(PRIVATE_EXPORTED_SDK_LIBS_FILE).optional
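
The android_manifest.mk hunk splits the exported <uses-library> entries into required and optional sets by grepping against the generated .optional file. Roughly equivalent set logic in Python (names hypothetical; note the real pipeline uses grep -f, i.e. pattern matching rather than exact membership):

    def uses_library_flags(exported, optional):
        flags = []
        for lib in sorted(set(exported) - set(optional)):
            flags += ["--uses-library", lib]
        for lib in sorted(set(exported) & set(optional)):
            flags += ["--optional-uses-library", lib]
        return flags

    # uses_library_flags(["org.apache.http.legacy", "android.test.base"],
    #                    ["android.test.base", "android.test.mock"])
    # -> ['--uses-library', 'org.apache.http.legacy',
    #     '--optional-uses-library', 'android.test.base']
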
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index 3ce64f9..ffb6021 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -33,7 +33,7 @@
TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT := generic
endif
-KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75
+KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75 cortex-a76
KNOWN_ARMv8_CORES += kryo denver64 exynos-m1 exynos-m2
# Many devices (incorrectly) use armv7-a-neon as the 2nd architecture variant
diff --git a/core/config.mk b/core/config.mk
index b9174b3..aeb8aee 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -50,7 +50,7 @@
# Mark variables that should be coming as environment variables from soong_ui
# as readonly
-.KATI_READONLY := OUT_DIR TMPDIR BUILD_DATETIME_FILE
+.KATI_READONLY := OUT_DIR TMPDIR BUILD_DATETIME_FILE DIST_DIR
# Mark variables deprecated/obsolete
CHANGES_URL := https://android.googlesource.com/platform/build/+/master/Changes.md
@@ -958,7 +958,6 @@
requirements := \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
$(foreach req,$(requirements),$(if $(filter false,$($(req))),\
$(error PRODUCT_USE_LOGICAL_PARTITIONS requires $(req) to be true)))
@@ -1169,6 +1168,7 @@
INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-light-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-dark-greylist.txt
INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-blacklist.txt
+INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/hiddenapi-greylist.csv
# Missing optional uses-libraries so that the platform doesn't create build rules that depend on
# them. See setup_one_odex.mk.
diff --git a/core/definitions.mk b/core/definitions.mk
index 5a14826..3538166 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2708,12 +2708,20 @@
--write-greylist $(3) \
--write-greylist 26,28:$(4)
+$(5): $(1) $(CLASS2GREYLIST) $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST)
+ $(CLASS2GREYLIST) --public-api-list $(INTERNAL_PLATFORM_HIDDENAPI_PUBLIC_LIST) $(1) \
+ --write-metadata-csv $(5)
+
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): $(2) $(3) $(4)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_WHITELIST_INPUTS := $$(PRIVATE_WHITELIST_INPUTS) $(2)
$(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST): \
PRIVATE_GREYLIST_INPUTS := $$(PRIVATE_GREYLIST_INPUTS) $(3)
PRIVATE_DARKGREYLIST_INPUTS := $$(PRIVATE_DARKGREYLIST_INPUTS) $(4)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): $(5)
+$(INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA): \
+ PRIVATE_METADATA_INPUTS := $$(PRIVATE_METADATA_INPUTS) $(5)
+
endif
endef
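
The definitions.mk hunk adds a fifth argument to hiddenapi-generate-greylist-txt: a per-module greylist.csv produced by class2greylist and aggregated under INTERNAL_PLATFORM_HIDDENAPI_GREYLIST_METADATA. The generated rule amounts to the following invocation (a sketch; paths are placeholders, flags are taken from the hunk):

    import subprocess

    def write_greylist_metadata(class2greylist, public_api_list, class_jar, out_csv):
        # Mirrors: $(CLASS2GREYLIST) --public-api-list <list> <jar> \
        #            --write-metadata-csv <csv>
        subprocess.check_call([
            class2greylist,
            "--public-api-list", public_api_list,
            class_jar,
            "--write-metadata-csv", out_csv,
        ])
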
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 96e7e2c..f5babb6 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -1036,11 +1036,6 @@
PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES
.KATI_READONLY := COMMON_MODULE_CLASSES PER_ARCH_MODULE_CLASSES
-ifeq (,$(strip $(DIST_DIR)))
- DIST_DIR := $(OUT_DIR)/dist
-endif
-.KATI_READONLY := DIST_DIR
-
ifeq ($(CALLED_FROM_SETUP),true)
PRINT_BUILD_CONFIG ?= true
endif
diff --git a/core/java.mk b/core/java.mk
index c015e4a..30571b7 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -77,6 +77,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
ifeq ($(LOCAL_MODULE_CLASS)$(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),APPS)
# If this is an apk without any Java code (e.g. framework-res), we should skip compiling Java.
@@ -507,8 +508,8 @@
# dex later on. The difference is academic currently, as we don't proguard any
# bootclasspath code at the moment. If we were to do that, we should add keep
# rules for all members with the @UnsupportedAppUsage annotation.
- $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
- LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt)
+ $(eval $(call hiddenapi-generate-greylist-txt, $(full_classes_pre_proguard_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
+ LOCAL_INTERMEDIATE_TARGETS += $(hiddenapi_whitelist_txt) $(hiddenapi_greylist_txt) $(hiddenapi_darkgreylist_txt) $(hiddenapi_greylist_metadata_csv)
$(eval $(call hiddenapi-copy-dex-files,$(built_dex_intermediate),$(built_dex_hiddenapi)))
built_dex_copy_from := $(built_dex_hiddenapi)
else # !is_boot_jar
diff --git a/core/main.mk b/core/main.mk
index 7f673e9..ecb8c0d 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1012,6 +1012,18 @@
product_FILES := $(call product-installed-files, $(INTERNAL_PRODUCT))
# Verify the artifact path requirements made by included products.
+
+ # Fakes don't get installed, and host files are irrelevant.
+ static_whitelist_patterns := $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%
+ # RROs become REQUIRED by the source module, but are always placed on the vendor partition.
+ static_whitelist_patterns += %__auto_generated_rro.apk
+ ifeq (true,$(BOARD_USES_SYSTEM_OTHER_ODEX))
+ # Allow system_other odex space optimization.
+ static_whitelist_patterns += \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.odex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.vdex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.art
+ endif
all_offending_files :=
$(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
$(eval requirements := $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENTS)) \
@@ -1020,10 +1032,7 @@
$(eval path_patterns := $(call resolve-product-relative-paths,$(requirements),%)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval files := $(call product-installed-files, $(makefile))) \
- $(eval files := $(filter-out $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%,$(files))) \
- $(eval # RROs become REQUIRED by the source module, but are always placed on the vendor partition.) \
- $(eval files := $(filter-out %__auto_generated_rro.apk,$(files))) \
- $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns),$(files))) \
+ $(eval offending_files := $(filter-out $(path_patterns) $(whitelist_patterns) $(static_whitelist_patterns),$(files))) \
$(call maybe-print-list-and-error,$(offending_files),$(makefile) produces files outside its artifact path requirement.) \
$(eval unused_whitelist := $(filter-out $(files),$(whitelist_patterns))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(makefile) includes redundant whitelist entries in its artifact path requirement.) \
@@ -1034,9 +1043,13 @@
$(eval whitelist := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST)) \
$(eval whitelist_patterns := $(call resolve-product-relative-paths,$(whitelist))) \
$(eval offending_files := $(filter-out $(whitelist_patterns),$(files_in_requirement))) \
- $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS),\
- $(call maybe-print-list-and-error,$(offending_files),$(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement.) \
- $(eval unused_whitelist := $(filter-out $(extra_files),$(whitelist_patterns))) \
+ $(eval enforcement := $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
+ $(if $(enforcement),\
+ $(call maybe-print-list-and-error,$(offending_files),\
+ $(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement. \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT)) \
+ $(eval unused_whitelist := $(if $(filter true strict,$(enforcement)),\
+ $(foreach p,$(whitelist_patterns),$(if $(filter $(p),$(extra_files)),,$(p))))) \
$(call maybe-print-list-and-error,$(unused_whitelist),$(INTERNAL_PRODUCT) includes redundant artifact path requirement whitelist entries.) \
) \
)
diff --git a/core/product.mk b/core/product.mk
index d1c74e7..f9f8d60 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -205,10 +205,10 @@
PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE \
PRODUCT_USE_LOGICAL_PARTITIONS \
PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS \
+ PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT \
PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST \
PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
PRODUCT_BUILD_SUPER_PARTITION \
- PRODUCT_USE_FASTBOOTD \
PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION \
define dump-product
diff --git a/core/product_config.mk b/core/product_config.mk
index 7cbea91..27af09e 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -525,10 +525,6 @@
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_SUPER_PARTITION)),\
$(PRODUCT_USE_LOGICAL_PARTITIONS))
.KATI_READONLY := PRODUCT_BUILD_SUPER_PARTITION
-PRODUCT_USE_FASTBOOTD := $(or \
- $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_FASTBOOTD)),\
- $(PRODUCT_USE_LOGICAL_PARTITIONS))
-.KATI_READONLY := PRODUCT_USE_FASTBOOTD
# List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
# installed on /system directory by default.
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index 18a09fb..20bfc66 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -22,6 +22,7 @@
hiddenapi_whitelist_txt := $(intermediates.COMMON)/hiddenapi/whitelist.txt
hiddenapi_greylist_txt := $(intermediates.COMMON)/hiddenapi/greylist.txt
hiddenapi_darkgreylist_txt := $(intermediates.COMMON)/hiddenapi/darkgreylist.txt
+hiddenapi_greylist_metadata_csv := $(intermediates.COMMON)/hiddenapi/greylist.csv
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_jar)))
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(full_classes_pre_proguard_jar)))
@@ -79,7 +80,7 @@
# We use full_classes_jar here, which is the post-proguard jar (on the basis that we also
# have a full_classes_pre_proguard_jar). This is consistent with the equivalent code in
# java.mk.
- $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt)))
+ $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
$(eval $(call hiddenapi-copy-soong-jar,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
else # !is_boot_jar
$(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
diff --git a/core/tasks/check_emu_boot.mk b/core/tasks/check_emu_boot.mk
deleted file mode 100644
index 4870677..0000000
--- a/core/tasks/check_emu_boot.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-check_emu_boot0 := $(DIST_DIR)/$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)-emulator-boot-test-result.txt
-$(check_emu_boot0) : PRIVATE_PREFIX := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)
-$(check_emu_boot0) : PRIVATE_EMULATOR_BOOT_TEST_SH := device/generic/goldfish/tools/emulator_boot_test.sh
-$(check_emu_boot0) : PRIVATE_BOOT_COMPLETE_STRING := "emulator: INFO: boot completed"
-$(check_emu_boot0) : PRIVATE_BOOT_FAIL_STRING := "emulator: ERROR: fail to boot after"
-$(check_emu_boot0) : PRIVATE_SUCCESS_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-SUCCESS.txt
-$(check_emu_boot0) : PRIVATE_FAIL_FILE := $(DIST_DIR)/$(PRIVATE_PREFIX)-BOOT-FAIL.txt
-$(check_emu_boot0) : $(INSTALLED_QEMU_SYSTEMIMAGE) $(INSTALLED_QEMU_VENDORIMAGE) \
- $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(PRODUCT_OUT)/userdata.img) \
- $(PRODUCT_OUT)/ramdisk.img device/generic/goldfish/tools/emulator_boot_test.sh
- @mkdir -p $(dir $(check_emu_boot0))
- $(hide) rm -f $(check_emu_boot0)
- $(hide) rm -f $(PRIVATE_SUCCESS_FILE)
- $(hide) rm -f $(PRIVATE_FAIL_FILE)
- (export ANDROID_PRODUCT_OUT=$$(cd $(PRODUCT_OUT);pwd);\
- export ANDROID_BUILD_TOP=$$(pwd);\
- $(PRIVATE_EMULATOR_BOOT_TEST_SH) > $(check_emu_boot0))
- (if grep -q $(PRIVATE_BOOT_COMPLETE_STRING) $(check_emu_boot0);\
- then echo boot_succeeded > $(PRIVATE_SUCCESS_FILE); fi)
- (if grep -q $(PRIVATE_BOOT_FAIL_STRING) $(check_emu_boot0);\
- then echo boot_failed > $(PRIVATE_FAIL_FILE); fi)
-.PHONY: check_emu_boot
-check_emu_boot: $(check_emu_boot0)
diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk
index fdbf6c9..acbe9be 100644
--- a/core/tasks/collect_gpl_sources.mk
+++ b/core/tasks/collect_gpl_sources.mk
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ifdef dist_goal
-
# The rule below doesn't have dependencies on the files that it copies,
-# so manually generate directly into the DIST_DIR directory that is always
-# wiped between dist builds.
-gpl_source_tgz := $(DIST_DIR)/gpl_source.tgz
+# so manually generate into a PACKAGING intermediate dir, which is wiped
+# in installclean between incremental builds on build servers.
+gpl_source_tgz := $(call intermediates-dir-for,PACKAGING,gpl_source)/gpl_source.tgz
# FORCE since we can't know whether any of the sources changed
$(gpl_source_tgz): PRIVATE_PATHS := $(sort $(patsubst %/, %, $(dir $(ALL_GPL_MODULE_LICENSE_FILES))))
@@ -26,8 +24,4 @@
$(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS)
# Dist the tgz only if we are doing a full build
-ifeq (,$(TARGET_BUILD_APPS))
-droidcore: $(gpl_source_tgz)
-endif
-
-endif # dist_goal
+$(call dist-for-goals,droidcore,$(gpl_source_tgz))
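
Both this file and the product_copy_files_ignored rule in core/Makefile now route artifacts through dist-for-goals instead of writing into DIST_DIR directly. Conceptually, dist-for-goals records a (goal, src[:dest]) pair and the copy happens only when that goal is built with dist; a hedged Python model of that behavior (not the real implementation):

    import os
    import shutil

    _dist_requests = []  # (goal, src, dest) tuples

    def dist_for_goals(goal, src, dest=None):
        _dist_requests.append((goal, src, dest or os.path.basename(src)))

    def run_dist(requested_goals, dist_dir):
        for goal, src, dest in _dist_requests:
            if goal in requested_goals:
                target = os.path.join(dist_dir, dest)
                if not os.path.isdir(os.path.dirname(target)):
                    os.makedirs(os.path.dirname(target))
                shutil.copy(src, target)
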
diff --git a/envsetup.sh b/envsetup.sh
index 4579bef..a4d950e 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1574,9 +1574,12 @@
}
# Zsh needs bashcompinit called to support bash-style completion.
-function add_zsh_completion() {
- autoload -U compinit && compinit
- autoload -U bashcompinit && bashcompinit
+function enable_zsh_completion() {
+ # Don't override user's options if bash-style completion is already enabled.
+ if ! declare -f complete >/dev/null; then
+ autoload -U compinit && compinit
+ autoload -U bashcompinit && bashcompinit
+ fi
}
function validate_current_shell() {
@@ -1587,7 +1590,7 @@
;;
*zsh*)
function check_type() { type "$1"; }
- add_zsh_completion ;;
+ enable_zsh_completion ;;
*)
echo -e "WARNING: Only bash and zsh are supported.\nUse of other shell would lead to erroneous results."
;;
diff --git a/target/board/generic_x86_arm/BoardConfig.mk b/target/board/generic_x86_arm/BoardConfig.mk
index d1e4884..8e70b25 100644
--- a/target/board/generic_x86_arm/BoardConfig.mk
+++ b/target/board/generic_x86_arm/BoardConfig.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2016 The Android Open Source Project
+# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,11 +13,7 @@
# limitations under the License.
#
-# Configuration for generic_x86 + arm libraries needed by binary translation.
-
-# The generic product target doesn't have any hardware-specific pieces.
-TARGET_NO_BOOTLOADER := true
-TARGET_NO_KERNEL := true
+# x86 emulator specific definitions
TARGET_CPU_ABI := x86
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
@@ -28,39 +24,27 @@
TARGET_2ND_ARCH_VARIANT := armv7-a
TARGET_2ND_CPU_VARIANT := generic
-# Tell the build system this isn't a typical 64bit+32bit multilib configuration.
+TARGET_CPU_ABI_LIST := x86 armeabi-v7a armeabi
TARGET_TRANSLATE_2ND_ARCH := true
BUILD_BROKEN_DUP_RULES := true
-# no hardware camera
-USE_CAMERA_STUB := true
-# Enable dex-preoptimization to speed up the first boot sequence
-# of an SDK AVD. Note that this operation only works on Linux for now
-ifeq ($(HOST_OS),linux)
- ifeq ($(WITH_DEXPREOPT),)
- WITH_DEXPREOPT := true
- WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY := false
- endif
-endif
+include build/make/target/board/BoardConfigEmuCommon.mk
+include build/make/target/board/BoardConfigGsiCommon.mk
-TARGET_USES_HWC2 := true
-NUM_FRAMEBUFFER_SURFACE_BUFFERS := 3
+# Resize to 4G to accommodate ASAN and CTS
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-# Build OpenGLES emulation host and guest libraries
-BUILD_EMULATOR_OPENGL := true
+BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-# Build and enable the OpenGL ES View renderer. When running on the emulator,
-# the GLES renderer disables itself if host GL acceleration isn't available.
-USE_OPENGL_RENDERER := true
-
-TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1879048192 # 1.75 GB
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
-BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
-BOARD_FLASH_BLOCK_SIZE := 512
-TARGET_USERIMAGES_SPARSE_EXT_DISABLED := true
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/common
+# Wifi.
+BOARD_WLAN_DEVICE := emulator
+BOARD_HOSTAPD_DRIVER := NL80211
+BOARD_WPA_SUPPLICANT_DRIVER := NL80211
+BOARD_HOSTAPD_PRIVATE_LIB := lib_driver_cmd_simulated
+BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
+WPA_SUPPLICANT_VERSION := VER_0_8_X
+WIFI_DRIVER_FW_PATH_PARAM := "/dev/null"
+WIFI_DRIVER_FW_PATH_STA := "/dev/null"
+WIFI_DRIVER_FW_PATH_AP := "/dev/null"
diff --git a/target/board/generic_x86_arm/README.txt b/target/board/generic_x86_arm/README.txt
new file mode 100644
index 0000000..05f7ca2
--- /dev/null
+++ b/target/board/generic_x86_arm/README.txt
@@ -0,0 +1,10 @@
+The "generic_x86_arm" product defines a non-hardware-specific IA target
+without a kernel or bootloader.
+
+It can be used to build the entire user-level system, and
+will work with the IA version of the emulator.
+
+It is not a product "base class"; no other products inherit
+from it or use it in any way.
+
+A third-party ARM-to-x86 translator has to be installed as well.
diff --git a/target/board/generic_x86_arm/device.mk b/target/board/generic_x86_arm/device.mk
new file mode 100644
index 0000000..0a32415
--- /dev/null
+++ b/target/board/generic_x86_arm/device.mk
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# NFC:
+# Provide a default libnfc-nci.conf file for devices that do not have one in
+# vendor/etc, because the AOSP system image (of aosp_$arch products) is going
+# to be used as a GSI.
+# This may need to be removed for devices newly launched in P, since the NFC
+# configuration file should be in vendor/etc instead of system/etc.
+PRODUCT_COPY_FILES += \
+ device/generic/common/nfc/libnfc-nci.conf:system/etc/libnfc-nci.conf
diff --git a/target/board/generic_x86_arm/system.prop b/target/board/generic_x86_arm/system.prop
new file mode 100644
index 0000000..64829f3
--- /dev/null
+++ b/target/board/generic_x86_arm/system.prop
@@ -0,0 +1,5 @@
+#
+# system.prop for generic sdk
+#
+
+rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/product/aosp_x86_arm.mk b/target/product/aosp_x86_arm.mk
index 19f57e8..b921c97 100644
--- a/target/product/aosp_x86_arm.mk
+++ b/target/product/aosp_x86_arm.mk
@@ -17,27 +17,32 @@
# aosp_x86 with arm libraries needed by binary translation.
+# The system image of aosp_x86_arm-userdebug is a GSI for devices with:
+# - x86 32 bits user space
+# - 64 bits binder interface
+# - system-as-root
+# - VNDK enforcement
+# - compatible property override enabled
+
+-include device/generic/goldfish/x86-vendor.mk
+
include $(SRC_TARGET_DIR)/product/full_x86.mk
-# arm libraries. This is the list of shared libraries included in the NDK.
-# Their dependency libraries will be automatically pulled in.
+# Enable dynamic partition size
+PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
+
+# Enable A/B update
+AB_OTA_UPDATER := true
+AB_OTA_PARTITIONS := system
PRODUCT_PACKAGES += \
- libandroid_arm \
- libaaudio_arm \
- libc_arm \
- libdl_arm \
- libEGL_arm \
- libGLESv1_CM_arm \
- libGLESv2_arm \
- libGLESv3_arm \
- libjnigraphics_arm \
- liblog_arm \
- libm_arm \
- libmediandk_arm \
- libOpenMAXAL_arm \
- libstdc++_arm \
- libOpenSLES_arm \
- libz_arm \
+ update_engine \
+ update_verifier
+
+# Needed by devices newly launched in P to pass VtsTrebleSysProp on GSI
+PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
+
+# Support additional P vendor interface
+PRODUCT_EXTRA_VNDK_VERSIONS := 28
PRODUCT_NAME := aosp_x86_arm
PRODUCT_DEVICE := generic_x86_arm
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 11f5fe4..57e981f 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -85,9 +85,9 @@
incidentd \
incident_helper \
incident_report \
- init \
init.environ.rc \
init.rc \
+ init_system \
input \
installd \
iorapd \
diff --git a/target/product/base_vendor.mk b/target/product/base_vendor.mk
index 1b25f27..9bb45d1 100644
--- a/target/product/base_vendor.mk
+++ b/target/product/base_vendor.mk
@@ -34,6 +34,7 @@
fs_config_dirs_nonsystem \
gralloc.default \
group \
+ init_vendor \
libbundlewrapper \
libclearkeycasplugin \
libdownmix \
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 8d0611f..ed6dcc9 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -27,6 +27,7 @@
DMService \
LiveWallpapersPicker \
PartnerBookmarksProvider \
+ PresencePolling \
RcsService \
SafetyRegulatoryInfo \
Stk \
@@ -40,6 +41,10 @@
PRODUCT_PACKAGES += \
netutils-wrapper-1.0 \
+# Charger images
+PRODUCT_PACKAGES += \
+ charger_res_images \
+
# system_other support
PRODUCT_PACKAGES += \
cppreopts.sh \
@@ -50,22 +55,28 @@
audio.a2dp.default \
audio.hearing_aid.default \
+PRODUCT_PACKAGES_DEBUG += \
+ avbctl \
+ bootctl \
+ tinyplay \
+ tinycap \
+ tinymix \
+ tinypcminfo \
+ update_engine_client \
+
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
PRODUCT_NAME := mainline_system
PRODUCT_BRAND := generic
-PRODUCT_SHIPPING_API_LEVEL := 28
_base_mk_whitelist :=
_my_whitelist := $(_base_mk_whitelist)
-# Both /system and / are in system.img when PRODUCT_SHIPPING_API_LEVEL>=28.
-# Though we do have a new ramdisk partition for logical partitions.
+# For mainline, system.img should be mounted at /, so we include ROOT here.
_my_paths := \
- $(TARGET_COPY_OUT_ROOT) \
- $(TARGET_COPY_OUT_SYSTEM) \
- $(TARGET_COPY_OUT_RAMDISK) \
+ $(TARGET_COPY_OUT_ROOT)/ \
+ $(TARGET_COPY_OUT_SYSTEM)/ \
$(call require-artifacts-in-path, $(_my_paths), $(_my_whitelist))
diff --git a/tools/atree/files.cpp b/tools/atree/files.cpp
index d5c8a97..b90f8b3 100644
--- a/tools/atree/files.cpp
+++ b/tools/atree/files.cpp
@@ -81,7 +81,7 @@
state = TEXT;
break;
}
- // otherwise fall-through to TEXT case
+ [[fallthrough]];
case TEXT:
if (state != IN_QUOTE && isspace(*p)) {
if (q != p) {
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 2fa5f52..1e8677c 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -46,6 +46,7 @@
from __future__ import print_function
import datetime
+import logging
import os
import shlex
import shutil
@@ -62,8 +63,9 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-OPTIONS = common.OPTIONS
+logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
OPTIONS.replace_updated_files_list = []
@@ -127,7 +129,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
if os.path.exists(img.input_name):
- print("system.img already exists; no need to rebuild...")
+ logger.info("system.img already exists; no need to rebuild...")
return img.input_name
def output_sink(fn, data):
@@ -142,7 +144,7 @@
common.ZipWrite(output_zip, ofile.name, arc_name)
if OPTIONS.rebuild_recovery:
- print("Building new recovery patch")
+ logger.info("Building new recovery patch")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
@@ -159,7 +161,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
if os.path.exists(img.input_name):
- print("system_other.img already exists; no need to rebuild...")
+ logger.info("system_other.img already exists; no need to rebuild...")
return
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
@@ -171,7 +173,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
if os.path.exists(img.input_name):
- print("vendor.img already exists; no need to rebuild...")
+ logger.info("vendor.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
@@ -186,7 +188,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
if os.path.exists(img.input_name):
- print("product.img already exists; no need to rebuild...")
+ logger.info("product.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -204,7 +206,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
"product_services.img")
if os.path.exists(img.input_name):
- print("product_services.img already exists; no need to rebuild...")
+ logger.info("product_services.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -220,7 +222,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img")
if os.path.exists(img.input_name):
- print("odm.img already exists; no need to rebuild...")
+ logger.info("odm.img already exists; no need to rebuild...")
return img.input_name
block_list = OutputFile(
@@ -239,7 +241,7 @@
"""
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
if os.path.exists(img.input_name):
- print("dtbo.img already exists; no need to rebuild...")
+ logger.info("dtbo.img already exists; no need to rebuild...")
return img.input_name
dtbo_prebuilt_path = os.path.join(
@@ -269,7 +271,7 @@
def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
- print("creating " + what + ".img...")
+ logger.info("creating " + what + ".img...")
image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"]
@@ -340,7 +342,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
if os.path.exists(img.input_name):
- print("userdata.img already exists; no need to rebuild...")
+ logger.info("userdata.img already exists; no need to rebuild...")
return
# Skip userdata.img if no size.
@@ -348,7 +350,7 @@
if not image_props.get("partition_size"):
return
- print("creating userdata.img...")
+ logger.info("creating userdata.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -399,7 +401,7 @@
partitions: A dict that's keyed by partition names with image paths as
values. Only valid partition names are accepted, as listed in
common.AVB_PARTITIONS.
- name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_mainline'.
+ name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
needed_partitions: Partitions whose descriptors should be included into the
generated VBMeta image.
@@ -411,7 +413,7 @@
img = OutputFile(
output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name))
if os.path.exists(img.input_name):
- print("{}.img already exists; not rebuilding...".format(name))
+ logger.info("%s.img already exists; not rebuilding...", name)
return img.input_name
avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -495,7 +497,7 @@
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
if os.path.exists(img.input_name):
- print("cache.img already exists; no need to rebuild...")
+ logger.info("cache.img already exists; no need to rebuild...")
return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -503,7 +505,7 @@
if "fs_type" not in image_props:
return
- print("creating cache.img...")
+ logger.info("creating cache.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -580,8 +582,7 @@
present_props = [x for x in prop_name_list if x in build_props]
if not present_props:
- print("Warning: fingerprint is not present for partition {}".
- format(partition))
+ logger.warning("fingerprint is not present for partition %s", partition)
property_id, fingerprint = "unknown", "unknown"
else:
property_id = present_props[0]
@@ -633,7 +634,7 @@
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
if os.path.exists(prebuilt_path):
- print("%s already exists, no need to overwrite..." % (img_name,))
+ logger.info("%s already exists, no need to overwrite...", img_name)
continue
img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -698,7 +699,7 @@
if not OPTIONS.add_missing:
if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
- print("target_files appears to already contain images.")
+ logger.warning("target_files appears to already contain images.")
sys.exit(1)
OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
@@ -748,7 +749,7 @@
partitions = dict()
def banner(s):
- print("\n\n++++ " + s + " ++++\n\n")
+ logger.info("\n\n++++ " + s + " ++++\n\n")
banner("boot")
# common.GetBootableImage() returns the image directly if present.
@@ -832,15 +833,15 @@
# chained VBMeta image plus the chained VBMeta images themselves.
vbmeta_partitions = common.AVB_PARTITIONS[:]
- vbmeta_mainline = OPTIONS.info_dict.get("avb_vbmeta_mainline", "").strip()
- if vbmeta_mainline:
- banner("vbmeta_mainline")
+ vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip()
+ if vbmeta_system:
+ banner("vbmeta_system")
AddVBMeta(
- output_zip, partitions, "vbmeta_mainline", vbmeta_mainline.split())
+ output_zip, partitions, "vbmeta_system", vbmeta_system.split())
vbmeta_partitions = [
item for item in vbmeta_partitions
- if item not in vbmeta_mainline.split()]
- vbmeta_partitions.append("vbmeta_mainline")
+ if item not in vbmeta_system.split()]
+ vbmeta_partitions.append("vbmeta_system")
vbmeta_vendor = OPTIONS.info_dict.get("avb_vbmeta_vendor", "").strip()
if vbmeta_vendor:
@@ -912,20 +913,21 @@
"is_signing"],
extra_option_handler=option_handler)
-
if len(args) != 1:
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
AddImagesToTargetFiles(args[0])
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
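
The vbmeta_system renaming aside, the interesting logic in this hunk is the partition-list surgery: partitions covered by a chained VBMeta image are dropped from the top-level vbmeta and replaced by the chained image itself. As a standalone sketch (AVB_PARTITIONS is truncated here for illustration):

    AVB_PARTITIONS = ["boot", "dtbo", "recovery", "system", "vendor"]  # subset

    def top_level_vbmeta_partitions(info_dict):
        partitions = AVB_PARTITIONS[:]
        for chained in ("vbmeta_system", "vbmeta_vendor"):
            members = info_dict.get("avb_" + chained, "").strip().split()
            if members:
                # The members' descriptors live in the chained image, so chain
                # the sub-image instead of including them directly.
                partitions = [p for p in partitions if p not in members]
                partitions.append(chained)
        return partitions

    # top_level_vbmeta_partitions({"avb_vbmeta_system": "system"})
    # -> ['boot', 'dtbo', 'recovery', 'vendor', 'vbmeta_system']
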
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 189dba2..2d20e23 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -19,6 +19,7 @@
import functools
import heapq
import itertools
+import logging
import multiprocessing
import os
import os.path
@@ -33,6 +34,8 @@
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+logger = logging.getLogger(__name__)
+
def compute_patch(srcfile, tgtfile, imgdiff=False):
patchfile = common.MakeTempFile(prefix='patch-')
@@ -304,8 +307,8 @@
"""Prints a report of the collected imgdiff stats."""
def print_header(header, separator):
- print(header)
- print(separator * len(header) + '\n')
+ logger.info(header)
+ logger.info(separator * len(header) + '\n')
print_header(' Imgdiff Stats Report ', '=')
for key in self.REASONS:
@@ -314,7 +317,7 @@
values = self.stats[key]
section_header = ' {} (count: {}) '.format(key, len(values))
print_header(section_header, '-')
- print(''.join([' {}\n'.format(name) for name in values]))
+ logger.info(''.join([' {}\n'.format(name) for name in values]))
class BlockImageDiff(object):
@@ -482,7 +485,7 @@
self.WriteTransfers(prefix)
# Report the imgdiff stats.
- if common.OPTIONS.verbose and not self.disable_imgdiff:
+ if not self.disable_imgdiff:
self.imgdiff_stats.Report()
def WriteTransfers(self, prefix):
@@ -692,16 +695,17 @@
OPTIONS = common.OPTIONS
if OPTIONS.cache_size is not None:
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), "
- "limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n",
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed)
else:
- print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ logger.info(
+ "max stashed blocks: %d (%d bytes), limit: <unknown>\n",
+ max_stashed_blocks, self._max_stashed_size)
def ReviseStashSize(self):
- print("Revising stash size...")
+ logger.info("Revising stash size...")
stash_map = {}
# Create the map between a stash and its def/use points. For example, for a
@@ -746,7 +750,7 @@
# that will use this stash and replace the command with "new".
use_cmd = stash_map[stash_raw_id][2]
replaced_cmds.append(use_cmd)
- print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+ logger.info("%10d %9s %s", sr.size(), "explicit", use_cmd)
else:
# Update the stashes map.
if sh in stashes:
@@ -762,7 +766,7 @@
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
replaced_cmds.append(xf)
- print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+ logger.info("%10d %9s %s", xf.src_ranges.size(), "implicit", xf)
# Replace the commands in replaced_cmds with "new"s.
for cmd in replaced_cmds:
@@ -788,28 +792,29 @@
stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize
- print(" Total %d blocks (%d bytes) are packed as new blocks due to "
- "insufficient cache size." % (new_blocks, num_of_bytes))
+ logger.info(
+ " Total %d blocks (%d bytes) are packed as new blocks due to "
+ "insufficient cache size.", new_blocks, num_of_bytes)
return new_blocks
def ComputePatches(self, prefix):
- print("Reticulating splines...")
+ logger.info("Reticulating splines...")
diff_queue = []
patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f:
for index, xf in enumerate(self.transfers):
if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
- str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "new":
self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s" % (
- tgt_size, tgt_size, 100.0, xf.style,
- xf.tgt_name, str(xf.tgt_ranges)))
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+ xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "diff":
# We can't compare src and tgt directly because they may have
@@ -827,11 +832,12 @@
xf.patch = None
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
if xf.src_ranges != xf.tgt_ranges:
- print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
- tgt_size, tgt_size, 100.0, xf.style,
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s (from %s)", tgt_size, tgt_size,
+ 100.0, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"),
- str(xf.tgt_ranges), str(xf.src_ranges)))
+ str(xf.tgt_ranges), str(xf.src_ranges))
else:
if xf.patch:
# We have already generated the patch with imgdiff, while
@@ -850,9 +856,9 @@
if diff_queue:
if self.threads > 1:
- print("Computing patches (using %d threads)..." % (self.threads,))
+ logger.info("Computing patches (using %d threads)...", self.threads)
else:
- print("Computing patches...")
+ logger.info("Computing patches...")
diff_total = len(diff_queue)
patches = [None] * diff_total
@@ -874,13 +880,6 @@
xf_index, imgdiff, patch_index = diff_queue.pop()
xf = self.transfers[xf_index]
- if sys.stdout.isatty():
- diff_left = len(diff_queue)
- progress = (diff_total - diff_left) * 100 / diff_total
- # '\033[K' is to clear to EOL.
- print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
- sys.stdout.flush()
-
patch = xf.patch
if not patch:
src_ranges = xf.src_ranges
@@ -918,13 +917,10 @@
while threads:
threads.pop().join()
- if sys.stdout.isatty():
- print('\n')
-
if error_messages:
- print('ERROR:')
- print('\n'.join(error_messages))
- print('\n\n\n')
+ logger.error('ERROR:')
+ logger.error('\n'.join(error_messages))
+ logger.error('\n\n\n')
sys.exit(1)
else:
patches = []
@@ -938,14 +934,13 @@
offset += xf.patch_len
patch_fd.write(patch)
- if common.OPTIONS.verbose:
- tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
- print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
- xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
- xf.style,
- xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")"),
- xf.tgt_ranges, xf.src_ranges))
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ logger.info(
+ "%10d %10d (%6.2f%%) %7s %s %s %s", xf.patch_len, tgt_size,
+ xf.patch_len * 100.0 / tgt_size, xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ xf.tgt_ranges, xf.src_ranges)
def AssertSha1Good(self):
"""Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -1005,7 +1000,7 @@
assert touched[i] == 1
def ImproveVertexSequence(self):
- print("Improving vertex order...")
+ logger.info("Improving vertex order...")
# At this point our digraph is acyclic; we reversed any edges that
# were backwards in the heuristically-generated sequence. The
@@ -1057,7 +1052,7 @@
blocks will be written to the same stash slot in WriteTransfers().
"""
- print("Reversing backward edges...")
+ logger.info("Reversing backward edges...")
in_order = 0
out_of_order = 0
stash_raw_id = 0
@@ -1089,15 +1084,15 @@
xf.goes_after[u] = None # value doesn't matter
u.goes_before[xf] = None
- print((" %d/%d dependencies (%.2f%%) were violated; "
- "%d source blocks stashed.") %
- (out_of_order, in_order + out_of_order,
- (out_of_order * 100.0 / (in_order + out_of_order))
- if (in_order + out_of_order) else 0.0,
- stash_size))
+ logger.info(
+ " %d/%d dependencies (%.2f%%) were violated; %d source blocks "
+ "stashed.", out_of_order, in_order + out_of_order,
+ (out_of_order * 100.0 / (in_order + out_of_order)) if (
+ in_order + out_of_order) else 0.0,
+ stash_size)
def FindVertexSequence(self):
- print("Finding vertex sequence...")
+ logger.info("Finding vertex sequence...")
# This is based on "A Fast & Effective Heuristic for the Feedback
# Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of
@@ -1210,7 +1205,7 @@
self.transfers = new_transfers
def GenerateDigraph(self):
- print("Generating digraph...")
+ logger.info("Generating digraph...")
# Each item of source_ranges will be:
# - None, if that block is not used as a source,
@@ -1376,9 +1371,9 @@
if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size
- print('%10d %10d (%6.2f%%) %s' % (
- tgt_skipped.size(), tgt_size,
- tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+ logger.info(
+ '%10d %10d (%6.2f%%) %s', tgt_skipped.size(), tgt_size,
+ tgt_skipped.size() * 100.0 / tgt_size, tgt_name)
AddSplitTransfers(
"%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,),
@@ -1519,7 +1514,7 @@
split_src_ranges,
patch_content))
- print("Finding transfers...")
+ logger.info("Finding transfers...")
large_apks = []
split_large_apks = []
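
The blockimgdiff.py conversion follows one mechanical pattern worth noting: print() becomes a module-level logger call, and values are passed as lazy %-style arguments rather than pre-formatted strings, so interpolation only happens when a handler actually emits the record. In miniature:

    import logging

    logger = logging.getLogger(__name__)

    def log_transfer(tgt_size, style, name, ranges):
        # The %-args are interpolated only if INFO records are emitted.
        logger.info("%10d %10d (%6.2f%%) %7s %s %s",
                    tgt_size, tgt_size, 100.0, style, name, ranges)
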
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 43c91da..4a013c2 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -26,6 +26,7 @@
from __future__ import print_function
+import logging
import os
import os.path
import re
@@ -35,6 +36,8 @@
import common
import verity_utils
+logger = logging.getLogger(__name__)
+
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
BYTES_IN_MB = 1024 * 1024
@@ -228,8 +231,8 @@
"partition_size" not in prop_dict):
# If partition_size is not defined, use output of `du' + reserved_size.
size = GetDiskUsage(in_dir)
- if OPTIONS.verbose:
- print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB))
+ logger.info(
+ "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
size += int(prop_dict.get("partition_reserved_size", 0))
# Round this up to a multiple of 4K so that avbtool works
size = common.RoundUpTo4K(size)
@@ -241,8 +244,8 @@
lambda x: verity_utils.AVBCalcMaxImageSize(
avbtool, avb_footer_type, x, avb_signing_args))
prop_dict["partition_size"] = str(size)
- if OPTIONS.verbose:
- print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
+ logger.info(
+ "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
prop_dict["image_size"] = prop_dict["partition_size"]
@@ -350,8 +353,8 @@
du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
# Suppress any errors from GetDiskUsage() to avoid hiding the real errors
# from common.RunAndCheckOutput().
- except Exception as e: # pylint: disable=broad-except
- print(e, file=sys.stderr)
+ except Exception: # pylint: disable=broad-except
+ logger.exception("Failed to compute disk usage with du")
du_str = "unknown"
print(
"Out of space? The tree size of {} is {}, with reserved space of {} "
@@ -664,6 +667,8 @@
print(__doc__)
sys.exit(1)
+ common.InitLogging()
+
in_dir = argv[0]
glob_dict_file = argv[1]
out_file = argv[2]
@@ -697,7 +702,7 @@
elif image_filename == "product_services.img":
mount_point = "product_services"
else:
- print("error: unknown image file name ", image_filename, file=sys.stderr)
+ logger.error("Unknown image file name %s", image_filename)
sys.exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
@@ -705,14 +710,14 @@
try:
BuildImage(in_dir, image_properties, out_file, target_out)
except:
- print("Error: Failed to build {} from {}".format(out_file, in_dir),
- file=sys.stderr)
+ logger.error("Failed to build %s from %s", out_file, in_dir)
raise
if prop_file_out:
glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
SaveGlobalDict(prop_file_out, glob_dict_out)
+
if __name__ == '__main__':
try:
main(sys.argv[1:])
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index a580709..7d3424b 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -21,16 +21,18 @@
from __future__ import print_function
import argparse
+import logging
import re
import subprocess
import sys
import zipfile
-
from hashlib import sha1
from hashlib import sha256
import common
+logger = logging.getLogger(__name__)
+
def CertUsesSha256(cert):
"""Check if the cert uses SHA-256 hashing algorithm."""
@@ -181,6 +183,8 @@
parser.add_argument('package', help='The OTA package to be verified.')
args = parser.parse_args()
+ common.InitLogging()
+
VerifyPackage(args.certificate, args.package)
VerifyAbOtaPayload(args.certificate, args.package)
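Each script initializes logging exactly once, after its options are parsed. A stdlib-only sketch of the same ordering; the argparse setup and the basicConfig call standing in for common.InitLogging() are illustrative, though the format string matches the default defined in common.py below:

    import argparse
    import logging

    logger = logging.getLogger(__name__)

    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument('package', help='The OTA package to be verified.')
        args = parser.parse_args()

        # Stand-in for common.InitLogging(): configure handlers once,
        # after options are parsed and before any logging call that matters.
        logging.basicConfig(
            format='%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
            level=logging.WARNING)

        logger.warning("Verifying %s", args.package)

    if __name__ == '__main__':
        main()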
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 7cca766..fe63458 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -20,6 +20,10 @@
+import copy
import getpass
import gzip
import imp
+import json
+import logging
+import logging.config
import os
import platform
import re
@@ -37,6 +40,8 @@
import blockimgdiff
import sparse_img
+logger = logging.getLogger(__name__)
+
class Options(object):
def __init__(self):
@@ -121,13 +126,53 @@
pass
+def InitLogging():
+ DEFAULT_LOGGING_CONFIG = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'format':
+ '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
+ 'datefmt': '%Y-%m-%d %H:%M:%S',
+ },
+ },
+ 'handlers': {
+ 'default': {
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'standard',
+ },
+ },
+ 'loggers': {
+ '': {
+ 'handlers': ['default'],
+ 'level': 'WARNING',
+ 'propagate': True,
+ }
+ }
+ }
+ env_config = os.getenv('LOGGING_CONFIG')
+ if env_config:
+ with open(env_config) as f:
+ config = json.load(f)
+ else:
+ config = DEFAULT_LOGGING_CONFIG
+
+    # Increase the logging level for verbose mode.
+    if OPTIONS.verbose:
+      config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
+      config['loggers']['']['level'] = 'INFO'
+
+ logging.config.dictConfig(config)
+
+
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
Args:
args: The command represented as a list of strings.
- verbose: Whether the commands should be shown (default to OPTIONS.verbose
- if unspecified).
+  verbose: Whether the commands should be shown. Defaults to the global
+      verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
@@ -135,13 +180,12 @@
Returns:
A subprocess.Popen object.
"""
- if verbose is None:
- verbose = OPTIONS.verbose
if 'stdout' not in kwargs and 'stderr' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
- if verbose:
- print(" Running: \"{}\"".format(" ".join(args)))
+  # Don't log anything if the caller explicitly says not to.
+  if verbose is not False:
+ logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs)
@@ -150,8 +194,8 @@
Args:
args: The command represented as a list of strings.
- verbose: Whether the commands should be shown (default to OPTIONS.verbose
- if unspecified).
+  verbose: Whether the commands should be shown. Defaults to the global
+      verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
@@ -162,12 +206,11 @@
Raises:
ExternalError: On non-zero exit from the command.
"""
- if verbose is None:
- verbose = OPTIONS.verbose
proc = Run(args, verbose=verbose, **kwargs)
output, _ = proc.communicate()
- if verbose:
- print("{}".format(output.rstrip()))
+  # Don't log anything if the caller explicitly says not to.
+  if verbose is not False:
+ logger.info("%s", output.rstrip())
if proc.returncode != 0:
raise ExternalError(
"Failed to run command '{}' (exit code {}):\n{}".format(
@@ -277,8 +320,8 @@
if os.path.exists(system_base_fs_file):
d["system_base_fs_file"] = system_base_fs_file
else:
- print("Warning: failed to find system base fs file: %s" % (
- system_base_fs_file,))
+ logger.warning(
+ "Failed to find system base fs file: %s", system_base_fs_file)
del d["system_base_fs_file"]
if "vendor_base_fs_file" in d:
@@ -287,8 +330,8 @@
if os.path.exists(vendor_base_fs_file):
d["vendor_base_fs_file"] = vendor_base_fs_file
else:
- print("Warning: failed to find vendor base fs file: %s" % (
- vendor_base_fs_file,))
+ logger.warning(
+ "Failed to find vendor base fs file: %s", vendor_base_fs_file)
del d["vendor_base_fs_file"]
def makeint(key):
@@ -364,7 +407,7 @@
try:
data = read_helper(prop_file)
except KeyError:
- print("Warning: could not read %s" % (prop_file,))
+ logger.warning("Failed to read %s", prop_file)
data = ""
return LoadDictionaryFromLines(data.split("\n"))
@@ -394,7 +437,7 @@
try:
data = read_helper(recovery_fstab_path)
except KeyError:
- print("Warning: could not find {}".format(recovery_fstab_path))
+ logger.warning("Failed to find %s", recovery_fstab_path)
data = ""
assert fstab_version == 2
@@ -447,7 +490,7 @@
def DumpInfoDict(d):
for k, v in sorted(d.items()):
- print("%-25s = (%s) %s" % (k, type(v).__name__, v))
+ logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
def AppendAVBSigningArgs(cmd, partition):
@@ -657,15 +700,15 @@
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
+ logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
- print("building image from target_files %s..." % (tree_subdir,))
+ logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None:
info_dict = OPTIONS.info_dict
@@ -1001,9 +1044,9 @@
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
- print("\n WARNING: %s\n" % (msg,))
- elif OPTIONS.verbose:
- print(" ", msg)
+ logger.warning("\n WARNING: %s\n", msg)
+ else:
+ logger.info(" %s", msg)
def ReadApkCerts(tf_zip):
@@ -1302,13 +1345,13 @@
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
- print("failed to parse password file: ", line)
+ logger.warning("Failed to parse password file: %s", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
- print("error reading password file: ", str(e))
+ logger.exception("Error reading password file:")
return result
@@ -1452,10 +1495,10 @@
if x == ".py":
f = b
info = imp.find_module(f, [d])
- print("loaded device-specific extensions from", path)
+ logger.info("loaded device-specific extensions from %s", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
- print("unable to load device-specific module; assuming none")
+ logger.info("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
@@ -1597,7 +1640,7 @@
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
- print("WARNING: diff command timed out")
+ logger.warning("diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
@@ -1605,8 +1648,7 @@
th.join()
if p.returncode != 0:
- print("WARNING: failure running %s:\n%s\n" % (
- diff_program, "".join(err)))
+ logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
self.patch = None
return None, None, None
diff = ptemp.read()
@@ -1630,7 +1672,7 @@
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
- print(len(diffs), "diffs to compute")
+ logger.info("%d diffs to compute", len(diffs))
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
@@ -1656,14 +1698,14 @@
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
- print(
- "patching failed! %s" % (name,))
+ logger.error("patching failed! %40s", name)
else:
- print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
- dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
+ logger.info(
+ "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+ tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
- except Exception as e:
- print(e)
+ except Exception:
+ logger.exception("Failed to compute diff from worker")
raise
# start worker threads; wait for them all to finish.
@@ -2086,6 +2128,6 @@
# in the L release.
sh_location = "bin/install-recovery.sh"
- print("putting script in", sh_location)
+ logger.info("putting script in %s", sh_location)
output_sink(sh_location, sh)
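InitLogging() above also honors a LOGGING_CONFIG environment variable naming a JSON file in logging.config.dictConfig's schema. A sketch of supplying such an override; the exact keys shown are one plausible shape, not the only valid one:

    import json
    import logging.config
    import os
    import tempfile

    # Hypothetical override mirroring what InitLogging() reads from the
    # file named by $LOGGING_CONFIG; any schema-version-1 dict works.
    override = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {
            'default': {'class': 'logging.StreamHandler'},
        },
        'loggers': {
            '': {'handlers': ['default'], 'level': 'DEBUG'},
        },
    }

    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
        json.dump(override, f)

    os.environ['LOGGING_CONFIG'] = f.name

    # The same load path InitLogging() takes when the variable is set:
    with open(os.environ['LOGGING_CONFIG']) as f:
        logging.config.dictConfig(json.load(f))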
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 01ff149..0156b72 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -28,6 +28,7 @@
from __future__ import print_function
+import logging
import os
import shutil
import sys
@@ -39,6 +40,7 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
@@ -72,6 +74,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
CopyInfo(output_zip)
@@ -90,11 +94,11 @@
common.ZipWrite(output_zip, os.path.join(images_path, image), image)
finally:
- print("cleaning up...")
+ logger.info("cleaning up...")
common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
@@ -102,5 +106,5 @@
common.CloseInheritedPipes()
main(sys.argv[1:])
-  except common.ExternalError as e:
-    print("\n   ERROR: %s\n" % (e,))
+  except common.ExternalError:
+    logger.exception("\n   ERROR:\n")
sys.exit(1)
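logger.exception() differs from the print(e) it replaces: it logs the message at ERROR level and appends the active traceback, and it must be called from inside an except block. A minimal demonstration:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        raise ValueError("boom")
    except ValueError:
        # Logs at ERROR level and attaches the current traceback, which
        # the old print(e) form silently dropped.
        logger.exception("\n   ERROR:\n")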
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 7c6007e..725b355 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -16,24 +16,27 @@
from __future__ import print_function
+import logging
+import os
import sys
+import common
+
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
-import os
-import common
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
-def main(argv):
- # def option_handler(o, a):
- # return False
+def main(argv):
args = common.ParseOptions(argv, __doc__)
input_dir, output_dir = args
+ common.InitLogging()
+
OPTIONS.info_dict = common.LoadInfoDict(input_dir)
recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
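The ordering above matters: common.ParseOptions() populates OPTIONS.verbose before common.InitLogging() reads it to choose between WARNING and INFO. A stdlib-only sketch of that dependency, with illustrative names:

    import logging

    VERBOSE = True  # stand-in for OPTIONS.verbose after option parsing

    def init_logging(verbose):
        # Mirrors InitLogging(): WARNING by default, raised to INFO by -v.
        logging.basicConfig(level=logging.INFO if verbose else logging.WARNING)

    init_logging(VERBOSE)
    logging.getLogger(__name__).info("visible only in verbose mode")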
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 7ea53f8..2264655 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -164,12 +164,12 @@
from __future__ import print_function
+import logging
import multiprocessing
import os.path
import shlex
import shutil
import struct
-import subprocess
import sys
import tempfile
import zipfile
@@ -182,6 +182,7 @@
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
OPTIONS.package_key = None
@@ -393,11 +394,7 @@
cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
-
- get_signing_key = common.Run(cmd, verbose=False)
- stdoutdata, _ = get_signing_key.communicate()
- assert get_signing_key.returncode == 0, \
- "Failed to get signing key: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd, verbose=False)
self.signer = "openssl"
self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
@@ -410,10 +407,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd)
- stdoutdata, _ = signing.communicate()
- assert signing.returncode == 0, \
- "Failed to sign the input file: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
return out_file
@@ -431,8 +425,6 @@
Args:
secondary: Whether it's generating a secondary payload (default: False).
"""
- # The place where the output from the subprocess should go.
- self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
self.payload_file = None
self.payload_properties = None
self.secondary = secondary
@@ -457,10 +449,7 @@
if source_file is not None:
cmd.extend(["--source_image", source_file])
cmd.extend(additional_args)
- p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- "brillo_update_payload generate failed: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -484,9 +473,7 @@
"--signature_size", "256",
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload hash failed"
+ common.RunAndCheckOutput(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -501,9 +488,7 @@
"--signature_size", "256",
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload sign failed"
+ common.RunAndCheckOutput(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -511,9 +496,7 @@
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload properties failed"
+ common.RunAndCheckOutput(cmd)
if self.secondary:
with open(properties_file, "a") as f:
@@ -595,11 +578,11 @@
OPTIONS.input_tmp, "RECOVERY")
common.ZipWriteStr(
output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
- print("two-step package: using %s in stage 1/3" % (
- recovery_two_step_img_name,))
+ logger.info(
+ "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
else:
- print("two-step package: using recovery.img in stage 1/3")
+ logger.info("two-step package: using recovery.img in stage 1/3")
# The "recovery.img" entry has been written into package earlier.
script.WriteRawImage("/boot", "recovery.img")
@@ -1363,8 +1346,8 @@
target_api_version = target_info["recovery_api_version"]
source_api_version = source_info["recovery_api_version"]
if source_api_version == 0:
- print("WARNING: generating edify script for a source that "
- "can't install it.")
+ logger.warning(
+ "Generating edify script for a source that can't install it.")
script = edify_generator.EdifyGenerator(
source_api_version, target_info, fstab=source_info["fstab"])
@@ -1542,8 +1525,9 @@
else:
include_full_boot = False
- print("boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d)))
+ logger.info(
+ "boot target: %d source: %d diff: %d", target_boot.size,
+ source_boot.size, len(d))
common.ZipWriteStr(output_zip, "boot.img.p", d)
@@ -1593,19 +1577,19 @@
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print("writing full boot image (forced by two-step mode)")
+ logger.info("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step:
if updating_boot:
if include_full_boot:
- print("boot image changed; including full.")
+ logger.info("boot image changed; including full.")
script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img")
else:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
- print("boot image changed; including patch.")
+ logger.info("boot image changed; including patch.")
script.Print("Patching boot image...")
script.ShowProgress(0.1, 10)
script.PatchPartition(
@@ -1615,7 +1599,7 @@
boot_type, boot_device, source_boot.size, source_boot.sha1),
'boot.img.p')
else:
- print("boot image unchanged; skipping.")
+ logger.info("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
@@ -1806,7 +1790,7 @@
common.ZipWriteStr(output_zip, care_map_name, care_map_data,
compress_type=zipfile.ZIP_STORED)
else:
- print("Warning: cannot find care map file in target_file package")
+ logger.warning("Cannot find care map file in target_file package")
AddCompatibilityArchiveIfTrebleEnabled(
target_zip, output_zip, target_info, source_info)
@@ -1922,6 +1906,8 @@
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
if OPTIONS.downgrade:
# We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full
@@ -1942,9 +1928,8 @@
with zipfile.ZipFile(args[0], 'r') as input_zip:
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- if OPTIONS.verbose:
- print("--- target info ---")
- common.DumpInfoDict(OPTIONS.info_dict)
+ logger.info("--- target info ---")
+ common.DumpInfoDict(OPTIONS.info_dict)
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
@@ -1952,9 +1937,8 @@
with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
- if OPTIONS.verbose:
- print("--- source info ---")
- common.DumpInfoDict(OPTIONS.source_info_dict)
+ logger.info("--- source info ---")
+ common.DumpInfoDict(OPTIONS.source_info_dict)
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1978,7 +1962,7 @@
output_file=args[1],
source_file=OPTIONS.incremental_source)
- print("done.")
+ logger.info("done.")
return
# Sanity check the loaded info dicts first.
@@ -1989,7 +1973,7 @@
# Non-A/B OTAs rely on /cache partition to store temporary files.
cache_size = OPTIONS.info_dict.get("cache_size")
if cache_size is None:
- print("--- can't determine the cache partition size ---")
+ logger.warning("--- can't determine the cache partition size ---")
OPTIONS.cache_size = cache_size
if OPTIONS.extra_script is not None:
@@ -1998,7 +1982,7 @@
if OPTIONS.extracted_input is not None:
OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- print("unzipping target target-files...")
+ logger.info("unzipping target target-files...")
OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
OPTIONS.target_tmp = OPTIONS.input_tmp
@@ -2010,7 +1994,7 @@
if OPTIONS.device_specific is None:
from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
if os.path.exists(from_input):
- print("(using device-specific extensions from target_files)")
+ logger.info("(using device-specific extensions from target_files)")
OPTIONS.device_specific = from_input
else:
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
@@ -2027,7 +2011,7 @@
# Generate an incremental OTA.
else:
- print("unzipping source target-files...")
+ logger.info("unzipping source target-files...")
OPTIONS.source_tmp = common.UnzipTemp(
OPTIONS.incremental_source, UNZIP_PATTERN)
with zipfile.ZipFile(args[0], 'r') as input_zip, \
@@ -2043,15 +2027,15 @@
target_files_diff.recursiveDiff(
'', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
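The Payload and PayloadSigner changes above collapse a recurring four-line Popen/communicate/assert pattern into one checked call. A stand-alone sketch of a helper with the same shape as common.RunAndCheckOutput, raising instead of asserting; ExternalError here is a stand-in for the class defined in common.py, and the sample command assumes a POSIX host:

    import subprocess

    class ExternalError(RuntimeError):
        """Stand-in for common.ExternalError."""

    def run_and_check_output(args):
        # Merge stderr into stdout, then raise (rather than assert) on a
        # non-zero exit code, so callers get a real exception type.
        proc = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, _ = proc.communicate()
        if proc.returncode != 0:
            raise ExternalError(
                "Failed to run command '{}' (exit code {}):\n{}".format(
                    args, proc.returncode, output))
        return output

    run_and_check_output(["true"])  # POSIX example; raises on failure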
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index ca53ae1..5ebb1f0 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -13,6 +13,7 @@
# limitations under the License.
import bisect
+import logging
import os
import struct
import threading
@@ -20,6 +21,8 @@
import rangelib
+logger = logging.getLogger(__name__)
+
class SparseImage(object):
"""Wraps a sparse image file into an image object.
@@ -61,8 +64,9 @@
raise ValueError("Chunk header size was expected to be 12, but is %u." %
(chunk_hdr_sz,))
- print("Total of %u %u-byte output blocks in %u input chunks."
- % (total_blks, blk_sz, total_chunks))
+ logger.info(
+ "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
+ blk_sz, total_chunks)
if not build_map:
assert not hashtree_info_generator, \
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index f75b3a7..44703db 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -1268,7 +1268,7 @@
target_file = construct_target_files()
common.ZipDelete(target_file, 'IMAGES/vendor.img')
payload = Payload()
- self.assertRaises(AssertionError, payload.Generate, target_file)
+ self.assertRaises(common.ExternalError, payload.Generate, target_file)
def test_Sign_full(self):
payload = self._create_payload_full()
@@ -1316,7 +1316,7 @@
payload = self._create_payload_full()
payload_signer = PayloadSigner()
payload_signer.signer_args.append('bad-option')
- self.assertRaises(AssertionError, payload.Sign, payload_signer)
+ self.assertRaises(common.ExternalError, payload.Sign, payload_signer)
def test_WriteToZip(self):
payload = self._create_payload_full()
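The two assertions change because RunAndCheckOutput raises common.ExternalError on failure, where the old inline asserts raised AssertionError. A self-contained sketch of the updated expectation; the helper and exception class here are stand-ins:

    import unittest

    class ExternalError(RuntimeError):
        """Stand-in for common.ExternalError."""

    def failing_tool():
        # Models RunAndCheckOutput() on a non-zero exit code.
        raise ExternalError("brillo_update_payload failed")

    class PayloadTest(unittest.TestCase):
        def test_generate_raises_external_error(self):
            self.assertRaises(ExternalError, failing_tool)

    if __name__ == '__main__':
        unittest.main()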
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index 626a1dd..00af296 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -16,6 +16,7 @@
from __future__ import print_function
+import logging
import os.path
import shlex
import struct
@@ -24,6 +25,8 @@
import sparse_img
from rangelib import RangeSet
+logger = logging.getLogger(__name__)
+
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
@@ -71,7 +74,7 @@
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
- print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
+ logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
@@ -114,9 +117,9 @@
else:
hi = i
- if OPTIONS.verbose:
- print("Adjusted partition size for verity, partition_size: {},"
- " verity_size: {}".format(result, verity_size))
+ logger.info(
+ "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
+ result, verity_size)
AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
return (result, verity_size)
@@ -326,9 +329,9 @@
else:
lo = mid + BLOCK_SIZE
- if OPTIONS.verbose:
- print("AVBCalcMinPartitionSize({}): partition_size: {}.".format(
- image_size, partition_size))
+ logger.info(
+ "AVBCalcMinPartitionSize(%d): partition_size: %d.",
+ image_size, partition_size)
return partition_size
@@ -514,9 +517,9 @@
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
- print(
- "Calculated root hash {} doesn't match the one in metadata {}".format(
- root_hash, self.hashtree_info.root_hash))
+ logger.warning(
+ "Calculated root hash %s doesn't match the one in metadata %s",
+ root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
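These hunks also remove the per-call-site "if OPTIONS.verbose" guards: call sites now log unconditionally, and InitLogging() centralizes the gate by raising the root level to INFO only in verbose mode. A minimal sketch of the filtering behavior:

    import logging

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.WARNING)  # the non-verbose default

    # No call-site guard needed: this record is filtered by level alone.
    logger.info("Adjusted partition size for verity, partition_size: %s", 4096)

    # In verbose mode InitLogging() sets the root level to INFO instead,
    # and the same line is emitted unchanged.
    logger.warning("still visible at the default level")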