Merge "Add Rust vendor image support to Make."
diff --git a/core/Makefile b/core/Makefile
index 7975d3a..eb08355 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -211,6 +211,44 @@
 	$(hide) mv $@.tmp $@
 
 # -----------------------------------------------------------------
+# Declare vendor ramdisk fragments
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS :=
+
+# Validate each vendor ramdisk fragment and assign a default --ramdisk_type.
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+  $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+             $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \
+    $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \
+  $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-stage-$(vendor_ramdisk_fragment))) \
+  $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \
+  $(if $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+    $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \
+      $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \
+)
+
+# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping.
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+  $(foreach kmd,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+    $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd)) \
+    $(if $($(kmd_vrf)),$(error Kernel module directory "$(kmd)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory may belong to at most one vendor ramdisk fragment)) \
+    $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \
+  ) \
+)
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS += $(BOARD_VENDOR_RAMDISK_FRAGMENTS)
+
+# Strip the list in case of any whitespace.
+INTERNAL_VENDOR_RAMDISK_FRAGMENTS := \
+  $(strip $(INTERNAL_VENDOR_RAMDISK_FRAGMENTS))
+
+# Assign --ramdisk_name for each vendor ramdisk fragment.
+$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
+  $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \
+    $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \
+  $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \
+  $(eval .KATI_READONLY := BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) \
+)
+
+# -----------------------------------------------------------------
 # kernel modules
 
 # Depmod requires a well-formed kernel version so 0.0 is used as a placeholder.
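
For reference, a minimal BoardConfig.mk sketch of a board that would exercise the fragment declarations above; the fragment name "dlkm" and the values are illustrative assumptions, not part of this change:

    BOARD_BOOT_HEADER_VERSION := 4
    BOARD_VENDOR_RAMDISK_FRAGMENTS := dlkm
    # Pack the modules from the "top" kernel module directory into this fragment.
    BOARD_VENDOR_RAMDISK_FRAGMENT.dlkm.KERNEL_MODULE_DIRS := top
    # --ramdisk_type defaults to DLKM and --ramdisk_name is appended automatically,
    # so MKBOOTIMG_ARGS only needs board-specific extras, if any.
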
@@ -315,14 +353,34 @@
 	@echo '$$(strip $$(notdir $$(PRIVATE_LOAD_MODULES)))' | tr ' ' '\n' > $$(@)
 endef
 
+# $(1): source options file
+# $(2): destination pathname
+# Returns a build rule that checks the syntax of and installs a kernel modules
+# options file. Strip and squeeze any extra space and blank lines.
+# For use via $(eval).
+define build-image-kernel-modules-options-file
+$(2): $(1)
+	@echo "libmodprobe options $$(@)"
+	$(hide) mkdir -p "$$(dir $$@)"
+	$(hide) rm -f "$$@"
+	$(hide) awk <"$$<" >"$$@" \
+	  '/^#/ { print; next } \
+	   NF == 0 { next } \
+	   NF < 2 || $$$$1 != "options" \
+	     { print "Invalid options line " FNR ": " $$$$0 >"/dev/stderr"; \
+	       exit_status = 1; next } \
+	   { $$$$1 = $$$$1; print } \
+	   END { exit exit_status }'
+endef
+
 # $(1): source blocklist file
 # $(2): destination pathname
 # Returns a build rule that checks the syntax of and installs a kernel modules
-# blocklist file. Strip and squeeze any extra space in the blocklist.
+# blocklist file. Strip and squeeze any extra space and blank lines.
 # For use via $(eval).
 define build-image-kernel-modules-blocklist-file
 $(2): $(1)
-	@echo "modprobe blocklist $$(@)"
+	@echo "libmodprobe blocklist $$(@)"
 	$(hide) mkdir -p "$$(dir $$@)"
 	$(hide) rm -f "$$@"
 	$(hide) awk <"$$<" >"$$@" \
@@ -352,11 +410,19 @@
   $(if $(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),,\
     $(eval BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver) := $(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)))) \
   $(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)),$(2),$(3),$(call intermediates-dir-for,PACKAGING,depmod_$(1)$(_sep)$(_kver)),$(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),$(4),$(BOARD_$(1)_KERNEL_MODULES_ARCHIVE$(_sep)$(_kver)),$(_stripped_staging_dir),$(_kver)))) \
+$(if $(_kver), \
+  $(eval _dir := $(_kver)/), \
+  $(eval _dir :=)) \
+$(if $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+  $(eval $(call build-image-kernel-modules-options-file, \
+    $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \
+    $(2)/lib/modules/$(_dir)modules.options)) \
+  $(2)/lib/modules/$(_dir)modules.options) \
 $(if $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
   $(eval $(call build-image-kernel-modules-blocklist-file, \
     $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \
-    $(2)/lib/modules/modules.blocklist)) \
-  $(2)/lib/modules/modules.blocklist)
+    $(2)/lib/modules/$(_dir)modules.blocklist)) \
+  $(2)/lib/modules/$(_dir)modules.blocklist)
 endef
 
 # $(1): kernel module directory name (top is an out of band value for no directory)
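
As a hedged illustration of the new options-file hook (the variable name follows the BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE pattern used above; the path and module are made up): a board can point an image at a libmodprobe options file, and the generated rule installs it as lib/modules/modules.options after checking that every non-comment line is an "options <module> <args>" entry.

    # Hypothetical BoardConfig.mk entry; the referenced file holds lines such as
    #   options nvme_core multipath=0
    BOARD_VENDOR_KERNEL_MODULES_OPTIONS_FILE := device/acme/example/modules.options
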
@@ -415,38 +481,24 @@
   VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR :=
 endif
 
-# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping.
-$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
-  $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
-             $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \
-    $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \
-  $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-dlkm-$(vendor_ramdisk_fragment))) \
-  $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \
-  $(foreach dir,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
-    $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir)) \
-    $(if $($(kmd_vrf)),$(error Kernel module directory "$(dir)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory should belong to exactly one or none vendor ramdisk fragment)) \
-    $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \
-  ) \
-)
-
 BOARD_KERNEL_MODULE_DIRS += top
-$(foreach dir,$(BOARD_KERNEL_MODULE_DIRS), \
-  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(dir))) \
-  $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir))) \
+$(foreach kmd,$(BOARD_KERNEL_MODULE_DIRS), \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(kmd))) \
+  $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd))) \
   $(if $(vendor_ramdisk_fragment), \
     $(eval output_dir := $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR)) \
     $(eval result_var := VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES) \
     $(eval ### else ###), \
     $(eval output_dir := $(TARGET_VENDOR_RAMDISK_OUT)) \
     $(eval result_var := ALL_DEFAULT_INSTALLED_MODULES)) \
-  $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
-  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \
-  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
-  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \
-  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(dir))) \
+  $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(kmd))) \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(kmd))) \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(kmd))) \
   $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
-    $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(dir))),\
-    $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(dir)))))
+    $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(kmd))),\
+    $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(kmd)))))
 
 # -----------------------------------------------------------------
 # Cert-to-package mapping.  Used by the post-build signing tools.
@@ -1040,13 +1092,13 @@
 endif
 
 ifdef INTERNAL_BOOTCONFIG
-ifneq (,$(findstring androidboot.hardware=, $(INTERNAL_BOOTCONFIG)))
-$(error "androidboot.hardware" BOOTCONFIG parameter is not supported due to \
-  bootconfig limitations. Use "hardware" instead. INTERNAL_BOOTCONFIG: \
-  $(INTERNAL_BOOTCONFIG))
-endif
-INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img
-$(INTERNAL_VENDOR_BOOTCONFIG_TARGET):
+  ifneq (,$(findstring androidboot.hardware=, $(INTERNAL_BOOTCONFIG)))
+    $(error "androidboot.hardware" BOOTCONFIG parameter is not supported due \
+      to bootconfig limitations. Use "hardware" instead. INTERNAL_BOOTCONFIG: \
+      $(INTERNAL_BOOTCONFIG))
+  endif
+  INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img
+  $(INTERNAL_VENDOR_BOOTCONFIG_TARGET):
 	rm -f $@
 	$(foreach param,$(INTERNAL_BOOTCONFIG), \
 	 printf "%s\n" $(param) >> $@;)
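
A small sketch of the distinction the check above enforces, assuming INTERNAL_BOOTCONFIG is populated from BOARD_BOOTCONFIG as usual (the value is illustrative): bootconfig keys are implicitly namespaced under androidboot, so the board supplies the bare key.

    # BoardConfig.mk (illustrative)
    BOARD_BOOTCONFIG += hardware=example
    # BOARD_BOOTCONFIG += androidboot.hardware=example  # rejected by the check above
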
@@ -1083,17 +1135,12 @@
 INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS :=
 INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS :=
 
-$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
   $(eval prebuilt_vendor_ramdisk_fragment_file := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
   $(if $(prebuilt_vendor_ramdisk_fragment_file), \
     $(eval vendor_ramdisk_fragment_target := $(call build-prebuilt-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment),$(prebuilt_vendor_ramdisk_fragment_file))) \
     $(eval ### else ###), \
-    $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment))) \
-    $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \
-      $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \
-  $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \
-    $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \
-  $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \
+    $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment)))) \
   $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS += $(vendor_ramdisk_fragment_target)) \
   $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS += $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) --vendor_ramdisk_fragment $(vendor_ramdisk_fragment_target)) \
 )
@@ -2509,7 +2556,7 @@
 $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(INSTALLED_DEBUG_RAMDISK_TARGET)
 $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_TEST_HARNESS_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
 	$(call pretty,"Target test harness ramdisk: $@")
-	rsync -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR)
+	rsync --chmod=u+w -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR)
 	$(call append-test-harness-props,$(ADDITIONAL_TEST_HARNESS_PROPERTIES),$(TEST_HARNESS_PROP_TARGET))
 	$(MKBOOTFS) -d $(TARGET_OUT) $(TEST_HARNESS_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $@
 
@@ -2527,6 +2574,7 @@
 #
 # Note: it's intentional to skip signing for boot-test-harness.img, because it
 # can only be used if the device is unlocked with verification error.
+ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
 ifneq ($(strip $(TARGET_NO_KERNEL)),true)
 
 ifneq ($(strip $(BOARD_KERNEL_BINARIES)),)
@@ -2567,6 +2615,7 @@
 	$(foreach b,$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET),$(call build-boot-test-harness-target,$b))
 
 endif # TARGET_NO_KERNEL
+endif # INSTALLED_BOOTIMAGE_TARGET
 endif # BOARD_BUILD_SYSTEM_ROOT_IMAGE is not true
 
 # Creates a compatibility symlink between two partitions, e.g. /system/vendor to /vendor
@@ -3937,6 +3986,9 @@
     or (2) extracting kernel configuration and defining BOARD_KERNEL_CONFIG_FILE and \
     BOARD_KERNEL_VERSION manually; or (3) unsetting PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS \
     manually.)
+# Clear their values to indicate that these two files do not exist.
+BUILT_KERNEL_CONFIGS_FILE :=
+BUILT_KERNEL_VERSION_FILE :=
 else
 
 # Tools for decompression that is not in PATH.
@@ -3981,8 +4033,10 @@
 check_vintf_compatible_deps := $(check_vintf_common_srcs)
 
 ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),true)
+ifneq (,$(BUILT_KERNEL_VERSION_FILE)$(BUILT_KERNEL_CONFIGS_FILE))
 check_vintf_compatible_args += --kernel $(BUILT_KERNEL_VERSION_FILE):$(BUILT_KERNEL_CONFIGS_FILE)
 check_vintf_compatible_deps += $(BUILT_KERNEL_CONFIGS_FILE) $(BUILT_KERNEL_VERSION_FILE)
+endif # BUILT_KERNEL_VERSION_FILE != "" || BUILT_KERNEL_CONFIGS_FILE != ""
 endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
 
 check_vintf_compatible_args += \
@@ -4887,9 +4941,9 @@
 	echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/VENDOR_BOOT/pagesize
 endif
 	echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/VENDOR_BOOT/vendor_cmdline
-ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
-	echo "$(BOARD_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments"
-	$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ifdef INTERNAL_VENDOR_RAMDISK_FRAGMENTS
+	echo "$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments"
+	$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \
 	  mkdir -p $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment); \
 	  echo "$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)" > "$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/mkbootimg_args"; \
 	  $(eval prebuilt_ramdisk := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
@@ -4899,7 +4953,7 @@
 	      $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR), \
 	      $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/RAMDISK); \
 	  ))
-endif # BOARD_VENDOR_RAMDISK_FRAGMENTS != ""
+endif # INTERNAL_VENDOR_RAMDISK_FRAGMENTS != ""
 endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET
 ifdef BUILDING_SYSTEM_IMAGE
 	@# Contents of the system image
@@ -5331,10 +5385,18 @@
 # Any dependencies are set up later in build/make/core/main.mk.
 
 JACOCO_REPORT_CLASSES_ALL := $(PRODUCT_OUT)/jacoco-report-classes-all.jar
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco,HOST)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage)
+$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage,HOST)
 $(JACOCO_REPORT_CLASSES_ALL) :
 	@echo "Collecting uninstrumented classes"
-	find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "jacoco-report-classes.jar" -o -name "proguard_usage.zip" 2>/dev/null | sort > $@.list
-	$(SOONG_ZIP) -o $@ -L 0 -C $(OUT_DIR) -P out -l $@.list
+	mkdir -p $(PRIVATE_TARGET_JACOCO_DIR) $(PRIVATE_HOST_JACOCO_DIR) $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) $(PRIVATE_HOST_PROGUARD_USAGE_DIR)
+	$(SOONG_ZIP) -o $@ -L 0 \
+	  -C $(PRIVATE_TARGET_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_JACOCO_DIR) \
+	  -C $(PRIVATE_HOST_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_JACOCO_DIR) \
+	  -C $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) \
+	  -C $(PRIVATE_HOST_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_PROGUARD_USAGE_DIR)
 
 ifeq (,$(TARGET_BUILD_UNBUNDLED))
   $(JACOCO_REPORT_CLASSES_ALL): $(INTERNAL_ALLIMAGES_FILES)
@@ -5350,13 +5412,11 @@
 ifeq (,$(TARGET_BUILD_UNBUNDLED))
 $(PROGUARD_DICT_ZIP): $(INTERNAL_ALLIMAGES_FILES) $(updater_dep)
 endif
-$(PROGUARD_DICT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard)/filelist
+$(PROGUARD_DICT_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_dictionary)
 $(PROGUARD_DICT_ZIP): $(SOONG_ZIP)
 	@echo "Packaging Proguard obfuscation dictionary files."
-	mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE))
-	find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary | \
-	    sed -e 's/\(.*\)\/proguard_dictionary/\0\n\1\/classes.jar/' > $(PRIVATE_LIST_FILE)
-	$(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
+	mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR)
+	$(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(PRIVATE_PACKAGING_DIR) -P out/target/common/obj -D $(PRIVATE_PACKAGING_DIR)
 
 #------------------------------------------------------------------
 # A zip of Proguard usage files.
@@ -5377,11 +5437,12 @@
     $(INSTALLED_ODM_DLKMIMAGE_TARGET) \
     $(updater_dep)
 endif
-$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage)/filelist
+$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage.zip)/filelist
+$(PROGUARD_USAGE_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage)
 $(PROGUARD_USAGE_ZIP): $(MERGE_ZIPS)
 	@echo "Packaging Proguard usage files."
-	mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE))
-	find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_usage.zip > $(PRIVATE_LIST_FILE)
+	mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR) $(dir $(PRIVATE_LIST_FILE))
+	find $(PRIVATE_PACKAGING_DIR) -name proguard_usage.zip > $(PRIVATE_LIST_FILE)
 	$(MERGE_ZIPS) $@ @$(PRIVATE_LIST_FILE)
 
 ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk
index fe04b84..6335728 100644
--- a/core/app_prebuilt_internal.mk
+++ b/core/app_prebuilt_internal.mk
@@ -169,12 +169,13 @@
 endif
 
 my_dex_jar := $(my_prebuilt_src_file)
-my_manifest_or_apk := $(my_prebuilt_src_file)
 dex_preopt_profile_src_file := $(my_prebuilt_src_file)
 
 #######################################
 # defines built_odex along with rule to install odex
+my_manifest_or_apk := $(my_prebuilt_src_file)
 include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
+my_manifest_or_apk :=
 #######################################
 ifneq ($(LOCAL_REPLACE_PREBUILT_APK_INSTALLED),)
 # There is a replacement for the prebuilt .apk we can install without any processing.
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 68f880f..c973997 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -533,13 +533,17 @@
 
 ifndef LOCAL_IS_HOST_MODULE
 # Rule to install the module's companion init.rc.
-my_init_rc := $(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC)
+ifneq ($(strip $(LOCAL_FULL_INIT_RC)),)
+my_init_rc := $(LOCAL_FULL_INIT_RC)
+else
+my_init_rc := $(foreach rc,$(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC),$(LOCAL_PATH)/$(rc))
+endif
 ifneq ($(strip $(my_init_rc)),)
 # Make doesn't support recovery as an output partition, but some Soong modules installed in recovery
 # have init.rc files that need to be installed alongside them. Manually handle the case where the
 # output file is in the recovery partition.
 my_init_rc_path := $(if $(filter $(TARGET_RECOVERY_ROOT_OUT)/%,$(my_module_path)),$(TARGET_RECOVERY_ROOT_OUT)/system/etc,$(TARGET_OUT$(partition_tag)_ETC))
-my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(LOCAL_PATH)/$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
+my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(rc):$(my_init_rc_path)/init/$(notdir $(rc)))
 my_init_rc_installed := $(foreach rc,$(my_init_rc_pairs),$(call word-colon,2,$(rc)))
 
 # Make sure we only set up the copy rules once, even if another arch variant
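
A minimal Android.mk sketch of the two forms now supported (module and file names are hypothetical): LOCAL_INIT_RC stays relative to LOCAL_PATH, while the new LOCAL_FULL_INIT_RC takes a path from the build root, for example a generated file, and takes precedence when both are set.

    # Relative form, resolved against LOCAL_PATH as before:
    LOCAL_INIT_RC := init.examplesvc.rc
    # Full-path form, used instead of LOCAL_INIT_RC when non-empty:
    # LOCAL_FULL_INIT_RC := $(call local-generated-sources-dir)/init.examplesvc.rc
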
diff --git a/core/binary.mk b/core/binary.mk
index 0d7206f..cf47374 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -471,27 +471,6 @@
 my_soong_problems += dotdot_incs
 endif
 
-####################################################
-## Add FDO flags if FDO is turned on and supported
-## Please note that we will do option filtering during FDO build.
-## i.e. Os->O2, remove -fno-early-inline and -finline-limit.
-##################################################################
-my_fdo_build :=
-ifneq ($(filter true always, $(LOCAL_FDO_SUPPORT)),)
-  ifeq ($(BUILD_FDO_INSTRUMENT),true)
-    my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_CFLAGS)
-    my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_LDFLAGS)
-    my_fdo_build := true
-  else ifneq ($(filter true,$(BUILD_FDO_OPTIMIZE))$(filter always,$(LOCAL_FDO_SUPPORT)),)
-    my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_OPTIMIZE_CFLAGS)
-    my_fdo_build := true
-  endif
-  # Disable ccache (or other compiler wrapper) except gomacc, which
-  # can handle -fprofile-use properly.
-  my_cc_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cc_wrapper))
-  my_cxx_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cxx_wrapper))
-endif
-
 ###########################################################
 ## Explicitly declare assembly-only __ASSEMBLY__ macro for
 ## assembly source
@@ -1479,12 +1458,6 @@
 my_asflags := $(call convert-to-clang-flags,$(my_asflags))
 my_ldflags := $(call convert-to-clang-flags,$(my_ldflags))
 
-ifeq ($(my_fdo_build), true)
-  my_cflags := $(patsubst -Os,-O2,$(my_cflags))
-  fdo_incompatible_flags := -fno-early-inlining -finline-limit=%
-  my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
-endif
-
 # No one should ever use this flag. On GCC it's mere presence will disable all
 # warnings, even those that are specified after it (contrary to typical warning
 # flag behavior). This circumvents CFLAGS_NO_OVERRIDE from forcibly enabling the
diff --git a/core/board_config.mk b/core/board_config.mk
index a739784..9ae597e 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -25,6 +25,7 @@
 _board_strip_readonly_list += BOARD_HAVE_BLUETOOTH
 _board_strip_readonly_list += BOARD_INSTALLER_CMDLINE
 _board_strip_readonly_list += BOARD_KERNEL_CMDLINE
+_board_strip_readonly_list += BOARD_BOOT_HEADER_VERSION
 _board_strip_readonly_list += BOARD_BOOTCONFIG
 _board_strip_readonly_list += BOARD_KERNEL_BASE
 _board_strip_readonly_list += BOARD_USES_GENERIC_AUDIO
@@ -828,7 +829,14 @@
   ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
     $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if not building vendor_boot image)
   endif
-endif
+else # BUILDING_VENDOR_BOOT_IMAGE
+  ifneq (,$(call math_lt,$(BOARD_BOOT_HEADER_VERSION),4))
+    ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
+      $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if \
+        BOARD_BOOT_HEADER_VERSION is less than 4)
+    endif
+  endif
+endif # BUILDING_VENDOR_BOOT_IMAGE
 
 ifneq ($(words $(BOARD_VENDOR_RAMDISK_FRAGMENTS)),$(words $(sort $(BOARD_VENDOR_RAMDISK_FRAGMENTS))))
   $(error BOARD_VENDOR_RAMDISK_FRAGMENTS has duplicate entries: $(BOARD_VENDOR_RAMDISK_FRAGMENTS))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 019892e..faca97a 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -100,12 +100,12 @@
 LOCAL_EXTRA_FULL_TEST_CONFIGS:=
 LOCAL_EXTRACT_APK:=
 LOCAL_EXTRACT_DPI_APK:=
-LOCAL_FDO_SUPPORT:=
 LOCAL_FILE_CONTEXTS:=
 LOCAL_FINDBUGS_FLAGS:=
 LOCAL_FORCE_STATIC_EXECUTABLE:=
 LOCAL_FULL_CLASSES_JACOCO_JAR:=
 LOCAL_FULL_CLASSES_PRE_JACOCO_JAR:=
+LOCAL_FULL_INIT_RC:=
 LOCAL_FULL_LIBS_MANIFEST_FILES:=
 LOCAL_FULL_MANIFEST_FILE:=
 LOCAL_FULL_TEST_CONFIG:=
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index e45c1a6..11c1944 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -64,7 +64,6 @@
 endif
 
 include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
 $(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-arm64.mk b/core/combo/TARGET_linux-arm64.mk
index a3f59a7..5d481cb 100644
--- a/core/combo/TARGET_linux-arm64.mk
+++ b/core/combo/TARGET_linux-arm64.mk
@@ -39,7 +39,6 @@
 endif
 
 include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
 $(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-x86.mk b/core/combo/TARGET_linux-x86.mk
index 2c4614b..acbae51 100644
--- a/core/combo/TARGET_linux-x86.mk
+++ b/core/combo/TARGET_linux-x86.mk
@@ -32,7 +32,6 @@
 endif
 
 include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
 $(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/TARGET_linux-x86_64.mk b/core/combo/TARGET_linux-x86_64.mk
index d2172d6..9e7e363 100644
--- a/core/combo/TARGET_linux-x86_64.mk
+++ b/core/combo/TARGET_linux-x86_64.mk
@@ -32,7 +32,6 @@
 endif
 
 include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
-include $(BUILD_SYSTEM)/combo/fdo.mk
 
 define $(combo_var_prefix)transform-shared-lib-to-toc
 $(call _gen_toc_command_for_elf,$(1),$(2))
diff --git a/core/combo/fdo.mk b/core/combo/fdo.mk
deleted file mode 100644
index 8fb8fd3..0000000
--- a/core/combo/fdo.mk
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Copyright (C) 2006 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Setup FDO related flags.
-
-$(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS:=
-
-# Set BUILD_FDO_INSTRUMENT=true to turn on FDO instrumentation.
-# The profile will be generated on /sdcard/fdo_profile on the device.
-$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_CFLAGS := -fprofile-generate=/sdcard/fdo_profile -DANDROID_FDO
-$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_LDFLAGS := -lgcov -lgcc
-
-# Set TARGET_FDO_PROFILE_PATH to set a custom profile directory for your build.
-ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)),)
-  $(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH := vendor/google_data/fdo_profile
-endif
-
-$(combo_2nd_arch_prefix)TARGET_FDO_OPTIMIZE_CFLAGS := \
-    -fprofile-use=$($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH) \
-    -DANDROID_FDO -fprofile-correction -Wcoverage-mismatch -Wno-error
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 228bad6..4a94e93 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -139,7 +139,7 @@
     ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
            $(filter $(dir)%,$(LOCAL_PATH)))),)
       my_sanitize := memtag_heap $(my_sanitize)
-      my_sanitize_diag := memtag_heap $(my_sanitize)
+      my_sanitize_diag := memtag_heap $(my_sanitize_diag)
     else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
            $(filter $(dir)%,$(LOCAL_PATH)))),)
       my_sanitize := memtag_heap $(my_sanitize)
@@ -211,10 +211,12 @@
 
 ifneq ($(filter memtag_heap,$(my_sanitize)),)
   # Add memtag ELF note.
-  ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
-    my_whole_static_libraries += note_memtag_heap_sync
-  else
-    my_whole_static_libraries += note_memtag_heap_async
+  ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
+    ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
+      my_whole_static_libraries += note_memtag_heap_sync
+    else
+      my_whole_static_libraries += note_memtag_heap_async
+    endif
   endif
   # This is all that memtag_heap does - it is not an actual -fsanitize argument.
   # Remove it from the list.
diff --git a/core/definitions.mk b/core/definitions.mk
index 2883f0d..b15ce84 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -745,6 +745,42 @@
 endef
 
 ###########################################################
+## The packaging directory for a module.  Similar to intermediates, but
+## in a location that will be wiped by an m installclean.
+###########################################################
+
+# $(1): subdir in PACKAGING
+# $(2): target class, like "APPS"
+# $(3): target name, like "NotePad"
+# $(4): { HOST, HOST_CROSS, <empty (TARGET)>, <other non-empty (HOST)> }
+define packaging-dir-for
+$(strip \
+    $(eval _pdfClass := $(strip $(2))) \
+    $(if $(_pdfClass),, \
+        $(error $(LOCAL_PATH): Class not defined in call to packaging-dir-for)) \
+    $(eval _pdfName := $(strip $(3))) \
+    $(if $(_pdfName),, \
+        $(error $(LOCAL_PATH): Name not defined in call to packaging-dir-for)) \
+    $(call intermediates-dir-for,PACKAGING,$(1),$(4))/$(_pdfClass)/$(_pdfName)_intermediates \
+)
+endef
+
+# Uses LOCAL_MODULE_CLASS, LOCAL_MODULE, and LOCAL_IS_HOST_MODULE
+# to determine the packaging directory.
+#
+# $(1): subdir in PACKAGING
+define local-packaging-dir
+$(strip \
+    $(if $(strip $(LOCAL_MODULE_CLASS)),, \
+        $(error $(LOCAL_PATH): LOCAL_MODULE_CLASS not defined before call to local-packaging-dir)) \
+    $(if $(strip $(LOCAL_MODULE)),, \
+        $(error $(LOCAL_PATH): LOCAL_MODULE not defined before call to local-packaging-dir)) \
+    $(call packaging-dir-for,$(1),$(LOCAL_MODULE_CLASS),$(LOCAL_MODULE),$(if $(strip $(LOCAL_IS_HOST_MODULE)),HOST)) \
+)
+endef
+
+
+###########################################################
 ## Convert a list of short module names (e.g., "framework", "Browser")
 ## into the list of files that are built for those modules.
 ## NOTE: this won't return reliable results until after all
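
A hedged sketch of what the new helpers expand to, with a made-up module: for a target app with LOCAL_MODULE_CLASS := APPS and LOCAL_MODULE := NotePad,

    $(call local-packaging-dir,proguard_dictionary)

resolves to

    $(call intermediates-dir-for,PACKAGING,proguard_dictionary)/APPS/NotePad_intermediates

that is, a per-module directory under the shared PACKAGING intermediates, which an m installclean wipes without requiring a full clean.
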
@@ -1712,7 +1748,6 @@
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \
   $(PRIVATE_ALL_STATIC_LIBRARIES) \
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
-  $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \
   $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \
   $(PRIVATE_ALL_SHARED_LIBRARIES) \
   -o $@ \
@@ -1752,7 +1787,6 @@
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
   $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
   $(PRIVATE_TARGET_LIBCRT_BUILTINS) \
-  $(PRIVATE_TARGET_LIBATOMIC) \
   $(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
   $(PRIVATE_LDFLAGS) \
   $(PRIVATE_ALL_SHARED_LIBRARIES) \
@@ -1787,7 +1821,6 @@
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
   $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
   $(PRIVATE_TARGET_LIBCRT_BUILTINS) \
-  $(PRIVATE_TARGET_LIBATOMIC) \
   $(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
   $(PRIVATE_LDFLAGS) \
   $(PRIVATE_ALL_SHARED_LIBRARIES) \
@@ -1831,7 +1864,6 @@
   $(filter %libc.a %libc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
   $(filter %libc_nomalloc.a %libc_nomalloc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
   $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
-  $(PRIVATE_TARGET_LIBATOMIC) \
   $(filter %libcompiler_rt.a %libcompiler_rt.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
   $(PRIVATE_TARGET_LIBCRT_BUILTINS) \
   -Wl,--end-group \
@@ -1859,7 +1891,6 @@
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \
   $(PRIVATE_ALL_STATIC_LIBRARIES) \
   $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
-  $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \
   $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \
   $(PRIVATE_ALL_SHARED_LIBRARIES) \
   $(foreach path,$(PRIVATE_RPATHS), \
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index e0f94bd..6b05d0d 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -208,6 +208,17 @@
 # Verify <uses-library> coherence between the build system and the manifest.
 ################################################################################
 
+# Some libraries do not have a manifest, so there is nothing to check against.
+# Handle it as if the manifest had zero <uses-library> tags: it is ok unless the
+# module has non-empty LOCAL_USES_LIBRARIES or LOCAL_OPTIONAL_USES_LIBRARIES.
+ifndef my_manifest_or_apk
+  ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES)))
+    $(error $(LOCAL_MODULE) has non-empty <uses-library> list but no manifest)
+  else
+    LOCAL_ENFORCE_USES_LIBRARIES := false
+  endif
+endif
+
 # Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES
 # If LOCAL_ENFORCE_USES_LIBRARIES is not set, default to true if either of LOCAL_USES_LIBRARIES or
 # LOCAL_OPTIONAL_USES_LIBRARIES are specified.
@@ -360,7 +371,7 @@
   $(call add_json_str,  ProfileClassListing,            $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE)))
   $(call add_json_bool, ProfileIsTextListing,           $(my_profile_is_text_listing))
   $(call add_json_str,  EnforceUsesLibrariesStatusFile, $(my_enforced_uses_libraries))
-  $(call add_json_bool, EnforceUsesLibraries,           $(LOCAL_ENFORCE_USES_LIBRARIES))
+  $(call add_json_bool, EnforceUsesLibraries,           $(filter true,$(LOCAL_ENFORCE_USES_LIBRARIES)))
   $(call add_json_str,  ProvidesUsesLibrary,            $(firstword $(LOCAL_PROVIDES_USES_LIBRARY) $(LOCAL_MODULE)))
   $(call add_json_map,  ClassLoaderContexts)
   $(call add_json_class_loader_context, any, $(my_dexpreopt_libs))
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index c6a8faf..fb14cce 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -41,7 +41,6 @@
 else
 my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
 endif
-my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a
 ifeq ($(LOCAL_NO_CRT),true)
 my_target_crtbegin_dynamic_o :=
 my_target_crtbegin_static_o :=
@@ -61,18 +60,17 @@
 my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android.sdk.$(my_ndk_crt_version))
 endif
 $(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
-$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
 $(linked_module): PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O := $(my_target_crtbegin_dynamic_o)
 $(linked_module): PRIVATE_TARGET_CRTBEGIN_STATIC_O := $(my_target_crtbegin_static_o)
 $(linked_module): PRIVATE_TARGET_CRTEND_O := $(my_target_crtend_o)
 $(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD)
 
 ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
-$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX)
+$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX)
 	$(transform-o-to-static-executable)
 	$(PRIVATE_POST_LINK_CMD)
 else
-$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX)
+$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX)
 	$(transform-o-to-executable)
 	$(PRIVATE_POST_LINK_CMD)
 endif
diff --git a/core/jacoco.mk b/core/jacoco.mk
index e8fb89b..e8c74ee 100644
--- a/core/jacoco.mk
+++ b/core/jacoco.mk
@@ -71,7 +71,11 @@
 	zip -q $@ \
 	  -r $(PRIVATE_UNZIPPED_PATH)
 
-
+# Make a rule to copy the jacoco-report-classes.jar to a packaging directory.
+$(eval $(call copy-one-file,$(my_classes_to_report_on_path),\
+  $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
+$(call add-dependency,$(LOCAL_BUILT_MODULE),\
+  $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
 
   # make a task that invokes instrumentation
   my_instrumented_path := $(my_files)/work/instrumented/classes
diff --git a/core/java.mk b/core/java.mk
index 3f147ba..123cbe8 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -470,6 +470,17 @@
 
 ifneq ($(filter obfuscation,$(LOCAL_PROGUARD_ENABLED)),)
   $(built_dex_intermediate): .KATI_IMPLICIT_OUTPUTS := $(proguard_dictionary) $(proguard_configuration)
+
+  # Make a rule to copy the proguard_dictionary to a packaging directory.
+  $(eval $(call copy-one-file,$(proguard_dictionary),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+
+  $(eval $(call copy-one-file,$(full_classes_pre_proguard_jar),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar))
+  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar)
 endif
 
 endif # LOCAL_PROGUARD_ENABLED defined
diff --git a/core/java_prebuilt_internal.mk b/core/java_prebuilt_internal.mk
index 990b7d4..be733ff 100644
--- a/core/java_prebuilt_internal.mk
+++ b/core/java_prebuilt_internal.mk
@@ -33,7 +33,6 @@
 
 ifeq ($(prebuilt_module_is_dex_javalib),true)
 my_dex_jar := $(my_prebuilt_src_file)
-my_manifest_or_apk := $(my_prebuilt_src_file)
 # This is a target shared library, i.e. a jar with classes.dex.
 
 $(foreach pair,$(PRODUCT_BOOT_JARS), \
diff --git a/core/main.mk b/core/main.mk
index 3362681..1e9a95f 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -290,11 +290,25 @@
     ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
 endif
 
+ifneq ($(TARGET_BUILD_VARIANT),user)
+  ifdef PRODUCT_SET_DEBUGFS_RESTRICTIONS
+    ADDITIONAL_VENDOR_PROPERTIES += \
+      ro.product.enforce_debugfs_restrictions=$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)
+  endif
+endif
+
 # Vendors with GRF must define BOARD_SHIPPING_API_LEVEL for the vendor API level.
 # This must not be defined for the non-GRF devices.
 ifdef BOARD_SHIPPING_API_LEVEL
 ADDITIONAL_VENDOR_PROPERTIES += \
     ro.board.first_api_level=$(BOARD_SHIPPING_API_LEVEL)
+
+# To manually set the vendor API level of the vendor modules, BOARD_API_LEVEL can be used.
+# The values of the GRF properties will be verified by post_process_props.py
+ifdef BOARD_API_LEVEL
+ADDITIONAL_VENDOR_PROPERTIES += \
+    ro.board.api_level=$(BOARD_API_LEVEL)
+endif
 endif
 
 ADDITIONAL_VENDOR_PROPERTIES += \
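
A hedged example of the GRF wiring added above (API levels are illustrative): with both board variables set, the vendor build.prop gains the two ro.board properties, which post_process_props.py later validates against the platform SDK version.

    # BoardConfig.mk (illustrative values)
    BOARD_SHIPPING_API_LEVEL := 30    # -> ro.board.first_api_level=30
    BOARD_API_LEVEL := 31             # -> ro.board.api_level=31
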
diff --git a/core/product.mk b/core/product.mk
index 7c27614..015fe44 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -341,6 +341,9 @@
 # This flag implies PRODUCT_USE_DYNAMIC_PARTITIONS.
 _product_single_value_vars += PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
 
+# When this is true, various build-time and runtime debugfs restrictions are enabled.
+_product_single_value_vars += PRODUCT_SET_DEBUGFS_RESTRICTIONS
+
 # Other dynamic partition feature flags.PRODUCT_USE_DYNAMIC_PARTITION_SIZE and
 # PRODUCT_BUILD_SUPER_PARTITION default to the value of PRODUCT_USE_DYNAMIC_PARTITIONS.
 _product_single_value_vars += \
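
A minimal sketch of how a product might opt in (the makefile path is hypothetical); the same flag drives ro.product.enforce_debugfs_restrictions on non-user builds in main.mk and BuildDebugfsRestrictionsEnabled in soong_config.mk:

    # device/acme/example/device.mk (illustrative)
    PRODUCT_SET_DEBUGFS_RESTRICTIONS := true
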
diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml
new file mode 100644
index 0000000..2055df2
--- /dev/null
+++ b/core/rust_device_benchmark_config_template.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- This test config file is auto-generated. -->
+<configuration description="Config to run {MODULE} rust benchmark tests.">
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+        <option name="cleanup" value="false" />
+        <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+        <option name="test-device-path" value="/data/local/tmp" />
+        <option name="module-name" value="{MODULE}" />
+        <option name="is-benchmark" value="true" />
+    </test>
+</configuration>
diff --git a/core/rust_host_benchmark_config_template.xml b/core/rust_host_benchmark_config_template.xml
new file mode 100644
index 0000000..bb7c1b5
--- /dev/null
+++ b/core/rust_host_benchmark_config_template.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Config to run {MODULE} rust benchmark host tests">
+    <test class="com.android.tradefed.testtype.rust.RustBinaryHostTest" >
+        <option name="test-file" value="{MODULE}" />
+        <option name="test-timeout" value="5m" />
+        <option name="is-benchmark" value="true" />
+    </test>
+</configuration>
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index 12b7f44..139de10 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -39,7 +39,6 @@
 else
 my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
 endif
-my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a
 ifeq ($(LOCAL_NO_CRT),true)
 my_target_crtbegin_so_o :=
 my_target_crtend_so_o :=
@@ -55,7 +54,6 @@
 my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so.sdk.$(my_ndk_crt_version))
 endif
 $(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
-$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
 $(linked_module): PRIVATE_TARGET_CRTBEGIN_SO_O := $(my_target_crtbegin_so_o)
 $(linked_module): PRIVATE_TARGET_CRTEND_SO_O := $(my_target_crtend_so_o)
 
@@ -65,7 +63,6 @@
         $(my_target_crtbegin_so_o) \
         $(my_target_crtend_so_o) \
         $(my_target_libcrt_builtins) \
-        $(my_target_libatomic) \
         $(LOCAL_ADDITIONAL_DEPENDENCIES) $(CLANG_CXX)
 	$(transform-o-to-shared-lib)
 
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index 50ac93a..ce7b142 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -7,7 +7,7 @@
 # LOCAL_SOONG_HEADER_JAR
 # LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
 # LOCAL_SOONG_PROGUARD_DICT
-# LOCAL_SOONG_PROGUARD_USAGE
+# LOCAL_SOONG_PROGUARD_USAGE_ZIP
 # LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
 # LOCAL_SOONG_RRO_DIRS
 # LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH)
@@ -74,23 +74,31 @@
 
 ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
   $(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\
-    $(intermediates.COMMON)/jacoco-report-classes.jar))
+    $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
   $(call add-dependency,$(LOCAL_BUILT_MODULE),\
-    $(intermediates.COMMON)/jacoco-report-classes.jar)
+    $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
 endif
 
 ifdef LOCAL_SOONG_PROGUARD_DICT
   $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
     $(intermediates.COMMON)/proguard_dictionary))
+  $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+  $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar))
   $(call add-dependency,$(LOCAL_BUILT_MODULE),\
     $(intermediates.COMMON)/proguard_dictionary)
+  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar)
 endif
 
 ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP
   $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\
-    $(intermediates.COMMON)/proguard_usage.zip))
+    $(call local-packaging-dir,proguard_usage)/proguard_usage.zip))
   $(call add-dependency,$(LOCAL_BUILT_MODULE),\
-    $(intermediates.COMMON)/proguard_usage.zip)
+    $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)
 endif
 
 ifdef LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE
diff --git a/core/soong_config.mk b/core/soong_config.mk
index b87eba1..17176df 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -256,6 +256,8 @@
 $(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
 $(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
 
+$(call add_json_bool, BuildDebugfsRestrictionsEnabled, $(filter true,$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)))
+
 $(call add_json_bool, RequiresInsecureExecmemForSwiftshader, $(filter true,$(PRODUCT_REQUIRES_INSECURE_EXECMEM_FOR_SWIFTSHADER)))
 
 $(call add_json_bool, SelinuxIgnoreNeverallows, $(filter true,$(SELINUX_IGNORE_NEVERALLOWS)))
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index c600178..0922def 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -47,23 +47,31 @@
 
 ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR
   $(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\
-    $(intermediates.COMMON)/jacoco-report-classes.jar))
+    $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar))
   $(call add-dependency,$(common_javalib.jar),\
-    $(intermediates.COMMON)/jacoco-report-classes.jar)
+    $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)
 endif
 
 ifdef LOCAL_SOONG_PROGUARD_DICT
   $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
     $(intermediates.COMMON)/proguard_dictionary))
-  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
+  $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary))
+  $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar))
+  $(call add-dependency,$(common_javalib.jar),\
     $(intermediates.COMMON)/proguard_dictionary)
+  $(call add-dependency,$(common_javalib.jar),\
+    $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)
+  $(call add-dependency,$(common_javalib.jar),\
+    $(call local-packaging-dir,proguard_dictionary)/classes.jar)
 endif
 
-ifdef LOCAL_SOONG_PROGUARD_USAGE
+ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP
   $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\
-    $(intermediates.COMMON)/proguard_usage.zip))
-  $(call add-dependency,$(LOCAL_BUILT_MODULE),\
-    $(intermediates.COMMON)/proguard_usage.zip)
+    $(call local-packaging-dir,proguard_usage)/proguard_usage.zip))
+  $(call add-dependency,$(common_javalib.jar),\
+    $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)
 endif
 
 
@@ -120,9 +128,11 @@
 
     $(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
     $(eval $(call add-dependency,$(LOCAL_BUILT_MODULE),$(common_javalib.jar)))
-    $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar)))
-    ifneq ($(TURBINE_ENABLED),false)
-      $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar)))
+    ifdef LOCAL_SOONG_CLASSES_JAR
+      $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar)))
+      ifneq ($(TURBINE_ENABLED),false)
+        $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar)))
+      endif
     endif
   endif
 
@@ -153,8 +163,10 @@
   $(eval $(call copy-one-file,$(LOCAL_SOONG_DEXPREOPT_CONFIG), $(call local-intermediates-dir,)/dexpreopt.config))
 endif
 
+ifdef LOCAL_SOONG_CLASSES_JAR
 javac-check : $(full_classes_jar)
 javac-check-$(LOCAL_MODULE) : $(full_classes_jar)
+endif
 .PHONY: javac-check-$(LOCAL_MODULE)
 
 ifndef LOCAL_IS_HOST_MODULE
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 359d3d2..daebdd3 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -122,7 +122,7 @@
 	        echo "$$(line)" >> $$@;\
 	    )\
 	)
-	$(hide) $(POST_PROCESS_PROPS) $$(_option) $$@ $(5)
+	$(hide) $(POST_PROCESS_PROPS) $$(_option) --sdk-version $(PLATFORM_SDK_VERSION) $$@ $(5)
 	$(hide) $(foreach file,$(strip $(6)),\
 	    if [ -f "$(file)" ]; then\
 	        cat $(file) >> $$@;\
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index c9e3e80..4138277 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -240,7 +240,7 @@
     #  It must be of the form "YYYY-MM-DD" on production devices.
     #  It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
     #  If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
-      PLATFORM_SECURITY_PATCH := 2021-03-05
+      PLATFORM_SECURITY_PATCH := 2021-04-05
 endif
 .KATI_READONLY := PLATFORM_SECURITY_PATCH
 
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 37c0f25..27dc158 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -24,7 +24,12 @@
     kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4 \
     kernel/prebuilts/mainline/arm64/kernel-mainline-allsyms:kernel-mainline \
     kernel/prebuilts/mainline/arm64/kernel-mainline-gz-allsyms:kernel-mainline-gz \
-    kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4
+    kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4 \
+
+$(call dist-for-goals, dist_files, kernel/prebuilts/4.19/arm64/prebuilt-info.txt:kernel/4.19/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.4/arm64/prebuilt-info.txt:kernel/5.4/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/arm64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/mainline/arm64/prebuilt-info.txt:kernel/mainline/prebuilt-info.txt)
 
 ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
 PRODUCT_COPY_FILES += \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 4fdfc88..21beda9 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -116,7 +116,6 @@
     iptables \
     ip-up-vpn \
     javax.obex \
-    keystore \
     keystore2 \
     credstore \
     ld.mc \
@@ -292,10 +291,16 @@
 ifeq ($(EMMA_INSTRUMENT),true)
   ifneq ($(EMMA_INSTRUMENT_STATIC),true)
     # For instrumented build, if Jacoco is not being included statically
-    # in instrumented packages then include Jacoco classes into the
-    # bootclasspath.
+    # in instrumented packages then include Jacoco classes in the product
+    # packages.
     PRODUCT_PACKAGES += jacocoagent
-    PRODUCT_BOOT_JARS += jacocoagent
+    ifneq ($(EMMA_INSTRUMENT_FRAMEWORK),true)
+      # For instrumented build, if Jacoco is not being included statically
+      # in instrumented packages and has not already been included in the
+      # bootclasspath via ART_APEX_JARS then include Jacoco classes into the
+      # bootclasspath.
+      PRODUCT_BOOT_JARS += jacocoagent
+    endif # EMMA_INSTRUMENT_FRAMEWORK
   endif # EMMA_INSTRUMENT_STATIC
 endif # EMMA_INSTRUMENT
 
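
A hedged illustration of the coverage knobs involved (the values are one example combination, not defaults): jacocoagent is always added to PRODUCT_PACKAGES for instrumented builds, but now only lands in PRODUCT_BOOT_JARS when neither static instrumentation nor framework instrumentation already provides it.

    EMMA_INSTRUMENT := true
    EMMA_INSTRUMENT_STATIC := false
    EMMA_INSTRUMENT_FRAMEWORK := true   # Jacoco reaches the bootclasspath via
                                        # ART_APEX_JARS, so no PRODUCT_BOOT_JARS entry
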
@@ -396,8 +401,4 @@
 PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
     frameworks/base/config/dirty-image-objects:system/etc/dirty-image-objects)
 
-# This property allows enabling Keystore 2.0 selectively for testing.
-# TODO Remove when Keystore 2.0 migration is complete. b/171563717
-PRODUCT_SYSTEM_PROPERTIES += persist.android.security.keystore2.enable=true
-
 $(call inherit-product, $(SRC_TARGET_DIR)/product/runtime_libart.mk)
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 1545780..bb17dda 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -39,16 +39,6 @@
     com.android.tethering:framework-tethering \
     com.android.ipsec:android.net.ipsec.ike
 
-# Add the compatibility library that is needed when android.test.base
-# is removed from the bootclasspath.
-# Default to excluding android.test.base from the bootclasspath.
-ifneq ($(REMOVE_ATB_FROM_BCP),false)
-  PRODUCT_PACKAGES += framework-atb-backward-compatibility
-  PRODUCT_BOOT_JARS += framework-atb-backward-compatibility
-else
-  PRODUCT_BOOT_JARS += android.test.base
-endif
-
 # Minimal configuration for running dex2oat (default argument values).
 # PRODUCT_USES_DEFAULT_ART_CONFIG must be true to enable boot image compilation.
 PRODUCT_USES_DEFAULT_ART_CONFIG := true
diff --git a/target/product/gsi/gsi_skip_mount.cfg b/target/product/gsi/gsi_skip_mount.cfg
index ad3c7d9..28f4349 100644
--- a/target/product/gsi/gsi_skip_mount.cfg
+++ b/target/product/gsi/gsi_skip_mount.cfg
@@ -1,3 +1,9 @@
+# Skip "system" mountpoints.
 /oem
 /product
 /system_ext
+# Skip sub-mountpoints of system mountpoints.
+/oem/*
+/product/*
+/system_ext/*
+/system/*
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index e8f1c2e..25fa68b 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -31,8 +31,10 @@
     system/product/% \
     system/system_ext/%
 
-# Split selinux policy
-PRODUCT_FULL_TREBLE_OVERRIDE := true
+# GSI should always support up-to-date platform features.
+# Keep this value at the latest API level to ensure that the latest build
+# system default configs are applied.
+PRODUCT_SHIPPING_API_LEVEL := 30
 
 # Enable dynamic partitions to facilitate mixing onto Cuttlefish
 PRODUCT_USE_DYNAMIC_PARTITIONS := true
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 143131e..c7ac907 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -57,6 +57,7 @@
 # system server jars which are updated via apex modules.
 # The values should be of the format <apex name>:<jar name>
 PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
+    com.android.art:service-art \
     com.android.permission:service-permission \
 
 PRODUCT_COPY_FILES += \
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index 18f2166..bf958fb 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -231,8 +231,8 @@
 
     input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir]
     # Find all the notice files and md5 them
+    files_with_same_hash = defaultdict(list)
     for input_dir in input_dirs:
-        files_with_same_hash = defaultdict(list)
         for root, dir, files in os.walk(input_dir):
             for file in files:
                 matched = True
@@ -254,8 +254,7 @@
                     file_md5sum = md5sum(filename)
                     files_with_same_hash[file_md5sum].append(filename)
 
-        filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
-
+    filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
     combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)
 
     if html_output_file is not None:
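
The fix hoists files_with_same_hash out of the per-directory loop, so identical notice files found under different --source-dir roots now land in a single fileset instead of only the last directory's results being kept. A minimal sketch of the cross-directory grouping; group_notice_files and its flat list of paths are hypothetical, since the real script walks the directories itself:

    # Sketch of grouping notice files by content hash across all input dirs.
    import hashlib
    from collections import defaultdict

    def md5sum(path):
        with open(path, "rb") as f:
            return hashlib.md5(f.read()).hexdigest()

    def group_notice_files(paths):
        files_with_same_hash = defaultdict(list)   # shared across every dir
        for path in paths:
            files_with_same_hash[md5sum(path)].append(path)
        # One sorted fileset per unique hash, like the script's filesets list.
        return [sorted(files_with_same_hash[h])
                for h in sorted(files_with_same_hash)]
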
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index d8c9cb1..efbf614 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -42,7 +42,46 @@
   # default to "adb". That might not the right policy there, but it's better
   # to be explicit.
   if not prop_list.get_value("persist.sys.usb.config"):
-    prop_list.put("persist.sys.usb.config", "none");
+    prop_list.put("persist.sys.usb.config", "none")
+
+def validate_grf_props(prop_list, sdk_version):
+  """Validate GRF properties if exist.
+
+  If ro.board.first_api_level is defined, check if its value is valid for the
+  sdk version.
+  Also, validate the value of ro.board.api_level if defined.
+
+  Returns:
+    True if the GRF properties are valid.
+  """
+  grf_api_level = prop_list.get_value("ro.board.first_api_level")
+  board_api_level = prop_list.get_value("ro.board.api_level")
+
+  if not grf_api_level:
+    if board_api_level:
+      sys.stderr.write("error: non-GRF device must not define "
+                       "ro.board.api_level\n")
+      return False
+    # non-GRF device skips the GRF validation test
+    return True
+
+  grf_api_level = int(grf_api_level)
+  if grf_api_level > sdk_version:
+    sys.stderr.write("error: ro.board.first_api_level(%d) must be less than "
+                     "or equal to ro.build.version.sdk(%d)\n"
+                     % (grf_api_level, sdk_version))
+    return False
+
+  if board_api_level:
+    board_api_level = int(board_api_level)
+    if board_api_level < grf_api_level or board_api_level > sdk_version:
+      sys.stderr.write("error: ro.board.api_level(%d) must be neither less "
+                       "than ro.board.first_api_level(%d) nor greater than "
+                       "ro.build.version.sdk(%d)\n"
+                       % (board_api_level, grf_api_level, sdk_version))
+      return False
+
+  return True
 
 def validate(prop_list):
   """Validate the properties.
@@ -215,6 +254,7 @@
                       default=False)
   parser.add_argument("filename")
   parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*")
+  parser.add_argument("--sdk-version", type=int, required=True)
   args = parser.parse_args()
 
   if not args.filename.endswith("/build.prop"):
@@ -225,6 +265,8 @@
   mangle_build_prop(props)
   if not override_optional_props(props, args.allow_dup):
     sys.exit(1)
+  if not validate_grf_props(props, args.sdk_version):
+    sys.exit(1)
   if not validate(props):
     sys.exit(1)
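
validate_grf_props() is wired to the new required --sdk-version argument, so the build now fails when the GRF properties disagree with the platform SDK level. A minimal sketch of the expected outcomes, using a stub property list; FakePropList is hypothetical, standing in for the script's own PropList, which exposes the same put/get_value interface:

    # Hypothetical stand-in for PropList, just enough to drive the check.
    class FakePropList:
        def __init__(self):
            self._props = {}
        def put(self, key, value):
            self._props[key] = value
        def get_value(self, key):
            return self._props.get(key, "")

    props = FakePropList()
    props.put("ro.board.first_api_level", "30")
    # validate_grf_props(props, 30) -> True  (first_api_level <= sdk_version)
    # validate_grf_props(props, 29) -> False (first_api_level > sdk_version)
    props.put("ro.board.api_level", "31")
    # validate_grf_props(props, 31) -> True
    # validate_grf_props(props, 30) -> False (api_level > sdk_version)
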
 
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index d7de85b..a56c305 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -134,13 +134,12 @@
       "board_uses_vendorimage") == "true"
 
   if (OPTIONS.rebuild_recovery and not board_uses_vendorimage and
-          recovery_img is not None and boot_img is not None):
+      recovery_img is not None and boot_img is not None):
     logger.info("Building new recovery patch on system at system/vendor")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
-                          "IMAGES", "system.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
               block_list=block_list)
   return img.name
@@ -183,13 +182,12 @@
       "board_uses_vendorimage") == "true"
 
   if (OPTIONS.rebuild_recovery and board_uses_vendorimage and
-          recovery_img is not None and boot_img is not None):
+      recovery_img is not None and boot_img is not None):
     logger.info("Building new recovery patch on vendor")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
-                          "IMAGES", "vendor.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
               block_list=block_list)
   return img.name
@@ -261,7 +259,6 @@
       block_list=block_list)
   return img.name
 
-
 def AddOdmDlkm(output_zip):
   """Turn the contents of OdmDlkm into an odm_dlkm image and store it in output_zip."""
 
@@ -313,7 +310,6 @@
   img.Write()
   return img.name
 
-
 def AddPvmfw(output_zip):
   """Adds the pvmfw image.
 
@@ -349,7 +345,6 @@
   img.Write()
   return img.name
 
-
 def AddCustomImages(output_zip, partition_name):
   """Adds and signs custom images in IMAGES/.
 
@@ -378,16 +373,15 @@
       key_path, algorithm, extra_args)
 
   for img_name in OPTIONS.info_dict.get(
-          "avb_{}_image_list".format(partition_name)).split():
-    custom_image = OutputFile(
-        output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
+      "avb_{}_image_list".format(partition_name)).split():
+    custom_image = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
     if os.path.exists(custom_image.name):
       continue
 
     custom_image_prebuilt_path = os.path.join(
         OPTIONS.input_tmp, "PREBUILT_IMAGES", img_name)
     assert os.path.exists(custom_image_prebuilt_path), \
-        "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
+      "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
 
     shutil.copy(custom_image_prebuilt_path, custom_image.name)
 
@@ -685,12 +679,11 @@
 
   return ((os.path.isdir(
       os.path.join(OPTIONS.input_tmp, partition_name.upper())) and
-      OPTIONS.info_dict.get(
-      "building_{}_image".format(partition_name)) == "true") or
-      os.path.exists(
-      os.path.join(OPTIONS.input_tmp, "IMAGES",
-                   "{}.img".format(partition_name))))
-
+           OPTIONS.info_dict.get(
+               "building_{}_image".format(partition_name)) == "true") or
+          os.path.exists(
+              os.path.join(OPTIONS.input_tmp, "IMAGES",
+                           "{}.img".format(partition_name))))
 
 def AddApexInfo(output_zip):
   apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp)
@@ -779,7 +772,7 @@
     boot_images = OPTIONS.info_dict.get("boot_images")
     if boot_images is None:
       boot_images = "boot.img"
-    for index, b in enumerate(boot_images.split()):
+    for index,b in enumerate(boot_images.split()):
       # common.GetBootableImage() returns the image directly if present.
       boot_image = common.GetBootableImage(
           "IMAGES/" + b, b, OPTIONS.input_tmp, "BOOT")
@@ -934,7 +927,7 @@
 
   if OPTIONS.info_dict.get("build_super_partition") == "true":
     if OPTIONS.info_dict.get(
-            "build_retrofit_dynamic_partitions_ota_package") == "true":
+        "build_retrofit_dynamic_partitions_ota_package") == "true":
       banner("super split images")
       AddSuperSplit(output_zip)
 
@@ -951,7 +944,9 @@
 
     # Generate care_map.pb for ab_partitions, then write this file to
     # target_files package.
-    AddCareMapForAbOta(output_zip, ab_partitions, partitions)
+    output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
+    AddCareMapForAbOta(output_zip if output_zip else output_care_map,
+                       ab_partitions, partitions)
 
   # Radio images that need to be packed into IMAGES/, and product-img.zip.
   pack_radioimages_txt = os.path.join(
@@ -1000,7 +995,6 @@
   AddImagesToTargetFiles(args[0])
   logger.info("done.")
 
-
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 3726df6..301d0da 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -73,9 +73,9 @@
   """
   cmd = ["find", path, "-print"]
   output = common.RunAndCheckOutput(cmd, verbose=False)
-  # increase by > 4% as number of files and directories is not whole picture.
+  # increase by > 6% as the number of files and directories is not the
+  # whole picture.
   inodes = output.count('\n')
-  spare_inodes = inodes * 4 // 100
+  spare_inodes = inodes * 6 // 100
   min_spare_inodes = 12
   if spare_inodes < min_spare_inodes:
     spare_inodes = min_spare_inodes
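
The inode headroom grows from >4% to >6% of the entries counted under the staging path, with the floor of 12 spare inodes unchanged. A quick sketch of the new arithmetic; spare_inodes_for is an illustrative helper, not part of build_image.py:

    # Mirrors the updated spare-inode sizing in GetInodeUsage().
    def spare_inodes_for(entry_count):
        spare = entry_count * 6 // 100   # >6% headroom over counted entries
        return max(spare, 12)            # never fewer than 12 spare inodes

    print(spare_inodes_for(100))    # 12 (6 is below the minimum of 12)
    print(spare_inodes_for(1000))   # 60
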
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index f025bb6..83425cc 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -138,7 +138,6 @@
 # existing search paths.
 RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
 
-
 class ErrorCode(object):
   """Define error_codes for failures that happen during the actual
   update package installation.
@@ -227,7 +226,6 @@
 def SetHostToolLocation(tool_name, location):
   OPTIONS.host_tools[tool_name] = location
 
-
 def FindHostToolPath(tool_name):
   """Finds the path to the host tool.
 
@@ -248,7 +246,6 @@
 
   return tool_name
 
-
 def Run(args, verbose=None, **kwargs):
   """Creates and returns a subprocess.Popen object.
 
@@ -464,7 +461,7 @@
     """Returns the inquired build property for the provided partition."""
 
     # Boot image uses ro.[product.]bootimage instead of boot.
-    prop_partition = "bootimage" if partition == "boot" else partition
+    prop_partition =  "bootimage" if partition == "boot" else partition
 
     # If provided a partition for this property, only look within that
     # partition's build.prop.
@@ -659,6 +656,13 @@
   LZ4 = 1
   GZ = 2
 
+def _GetRamdiskFormat(info_dict):
+  if info_dict.get('lz4_ramdisks') == 'true':
+    ramdisk_format = RamdiskFormat.LZ4
+  else:
+    ramdisk_format = RamdiskFormat.GZ
+  return ramdisk_format
+
 def LoadInfoDict(input_file, repacking=False):
   """Loads the key/value pairs from the given input target_files.
 
@@ -760,10 +764,7 @@
 
   # Load recovery fstab if applicable.
   d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
-  if d.get('lz4_ramdisks') == 'true':
-    ramdisk_format = RamdiskFormat.LZ4
-  else:
-    ramdisk_format = RamdiskFormat.GZ
+  ramdisk_format = _GetRamdiskFormat(d)
 
   # Tries to load the build props for all partitions with care_map, including
   # system and vendor.
@@ -780,8 +781,7 @@
     for partition in PARTITIONS_WITH_BUILD_PROP:
       fingerprint = build_info.GetPartitionFingerprint(partition)
       if fingerprint:
-        d["avb_{}_salt".format(partition)] = sha256(
-            fingerprint.encode()).hexdigest()
+        d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
   try:
     d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
   except KeyError:
@@ -789,6 +789,7 @@
   return d
 
 
+
 def LoadListFromFile(file_path):
   with open(file_path) as f:
     return f.read().splitlines()
@@ -1105,7 +1106,7 @@
     return " ".join(sorted(combined))
 
   if (framework_dict.get("use_dynamic_partitions") !=
-          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+      "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
     raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
 
   merged_dict = {"use_dynamic_partitions": "true"}
@@ -1450,7 +1451,8 @@
     AddAftlInclusionProof(image_path)
 
 
-def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False):
+def _MakeRamdisk(sourcedir, fs_config_file=None,
+                 ramdisk_format=RamdiskFormat.GZ):
   ramdisk_img = tempfile.NamedTemporaryFile()
 
   if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
@@ -1459,11 +1461,13 @@
   else:
     cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
   p1 = Run(cmd, stdout=subprocess.PIPE)
-  if lz4_ramdisks:
+  if ramdisk_format == RamdiskFormat.LZ4:
     p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
              stdout=ramdisk_img.file.fileno())
-  else:
+  elif ramdisk_format == RamdiskFormat.GZ:
     p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
+  else:
+    raise ValueError("Only support lz4 or minigzip ramdisk format.")
 
   p2.wait()
   p1.wait()
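
_MakeRamdisk() now takes a RamdiskFormat value instead of a boolean, and _GetRamdiskFormat() centralizes the mapping from the lz4_ramdisks flag in the info dict. A minimal sketch of the selection logic, assuming the same enum values and compressor command lines as the diff; the helper functions themselves are illustrative:

    # Sketch of the info-dict to compressor-command mapping.
    import enum

    class RamdiskFormat(enum.IntEnum):
        # Mirrors the values declared in common.py.
        LZ4 = 1
        GZ = 2

    def ramdisk_format_from(info_dict):
        # 'lz4_ramdisks' == 'true' selects LZ4; anything else falls back to gzip.
        if info_dict.get('lz4_ramdisks') == 'true':
            return RamdiskFormat.LZ4
        return RamdiskFormat.GZ

    def compressor_cmd(fmt):
        if fmt == RamdiskFormat.LZ4:
            return ["lz4", "-l", "-12", "--favor-decSpeed"]
        if fmt == RamdiskFormat.GZ:
            return ["minigzip"]
        raise ValueError("Only lz4 and gzip ramdisk formats are supported.")

    print(compressor_cmd(ramdisk_format_from({'lz4_ramdisks': 'true'})))
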
@@ -1510,8 +1514,9 @@
   img = tempfile.NamedTemporaryFile()
 
   if has_ramdisk:
-    use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
-    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4)
+    ramdisk_format = _GetRamdiskFormat(info_dict)
+    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
+                               ramdisk_format=ramdisk_format)
 
   # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
@@ -1583,7 +1588,7 @@
   RunAndCheckOutput(cmd)
 
   if (info_dict.get("boot_signer") == "true" and
-          info_dict.get("verity_key")):
+      info_dict.get("verity_key")):
     # Hard-code the path as "/boot" for two-step special recovery image (which
     # will be loaded into /boot during the two-step OTA).
     if two_step_image:
@@ -1699,8 +1704,8 @@
 
   img = tempfile.NamedTemporaryFile()
 
-  use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
-  ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4)
+  ramdisk_format = _GetRamdiskFormat(info_dict)
+  ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
 
   # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
@@ -1748,19 +1753,16 @@
   if os.access(fn, os.F_OK):
     ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
     for ramdisk_fragment in ramdisk_fragments:
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
-                        ramdisk_fragment, "mkbootimg_args")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
       cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
-                        ramdisk_fragment, "prebuilt_ramdisk")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
       # Use prebuilt image if found, else create ramdisk from supplied files.
       if os.access(fn, os.F_OK):
         ramdisk_fragment_pathname = fn
       else:
-        ramdisk_fragment_root = os.path.join(
-            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
-        ramdisk_fragment_img = _MakeRamdisk(
-            ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+        ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
+                                            ramdisk_format=ramdisk_format)
         ramdisk_fragment_imgs.append(ramdisk_fragment_img)
         ramdisk_fragment_pathname = ramdisk_fragment_img.name
       cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
@@ -3531,7 +3533,7 @@
 
     for g in tgt_groups:
       for p in shlex.split(info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
+          "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in target super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3539,7 +3541,7 @@
 
     for g in src_groups:
       for p in shlex.split(source_info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
+          "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in source super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3648,7 +3650,7 @@
       if u.src_size is not None and u.tgt_size is None:
         append('remove_group %s' % g)
       if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size > u.tgt_size):
+          u.src_size > u.tgt_size):
         comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))
 
@@ -3657,7 +3659,7 @@
         comment('Add group %s with maximum size %d' % (g, u.tgt_size))
         append('add_group %s %d' % (g, u.tgt_size))
       if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size < u.tgt_size):
+          u.src_size < u.tgt_size):
         comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))
 
@@ -3691,8 +3693,7 @@
   """
   tmp_dir = MakeTempDir('boot_', suffix='.img')
   try:
-    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
-                       boot_img, '--out', tmp_dir])
+    RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
     ramdisk = os.path.join(tmp_dir, 'ramdisk')
     if not os.path.isfile(ramdisk):
       logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
@@ -3714,14 +3715,13 @@
     # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
     # the host environment.
     RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
-                      cwd=extracted_ramdisk)
+               cwd=extracted_ramdisk)
 
     for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
       prop_file = os.path.join(extracted_ramdisk, search_path)
       if os.path.isfile(prop_file):
         return prop_file
-      logger.warning(
-          'Unable to get boot image timestamp: no %s in ramdisk', search_path)
+      logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
 
     return None
 
@@ -3754,8 +3754,7 @@
     timestamp = props.GetProp('ro.bootimage.build.date.utc')
     if timestamp:
       return int(timestamp)
-    logger.warning(
-        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+    logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
     return None
 
   except ExternalError as e:
@@ -3802,15 +3801,18 @@
   return [which, care_map_ranges.to_string_raw()]
 
 
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
+def AddCareMapForAbOta(output_file, ab_partitions, image_paths):
   """Generates and adds care_map.pb for a/b partition that has care_map.
 
   Args:
-    output_zip: The output zip file (needs to be already open), or None to
-        write care_map.pb to OPTIONS.input_tmp/.
+    output_file: The output zip file (needs to be already open), or the
+        file path to write care_map.pb to.
     ab_partitions: The list of A/B partitions.
     image_paths: A map from the partition name to the image path.
   """
+  if not output_file:
+    raise ExternalError('Expected output_file for AddCareMapForAbOta')
+
   care_map_list = []
   for partition in ab_partitions:
     partition = partition.strip()
@@ -3821,8 +3823,13 @@
     avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
     if (verity_block_device in OPTIONS.info_dict or
             OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+      if partition not in image_paths:
+        logger.warning('Potential partition with care_map missing from images: %s',
+                       partition)
+        continue
       image_path = image_paths[partition]
-      assert os.path.exists(image_path)
+      if not os.path.exists(image_path):
+        raise ExternalError('Expected image at path {}'.format(image_path))
 
       care_map = GetCareMap(partition, image_path)
       if not care_map:
@@ -3860,10 +3867,17 @@
   care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
   RunAndCheckOutput(care_map_gen_cmd)
 
+  if not isinstance(output_file, zipfile.ZipFile):
+    shutil.copy(temp_care_map, output_file)
+    return
+  # output_file is a zip file
   care_map_path = "META/care_map.pb"
-  if output_zip and care_map_path not in output_zip.namelist():
-    ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
-  else:
+  if care_map_path in output_file.namelist():
+    # Copy the temp file into the OPTIONS.input_tmp dir and update the
+    # replace_updated_files_list used by add_img_to_target_files
+    if not OPTIONS.replace_updated_files_list:
+      OPTIONS.replace_updated_files_list = []
     shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
-    if output_zip:
-      OPTIONS.replace_updated_files_list.append(care_map_path)
+    OPTIONS.replace_updated_files_list.append(care_map_path)
+  else:
+    ZipWrite(output_file, temp_care_map, arcname=care_map_path)
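
AddCareMapForAbOta() now accepts either an open zip or a destination path, so callers that never open a target-files zip (such as the merged-build path below) can still emit META/care_map.pb. A small sketch of the dispatch; write_care_map is a hypothetical helper, and the real function also generates the protobuf before this step:

    # Hypothetical helper mirroring the new zip-vs-path dispatch.
    import shutil
    import zipfile

    def write_care_map(output_file, temp_care_map, arcname="META/care_map.pb"):
        if not isinstance(output_file, zipfile.ZipFile):
            # Plain destination path: copy the generated protobuf directly.
            shutil.copy(temp_care_map, output_file)
        elif arcname not in output_file.namelist():
            # Open zip without an existing entry: write it in place.
            output_file.write(temp_care_map, arcname=arcname)
        # When the entry already exists, the real code instead stages a copy
        # under OPTIONS.input_tmp and records it in replace_updated_files_list.
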
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index c2fd450..17d3030 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -96,12 +96,15 @@
 from xml.etree import ElementTree
 
 import add_img_to_target_files
+import build_image
 import build_super_image
 import check_target_files_vintf
 import common
 import img_from_target_files
 import find_shareduid_violation
 import ota_from_target_files
+import sparse_img
+import verity_utils
 
 from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
 
@@ -357,8 +360,9 @@
           ' includes %s.', partition, partition)
       has_error = True
 
-  if ('dynamic_partition_list' in framework_misc_info_keys) or (
-          'super_partition_groups' in framework_misc_info_keys):
+  if ('dynamic_partition_list'
+      in framework_misc_info_keys) or ('super_partition_groups'
+                                       in framework_misc_info_keys):
     logger.error('Dynamic partition misc info keys should come from '
                  'the vendor instance of META/misc_info.txt.')
     has_error = True
@@ -449,8 +453,8 @@
     merged_dict[key] = framework_dict[key]
 
   # Merge misc info keys used for Dynamic Partitions.
-  if (merged_dict.get('use_dynamic_partitions') == 'true') and (
-          framework_dict.get('use_dynamic_partitions') == 'true'):
+  if (merged_dict.get('use_dynamic_partitions')
+      == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
     merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
         framework_dict=framework_dict, vendor_dict=merged_dict)
     merged_dict.update(merged_dynamic_partitions_dict)
@@ -695,7 +699,7 @@
   vendor_plat_version_file = get_file('vendor',
                                       'etc/selinux/plat_sepolicy_vers.txt')
   if not vendor_plat_version_file or not os.path.exists(
-          vendor_plat_version_file):
+      vendor_plat_version_file):
     raise ExternalError('Missing required sepolicy file %s',
                         vendor_plat_version_file)
   with open(vendor_plat_version_file) as f:
@@ -735,6 +739,42 @@
   return cmd
 
 
+def generate_care_map(partitions, output_target_files_dir):
+  """Generates a merged META/care_map.pb file in the output target files dir.
+
+  Depends on the info dict from META/misc_info.txt, as well as built images
+  within IMAGES/.
+
+  Args:
+    partitions: A list of partitions to potentially include in the care map.
+    output_target_files_dir: The name of a directory that will be used to create
+      the output target files package after all the special cases are processed.
+  """
+  OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+  partition_image_map = {}
+  for partition in partitions:
+    image_path = os.path.join(output_target_files_dir, 'IMAGES',
+                              '{}.img'.format(partition))
+    if os.path.exists(image_path):
+      partition_image_map[partition] = image_path
+      # Regenerated images should have their image_size property already set.
+      image_size_prop = '{}_image_size'.format(partition)
+      if image_size_prop not in OPTIONS.info_dict:
+        # Images copied directly from input target files packages will need
+        # their image sizes calculated.
+        partition_size = sparse_img.GetImagePartitionSize(image_path)
+        image_props = build_image.ImagePropFromGlobalDict(
+            OPTIONS.info_dict, partition)
+        verity_image_builder = verity_utils.CreateVerityImageBuilder(
+            image_props)
+        image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
+        OPTIONS.info_dict[image_size_prop] = image_size
+
+  AddCareMapForAbOta(
+      os.path.join(output_target_files_dir, 'META', 'care_map.pb'),
+      PARTITIONS_WITH_CARE_MAP, partition_image_map)
+
+
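
The generate_care_map() function added above loads the merged info dict and backfills <partition>_image_size for images copied straight from the input packages before handing a partition-to-image map to AddCareMapForAbOta(). A minimal sketch of assembling that map; the directory layout and default partition list below are assumptions for illustration:

    # Sketch of building the partition -> image path map for the care map.
    import os

    def build_partition_image_map(output_dir,
                                  partitions=("system", "vendor", "product")):
        image_map = {}
        for partition in partitions:
            image_path = os.path.join(output_dir, "IMAGES",
                                      "{}.img".format(partition))
            if os.path.exists(image_path):
                # Only partitions with a built image get a care map entry.
                image_map[partition] = image_path
        return image_map
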
 def process_special_cases(framework_target_files_temp_dir,
                           vendor_target_files_temp_dir,
                           output_target_files_temp_dir,
@@ -1089,14 +1129,14 @@
   if not output_target_files:
     return
 
+  # Create the merged META/care_map.pb
+  generate_care_map(partition_map.keys(), output_target_files_temp_dir)
+
   output_zip = create_target_files_archive(output_target_files,
                                            output_target_files_temp_dir,
                                            temp_dir)
 
   # Create the IMG package from the merged target files package.
-  with zipfile.ZipFile(output_zip, allowZip64=True) as zfp:
-    AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)
-
   if output_img:
     img_from_target_files.main([output_zip, output_img])
 
@@ -1168,8 +1208,7 @@
     elif o == '--vendor-target-files':
       OPTIONS.vendor_target_files = a
     elif o == '--other-item-list':
-      logger.warning(
-          '--other-item-list has been renamed to --vendor-item-list')
+      logger.warning('--other-item-list has been renamed to --vendor-item-list')
       OPTIONS.vendor_item_list = a
     elif o == '--vendor-item-list':
       OPTIONS.vendor_item_list = a
@@ -1225,7 +1264,7 @@
   if (args or OPTIONS.framework_target_files is None or
       OPTIONS.vendor_target_files is None or
       (OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
-          (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
+      (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
     common.Usage(__doc__)
     sys.exit(1)
 
@@ -1251,9 +1290,9 @@
     output_item_list = None
 
   if not validate_config_lists(
-          framework_item_list=framework_item_list,
-          framework_misc_info_keys=framework_misc_info_keys,
-          vendor_item_list=vendor_item_list):
+      framework_item_list=framework_item_list,
+      framework_misc_info_keys=framework_misc_info_keys,
+      vendor_item_list=vendor_item_list):
     sys.exit(1)
 
   call_func_with_temp_dir(
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index 3d5300e..a5850d3 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -22,9 +22,9 @@
 import test_utils
 from add_img_to_target_files import (
     AddPackRadioImages,
-    CheckAbOtaImages, GetCareMap)
+    CheckAbOtaImages)
 from rangelib import RangeSet
-from common import AddCareMapForAbOta
+from common import AddCareMapForAbOta, GetCareMap
 
 
 OPTIONS = common.OPTIONS
@@ -124,9 +124,9 @@
   def _test_AddCareMapForAbOta():
     """Helper function to set up the test for test_AddCareMapForAbOta()."""
     OPTIONS.info_dict = {
-        'extfs_sparse_flag': '-s',
-        'system_image_size': 65536,
-        'vendor_image_size': 40960,
+        'extfs_sparse_flag' : '-s',
+        'system_image_size' : 65536,
+        'vendor_image_size' : 40960,
         'system_verity_block_device': '/dev/block/system',
         'vendor_verity_block_device': '/dev/block/vendor',
         'system.build.prop': common.PartitionBuildProps.FromDictionary(
@@ -154,8 +154,8 @@
         (0xCAC2, 12)])
 
     image_paths = {
-        'system': system_image,
-        'vendor': vendor_image,
+        'system' : system_image,
+        'vendor' : vendor_image,
     }
     return image_paths
 
@@ -175,9 +175,9 @@
   def test_AddCareMapForAbOta(self):
     image_paths = self._test_AddCareMapForAbOta()
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
                 "ro.system.build.fingerprint",
                 "google/sailfish/12345:user/dev-keys",
@@ -192,10 +192,10 @@
     """Partitions without care_map should be ignored."""
     image_paths = self._test_AddCareMapForAbOta()
 
-    AddCareMapForAbOta(
-        None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(
+        care_map_file, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
                 "ro.system.build.fingerprint",
                 "google/sailfish/12345:user/dev-keys",
@@ -227,9 +227,9 @@
         ),
     }
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
                 "ro.system.build.fingerprint",
                 "google/sailfish/12345:user/dev-keys",
@@ -244,20 +244,19 @@
     """Tests the case for partitions without fingerprint."""
     image_paths = self._test_AddCareMapForAbOta()
     OPTIONS.info_dict = {
-        'extfs_sparse_flag': '-s',
-        'system_image_size': 65536,
-        'vendor_image_size': 40960,
+        'extfs_sparse_flag' : '-s',
+        'system_image_size' : 65536,
+        'vendor_image_size' : 40960,
         'system_verity_block_device': '/dev/block/system',
         'vendor_verity_block_device': '/dev/block/vendor',
     }
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
-                "unknown", 'vendor', RangeSet(
-        "0-9").to_string_raw(), "unknown",
-        "unknown"]
+                "unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
+                "unknown"]
 
     self._verifyCareMap(expected, care_map_file)
 
@@ -283,9 +282,9 @@
         ),
     }
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
                 "ro.system.build.thumbprint",
                 "google/sailfish/123:user/dev-keys",
@@ -302,9 +301,9 @@
     # Remove vendor_image_size to invalidate the care_map for vendor.img.
     del OPTIONS.info_dict['vendor_image_size']
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
                 "ro.system.build.fingerprint",
                 "google/sailfish/12345:user/dev-keys"]
@@ -319,25 +318,26 @@
     del OPTIONS.info_dict['system_image_size']
     del OPTIONS.info_dict['vendor_image_size']
 
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
 
-    self.assertFalse(
-        os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')))
+    self.assertFalse(os.path.exists(care_map_file))
 
   def test_AddCareMapForAbOta_verityNotEnabled(self):
     """No care_map.pb should be generated if verity not enabled."""
     image_paths = self._test_AddCareMapForAbOta()
     OPTIONS.info_dict = {}
-    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
-
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths)
+
     self.assertFalse(os.path.exists(care_map_file))
 
   def test_AddCareMapForAbOta_missingImageFile(self):
     """Missing image file should be considered fatal."""
     image_paths = self._test_AddCareMapForAbOta()
     image_paths['vendor'] = ''
-    self.assertRaises(AssertionError, AddCareMapForAbOta, None,
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    self.assertRaises(common.ExternalError, AddCareMapForAbOta, care_map_file,
                       ['system', 'vendor'], image_paths)
 
   @test_utils.SkipIfExternalToolsUnavailable()
@@ -397,8 +397,8 @@
         (0xCAC3, 4),
         (0xCAC1, 6)])
     OPTIONS.info_dict = {
-        'extfs_sparse_flag': '-s',
-        'system_image_size': 53248,
+        'extfs_sparse_flag' : '-s',
+        'system_image_size' : 53248,
     }
     name, care_map = GetCareMap('system', sparse_image)
     self.assertEqual('system', name)
@@ -413,14 +413,14 @@
         (0xCAC3, 4),
         (0xCAC1, 6)])
     OPTIONS.info_dict = {
-        'extfs_sparse_flag': '-s',
-        'system_image_size': -45056,
+        'extfs_sparse_flag' : '-s',
+        'system_image_size' : -45056,
     }
     self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image)
 
   def test_GetCareMap_nonSparseImage(self):
     OPTIONS.info_dict = {
-        'system_image_size': 53248,
+        'system_image_size' : 53248,
     }
     # 'foo' is the image filename, which is expected to be not used by
     # GetCareMap().
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index 12d52e5..236f9ed 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -53,7 +53,7 @@
 
     p.make_as_comment()
     self.assertTrue(p.is_comment())
-    self.assertTrue("# a comment\n#a=b", str(p))
+    self.assertEqual("# a comment\n#a=b", str(p))
 
 class PropListTestcase(unittest.TestCase):
   def setUp(self):
@@ -251,5 +251,27 @@
         # because it's explicitly allowed
         self.assertTrue(override_optional_props(props, allow_dup=True))
 
+  def test_validateGrfProps(self):
+    stderr_redirect = io.StringIO()
+    with contextlib.redirect_stderr(stderr_redirect):
+      props = PropList("hello")
+      props.put("ro.board.first_api_level","25")
+
+      # ro.board.first_api_level must be less than or equal to the sdk version
+      self.assertFalse(validate_grf_props(props, 20))
+      self.assertTrue(validate_grf_props(props, 26))
+      self.assertTrue(validate_grf_props(props, 35))
+
+      # manually set ro.board.api_level to an invalid value
+      props.put("ro.board.api_level","20")
+      self.assertFalse(validate_grf_props(props, 26))
+
+      # comment out the invalid ro.board.api_level value set above
+      props.get_all_props()[-1].make_as_comment()
+      # manually set ro.board.api_level to a valid value
+      props.put("ro.board.api_level","26")
+      self.assertTrue(validate_grf_props(props, 26))
+      # ro.board.api_level must be less than or equal to the sdk version
+      self.assertFalse(validate_grf_props(props, 25))
+
 if __name__ == '__main__':
     unittest.main(verbosity=2)
diff --git a/tools/zipalign/ZipAlignMain.cpp b/tools/zipalign/ZipAlignMain.cpp
index 49be916..47ebd12 100644
--- a/tools/zipalign/ZipAlignMain.cpp
+++ b/tools/zipalign/ZipAlignMain.cpp
@@ -39,7 +39,7 @@
         "  <align>: alignment in bytes, e.g. '4' provides 32-bit alignment\n");
     fprintf(stderr, "  -c: check alignment only (does not modify file)\n");
     fprintf(stderr, "  -f: overwrite existing outfile.zip\n");
-    fprintf(stderr, "  -p: memory page alignment for stored shared object files\n");
+    fprintf(stderr, "  -p: page-align uncompressed .so files\n");
     fprintf(stderr, "  -v: verbose output\n");
     fprintf(stderr, "  -z: recompress using Zopfli\n");
 }