Merge "Disable build-time debugfs restrictions on GSI builds"
diff --git a/core/Makefile b/core/Makefile
index 26b19c0..194e372 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1034,13 +1034,22 @@
 
 else # TARGET_NO_KERNEL == "true"
 ifdef BOARD_PREBUILT_BOOTIMAGE
-ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-# Remove when b/63676296 is resolved.
-$(error Prebuilt bootimage is only supported for AB targets)
-endif
 INTERNAL_PREBUILT_BOOTIMAGE := $(BOARD_PREBUILT_BOOTIMAGE)
 INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
-$(eval $(call copy-one-file,$(INTERNAL_PREBUILT_BOOTIMAGE),$(INSTALLED_BOOTIMAGE_TARGET)))
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
+	cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
+	$(AVBTOOL) add_hash_footer \
+	    --image $@ \
+	    --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
+	    --partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
+	    $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
+else
+$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE)
+	cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
+endif # BOARD_AVB_ENABLE
+
 else # BOARD_PREBUILT_BOOTIMAGE not defined
 INSTALLED_BOOTIMAGE_TARGET :=
 endif # BOARD_PREBUILT_BOOTIMAGE
@@ -1594,7 +1603,7 @@
     $(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
     $(if $(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE),$(hide) echo "system_other_size=$(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE)" >> $(1))
     $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_fs_type=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
-    $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_fs_compress=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+    $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_f2fs_compress=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
     $(if $(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "system_f2fs_sldc_flags=$(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
     $(if $(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_extfs_inode_count=$(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT)" >> $(1))
     $(if $(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_extfs_rsv_pct=$(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT)" >> $(1))
@@ -1629,6 +1638,8 @@
 )
 $(if $(filter $(2),vendor),\
     $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+    $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "vendor_f2fs_compress=$(BOARD_VENDORIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+    $(if $(BOARD_VENDORIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "vendor_f2fs_sldc_flags=$(BOARD_VENDORIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
     $(if $(BOARD_VENDORIMAGE_EXTFS_INODE_COUNT),$(hide) echo "vendor_extfs_inode_count=$(BOARD_VENDORIMAGE_EXTFS_INODE_COUNT)" >> $(1))
     $(if $(BOARD_VENDORIMAGE_EXTFS_RSV_PCT),$(hide) echo "vendor_extfs_rsv_pct=$(BOARD_VENDORIMAGE_EXTFS_RSV_PCT)" >> $(1))
     $(if $(BOARD_VENDORIMAGE_PARTITION_SIZE),$(hide) echo "vendor_size=$(BOARD_VENDORIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1644,6 +1655,8 @@
 )
 $(if $(filter $(2),product),\
     $(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "product_fs_type=$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+    $(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "product_f2fs_compress=$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+    $(if $(BOARD_PRODUCTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "product_f2fs_sldc_flags=$(BOARD_PRODUCTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
     $(if $(BOARD_PRODUCTIMAGE_EXTFS_INODE_COUNT),$(hide) echo "product_extfs_inode_count=$(BOARD_PRODUCTIMAGE_EXTFS_INODE_COUNT)" >> $(1))
     $(if $(BOARD_PRODUCTIMAGE_EXTFS_RSV_PCT),$(hide) echo "product_extfs_rsv_pct=$(BOARD_PRODUCTIMAGE_EXTFS_RSV_PCT)" >> $(1))
     $(if $(BOARD_PRODUCTIMAGE_PARTITION_SIZE),$(hide) echo "product_size=$(BOARD_PRODUCTIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1659,6 +1672,8 @@
 )
 $(if $(filter $(2),system_ext),\
     $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_ext_fs_type=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+    $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_ext_f2fs_compress=$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+    $(if $(BOARD_SYSTEM_EXTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "system_ext_f2fs_sldc_flags=$(BOARD_SYSTEM_EXTIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
     $(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_ext_extfs_inode_count=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_INODE_COUNT)" >> $(1))
     $(if $(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_ext_extfs_rsv_pct=$(BOARD_SYSTEM_EXTIMAGE_EXTFS_RSV_PCT)" >> $(1))
     $(if $(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE),$(hide) echo "system_ext_size=$(BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE)" >> $(1))
@@ -1688,6 +1703,8 @@
 )
 $(if $(filter $(2),vendor_dlkm),\
     $(if $(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_dlkm_fs_type=$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+    $(if $(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "vendor_dlkm_f2fs_compress=$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+    $(if $(BOARD_VENDOR_DLKMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "vendor_dlkm_f2fs_sldc_flags=$(BOARD_VENDOR_DLKMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
     $(if $(BOARD_VENDOR_DLKMIMAGE_EXTFS_INODE_COUNT),$(hide) echo "vendor_dlkm_extfs_inode_count=$(BOARD_VENDOR_DLKMIMAGE_EXTFS_INODE_COUNT)" >> $(1))
     $(if $(BOARD_VENDOR_DLKMIMAGE_EXTFS_RSV_PCT),$(hide) echo "vendor_dlkm_extfs_rsv_pct=$(BOARD_VENDOR_DLKMIMAGE_EXTFS_RSV_PCT)" >> $(1))
     $(if $(BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE),$(hide) echo "vendor_dlkm_size=$(BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE)" >> $(1))
@@ -3399,7 +3416,7 @@
 # $(INSTALLED_VENDORIMAGE_TARGET)" for "system vendor".
 # (1): list of partitions like "system", "vendor" or "system product system_ext".
 define images-for-partitions
-$(strip $(foreach item,$(1),$(INSTALLED_$(call to-upper,$(item))IMAGE_TARGET)))
+$(strip $(foreach item,$(1),$(if $(filter $(item),system_other),$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(INSTALLED_$(call to-upper,$(item))IMAGE_TARGET))))
 endef
 
 # -----------------------------------------------------------------
@@ -4128,13 +4145,20 @@
 
 ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
 
+PARTITIONS_AND_OTHER_IN_SUPER := $(BOARD_SUPER_PARTITION_PARTITION_LIST)
+
+# Add the system_other image to the misc_info, because factory OTA may install system_other to the super partition.
+ifdef BUILDING_SYSTEM_OTHER_IMAGE
+PARTITIONS_AND_OTHER_IN_SUPER += system_other
+endif # BUILDING_SYSTEM_OTHER_IMAGE
+
 # $(1): misc_info.txt
 # #(2): optional log file
 define check-all-partition-sizes-target
   mkdir -p $(dir $(1))
   rm -f $(1)
   $(call dump-super-image-info, $(1))
-  $(foreach partition,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
+  $(foreach partition,$(PARTITIONS_AND_OTHER_IN_SUPER), \
     echo "$(partition)_image="$(call images-for-partitions,$(partition)) >> $(1);)
   $(CHECK_PARTITION_SIZES) $(if $(2),--logfile $(2),-v) $(1)
 endef
@@ -4145,7 +4169,7 @@
 
 $(check_all_partition_sizes_log): \
     $(CHECK_PARTITION_SIZES) \
-    $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+    $(call images-for-partitions,$(PARTITIONS_AND_OTHER_IN_SUPER))
 	$(call check-all-partition-sizes-target, \
 	  $(call intermediates-dir-for,PACKAGING,check-all-partition-sizes)/misc_info.txt, \
 	  $@)
@@ -4233,6 +4257,7 @@
   e2fsdroid \
   fc_sort \
   fec \
+  fsck.f2fs \
   fs_config \
   generate_verity_key \
   host_init_verifier \
@@ -4747,6 +4772,7 @@
 ifdef BUILDING_VENDOR_BOOT_IMAGE
   $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FILES)
   $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS)
+  $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_BOOTCONFIG_TARGET)
   # The vendor ramdisk may be built from the recovery ramdisk.
   ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
     $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
@@ -5111,12 +5137,17 @@
 	$(hide) mkdir -p $(zip_root)/IMAGES
 	$(hide) cp $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(zip_root)/IMAGES/
 endif
+ifndef BOARD_PREBUILT_BOOTIMAGE
 ifneq (,$(INTERNAL_PREBUILT_BOOTIMAGE) $(filter true,$(BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES)))
 ifdef INSTALLED_BOOTIMAGE_TARGET
 	$(hide) mkdir -p $(zip_root)/IMAGES
 	$(hide) cp $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/IMAGES/
 endif # INSTALLED_BOOTIMAGE_TARGET
 endif # INTERNAL_PREBUILT_BOOTIMAGE != "" || BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES == true
+else # BOARD_PREBUILT_BOOTIMAGE is defined
+	$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
+	$(hide) cp $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
+endif # BOARD_PREBUILT_BOOTIMAGE
 ifdef BOARD_PREBUILT_ODMIMAGE
 	$(hide) mkdir -p $(zip_root)/IMAGES
 	$(hide) cp $(INSTALLED_ODMIMAGE_TARGET) $(zip_root)/IMAGES/
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 5f654a6..1b7a279 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -759,6 +759,12 @@
 endif
 is_instrumentation_test :=
 
+# Currently this flag variable is true only for the `android_test_helper_app` type module
+# which should not have any .config file
+ifeq (true, $(LOCAL_DISABLE_TEST_CONFIG))
+  test_config :=
+endif
+
 # Make sure we only add the files once for multilib modules.
 ifdef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
   # Sync the auto_test_config value for multilib modules.
diff --git a/core/board_config.mk b/core/board_config.mk
index 9061342..53dbb92 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -379,6 +379,8 @@
 ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),)
   ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
     BUILDING_BOOT_IMAGE :=
+  else ifdef BOARD_PREBUILT_BOOTIMAGE
+    BUILDING_BOOT_IMAGE :=
   else ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
     BUILDING_BOOT_IMAGE := true
   else ifneq (,$(foreach kernel,$(BOARD_KERNEL_BINARIES),$(BOARD_$(call to-upper,$(kernel))_BOOTIMAGE_PARTITION_SIZE)))
@@ -764,8 +766,8 @@
 endif
 
 ###########################################
-# APEXes are by default flattened, i.e. non-updatable.
-# It can be unflattened (and updatable) by inheriting from
+# APEXes are by default flattened, i.e. non-updatable, if not building unbundled
+# apps. It can be unflattened (and updatable) by inheriting from
 # updatable_apex.mk
 #
 # APEX flattening can also be forcibly enabled (resp. disabled) by
@@ -774,7 +776,7 @@
 ifdef OVERRIDE_TARGET_FLATTEN_APEX
   TARGET_FLATTEN_APEX := $(OVERRIDE_TARGET_FLATTEN_APEX)
 else
-  ifeq (,$(TARGET_FLATTEN_APEX))
+  ifeq (,$(TARGET_BUILD_APPS)$(TARGET_FLATTEN_APEX))
     TARGET_FLATTEN_APEX := true
   endif
 endif
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index e2acb67..94a027c 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -61,6 +61,7 @@
 LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING:=
 LOCAL_DEX_PREOPT:= # '',true,false
 LOCAL_DISABLE_AUTO_GENERATE_TEST_CONFIG:=
+LOCAL_DISABLE_TEST_CONFIG:=
 LOCAL_DISABLE_RESOLVE_SUPPORT_LIBRARIES:=
 LOCAL_DONT_CHECK_MODULE:=
 # Don't delete the META_INF dir when merging static Java libraries.
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 90f00c0..46f7f24 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -167,6 +167,13 @@
   my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
 endif
 
+# Disable memtag for host targets. Host executables in AndroidMk files are
+# deprecated, but some partners still have them floating around.
+ifdef LOCAL_IS_HOST_MODULE
+  my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
+  my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+endif
+
 # Disable sanitizers which need the UBSan runtime for host targets.
 ifdef LOCAL_IS_HOST_MODULE
   my_sanitize := $(filter-out cfi,$(my_sanitize))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index bcbce8d..f365347 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -210,6 +210,9 @@
 ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
   LOCAL_ENFORCE_USES_LIBRARIES := false
 endif
+ifneq (,$(LOCAL_COMPATIBILITY_SUITE))
+  LOCAL_ENFORCE_USES_LIBRARIES := false
+endif
 
 # Disable the check if the app contains no java code.
 ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR)))
diff --git a/core/generate_enforce_rro.mk b/core/generate_enforce_rro.mk
index 6a23aeb..9079981 100644
--- a/core/generate_enforce_rro.mk
+++ b/core/generate_enforce_rro.mk
@@ -29,9 +29,12 @@
 
 LOCAL_PATH:= $(intermediates)
 
+# TODO(b/187404676): remove this condition when the prebuilt for packages exporting resources exists.
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
 ifeq ($(enforce_rro_use_res_lib),true)
   LOCAL_RES_LIBRARIES := $(enforce_rro_source_module)
 endif
+endif
 
 LOCAL_FULL_MANIFEST_FILE := $(rro_android_manifest_file)
 
@@ -45,8 +48,9 @@
 else
   $(error Unsupported partition. Want: [vendor/product] Got: [$(enforce_rro_partition)])
 endif
-
-ifneq (,$(LOCAL_RES_LIBRARIES))
+ifneq (,$(TARGET_BUILD_UNBUNDLED))
+  LOCAL_SDK_VERSION := current
+else ifneq (,$(LOCAL_RES_LIBRARIES))
   # Technically we are linking against the app (if only to grab its resources),
   # and because it's potentially not building against the SDK, we can't either.
   LOCAL_PRIVATE_PLATFORM_APIS := true
diff --git a/core/main.mk b/core/main.mk
index c45c1f2..401fb04 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -311,6 +311,20 @@
 endif
 endif
 
+# Set build prop. This prop is read by ota_from_target_files when generating OTA,
+# to decide if VABC should be disabled.
+ifeq ($(BOARD_DONT_USE_VABC_OTA),true)
+ADDITIONAL_VENDOR_PROPERTIES += \
+    ro.vendor.build.dont_use_vabc=true
+endif
+
+# Set the flag in vendor. So VTS would know if the new fingerprint format is in use when
+# the system images are replaced by GSI.
+ifeq ($(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT),true)
+ADDITIONAL_VENDOR_PROPERTIES += \
+    ro.vendor.build.fingerprint_has_digest=1
+endif
+
 ADDITIONAL_VENDOR_PROPERTIES += \
     ro.vendor.build.security_patch=$(VENDOR_SECURITY_PATCH) \
     ro.product.board=$(TARGET_BOOTLOADER_BOARD_NAME) \
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 0fc96e0..be9b1f8 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -98,7 +98,7 @@
     $(eval _option := --allow-dup)\
 )
 
-$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) $(3) $(6)
+$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6)
 	$(hide) echo Building $$@
 	$(hide) mkdir -p $$(dir $$@)
 	$(hide) rm -f $$@ && touch $$@
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 181ea62..f32ea76 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -229,7 +229,7 @@
 ifeq (REL,$(PLATFORM_VERSION_CODENAME))
   PLATFORM_SYSTEMSDK_VERSIONS += $(PLATFORM_SDK_VERSION)
 else
-  PLATFORM_SYSTEMSDK_VERSIONS += $(PLATFORM_VERSION_CODENAME)
+  PLATFORM_SYSTEMSDK_VERSIONS += $(subst $(comma),$(space),$(PLATFORM_VERSION_ALL_CODENAMES))
 endif
 PLATFORM_SYSTEMSDK_VERSIONS := $(strip $(sort $(PLATFORM_SYSTEMSDK_VERSIONS)))
 .KATI_READONLY := PLATFORM_SYSTEMSDK_VERSIONS
diff --git a/envsetup.sh b/envsetup.sh
index 8a995c7..bbb18e5 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -331,15 +331,15 @@
 
 function bazel()
 {
-    local T="$(gettop)"
-    if [ ! "$T" ]; then
-        echo "Couldn't locate the top of the tree.  Try setting TOP."
-        return
+    if which bazel &>/dev/null; then
+        >&2 echo "NOTE: bazel() function sourced from Android's envsetup.sh is being used instead of $(which bazel)"
+        >&2 echo
     fi
 
-    if which bazel &>/dev/null; then
-        >&2 echo "NOTE: bazel() function sourced from envsetup.sh is being used instead of $(which bazel)"
-        >&2 echo
+    local T="$(gettop)"
+    if [ ! "$T" ]; then
+        >&2 echo "Couldn't locate the top of the Android tree. Try setting TOP. This bazel() function cannot be used outside of the AOSP directory."
+        return
     fi
 
     "$T/tools/bazel" "$@"
@@ -703,6 +703,10 @@
     build_build_var_cache
     if [ $? -ne 0 ]
     then
+        if [[ "$product" =~ .*_(eng|user|userdebug) ]]
+        then
+            echo "Did you mean -${product/*_/}? (dash instead of underscore)"
+        fi
         return 1
     fi
     export TARGET_PRODUCT=$(get_build_var TARGET_PRODUCT)
@@ -1687,10 +1691,19 @@
     if T="$(gettop)"; then
       _wrap_build "$T/build/soong/soong_ui.bash" --build-mode --${bc} --dir="$(pwd)" "$@"
     else
-      echo "Couldn't locate the top of the tree. Try setting TOP."
+      >&2 echo "Couldn't locate the top of the tree. Try setting TOP."
+      return 1
     fi
 )
 
+function b()
+(
+    # Generate BUILD, bzl files into the synthetic Bazel workspace (out/soong/workspace).
+    m nothing GENERATE_BAZEL_FILES=true || return 1
+    # Then, run Bazel using the synthetic workspace as the --package_path.
+    "$(gettop)/tools/bazel" "$@" --config=bp2build
+)
+
 function m()
 (
     _trigger_build "all-modules" "$@"
diff --git a/target/board/BoardConfigGkiCommon.mk b/target/board/BoardConfigGkiCommon.mk
new file mode 100644
index 0000000..1a8c6b1
--- /dev/null
+++ b/target/board/BoardConfigGkiCommon.mk
@@ -0,0 +1,44 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Enable GKI 2.0 signing.
+BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem
+BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048
+
+# The following is needed to allow release signing process appends more extra
+# args, e.g., passing --signing_helper_with_files from mkbootimg to avbtool.
+# See b/178559811 for more details.
+BOARD_GKI_SIGNING_SIGNATURE_ARGS := --prop foo:bar
+
+# Boot image with ramdisk and kernel
+BOARD_RAMDISK_USE_LZ4 := true
+BOARD_BOOT_HEADER_VERSION := 4
+BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
+BOARD_USES_RECOVERY_AS_BOOT :=
+TARGET_NO_KERNEL := false
+BOARD_USES_GENERIC_KERNEL_IMAGE := true
+BOARD_KERNEL_MODULE_INTERFACE_VERSIONS := \
+    5.4-android12-unstable \
+    5.10-android12-unstable \
+
+# Copy boot image in $OUT to target files. This is defined for targets where
+# the installed GKI APEXes are built from source.
+BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
+
+# No vendor_boot
+BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
+
+# No recovery
+BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index 95ba1d0..c577870 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -30,14 +30,6 @@
 # the devices with metadata parition
 BOARD_USES_METADATA_PARTITION := true
 
-# Enable GKI 2.0 signing.
-BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem
-BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048
-# The following is needed to allow release signing process appends more extra
-# args, e.g., passing --signing_helper_with_files from mkbootimg to avbtool.
-# See b/178559811 for more details.
-BOARD_GKI_SIGNING_SIGNATURE_ARGS := --prop foo:bar
-
 # Android Verified Boot (AVB):
 #   Set the rollback index to zero, to prevent the device bootloader from
 #   updating the last seen rollback index in the tamper-evident storage.
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 1dbce1e..21b4065 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -53,6 +53,7 @@
 endif
 
 include build/make/target/board/BoardConfigGsiCommon.mk
+include build/make/target/board/BoardConfigGkiCommon.mk
 
 BOARD_KERNEL-4.19-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
 BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
@@ -73,10 +74,6 @@
 
 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
 
-BOARD_RAMDISK_USE_LZ4 := true
-BOARD_BOOT_HEADER_VERSION := 4
-BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-
 BOARD_KERNEL_BINARIES := \
     kernel-4.19-gz \
     kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \
@@ -90,24 +87,6 @@
 
 endif
 
-# Boot image
-BOARD_USES_RECOVERY_AS_BOOT :=
-TARGET_NO_KERNEL := false
-BOARD_USES_GENERIC_KERNEL_IMAGE := true
-BOARD_KERNEL_MODULE_INTERFACE_VERSIONS := \
-    5.4-android12-unstable \
-    5.10-android12-unstable \
-
-# Copy boot image in $OUT to target files. This is defined for targets where
-# the installed GKI APEXes are built from source.
-BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
-
-# No vendor_boot
-BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
-
-# No recovery
-BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
-
 # Some vendors still haven't cleaned up all device specific directories under
 # root!
 
diff --git a/target/board/generic_x86/BoardConfig.mk b/target/board/generic_x86/BoardConfig.mk
index c40c15b..47fd384 100644
--- a/target/board/generic_x86/BoardConfig.mk
+++ b/target/board/generic_x86/BoardConfig.mk
@@ -18,9 +18,8 @@
 TARGET_ARCH := x86
 TARGET_ARCH_VARIANT := x86
 
-TARGET_PRELINK_MODULE := false
-
 include build/make/target/board/BoardConfigGsiCommon.mk
+
 ifndef BUILDING_GSI
 include build/make/target/board/BoardConfigEmuCommon.mk
 
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 660ec6e..bdc862e 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -22,9 +22,30 @@
 TARGET_2ND_ARCH := x86
 TARGET_2ND_ARCH_VARIANT := x86_64
 
-TARGET_PRELINK_MODULE := false
 include build/make/target/board/BoardConfigGsiCommon.mk
-ifndef BUILDING_GSI
+
+ifdef BUILDING_GSI
+include build/make/target/board/BoardConfigGkiCommon.mk
+
+BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
+
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
+
+BOARD_KERNEL_BINARIES := \
+    kernel-5.4 \
+    kernel-5.10 \
+
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+BOARD_KERNEL_BINARIES += \
+    kernel-5.4-allsyms \
+    kernel-5.10-allsyms \
+
+endif
+
+else # BUILDING_GSI
 include build/make/target/board/BoardConfigEmuCommon.mk
 
 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
@@ -41,4 +62,5 @@
 WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
 WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
 WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
-endif
+
+endif # BUILDING_GSI
diff --git a/target/board/generic_x86_64/README.txt b/target/board/generic_x86_64/README.txt
index 46b015b..8e515c4 100644
--- a/target/board/generic_x86_64/README.txt
+++ b/target/board/generic_x86_64/README.txt
@@ -1,8 +1,7 @@
-The "generic_x86_64" product defines a non-hardware-specific IA target
-without a kernel or bootloader.
+The "generic_x86_64" product defines a non-hardware-specific x86_64 target
+without a bootloader.
 
-It can be used to build the entire user-level system, and
-will work with the IA version of the emulator,
+It is also the target to build the generic kernel image (GKI).
 
 It is not a product "base class"; no other products inherit
 from it or use it in any way.
diff --git a/target/board/generic_x86_64/device.mk b/target/board/generic_x86_64/device.mk
index 5ad008f..e195bd3 100755
--- a/target/board/generic_x86_64/device.mk
+++ b/target/board/generic_x86_64/device.mk
@@ -14,14 +14,21 @@
 # limitations under the License.
 #
 
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
+PRODUCT_COPY_FILES += \
+    kernel/prebuilts/5.4/x86_64/kernel-5.4:kernel-5.4 \
+    kernel/prebuilts/5.10/x86_64/kernel-5.10:kernel-5.10 \
 
-ifdef NET_ETH0_STARTONBOOT
-  PRODUCT_VENDOR_PROPERTIES += net.eth0.startonboot=1
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.4/x86_64/prebuilt-info.txt:kernel/5.4/prebuilt-info.txt)
+$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/x86_64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt)
+
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+PRODUCT_COPY_FILES += \
+    kernel/prebuilts/5.4/x86_64/kernel-5.4:kernel-5.4-allsyms \
+    kernel/prebuilts/5.10/x86_64/kernel-5.10:kernel-5.10-allsyms \
+
 endif
 
-# Ensure we package the BIOS files too.
-PRODUCT_HOST_PACKAGES += \
-	bios.bin \
-	vgabios-cirrus.bin \
+PRODUCT_BUILD_VENDOR_BOOT_IMAGE := false
+PRODUCT_BUILD_RECOVERY_IMAGE := false
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 21beda9..7e4c5ef 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -27,7 +27,6 @@
     android.test.base \
     android.test.mock \
     android.test.runner \
-    ANGLE \
     apexd \
     appops \
     app_process \
@@ -352,8 +351,6 @@
 PRODUCT_SYSTEM_PROPERTIES += debug.atrace.tags.enableflags=0
 PRODUCT_SYSTEM_PROPERTIES += persist.traced.enable=1
 
-PRODUCT_PROPERTY_OVERRIDES += ro.gfx.angle.supported=true
-
 # Packages included only for eng or userdebug builds, previously debug tagged
 PRODUCT_PACKAGES_DEBUG := \
     adb_keys \
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 0e652c1..f0916f9 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -48,7 +48,7 @@
 PRODUCT_BOOT_JARS += \
     com.android.i18n:core-icu4j
 
-# Updatable APEX jars. Keep the list sorted by module names and then library names.
+# Updatable APEX boot jars. Keep the list sorted by module names and then library names.
 PRODUCT_UPDATABLE_BOOT_JARS := \
     com.android.conscrypt:conscrypt \
     com.android.ipsec:android.net.ipsec.ike \
@@ -60,6 +60,11 @@
     com.android.tethering:framework-tethering \
     com.android.wifi:framework-wifi
 
+# Updatable APEX system server jars. Keep the list sorted by module names and then library names.
+PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
+    com.android.art:service-art \
+    com.android.permission:service-permission \
+
 # Minimal configuration for running dex2oat (default argument values).
 # PRODUCT_USES_DEFAULT_ART_CONFIG must be true to enable boot image compilation.
 PRODUCT_USES_DEFAULT_ART_CONFIG := true
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index 550ae7c..c753e6c 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -14,7 +14,6 @@
 LLNDK: libmediandk.so
 LLNDK: libnativewindow.so
 LLNDK: libneuralnetworks.so
-LLNDK: libneuralnetworks_shim.so
 LLNDK: libselinux.so
 LLNDK: libsync.so
 LLNDK: libvndksupport.so
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index c7ac907..30a8621 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -54,12 +54,6 @@
     services \
     ethernet-service
 
-# system server jars which are updated via apex modules.
-# The values should be of the format <apex name>:<jar name>
-PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
-    com.android.art:service-art \
-    com.android.permission:service-permission \
-
 PRODUCT_COPY_FILES += \
     system/core/rootdir/etc/public.libraries.android.txt:system/etc/public.libraries.txt
 
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 4f14ddd..b511aa6 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -75,10 +75,21 @@
 PRODUCT_PACKAGES += \
     hiddenapi-package-whitelist.xml \
 
+# The dalvik.vm.dexopt.thermal-cutoff property must contain one of the values
+# listed here:
+#
+# https://source.android.com/devices/architecture/hidl/thermal-mitigation#thermal-api
+#
+# If the thermal status of the device reaches or exceeds the value set here
+# background dexopt will be terminated and rescheduled using an exponential
+# backoff policy.
+#
+# The thermal cutoff value is currently set to THERMAL_STATUS_MODERATE.
 PRODUCT_SYSTEM_PROPERTIES += \
     dalvik.vm.usejit=true \
     dalvik.vm.usejitprofiles=true \
     dalvik.vm.dexopt.secondary=true \
+    dalvik.vm.dexopt.thermal-cutoff=2 \
     dalvik.vm.appimageformat=lz4
 
 PRODUCT_SYSTEM_PROPERTIES += \
@@ -99,6 +110,9 @@
 # The install filter is speed-profile in order to enable the use of
 # profiles from the dex metadata files. Note that if a profile is not provided
 # or if it is empty speed-profile is equivalent to (quicken + empty app image).
+# Note that `cmdline` is not strictly needed but it simplifies the management
+# of compilation reason in the platform (as we have a unified, single path,
+# without exceptions).
 PRODUCT_SYSTEM_PROPERTIES += \
     pm.dexopt.post-boot?=extract \
     pm.dexopt.install?=speed-profile \
@@ -110,6 +124,7 @@
     pm.dexopt.bg-dexopt?=speed-profile \
     pm.dexopt.ab-ota?=speed-profile \
     pm.dexopt.inactive?=verify \
+    pm.dexopt.cmdline?=verify \
     pm.dexopt.shared?=speed
 
 # Pass file with the list of updatable boot class path packages to dex2oat.
diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk
index c8dc8b0..d606e00 100644
--- a/target/product/updatable_apex.mk
+++ b/target/product/updatable_apex.mk
@@ -22,4 +22,9 @@
   PRODUCT_PACKAGES += com.android.apex.cts.shim.v1_prebuilt
   PRODUCT_VENDOR_PROPERTIES := ro.apex.updatable=true
   TARGET_FLATTEN_APEX := false
+  # Use compressed apexes in pre-installed partitions.
+  # Note: this doesn't mean that all pre-installed apexes will be compressed.
+  #  Whether an apex is compressed or not is controlled at apex Soong module
+  #  via the compressible property.
+  PRODUCT_COMPRESSED_APEX := true
 endif
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 687070d..fc588e4 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -59,6 +59,8 @@
         "mkuserimg_mke2fs",
         "simg2img",
         "tune2fs",
+        "mkf2fsuserimg.sh",
+        "fsck.f2fs",
     ],
 }
 
@@ -114,6 +116,20 @@
     },
 }
 
+cc_library_static {
+    name: "ota_metadata_proto_cc",
+    srcs: [
+       "ota_metadata.proto",
+    ],
+    host_supported: true,
+    recovery_available: true,
+    proto: {
+        canonical_path_from_root: false,
+        type: "lite",
+        export_proto_headers: true,
+    },
+}
+
 java_library_static {
     name: "ota_metadata_proto_java",
     host_supported: true,
@@ -148,6 +164,7 @@
         "releasetools_common",
         "releasetools_verity_utils",
         "apex_manifest",
+        "care_map_proto_py",
     ],
     required: [
         "brillo_update_payload",
@@ -384,7 +401,7 @@
         "releasetools_common",
     ],
     required: [
-        "aapt",
+        "aapt2",
     ],
 }
 
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index c583d01..28a2f5a 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -687,8 +687,10 @@
               os.path.join(OPTIONS.input_tmp, "IMAGES",
                            "{}.img".format(partition_name))))
 
+
 def AddApexInfo(output_zip):
-  apex_infos = GetApexInfoFromTargetFiles(OPTIONS.input_tmp, 'system')
+  apex_infos = GetApexInfoFromTargetFiles(OPTIONS.input_tmp, 'system',
+                                          compressed_only=False)
   apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
   apex_metadata_proto.apex_info.extend(apex_infos)
   apex_info_bytes = apex_metadata_proto.SerializeToString()
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 2492da9..8eec4b5 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -82,7 +82,7 @@
   return inodes + spare_inodes
 
 
-def GetFilesystemCharacteristics(image_path, sparse_image=True):
+def GetFilesystemCharacteristics(fs_type, image_path, sparse_image=True):
   """Returns various filesystem characteristics of "image_path".
 
   Args:
@@ -96,7 +96,11 @@
   if sparse_image:
     unsparse_image_path = UnsparseImage(image_path, replace=False)
 
-  cmd = ["tune2fs", "-l", unsparse_image_path]
+  if fs_type.startswith("ext"):
+    cmd = ["tune2fs", "-l", unsparse_image_path]
+  elif fs_type.startswith("f2fs"):
+    cmd = ["fsck.f2fs", "-l", unsparse_image_path]
+
   try:
     output = common.RunAndCheckOutput(cmd, verbose=False)
   finally:
@@ -283,7 +287,7 @@
     if "flash_logical_block_size" in prop_dict:
       build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
     # Specify UUID and hash_seed if using mke2fs.
-    if prop_dict["ext_mkuserimg"] == "mkuserimg_mke2fs":
+    if os.path.basename(prop_dict["ext_mkuserimg"]) == "mkuserimg_mke2fs":
       if "uuid" in prop_dict:
         build_command.extend(["-U", prop_dict["uuid"]])
       if "hash_seed" in prop_dict:
@@ -354,14 +358,15 @@
       build_command.append("--prjquota")
     if (needs_casefold):
       build_command.append("--casefold")
-    if (needs_compress or prop_dict.get("system_fs_compress") == "true"):
+    if (needs_compress or prop_dict.get("f2fs_compress") == "true"):
       build_command.append("--compression")
-    if (prop_dict.get("system_fs_compress") == "true"):
+    if (prop_dict.get("f2fs_compress") == "true"):
+      build_command.append("--readonly")
       build_command.append("--sldc")
-      if (prop_dict.get("system_f2fs_sldc_flags") == None):
+      if (prop_dict.get("f2fs_sldc_flags") == None):
         build_command.append(str(0))
       else:
-        sldc_flags_str = prop_dict.get("system_f2fs_sldc_flags")
+        sldc_flags_str = prop_dict.get("f2fs_sldc_flags")
         sldc_flags = sldc_flags_str.split()
         build_command.append(str(len(sldc_flags)))
         build_command.extend(sldc_flags)
@@ -433,6 +438,8 @@
   fs_spans_partition = True
   if fs_type.startswith("squash") or fs_type.startswith("erofs"):
     fs_spans_partition = False
+  elif fs_type.startswith("f2fs") and prop_dict.get("f2fs_compress") == "true":
+    fs_spans_partition = False
 
   # Get a builder for creating an image that's to be verified by Verified Boot,
   # or None if not applicable.
@@ -473,7 +480,7 @@
       sparse_image = False
       if "extfs_sparse_flag" in prop_dict:
         sparse_image = True
-      fs_dict = GetFilesystemCharacteristics(out_file, sparse_image)
+      fs_dict = GetFilesystemCharacteristics(fs_type, out_file, sparse_image)
       os.remove(out_file)
       block_size = int(fs_dict.get("Block size", "4096"))
       free_size = int(fs_dict.get("Free blocks", "0")) * block_size
@@ -510,6 +517,19 @@
       prop_dict["partition_size"] = str(size)
       logger.info(
           "Allocating %d Inodes for %s.", inodes, out_file)
+    elif fs_type.startswith("f2fs") and prop_dict.get("f2fs_compress") == "true":
+      prop_dict["partition_size"] = str(size)
+      prop_dict["image_size"] = str(size)
+      BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+      sparse_image = False
+      if "f2fs_sparse_flag" in prop_dict:
+        sparse_image = True
+      fs_dict = GetFilesystemCharacteristics(fs_type, out_file, sparse_image)
+      os.remove(out_file)
+      block_count = int(fs_dict.get("block_count", "0"))
+      log_blocksize = int(fs_dict.get("log_blocksize", "12"))
+      size = block_count << log_blocksize
+      prop_dict["partition_size"] = str(size)
     if verity_image_builder:
       size = verity_image_builder.CalculateDynamicPartitionSize(size)
     prop_dict["partition_size"] = str(size)
@@ -569,7 +589,7 @@
       "extfs_sparse_flag",
       "erofs_sparse_flag",
       "squashfs_sparse_flag",
-      "system_fs_compress",
+      "system_f2fs_compress",
       "system_f2fs_sldc_flags",
       "f2fs_sparse_flag",
       "skip_fsck",
@@ -607,6 +627,8 @@
     copy_prop("root_dir", "root_dir")
     copy_prop("root_fs_config", "root_fs_config")
     copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+    copy_prop("system_f2fs_compress", "f2fs_compress")
+    copy_prop("system_f2fs_sldc_flags", "f2fs_sldc_flags")
     copy_prop("system_squashfs_compressor", "squashfs_compressor")
     copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
     copy_prop("system_squashfs_block_size", "squashfs_block_size")
@@ -633,6 +655,8 @@
       d["journal_size"] = "0"
     copy_prop("system_verity_block_device", "verity_block_device")
     copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+    copy_prop("system_f2fs_compress", "f2fs_compress")
+    copy_prop("system_f2fs_sldc_flags", "f2fs_sldc_flags")
     copy_prop("system_squashfs_compressor", "squashfs_compressor")
     copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
     copy_prop("system_squashfs_block_size", "squashfs_block_size")
@@ -669,6 +693,8 @@
       d["journal_size"] = "0"
     copy_prop("vendor_verity_block_device", "verity_block_device")
     copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+    copy_prop("vendor_f2fs_compress", "f2fs_compress")
+    copy_prop("vendor_f2fs_sldc_flags", "f2fs_sldc_flags")
     copy_prop("vendor_squashfs_compressor", "squashfs_compressor")
     copy_prop("vendor_squashfs_compressor_opt", "squashfs_compressor_opt")
     copy_prop("vendor_squashfs_block_size", "squashfs_block_size")
@@ -692,6 +718,8 @@
       d["journal_size"] = "0"
     copy_prop("product_verity_block_device", "verity_block_device")
     copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+    copy_prop("product_f2fs_compress", "f2fs_compress")
+    copy_prop("product_f2fs_sldc_flags", "f2fs_sldc_flags")
     copy_prop("product_squashfs_compressor", "squashfs_compressor")
     copy_prop("product_squashfs_compressor_opt", "squashfs_compressor_opt")
     copy_prop("product_squashfs_block_size", "squashfs_block_size")
@@ -715,6 +743,8 @@
       d["journal_size"] = "0"
     copy_prop("system_ext_verity_block_device", "verity_block_device")
     copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks")
+    copy_prop("system_ext_f2fs_compress", "f2fs_compress")
+    copy_prop("system_ext_f2fs_sldc_flags", "f2fs_sldc_flags")
     copy_prop("system_ext_squashfs_compressor", "squashfs_compressor")
     copy_prop("system_ext_squashfs_compressor_opt",
               "squashfs_compressor_opt")
@@ -759,6 +789,8 @@
     copy_prop("avb_vendor_dlkm_salt", "avb_salt")
     copy_prop("vendor_dlkm_fs_type", "fs_type")
     copy_prop("vendor_dlkm_size", "partition_size")
+    copy_prop("vendor_dlkm_f2fs_compress", "f2fs_compress")
+    copy_prop("vendor_dlkm_f2fs_sldc_flags", "f2fs_sldc_flags")
     if not copy_prop("vendor_dlkm_journal_size", "journal_size"):
       d["journal_size"] = "0"
     copy_prop("vendor_dlkm_verity_block_device", "verity_block_device")
diff --git a/tools/releasetools/care_map_pb2.py b/tools/releasetools/care_map_pb2.py
new file mode 100644
index 0000000..06aee25
--- /dev/null
+++ b/tools/releasetools/care_map_pb2.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: bootable/recovery/update_verifier/care_map.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='bootable/recovery/update_verifier/care_map.proto',
+  package='recovery_update_verifier',
+  syntax='proto3',
+  serialized_options=_b('H\003'),
+  serialized_pb=_b('\n0bootable/recovery/update_verifier/care_map.proto\x12\x18recovery_update_verifier\"\x9e\x01\n\x07\x43\x61reMap\x12\x43\n\npartitions\x18\x01 \x03(\x0b\x32/.recovery_update_verifier.CareMap.PartitionInfo\x1aN\n\rPartitionInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06ranges\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12\x13\n\x0b\x66ingerprint\x18\x04 \x01(\tB\x02H\x03\x62\x06proto3')
+)
+
+
+
+
+_CAREMAP_PARTITIONINFO = _descriptor.Descriptor(
+  name='PartitionInfo',
+  full_name='recovery_update_verifier.CareMap.PartitionInfo',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='recovery_update_verifier.CareMap.PartitionInfo.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ranges', full_name='recovery_update_verifier.CareMap.PartitionInfo.ranges', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='id', full_name='recovery_update_verifier.CareMap.PartitionInfo.id', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='fingerprint', full_name='recovery_update_verifier.CareMap.PartitionInfo.fingerprint', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=159,
+  serialized_end=237,
+)
+
+_CAREMAP = _descriptor.Descriptor(
+  name='CareMap',
+  full_name='recovery_update_verifier.CareMap',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='partitions', full_name='recovery_update_verifier.CareMap.partitions', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_CAREMAP_PARTITIONINFO, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=79,
+  serialized_end=237,
+)
+
+_CAREMAP_PARTITIONINFO.containing_type = _CAREMAP
+_CAREMAP.fields_by_name['partitions'].message_type = _CAREMAP_PARTITIONINFO
+DESCRIPTOR.message_types_by_name['CareMap'] = _CAREMAP
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+CareMap = _reflection.GeneratedProtocolMessageType('CareMap', (_message.Message,), {
+
+  'PartitionInfo' : _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), {
+    'DESCRIPTOR' : _CAREMAP_PARTITIONINFO,
+    '__module__' : 'bootable.recovery.update_verifier.care_map_pb2'
+    # @@protoc_insertion_point(class_scope:recovery_update_verifier.CareMap.PartitionInfo)
+    })
+  ,
+  'DESCRIPTOR' : _CAREMAP,
+  '__module__' : 'bootable.recovery.update_verifier.care_map_pb2'
+  # @@protoc_insertion_point(class_scope:recovery_update_verifier.CareMap)
+  })
+_sym_db.RegisterMessage(CareMap)
+_sym_db.RegisterMessage(CareMap.PartitionInfo)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/tools/releasetools/check_partition_sizes.py b/tools/releasetools/check_partition_sizes.py
index 3047ddb..eaed07e 100644
--- a/tools/releasetools/check_partition_sizes.py
+++ b/tools/releasetools/check_partition_sizes.py
@@ -223,9 +223,15 @@
       error_limit = Expression(
           "BOARD_SUPER_PARTITION_ERROR_LIMIT{}".format(size_limit_suffix),
           int(info_dict["super_partition_error_limit"]) // num_slots)
-      self._CheckSumOfPartitionSizes(
-          max_size, info_dict["dynamic_partition_list"].strip().split(),
-          warn_limit, error_limit)
+      partitions_in_super = info_dict["dynamic_partition_list"].strip().split()
+      # In the vab case, factory OTA will allocate space on super to install
+      # the system_other partition. So add system_other to the partition list.
+      if DeviceType.Get(self.info_dict) == DeviceType.VAB and (
+          "system_other_image" in info_dict or
+          "system_other_image_size" in info_dict):
+        partitions_in_super.append("system_other")
+      self._CheckSumOfPartitionSizes(max_size, partitions_in_super,
+                                     warn_limit, error_limit)
 
     groups = info_dict.get("super_partition_groups", "").strip().split()
 
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 5e2a50d..498e487 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -276,6 +276,9 @@
     args = args[:]
     args[0] = FindHostToolPath(args[0])
 
+  if verbose is None:
+    verbose = OPTIONS.verbose
+
   # Don't log any if caller explicitly says so.
   if verbose:
     logger.info("  Running: \"%s\"", " ".join(args))
@@ -451,6 +454,13 @@
     return vabc_enabled
 
   @property
+  def vendor_suppressed_vabc(self):
+    vendor_prop = self.info_dict.get("vendor.build.prop")
+    vabc_suppressed = vendor_prop and \
+        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
+    return vabc_suppressed and vabc_suppressed.lower() == "true"
+
+  @property
   def oem_props(self):
     return self._oem_props
 
@@ -1710,6 +1720,38 @@
   return data
 
 
+def _SignBootableImage(image_path, prebuilt_name, partition_name,
+                       info_dict=None):
+  """Performs AVB signing for a prebuilt boot.img.
+
+  Args:
+    image_path: The full path of the image, e.g., /path/to/boot.img.
+    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
+        boot-5.10.img, recovery.img.
+    partition_name: The partition name, e.g., 'boot' or 'recovery'.
+    info_dict: The information dict read from misc_info.txt.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
+  if info_dict.get("avb_enable") == "true":
+    avbtool = info_dict["avb_avbtool"]
+    if partition_name == "recovery":
+      part_size = info_dict["recovery_size"]
+    else:
+      part_size = info_dict[prebuilt_name.replace(".img", "_size")]
+
+    cmd = [avbtool, "add_hash_footer", "--image", image_path,
+           "--partition_size", str(part_size), "--partition_name",
+           partition_name]
+    AppendAVBSigningArgs(cmd, partition_name)
+    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
+    if args and args.strip():
+      cmd.extend(shlex.split(args))
+    RunAndCheckOutput(cmd)
+
+
 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                      info_dict=None, two_step_image=False):
   """Return a File object with the desired bootable image.
@@ -1718,6 +1760,9 @@
   otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
   the source files in 'unpack_dir'/'tree_subdir'."""
 
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
     logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
@@ -1728,10 +1773,16 @@
     logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
     return File.FromLocalFile(name, prebuilt_path)
 
-  logger.info("building image from target_files %s...", tree_subdir)
+  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
+  if os.path.exists(prebuilt_path):
+    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
+    signed_img = MakeTempFile()
+    shutil.copy(prebuilt_path, signed_img)
+    partition_name = tree_subdir.lower()
+    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
+    return File.FromLocalFile(name, signed_img)
 
-  if info_dict is None:
-    info_dict = OPTIONS.info_dict
+  logger.info("building image from target_files %s...", tree_subdir)
 
   # With system_root_image == "true", we don't pack ramdisk into the boot image.
   # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 229f7e9..bf0b8f1 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -237,6 +237,7 @@
 import sys
 import zipfile
 
+import care_map_pb2
 import common
 import ota_utils
 from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
@@ -832,6 +833,17 @@
   with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
     common.ZipWriteStr(partial_target_zip, 'META/ab_partitions.txt',
                        '\n'.join(ab_partitions))
+    CARE_MAP_ENTRY = "META/care_map.pb"
+    if CARE_MAP_ENTRY in input_zip.namelist():
+      caremap = care_map_pb2.CareMap()
+      caremap.ParseFromString(input_zip.read(CARE_MAP_ENTRY))
+      filtered = [
+          part for part in caremap.partitions if part.name in ab_partitions]
+      del caremap.partitions[:]
+      caremap.partitions.extend(filtered)
+      common.ZipWriteStr(partial_target_zip, CARE_MAP_ENTRY,
+                         caremap.SerializeToString())
+
     for info_file in ['META/misc_info.txt', DYNAMIC_PARTITION_INFO]:
       if info_file not in input_zip.namelist():
         logger.warning('Cannot find %s in input zipfile', info_file)
@@ -841,7 +853,8 @@
           content, lambda p: p in ab_partitions)
       common.ZipWriteStr(partial_target_zip, info_file, modified_info)
 
-    # TODO(xunchang) handle 'META/care_map.pb', 'META/postinstall_config.txt'
+    # TODO(xunchang) handle 'META/postinstall_config.txt'
+
   common.ZipClose(partial_target_zip)
 
   return partial_target_file
@@ -1063,6 +1076,7 @@
     # serve I/O request when device boots. Therefore, disable VABC if source
     # build doesn't supports it.
     if not source_info.is_vabc or not target_info.is_vabc:
+      logger.info("Either source or target does not support VABC, disabling.")
       OPTIONS.disable_vabc = True
 
   else:
@@ -1071,6 +1085,9 @@
     target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
     source_info = None
 
+  if target_info.vendor_suppressed_vabc:
+    logger.info("Vendor suppressed VABC. Disabling")
+    OPTIONS.disable_vabc = True
   additional_args = []
 
   # Prepare custom images.
@@ -1166,14 +1183,12 @@
     else:
       logger.warning("Cannot find care map file in target_file package")
 
-  # Copy apex_info.pb over to generated OTA package.
-  try:
-    apex_info_entry = target_zip.getinfo("META/apex_info.pb")
-    with target_zip.open(apex_info_entry, "r") as zfp:
-      common.ZipWriteStr(output_zip, "apex_info.pb", zfp.read(),
-                         compress_type=zipfile.ZIP_STORED)
-  except KeyError:
-    logger.warning("target_file doesn't contain apex_info.pb %s", target_file)
+  # Add the source apex version for incremental ota updates, and write the
+  # result apex info to the ota package.
+  ota_apex_info = ota_utils.ConstructOtaApexInfo(target_zip, source_file)
+  if ota_apex_info is not None:
+    common.ZipWriteStr(output_zip, "apex_info.pb", ota_apex_info,
+                       compress_type=zipfile.ZIP_STORED)
 
   common.ZipClose(target_zip)
 
diff --git a/tools/releasetools/ota_metadata.proto b/tools/releasetools/ota_metadata.proto
index ed9d0c3..689ce80 100644
--- a/tools/releasetools/ota_metadata.proto
+++ b/tools/releasetools/ota_metadata.proto
@@ -72,6 +72,8 @@
   int64 version = 2;
   bool is_compressed = 3;
   int64 decompressed_size = 4;
+  // Used in OTA
+  int64 source_version = 5;
 }
 
 // Just a container to hold repeated apex_info, so that we can easily serialize
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 104f02f..28c246b 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -569,3 +569,45 @@
 
   SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
            whole_file=True)
+
+
+def ConstructOtaApexInfo(target_zip, source_file=None):
+  """If applicable, add the source version to the apex info."""
+
+  def _ReadApexInfo(input_zip):
+    if "META/apex_info.pb" not in input_zip.namelist():
+      logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
+      return None
+
+    with input_zip.open("META/apex_info.pb", "r") as zfp:
+      return zfp.read()
+
+  target_apex_string = _ReadApexInfo(target_zip)
+  # Return early if the target apex info doesn't exist or is empty.
+  if not target_apex_string:
+    return target_apex_string
+
+  # If the source apex info isn't available, just return the target info
+  if not source_file:
+    return target_apex_string
+
+  with zipfile.ZipFile(source_file, "r", allowZip64=True) as source_zip:
+    source_apex_string = _ReadApexInfo(source_zip)
+  if not source_apex_string:
+    return target_apex_string
+
+  source_apex_proto = ota_metadata_pb2.ApexMetadata()
+  source_apex_proto.ParseFromString(source_apex_string)
+  source_apex_versions = {apex.package_name: apex.version for apex in
+                          source_apex_proto.apex_info}
+
+  # If the apex package is available in the source build, initialize the source
+  # apex version.
+  target_apex_proto = ota_metadata_pb2.ApexMetadata()
+  target_apex_proto.ParseFromString(target_apex_string)
+  for target_apex in target_apex_proto.apex_info:
+    name = target_apex.package_name
+    if name in source_apex_versions:
+      target_apex.source_version = source_apex_versions[name]
+
+  return target_apex_proto.SerializeToString()
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index dd2de36..2859948 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -217,6 +217,18 @@
     raise RuntimeError("Missing {} in AVB_FOOTER_ARGS".format(partition))
 
 
+def IsApexFile(filename):
+  return filename.endswith(".apex") or filename.endswith(".capex")
+
+
+def GetApexFilename(filename):
+  name = os.path.basename(filename)
+  # Replace the suffix for compressed apex
+  if name.endswith(".capex"):
+    return name.replace(".capex", ".apex")
+  return name
+
+
 def GetApkCerts(certmap):
   # apply the key remapping to the contents of the file
   for apk, cert in certmap.items():
@@ -356,8 +368,8 @@
   unknown_files = []
   for info in input_tf_zip.infolist():
     # Handle APEXes on all partitions
-    if info.filename.endswith('.apex'):
-      name = os.path.basename(info.filename)
+    if IsApexFile(info.filename):
+      name = GetApexFilename(info.filename)
       if name not in known_keys:
         unknown_files.append(name)
       continue
@@ -388,10 +400,11 @@
 
   invalid_apexes = []
   for info in input_tf_zip.infolist():
-    if not info.filename.endswith('.apex'):
+    if not IsApexFile(info.filename):
       continue
 
-    name = os.path.basename(info.filename)
+    name = GetApexFilename(info.filename)
+
     (payload_key, container_key) = apex_keys[name]
     if ((payload_key in common.SPECIAL_CERT_STRINGS and
          container_key not in common.SPECIAL_CERT_STRINGS) or
@@ -541,8 +554,9 @@
         common.ZipWriteStr(output_tf_zip, out_info, data)
 
     # Sign bundled APEX files on all partitions
-    elif filename.endswith(".apex"):
-      name = os.path.basename(filename)
+    elif IsApexFile(filename):
+      name = GetApexFilename(filename)
+
       payload_key, container_key = apex_keys[name]
 
       # We've asserted not having a case with only one of them PRESIGNED.
diff --git a/tools/releasetools/test_check_partition_sizes.py b/tools/releasetools/test_check_partition_sizes.py
index 073d229..88cf60f 100644
--- a/tools/releasetools/test_check_partition_sizes.py
+++ b/tools/releasetools/test_check_partition_sizes.py
@@ -33,6 +33,7 @@
         system_image_size=50
         vendor_image_size=20
         product_image_size=20
+        system_other_image_size=10
         """.split("\n"))
 
   def test_ab(self):
@@ -126,3 +127,13 @@
         """.split("\n")))
     with self.assertRaises(RuntimeError):
       CheckPartitionSizes(self.info_dict)
+
+  def test_vab_too_big_with_system_other(self):
+    self.info_dict.update(common.LoadDictionaryFromLines("""
+        virtual_ab=true
+        system_other_image_size=20
+        super_partition_size=101
+        super_super_device_size=101
+        """.split("\n")))
+    with self.assertRaises(RuntimeError):
+      CheckPartitionSizes(self.info_dict)
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 661712a..51def30 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -24,7 +24,7 @@
 import test_utils
 from ota_utils import (
     BuildLegacyOtaMetadata, CalculateRuntimeDevicesAndFingerprints,
-    FinalizeMetadata, GetPackageMetadata, PropertyFiles)
+    ConstructOtaApexInfo, FinalizeMetadata, GetPackageMetadata, PropertyFiles)
 from ota_from_target_files import (
     _LoadOemDicts, AbOtaPropertyFiles,
     GetTargetFilesZipForCustomImagesUpdates,
@@ -295,6 +295,35 @@
     uncompressed_apex_size = os.path.getsize(original_apex_filepath)
     self.assertEqual(apex_infos[0].decompressed_size, uncompressed_apex_size)
 
+  @staticmethod
+  def construct_tf_with_apex_info(infos):
+    apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
+    apex_metadata_proto.apex_info.extend(infos)
+
+    output = common.MakeTempFile(suffix='.zip')
+    with zipfile.ZipFile(output, 'w') as zfp:
+      common.ZipWriteStr(zfp, "META/apex_info.pb",
+                         apex_metadata_proto.SerializeToString())
+    return output
+
+  def test_ConstructOtaApexInfo_incremental_package(self):
+    infos = [ota_metadata_pb2.ApexInfo(package_name='com.android.apex.1',
+                                       version=1000, is_compressed=False),
+             ota_metadata_pb2.ApexInfo(package_name='com.android.apex.2',
+                                       version=2000, is_compressed=True)]
+    target_file = self.construct_tf_with_apex_info(infos)
+
+    with zipfile.ZipFile(target_file) as target_zip:
+      info_bytes = ConstructOtaApexInfo(target_zip, source_file=target_file)
+    apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
+    apex_metadata_proto.ParseFromString(info_bytes)
+
+    info_list = apex_metadata_proto.apex_info
+    self.assertEqual(2, len(info_list))
+    self.assertEqual('com.android.apex.1', info_list[0].package_name)
+    self.assertEqual(1000, info_list[0].version)
+    self.assertEqual(1000, info_list[0].source_version)
+
   def test_GetPackageMetadata_retrofitDynamicPartitions(self):
     target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
     common.OPTIONS.retrofit_dynamic_partitions = True
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 401857f..cfe3139 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -194,7 +194,8 @@
 
     # Check we have the same recovery target in the check and flash commands.
     assert check_partition == flash_partition, \
-        "Mismatching targets: {} vs {}".format(check_partition, flash_partition)
+        "Mismatching targets: {} vs {}".format(
+            check_partition, flash_partition)
 
     # Validate the SHA-1 of the recovery image.
     recovery_sha1 = flash_partition.split(':')[3]
@@ -248,6 +249,29 @@
     os.symlink(os.path.join(src, filename), os.path.join(dst, filename))
 
 
+def ValidatePartitionFingerprints(input_tmp, info_dict):
+  build_info = common.BuildInfo(info_dict)
+  # Expected format:
+  #  Prop: com.android.build.vendor.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
+  #  Prop: com.android.build.vendor_boot.fingerprint -> 'generic/aosp_cf_x86_64_phone/vsoc_x86_64:S/AOSP.MASTER/7335886:userdebug/test-keys'
+  p = re.compile(
+      r"Prop: com.android.build.(?P<partition>\w+).fingerprint -> '(?P<fingerprint>[\w\/:\.-]+)'")
+  for vbmeta_partition in ["vbmeta", "vbmeta_system"]:
+    image = os.path.join(input_tmp, "IMAGES", vbmeta_partition + ".img")
+    output = common.RunAndCheckOutput(
+        [info_dict["avb_avbtool"], "info_image", "--image", image])
+    matches = p.findall(output)
+    for (partition, fingerprint) in matches:
+      actual_fingerprint = build_info.GetPartitionFingerprint(
+          partition)
+      if actual_fingerprint is None:
+        logging.warning(
+            "Failed to get fingerprint for partition %s", partition)
+        continue
+      assert fingerprint == actual_fingerprint, "Fingerprint mismatch for partition {}, expected: {} actual: {}".format(
+          partition, fingerprint, actual_fingerprint)
+
+
 def ValidateVerifiedBootImages(input_tmp, info_dict, options):
   """Validates the Verified Boot related images.
 
@@ -273,7 +297,7 @@
   # longer copied from RADIO to the IMAGES folder. But avbtool assumes that
   # images are in IMAGES folder. So we symlink them.
   symlinkIfNotExists(os.path.join(input_tmp, "RADIO"),
-                    os.path.join(input_tmp, "IMAGES"))
+                     os.path.join(input_tmp, "IMAGES"))
   # Verified boot 1.0 (images signed with boot_signer and verity_signer).
   if info_dict.get('boot_signer') == 'true':
     logging.info('Verifying Verified Boot images...')
@@ -325,11 +349,12 @@
     if info_dict.get("system_root_image") != "true":
       verity_key_ramdisk = os.path.join(
           input_tmp, 'BOOT', 'RAMDISK', 'verity_key')
-      assert os.path.exists(verity_key_ramdisk), 'Missing verity_key in ramdisk'
+      assert os.path.exists(
+          verity_key_ramdisk), 'Missing verity_key in ramdisk'
 
       assert filecmp.cmp(
           verity_key_mincrypt, verity_key_ramdisk, shallow=False), \
-              'Mismatching verity_key files in root and ramdisk'
+          'Mismatching verity_key files in root and ramdisk'
       logging.info('Verified the content of /verity_key in ramdisk')
 
     # Then verify the verity signed system/vendor/product images, against the
@@ -362,6 +387,8 @@
     if key is None:
       key = info_dict['avb_vbmeta_key_path']
 
+    ValidatePartitionFingerprints(input_tmp, info_dict)
+
     # avbtool verifies all the images that have descriptors listed in vbmeta.
     # Using `--follow_chain_partitions` so it would additionally verify chained
     # vbmeta partitions (e.g. vbmeta_system).
@@ -411,7 +438,7 @@
 
     # avbtool verifies recovery image for non-A/B devices.
     if (info_dict.get('ab_update') != 'true' and
-        info_dict.get('no_recovery') != 'true'):
+            info_dict.get('no_recovery') != 'true'):
       image = os.path.join(input_tmp, 'IMAGES', 'recovery.img')
       key = info_dict['avb_recovery_key_path']
       cmd = [info_dict['avb_avbtool'], 'verify_image', '--image', image,
@@ -427,21 +454,21 @@
 
 
 def CheckDataInconsistency(lines):
-    build_prop = {}
-    for line in lines:
-      if line.startswith("import") or line.startswith("#"):
-        continue
-      if "=" not in line:
-        continue
+  build_prop = {}
+  for line in lines:
+    if line.startswith("import") or line.startswith("#"):
+      continue
+    if "=" not in line:
+      continue
 
-      key, value = line.rstrip().split("=", 1)
-      if key in build_prop:
-        logging.info("Duplicated key found for {}".format(key))
-        if value != build_prop[key]:
-          logging.error("Key {} is defined twice with different values {} vs {}"
-                        .format(key, value, build_prop[key]))
-          return key
-      build_prop[key] = value
+    key, value = line.rstrip().split("=", 1)
+    if key in build_prop:
+      logging.info("Duplicated key found for {}".format(key))
+      if value != build_prop[key]:
+        logging.error("Key {} is defined twice with different values {} vs {}"
+                      .format(key, value, build_prop[key]))
+        return key
+    build_prop[key] = value
 
 
 def CheckBuildPropDuplicity(input_tmp):
diff --git a/tools/warn/cpp_warn_patterns.py b/tools/warn/cpp_warn_patterns.py
index 2fa9916..90759d9 100644
--- a/tools/warn/cpp_warn_patterns.py
+++ b/tools/warn/cpp_warn_patterns.py
@@ -91,6 +91,8 @@
          [r".*: warning: incompatible redeclaration of library function .+"]),
     high('Null passed as non-null argument',
          [r".*: warning: Null passed to a callee that requires a non-null"]),
+    medium('Unused command line argument',
+           [r".*: warning: argument unused during compilation: .+"]),
     medium('Unused parameter',
            [r".*: warning: unused parameter '.*'"]),
     medium('Unused function, variable, label, comparison, etc.',
@@ -166,6 +168,8 @@
            [r".*: warning: '.+' declared with greater visibility than the type of its field '.+'"]),
     medium('Shift count greater than width of type',
            [r".*: warning: (left|right) shift count >= width of type"]),
+    medium('Shift operator precedence',
+           [r".*: warning: operator .* has lower precedence .+Wshift-op-parentheses.+"]),
     medium('extern <foo> is initialized',
            [r".*: warning: '.+' initialized and declared 'extern'",
             r".*: warning: 'extern' variable has an initializer"]),
@@ -239,6 +243,8 @@
            [r".*: warning: ignoring #pragma .+"]),
     medium('Pragma warning messages',
            [r".*: warning: .+W#pragma-messages"]),
+    medium('Pragma once in main file',
+           [r".*: warning: #pragma once in main file .+Wpragma-once-outside-header.*"]),
     medium('Variable might be clobbered by longjmp or vfork',
            [r".*: warning: variable '.+' might be clobbered by 'longjmp' or 'vfork'"]),
     medium('Argument might be clobbered by longjmp or vfork',
@@ -333,7 +339,7 @@
     low('Deprecated register',
         [r".*: warning: 'register' storage class specifier is deprecated"]),
     low('Converts between pointers to integer types with different sign',
-        [r".*: warning: .+ converts between pointers to integer types with different sign"]),
+        [r".*: warning: .+ converts between pointers to integer types .+Wpointer-sign\]"]),
     harmless('Extra tokens after #endif',
              [r".*: warning: extra tokens at end of #endif directive"]),
     medium('Comparison between different enums',
@@ -410,6 +416,32 @@
         [r".*: warning: missing .+Winvalid-pp-token"]),
     low('need glibc to link',
         [r".*: warning: .* requires at runtime .* glibc .* for linking"]),
+    low('Add braces to avoid dangling else',
+        [r".*: warning: add explicit braces to avoid dangling else"]),
+    low('Assigning value to self',
+        [r".*: warning: explicitly assigning value of .+ to itself"]),
+    low('Comparison of integers of different signs',
+        [r".*: warning: comparison of integers of different signs.+sign-compare"]),
+    low('Incompatible pointer types',
+        [r".*: warning: incompatible .*pointer types .*-Wincompatible-.*pointer-types"]),
+    low('Missing braces',
+        [r".*: warning: suggest braces around initialization of",
+         r".*: warning: too many braces around scalar initializer .+Wmany-braces-around-scalar-init",
+         r".*: warning: braces around scalar initializer"]),
+    low('Missing field initializers',
+        [r".*: warning: missing field '.+' initializer"]),
+    low('Typedef redefinition',
+        [r".*: warning: redefinition of typedef '.+' is a C11 feature"]),
+    low('GNU old-style field designator',
+        [r".*: warning: use of GNU old-style field designator extension"]),
+    low('Initializer overrides prior initialization',
+        [r".*: warning: initializer overrides prior initialization of this subobject"]),
+    low('GNU extension, variable sized type not at end',
+        [r".*: warning: field '.+' with variable sized type '.+' not at the end of a struct or class"]),
+    low('Comparison of constant is always false/true',
+        [r".*: comparison of .+ is always .+Wtautological-constant-out-of-range-compare"]),
+    low('Hides overloaded virtual function',
+        [r".*: '.+' hides overloaded virtual function"]),
     medium('Operator new returns NULL',
            [r".*: warning: 'operator new' must not return NULL unless it is declared 'throw\(\)' .+"]),
     medium('NULL used in arithmetic',
diff --git a/tools/warn/html_writer.py b/tools/warn/html_writer.py
index ac5d4b7..ef173bc 100644
--- a/tools/warn/html_writer.py
+++ b/tools/warn/html_writer.py
@@ -328,7 +328,8 @@
     cur_row_class = 1 - cur_row_class
     # remove last '\n'
     out_text = text[:-1] if text[-1] == '\n' else text
-    writer('<tr><td class="c' + str(cur_row_class) + '">' + out_text + '</td></tr>')
+    writer('<tr><td class="c' + str(cur_row_class) + '">'
+           + out_text + '</td></tr>')
   writer('</table></div>')
   writer('</blockquote>')
 
@@ -355,7 +356,8 @@
   sort_warnings(warn_patterns)
   total = 0
   for severity in Severity.levels:
-    total += write_severity(csvwriter, severity, severity.column_header, warn_patterns)
+    total += write_severity(
+        csvwriter, severity, severity.column_header, warn_patterns)
   csvwriter.writerow([total, '', 'All warnings'])
 
 
diff --git a/tools/warn/java_warn_patterns.py b/tools/warn/java_warn_patterns.py
index 534f48d..3f5da9d 100644
--- a/tools/warn/java_warn_patterns.py
+++ b/tools/warn/java_warn_patterns.py
@@ -74,6 +74,8 @@
                 [r'.*\.class\): warning: Cannot find annotation method .+ in']),
     java_medium('No class/method in SDK ...',
                 [r'.*\.java:.*: warning: No such (class|method) .* for SDK']),
+    java_medium('Unknown enum constant',
+                [r'unknown_source_file: warning: unknown enum constant .+']),
     # Warnings generated by Error Prone
     java_medium('Non-ascii characters used, but ascii encoding specified',
                 [r".*: warning: unmappable character for encoding ascii"]),
@@ -207,6 +209,8 @@
            'Logging or rethrowing exceptions should usually be preferred to catching and calling printStackTrace'),
     medium('CatchFail',
            'Ignoring exceptions and calling fail() is unnecessary, and makes test output less useful'),
+    medium('ChangedAbstract',
+           'Method has changed \'abstract\' qualifier'),
     medium('ClassCanBeStatic',
            'Inner class is non-static but does not reference enclosing class'),
     medium('ClassNewInstance',
@@ -355,6 +359,8 @@
            'equals method doesn\'t override Object.equals'),
     medium('NotCloseable',
            'Not closeable'),
+    medium('NullableCollection',
+           'Method should not return a nullable collection'),
     medium('NullableConstructor',
            'Constructors should not be annotated with @Nullable since they cannot return null'),
     medium('NullableDereference',
@@ -801,6 +807,8 @@
                 [r".*: warning: \[path\] bad path element .*\.jar"]),
     java_medium('Supported version from annotation processor',
                 [r".*: warning: Supported source version .+ from annotation processor"]),
+    java_medium('Schema export directory is not provided',
+                [r".*\.(java|kt):.*: warning: Schema export directory is not provided"]),
 ]
 
 compile_patterns(warn_patterns)
diff --git a/tools/warn/make_warn_patterns.py b/tools/warn/make_warn_patterns.py
index a54c502..11ad5cc 100644
--- a/tools/warn/make_warn_patterns.py
+++ b/tools/warn/make_warn_patterns.py
@@ -35,6 +35,9 @@
     {'category': 'make', 'severity': Severity.HIGH,
      'description': 'System module linking to a vendor module',
      'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(partition:.+\)"]},
+    {'category': 'make', 'severity': Severity.HIGH,
+     'description': 'make: lstat file does not exist',
+     'patterns': [r".*: warning: lstat .+: file does not exist"]},
     {'category': 'make', 'severity': Severity.MEDIUM,
      'description': 'Invalid SDK/NDK linking',
      'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(.+\)"]},
@@ -56,6 +59,9 @@
     {'category': 'make', 'severity': Severity.MEDIUM,
      'description': 'make: deprecated macros',
      'patterns': [r".*\.mk:.* warning:.* [A-Z_]+ (is|has been) deprecated."]},
+    {'category': 'make', 'severity': Severity.MEDIUM,
+     'description': 'make: other Android.mk warnings',
+     'patterns': [r".*/Android.mk:.*: warning: .+"]},
 ]
 
 
diff --git a/tools/warn/other_warn_patterns.py b/tools/warn/other_warn_patterns.py
index d05c8e9..c95528c 100644
--- a/tools/warn/other_warn_patterns.py
+++ b/tools/warn/other_warn_patterns.py
@@ -75,37 +75,15 @@
     # misc warnings
     misc('Duplicate logtag',
          [r".*: warning: tag \".+\" \(.+\) duplicated in .+"]),
-    misc('Typedef redefinition',
-         [r".*: warning: redefinition of typedef '.+' is a C11 feature"]),
-    misc('GNU old-style field designator',
-         [r".*: warning: use of GNU old-style field designator extension"]),
-    misc('Missing field initializers',
-         [r".*: warning: missing field '.+' initializer"]),
-    misc('Missing braces',
-         [r".*: warning: suggest braces around initialization of",
-          r".*: warning: too many braces around scalar initializer .+Wmany-braces-around-scalar-init",
-          r".*: warning: braces around scalar initializer"]),
-    misc('Comparison of integers of different signs',
-         [r".*: warning: comparison of integers of different signs.+sign-compare"]),
-    misc('Add braces to avoid dangling else',
-         [r".*: warning: add explicit braces to avoid dangling else"]),
-    misc('Initializer overrides prior initialization',
-         [r".*: warning: initializer overrides prior initialization of this subobject"]),
-    misc('Assigning value to self',
-         [r".*: warning: explicitly assigning value of .+ to itself"]),
-    misc('GNU extension, variable sized type not at end',
-         [r".*: warning: field '.+' with variable sized type '.+' not at the end of a struct or class"]),
-    misc('Comparison of constant is always false/true',
-         [r".*: comparison of .+ is always .+Wtautological-constant-out-of-range-compare"]),
-    misc('Hides overloaded virtual function',
-         [r".*: '.+' hides overloaded virtual function"]),
-    misc('Incompatible pointer types',
-         [r".*: warning: incompatible .*pointer types .*-Wincompatible-.*pointer-types"]),
     # Assembler warnings
     asm('ASM value size does not match register size',
         [r".*: warning: value size does not match register size specified by the constraint and modifier"]),
     asm('IT instruction is deprecated',
         [r".*: warning: applying IT instruction .* is deprecated"]),
+    asm('section flags ignored',
+        [r".*: warning: section flags ignored on section redeclaration"]),
+    asm('setjmp/longjmp/vfork changed binding',
+        [r".*: warning: .*(setjmp|longjmp|vfork) changed binding to .*"]),
     # NDK warnings
     {'category': 'NDK', 'severity': Severity.HIGH,
      'description': 'NDK: Generate guard with empty availability, obsoleted',
@@ -168,6 +146,9 @@
     {'category': 'RenderScript', 'severity': Severity.LOW,
      'description': 'RenderScript warnings',
      'patterns': [r'.*\.rscript:.*: warning: ']},
+    {'category': 'RenderScript', 'severity': Severity.HIGH,
+     'description': 'RenderScript is deprecated',
+     'patterns': [r'.*: warning: Renderscript is deprecated:.+']},
     # Broken/partial warning messages will be skipped.
     {'category': 'Misc', 'severity': Severity.SKIP,
      'description': 'skip, ,',