Merge "Change the global CFI flag to default to enabled."
diff --git a/CleanSpec.mk b/CleanSpec.mk
index bbeac6c..88f9172 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -395,6 +395,28 @@
 $(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS/*_intermediates/src)
 $(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/src)
 
+$(call add-clean-step, rm -rf $(HOST_OUT_TESTCASES))
+$(call add-clean-step, rm -rf $(TARGET_OUT_TESTCASES))
+
+$(call add-clean-step, rm -rf $(TARGET_OUT_ETC)/init)
+
+# Libraries are moved from {system|vendor}/lib to ./lib/framework, ./lib/vndk, etc.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
+# Revert that move
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
+# Sanitized libraries now live in a different location.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/vendor/lib*)
+
+# Soong module variant change, remove obsolete intermediates
+$(call add-clean-step, rm -rf $(OUT_DIR)/soong/.intermediates)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
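
For reference, each entry above follows the add-clean-step convention used throughout
CleanSpec.mk: the macro records a shell command that the build runs once per output
directory when it encounters a step it has not yet applied. A minimal sketch of the
pattern (libfoo is a hypothetical module):

    $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libfoo_intermediates)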
diff --git a/buildspec.mk.default b/buildspec.mk.default
index a7ac7ec..b31578a 100644
--- a/buildspec.mk.default
+++ b/buildspec.mk.default
@@ -36,6 +36,12 @@
 #TARGET_BUILD_VARIANT:=eng
 endif
 
+# Choose a targeted release.  If you don't pick one, the default is the
+# soonest future release.
+ifndef TARGET_PLATFORM_RELEASE
+#TARGET_PLATFORM_RELEASE:=OPR1
+endif
+
 # Choose additional targets to always install, even when building
 # minimal targets like "make droid".  This takes simple target names
 # like "Browser" or "MyApp", the names used by LOCAL_MODULE or
@@ -105,4 +111,4 @@
 # variable will be changed.  After you have modified this file with the new
 # changes (see buildspec.mk.default), update this to the new value from
 # buildspec.mk.default.
-BUILD_ENV_SEQUENCE_NUMBER := 12
+BUILD_ENV_SEQUENCE_NUMBER := 13
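
Bumping BUILD_ENV_SEQUENCE_NUMBER invalidates stale copies of buildspec.mk: the build
compares the copied value against the one shipped in buildspec.mk.default and aborts on
a mismatch. A hedged sketch of that check (the CORRECT_BUILD_ENV_SEQUENCE_NUMBER name
and the message are assumptions, not quoted from the build system):

    ifneq ($(BUILD_ENV_SEQUENCE_NUMBER),$(CORRECT_BUILD_ENV_SEQUENCE_NUMBER))
      $(error buildspec.mk is outdated; re-copy it from buildspec.mk.default)
    endif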
diff --git a/core/Makefile b/core/Makefile
index 1dc3f52..0a99376 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -5,7 +5,7 @@
 LOCAL_PATH := $(BUILD_SYSTEM)
 
 # Pick a reasonable string to use to identify files.
-ifneq "" "$(filter eng.%,$(BUILD_NUMBER))"
+ifneq (,$(filter eng.%,$(BUILD_NUMBER)))
   # BUILD_NUMBER has a timestamp in it, which means that
   # it will change every time.  Pick a stable value.
   FILE_NAME_TAG := eng.$(USER)
@@ -74,12 +74,12 @@
 # default.prop
 INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_ROOT_OUT)/default.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DEFAULT_PROP_TARGET)
-ADDITIONAL_DEFAULT_PROPERTIES := \
+FINAL_DEFAULT_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_DEFAULT_PROPERTIES))
-ADDITIONAL_DEFAULT_PROPERTIES += \
+FINAL_DEFAULT_PROPERTIES += \
     $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
-ADDITIONAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_DEFAULT_PROPERTIES),=)
+FINAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_DEFAULT_PROPERTIES),=)
 
 intermediate_system_build_prop := $(call intermediates-dir-for,ETC,system_build_prop)/build.prop
 
@@ -89,7 +89,7 @@
 	$(hide) echo "#" > $@; \
 	        echo "# ADDITIONAL_DEFAULT_PROPERTIES" >> $@; \
 	        echo "#" >> $@;
-	$(hide) $(foreach line,$(ADDITIONAL_DEFAULT_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_DEFAULT_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) echo "#" >> $@; \
 	        echo "# BOOTIMAGE_BUILD_PROPERTIES" >> $@; \
@@ -103,10 +103,10 @@
 # build.prop
 INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_BUILD_PROP_TARGET)
-ADDITIONAL_BUILD_PROPERTIES := \
+FINAL_BUILD_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_BUILD_PROPERTIES))
-ADDITIONAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_BUILD_PROPERTIES),=)
+FINAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_BUILD_PROPERTIES),=)
 
 # A list of arbitrary tags describing the build configuration.
 # Force ":=" so we can use +=
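
The rename to FINAL_* leaves the user-facing ADDITIONAL_* variables untouched for other
makefiles to append to, while deduplication still keeps the first value seen for each
key. An illustrative expansion (property names and values made up):

    # $(call uniq-pairs-by-first-component,ro.secure=1 ro.secure=0 ro.adb.secure=0,=)
    #   => ro.secure=1 ro.adb.secure=0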
@@ -171,7 +171,7 @@
  # release build number, or branch.build_number for non-release builds
 
   # Dev. branches should have DISPLAY_BUILD_NUMBER set
-  ifeq "true" "$(DISPLAY_BUILD_NUMBER)"
+  ifeq (true,$(DISPLAY_BUILD_NUMBER))
     BUILD_DISPLAY_ID := $(BUILD_ID).$(BUILD_NUMBER_FROM_FILE) $(BUILD_KEYS)
   else
     BUILD_DISPLAY_ID := $(BUILD_ID) $(BUILD_KEYS)
@@ -256,12 +256,12 @@
 			echo "#" >> $@; \
 			cat $(file) >> $@; \
 		fi;)
-	$(if $(ADDITIONAL_BUILD_PROPERTIES), \
+	$(if $(FINAL_BUILD_PROPERTIES), \
 		$(hide) echo >> $@; \
 		        echo "#" >> $@; \
 		        echo "# ADDITIONAL_BUILD_PROPERTIES" >> $@; \
 		        echo "#" >> $@; )
-	$(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_BUILD_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@
 	$(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
@@ -600,14 +600,14 @@
 
 else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true
 
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(FUTILITY)
 	$(call pretty,"Target boot image: $@")
 	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned
 	$(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@
 	$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
 
 .PHONY: bootimage-nodeps
-bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER)
+bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER) $(FUTILITY)
 	@echo "make $@: ignoring dependencies"
 	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET).unsigned
 	$(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
@@ -693,6 +693,7 @@
 
 ifndef TARGET_BUILD_APPS
 kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
+winpthreads_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/winpthreads.txt
 pdk_fusion_notice_files := $(filter $(TARGET_OUT_NOTICE_FILES)/%, $(ALL_PDK_FUSION_FILES))
 
 $(eval $(call combine-notice-files, \
@@ -707,7 +708,8 @@
 			$(tools_notice_file_html), \
 			"Notices for files contained in the tools directory:", \
 			$(HOST_OUT_NOTICE_FILES), \
-			$(ALL_DEFAULT_INSTALLED_MODULES)))
+			$(ALL_DEFAULT_INSTALLED_MODULES) \
+			$(winpthreads_notice_file)))
 
 # Install the html file at /system/etc/NOTICE.html.gz.
 # This is not ideal, but this is very late in the game, after a lot of
@@ -736,6 +738,12 @@
 	$(hide) mkdir -p $(dir $@)
 	$(hide) $(ACP) $< $@
 
+$(winpthreads_notice_file): \
+	    $(BUILD_SYSTEM)/WINPTHREADS_COPYING \
+	    | $(ACP)
+	@echo Copying: $@
+	$(hide) mkdir -p $(dir $@)
+	$(hide) $(ACP) $< $@
 
 # -----------------------------------------------------------------
 # Build a keystore with the authorized keys in it, used to verify the
@@ -751,11 +759,11 @@
 	$(hide) zip -qjX $@ $<
 	$(remove-timestamps-from-package)
 
-# Carry the public key for update_engine if it's a non-Brillo target that
+# Carry the public key for update_engine if it's a non-IoT target that
 # uses the AB updater. We use the same key as otacerts but in RSA public key
 # format.
 ifeq ($(AB_OTA_UPDATER),true)
-ifeq ($(BRILLO),)
+ifneq ($(PRODUCT_IOT),true)
 ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem
 $(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR))
 	$(hide) rm -f $@
@@ -827,7 +835,7 @@
 endif
 endif
 
-SELINUX_FC := $(TARGET_ROOT_OUT)/file_contexts.bin
+SELINUX_FC := $(call intermediates-dir-for,ETC,file_contexts.bin)/file_contexts.bin
 INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
 
 INTERNAL_USERIMAGES_DEPS += $(BLK_ALLOC_TO_BASE_FS)
@@ -878,12 +886,14 @@
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)" >> $(1))
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY)" >> $(1))
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_subkey=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY)" >> $(1))
-$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "futility=$(FUTILITY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "futility=$(notdir $(FUTILITY))" >> $(1))
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_signer_cmd=$(VBOOT_SIGNER)" >> $(1))
 $(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_signing_args=$(INTERNAL_AVB_SIGNING_ARGS)" >> $(1))
 $(if $(BOARD_AVB_ENABLE),$(hide) echo "avb_avbtool=$(AVBTOOL)" >> $(1))
 $(if $(BOARD_AVB_ENABLE),$(hide) echo "system_avb_enable=$(BOARD_AVB_ENABLE)" >> $(1))
 $(if $(BOARD_AVB_ENABLE),$(hide) echo "system_avb_add_hashtree_footer_args=$(BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS)" >> $(1))
+$(if $(BOARD_AVB_ENABLE),$(hide) echo "vendor_avb_enable=$(BOARD_AVB_ENABLE)" >> $(1))
+$(if $(BOARD_AVB_ENABLE),$(hide) echo "vendor_avb_add_hashtree_footer_args=$(BOARD_AVB_VENDOR_ADD_HASHTREE_FOOTER_ARGS)" >> $(1))
 $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
     $(hide) echo "recovery_as_boot=true" >> $(1))
 $(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
@@ -902,7 +912,15 @@
     $(ALL_DEFAULT_INSTALLED_MODULES))
 
 recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
-recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
+recovery_sepolicy := \
+    $(TARGET_RECOVERY_ROOT_OUT)/sepolicy \
+    $(TARGET_RECOVERY_ROOT_OUT)/file_contexts.bin \
+    $(TARGET_RECOVERY_ROOT_OUT)/plat_property_contexts \
+    $(TARGET_RECOVERY_ROOT_OUT)/nonplat_property_contexts
+# Passed into rsync from non-recovery root to recovery root, to avoid overwriting recovery-specific
+# SELinux files
+IGNORE_RECOVERY_SEPOLICY := $(patsubst $(TARGET_RECOVERY_OUT)/%,--exclude=/%,$(recovery_sepolicy))
+
 recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
 recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
 recovery_build_prop := $(intermediate_system_build_prop)
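
The patsubst above turns each recovery SELinux file into an rsync exclude pattern rooted
at the recovery staging directory; assuming TARGET_RECOVERY_ROOT_OUT is
$(TARGET_RECOVERY_OUT)/root, the expansion looks like:

    # $(TARGET_RECOVERY_OUT)/root/sepolicy          -> --exclude=/root/sepolicy
    # $(TARGET_RECOVERY_OUT)/root/file_contexts.bin -> --exclude=/root/file_contexts.bin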
@@ -1013,18 +1031,37 @@
 	java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys) > $@
 
 RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id
+
+# $(1): modules list
+# $(2): output dir
+# $(3): mount point
+# $(4): staging dir
+# Depmod requires a well-formed kernel version so 0.0 is used as a placeholder.
+define build-image-kernel-modules
+    $(hide) rm -rf $(2)/lib/modules
+    $(hide) mkdir -p $(2)/lib/modules
+    $(hide) cp $(1) $(2)/lib/modules/
+    $(hide) rm -rf $(4)
+    $(hide) mkdir -p $(4)/lib/modules/0.0/$(3)lib/modules
+    $(hide) cp $(1) $(4)/lib/modules/0.0/$(3)lib/modules
+    $(hide) $(DEPMOD) -b $(4) 0.0
+    $(hide) sed -e 's/\(.*modules.*\):/\/\1:/g' -e 's/ \([^ ]*modules[^ ]*\)/ \/\1/g' -i $(4)/lib/modules/0.0/modules.dep
+    $(hide) cp $(4)/lib/modules/0.0/modules.dep $(2)/lib/modules
+endef
+
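The helper stages the modules twice: directly into the image under $(2)/lib/modules, and
into a scratch tree shaped like a /lib/modules/0.0 kernel install so depmod can resolve
dependencies; the sed pass then rewrites the modules.dep entries to absolute paths.
Staging layout for the vendor case (illustrative):

    # $(4)/lib/modules/0.0/vendor/lib/modules/foo.ko   <- copied module
    # $(4)/lib/modules/0.0/modules.dep                 <- written by depmod -b $(4) 0.0
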
 # $(1): output file
 define build-recoveryimage-target
   @echo ----- Making recovery image ------
   $(hide) mkdir -p $(TARGET_RECOVERY_OUT)
   $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
   @echo Copying baseline ramdisk...
-  $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+  # Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
+  $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
   @echo Modifying ramdisk contents...
+  $(if $(BOARD_RECOVERY_KERNEL_MODULES), \
+    $(call build-image-kernel-modules,$(BOARD_RECOVERY_KERNEL_MODULES),$(TARGET_RECOVERY_ROOT_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_recovery)))
   $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
   $(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
-  $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
-  $(hide) cp -f $(recovery_sepolicy) $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
   $(hide) cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ || true # Ignore error when the src file doesn't exist.
   $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
   $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
@@ -1082,7 +1119,9 @@
 		$(INSTALLED_2NDBOOTLOADER_TARGET) \
 		$(recovery_build_prop) $(recovery_resource_deps) \
 		$(recovery_fstab) \
-		$(RECOVERY_INSTALL_OTA_KEYS)
+		$(RECOVERY_INSTALL_OTA_KEYS) \
+		$(BOARD_RECOVERY_KERNEL_MODULES) \
+		$(DEPMOD)
 		$(call pretty,"Target boot image from recovery: $@")
 		$(call build-recoveryimage-target, $@)
 endif
@@ -1095,7 +1134,9 @@
 		$(INSTALLED_2NDBOOTLOADER_TARGET) \
 		$(recovery_build_prop) $(recovery_resource_deps) \
 		$(recovery_fstab) \
-		$(RECOVERY_INSTALL_OTA_KEYS)
+		$(RECOVERY_INSTALL_OTA_KEYS) \
+		$(BOARD_RECOVERY_KERNEL_MODULES) \
+		$(DEPMOD)
 		$(call build-recoveryimage-target, $@)
 
 ifdef RECOVERY_RESOURCE_ZIP
@@ -1141,6 +1182,15 @@
 
 
 FULL_SYSTEMIMAGE_DEPS := $(INTERNAL_SYSTEMIMAGE_FILES) $(INTERNAL_USERIMAGES_DEPS)
+
+# ASAN libraries in the system image - add dependency.
+ASAN_IN_SYSTEM_INSTALLED := $(TARGET_OUT)/asan.tar.bz2
+ifneq (,$(SANITIZE_TARGET))
+  ifeq (true,$(SANITIZE_TARGET_SYSTEM))
+    FULL_SYSTEMIMAGE_DEPS += $(ASAN_IN_SYSTEM_INSTALLED)
+  endif
+endif
+
 # -----------------------------------------------------------------
 # installed file list
 # Depending on anything that $(BUILT_SYSTEMIMAGE) depends on.
@@ -1148,11 +1198,11 @@
 # so that we can get the size stat even if the build fails due to too large
 # system image.
 INSTALLED_FILES_FILE := $(PRODUCT_OUT)/installed-files.txt
-$(INSTALLED_FILES_FILE): $(FULL_SYSTEMIMAGE_DEPS)
+$(INSTALLED_FILES_FILE): $(FULL_SYSTEMIMAGE_DEPS) $(FILESLIST)
 	@echo Installed file list: $@
 	@mkdir -p $(dir $@)
 	@rm -f $@
-	$(hide) build/tools/fileslist.py $(TARGET_OUT) > $(@:.txt=.json)
+	$(hide) $(FILESLIST) $(TARGET_OUT) > $(@:.txt=.json)
 	$(hide) build/tools/fileslist_util.py -c $(@:.txt=.json) > $@
 
 .PHONY: installed-file-list
@@ -1316,7 +1366,7 @@
 	  $(hide) echo "PDK.DEXPREOPT.$(m).MULTILIB:=$(DEXPREOPT.$(m).MULTILIB)" >> $@$(newline)\
 	  $(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT_FLAGS:=$(DEXPREOPT.$(m).DEX_PREOPT_FLAGS)" >> $@$(newline)\
 	  $(hide) echo "PDK.DEXPREOPT.$(m).PRIVILEGED_MODULE:=$(DEXPREOPT.$(m).PRIVILEGED_MODULE)" >> $@$(newline)\
-	  $(hide) echo "PDK.DEXPREOPT.$(m).PROPRIETARY_MODULE:=$(DEXPREOPT.$(m).PROPRIETARY_MODULE)" >> $@$(newline)\
+	  $(hide) echo "PDK.DEXPREOPT.$(m).VENDOR_MODULE:=$(DEXPREOPT.$(m).VENDOR_MODULE)" >> $@$(newline)\
 	  $(hide) echo "PDK.DEXPREOPT.$(m).TARGET_ARCH:=$(DEXPREOPT.$(m).TARGET_ARCH)" >> $@$(newline)\
 	  $(hide) echo "PDK.DEXPREOPT.$(m).STRIPPED_SRC:=$(patsubst $(PRODUCT_OUT)/%,%,$(DEXPREOPT.$(m).INSTALLED_STRIPPED))" >> $@$(newline)\
 	  )
@@ -1417,9 +1467,11 @@
 
 # We just build this directly to the install location.
 INSTALLED_USERDATAIMAGE_TARGET := $(BUILT_USERDATAIMAGE_TARGET)
-$(INSTALLED_USERDATAIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) \
-                                   $(INTERNAL_USERDATAIMAGE_FILES) \
-                                   $(BUILD_IMAGE_SRCS)
+INSTALLED_USERDATAIMAGE_TARGET_DEPS := \
+    $(INTERNAL_USERIMAGES_DEPS) \
+    $(INTERNAL_USERDATAIMAGE_FILES) \
+    $(BUILD_IMAGE_SRCS)
+$(INSTALLED_USERDATAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
 	$(build-userdataimage-target)
 
 .PHONY: userdataimage-nodeps
@@ -1429,6 +1481,19 @@
 endif # not skip_userdata.img
 skip_userdata.img :=
 
+# ASAN libraries in the system image - build rule.
+ASAN_OUT_DIRS_FOR_SYSTEM_INSTALL := $(sort $(patsubst $(PRODUCT_OUT)/%,%,\
+  $(TARGET_OUT_SHARED_LIBRARIES) \
+  $(2ND_TARGET_OUT_SHARED_LIBRARIES) \
+  $(TARGET_OUT_VENDOR_SHARED_LIBRARIES) \
+  $(2ND_TARGET_OUT_VENDOR_SHARED_LIBRARIES)))
+# Extra options: Enforce the system user for the files to avoid having to change ownership.
+ASAN_SYSTEM_INSTALL_OPTIONS := --owner=1000 --group=1000
+# Note: experimentally, it seems not worth it to try to get "best" compression. We don't save
+#       enough space.
+$(ASAN_IN_SYSTEM_INSTALLED): $(INSTALLED_USERDATAIMAGE_TARGET_DEPS)
+	tar cfj $(ASAN_IN_SYSTEM_INSTALLED) $(ASAN_SYSTEM_INSTALL_OPTIONS) -C $(TARGET_OUT_DATA)/.. $(ASAN_OUT_DIRS_FOR_SYSTEM_INSTALL) >/dev/null
+
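The tar invocation archives the sanitizer-instrumented library directories relative to
$(PRODUCT_OUT) (which is what -C $(TARGET_OUT_DATA)/.. resolves to), owned by the system
user. Illustrative archive contents for a 64-bit target:

    # system/lib64/...  vendor/lib64/...  (plus 32-bit variants when present)
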
 #######
 ## data partition tarball
 define build-userdatatarball-target
@@ -1522,61 +1587,6 @@
 endif # BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
 
 # -----------------------------------------------------------------
-# vbmeta image
-ifeq ($(BOARD_AVB_ENABLE),true)
-
-BUILT_VBMETAIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta.img
-
-INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS := \
-    --include_descriptors_from_image $(INSTALLED_BOOTIMAGE_TARGET) \
-    --include_descriptors_from_image $(INSTALLED_SYSTEMIMAGE) \
-    --generate_dm_verity_cmdline_from_hashtree $(INSTALLED_SYSTEMIMAGE)
-
-ifdef BOARD_AVB_ROLLBACK_INDEX
-INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS += --rollback_index $(BOARD_AVB_ROLLBACK_INDEX)
-endif
-
-ifndef BOARD_AVB_KEY_PATH
-# If key path isn't specified, use the 4096-bit test key.
-INTERNAL_AVB_SIGNING_ARGS := \
-    --algorithm SHA256_RSA4096 \
-    --key external/avb/test/data/testkey_rsa4096.pem
-else
-INTERNAL_AVB_SIGNING_ARGS := \
-    --algorithm $(BOARD_AVB_ALGORITHM) --key $(BOARD_AVB_KEY_PATH)
-endif
-
-ifndef BOARD_BOOTIMAGE_PARTITION_SIZE
-  $(error BOARD_BOOTIMAGE_PARTITION_SIZE must be set for BOARD_AVB_ENABLE)
-endif
-
-ifndef BOARD_SYSTEMIMAGE_PARTITION_SIZE
-  $(error BOARD_SYSTEMIMAGE_PARTITION_SIZE must be set for BOARD_AVB_ENABLE)
-endif
-
-define build-vbmetaimage-target
-  $(call pretty,"Target vbmeta image: $(INSTALLED_VBMETAIMAGE_TARGET)")
-  $(hide) $(AVBTOOL) make_vbmeta_image \
-    $(INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS) \
-    $(INTERNAL_AVB_SIGNING_ARGS) \
-    $(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS) \
-    --output $@
-endef
-
-INSTALLED_VBMETAIMAGE_TARGET := $(BUILT_VBMETAIMAGE_TARGET)
-$(INSTALLED_VBMETAIMAGE_TARGET): $(AVBTOOL) $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_SYSTEMIMAGE)
-	$(build-vbmetaimage-target)
-
-.PHONY: vbmetaimage-nodeps
-vbmetaimage-nodeps:
-	$(build-vbmetaimage-target)
-
-# We need $(AVBTOOL) for system.img generation.
-FULL_SYSTEMIMAGE_DEPS += $(AVBTOOL)
-
-endif # BOARD_AVB_ENABLE
-
-# -----------------------------------------------------------------
 # system_other partition image
 ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
 BOARD_USES_SYSTEM_OTHER := true
@@ -1595,11 +1605,12 @@
       $(ALL_PDK_FUSION_FILES))
 
 INSTALLED_FILES_FILE_SYSTEMOTHER := $(PRODUCT_OUT)/installed-files-system-other.txt
-$(INSTALLED_FILES_FILE_SYSTEMOTHER) : $(INTERNAL_SYSTEMOTHERIMAGE_FILES)
+$(INSTALLED_FILES_FILE_SYSTEMOTHER) : $(INTERNAL_SYSTEMOTHERIMAGE_FILES) $(FILESLIST)
 	@echo Installed file list: $@
 	@mkdir -p $(dir $@)
 	@rm -f $@
-	$(hide) build/tools/fileslist.py $(TARGET_OUT_SYSTEM_OTHER) > $@
+	$(hide) $(FILESLIST) $(TARGET_OUT_SYSTEM_OTHER) > $(@:.txt=.json)
+	$(hide) build/tools/fileslist_util.py -c $(@:.txt=.json) > $@
 
 systemotherimage_intermediates := \
     $(call intermediates-dir-for,PACKAGING,system_other)
@@ -1641,22 +1652,23 @@
 $(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_VENDORIMAGE_FILES)
 
 INSTALLED_FILES_FILE_VENDOR := $(PRODUCT_OUT)/installed-files-vendor.txt
-$(INSTALLED_FILES_FILE_VENDOR) : $(INTERNAL_VENDORIMAGE_FILES)
+$(INSTALLED_FILES_FILE_VENDOR) : $(INTERNAL_VENDORIMAGE_FILES) $(FILESLIST)
 	@echo Installed file list: $@
 	@mkdir -p $(dir $@)
 	@rm -f $@
-	$(hide) build/tools/fileslist.py $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
+	$(hide) $(FILESLIST) $(TARGET_OUT_VENDOR) > $(@:.txt=.json)
 	$(hide) build/tools/fileslist_util.py -c $(@:.txt=.json) > $@
 
 vendorimage_intermediates := \
     $(call intermediates-dir-for,PACKAGING,vendor)
 BUILT_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
-
 define build-vendorimage-target
   $(call pretty,"Target vendor fs image: $(INSTALLED_VENDORIMAGE_TARGET)")
   @mkdir -p $(TARGET_OUT_VENDOR)
   @mkdir -p $(vendorimage_intermediates) && rm -rf $(vendorimage_intermediates)/vendor_image_info.txt
   $(call generate-userimage-prop-dictionary, $(vendorimage_intermediates)/vendor_image_info.txt, skip_fsck=true)
+  $(if $(BOARD_VENDOR_KERNEL_MODULES), \
+    $(call build-image-kernel-modules,$(BOARD_VENDOR_KERNEL_MODULES),$(TARGET_OUT_VENDOR),vendor/,$(call intermediates-dir-for,PACKAGING,depmod_vendor)))
   $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
       ./build/tools/releasetools/build_image.py \
       $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
@@ -1665,11 +1677,11 @@
 
 # We just build this directly to the install location.
 INSTALLED_VENDORIMAGE_TARGET := $(BUILT_VENDORIMAGE_TARGET)
-$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS) $(DEPMOD) $(BOARD_VENDOR_KERNEL_MODULES)
 	$(build-vendorimage-target)
 
-.PHONY: vendorimage-nodeps
-vendorimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+.PHONY: vendorimage-nodeps vnod
+vendorimage-nodeps vnod: | $(INTERNAL_USERIMAGES_DEPS) $(DEPMOD)
 	$(build-vendorimage-target)
 
 else ifdef BOARD_PREBUILT_VENDORIMAGE
@@ -1678,6 +1690,69 @@
 endif
 
 # -----------------------------------------------------------------
+# vbmeta image
+ifeq ($(BOARD_AVB_ENABLE),true)
+
+BUILT_VBMETAIMAGE_TARGET := $(PRODUCT_OUT)/vbmeta.img
+
+INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS := \
+    --include_descriptors_from_image $(INSTALLED_BOOTIMAGE_TARGET) \
+    --include_descriptors_from_image $(INSTALLED_SYSTEMIMAGE)
+
+ifdef INSTALLED_VENDORIMAGE_TARGET
+INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS += \
+    --include_descriptors_from_image $(INSTALLED_VENDORIMAGE_TARGET)
+endif
+
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS += --setup_rootfs_from_kernel $(BUILT_SYSTEMIMAGE)
+endif
+
+ifdef BOARD_AVB_ROLLBACK_INDEX
+INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS += --rollback_index $(BOARD_AVB_ROLLBACK_INDEX)
+endif
+
+ifndef BOARD_AVB_KEY_PATH
+# If key path isn't specified, use the 4096-bit test key.
+INTERNAL_AVB_SIGNING_ARGS := \
+    --algorithm SHA256_RSA4096 \
+    --key external/avb/test/data/testkey_rsa4096.pem
+else
+INTERNAL_AVB_SIGNING_ARGS := \
+    --algorithm $(BOARD_AVB_ALGORITHM) --key $(BOARD_AVB_KEY_PATH)
+endif
+
+ifndef BOARD_BOOTIMAGE_PARTITION_SIZE
+  $(error BOARD_BOOTIMAGE_PARTITION_SIZE must be set for BOARD_AVB_ENABLE)
+endif
+
+ifndef BOARD_SYSTEMIMAGE_PARTITION_SIZE
+  $(error BOARD_SYSTEMIMAGE_PARTITION_SIZE must be set for BOARD_AVB_ENABLE)
+endif
+
+define build-vbmetaimage-target
+  $(call pretty,"Target vbmeta image: $(INSTALLED_VBMETAIMAGE_TARGET)")
+  $(hide) $(AVBTOOL) make_vbmeta_image \
+    $(INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS) \
+    $(INTERNAL_AVB_SIGNING_ARGS) \
+    $(BOARD_AVB_MAKE_VBMETA_IMAGE_ARGS) \
+    --output $@
+endef
+
+INSTALLED_VBMETAIMAGE_TARGET := $(BUILT_VBMETAIMAGE_TARGET)
+$(INSTALLED_VBMETAIMAGE_TARGET): $(AVBTOOL) $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_SYSTEMIMAGE) $(INSTALLED_VENDORIMAGE_TARGET)
+	$(build-vbmetaimage-target)
+
+.PHONY: vbmetaimage-nodeps
+vbmetaimage-nodeps:
+	$(build-vbmetaimage-target)
+
+# We need $(AVBTOOL) for system.img generation.
+FULL_SYSTEMIMAGE_DEPS += $(AVBTOOL)
+
+endif # BOARD_AVB_ENABLE
+
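With the defaults above and a vendor image present, the assembled command expands
roughly to (test-key case):

    # avbtool make_vbmeta_image \
    #     --include_descriptors_from_image $(PRODUCT_OUT)/boot.img \
    #     --include_descriptors_from_image $(PRODUCT_OUT)/system.img \
    #     --include_descriptors_from_image $(PRODUCT_OUT)/vendor.img \
    #     --algorithm SHA256_RSA4096 \
    #     --key external/avb/test/data/testkey_rsa4096.pem \
    #     --output $(PRODUCT_OUT)/vbmeta.img
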
+# -----------------------------------------------------------------
 # bring in the installer image generation defines if necessary
 ifeq ($(TARGET_USE_DISKINSTALLER),true)
 include bootable/diskinstaller/config.mk
@@ -1747,6 +1822,12 @@
   $(HOST_OUT_EXECUTABLES)/delta_generator \
   $(BLK_ALLOC_TO_BASE_FS)
 
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT))
+OTATOOLS += \
+  $(FUTILITY) \
+  $(VBOOT_SIGNER)
+endif
+
 # Shared libraries.
 OTATOOLS += \
   $(HOST_LIBRARY_PATH)/libc++$(HOST_SHLIB_SUFFIX) \
@@ -1758,6 +1839,7 @@
   $(HOST_LIBRARY_PATH)/libext2_blkid-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_com_err-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_e2p-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libext2_misc$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_profile-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_quota-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_uuid-host$(HOST_SHLIB_SUFFIX) \
@@ -1770,6 +1852,7 @@
   $(HOST_LIBRARY_PATH)/libprotobuf-cpp-lite$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libssl-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libz-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libsparse-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libpcre2$(HOST_SHLIB_SUFFIX)
 
@@ -1787,6 +1870,10 @@
 	$(hide) $(ACP) $(HOST_OUT_JAVA_LIBRARIES)/VeritySigner.jar $(zip_root)/framework/
 	$(hide) $(ACP) -p system/extras/verity/build_verity_metadata.py $(zip_root)/system/extras/verity/
 	$(hide) $(ACP) -r -d -p build/tools/releasetools/* $(zip_root)/releasetools
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT))
+	$(hide) mkdir -p $(zip_root)/external/vboot_reference/tests/devkeys
+	$(hide) $(ACP) -r -d -p external/vboot_reference/tests/devkeys/* $(zip_root)/external/vboot_reference/tests/devkeys
+endif
 	$(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
 	$(hide) (cd $(zip_root) && zip -qryX $(abspath $@) *)
 	$(hide) zip -qryX $(abspath $@) build/target/product/security/
@@ -1839,14 +1926,18 @@
 
 ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
 # default to common dir for device vendor
-$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_DEVICE_DIR)/../common
+tool_extensions := $(TARGET_DEVICE_DIR)/../common
 else
-$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
 endif
+tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSIONS := $(tool_extensions)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
 
 ifeq ($(AB_OTA_UPDATER),true)
 # Build zlib fingerprint if using the AB Updater.
 updater_dep := $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint
+updater_dep += system/update_engine/update_engine.conf
 else
 # Build OTA tools if not using the AB Updater.
 updater_dep := $(built_ota_tools)
@@ -1860,25 +1951,51 @@
 $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := RECOVERY
 endif
 
+ifeq ($(AB_OTA_UPDATER),true)
+  ifdef BRILLO_VENDOR_PARTITIONS
+    $(BUILT_TARGET_FILES_PACKAGE): $(foreach p,$(BRILLO_VENDOR_PARTITIONS),\
+                                     $(call word-colon,1,$(p))/$(call word-colon,2,$(p)))
+  endif
+  ifdef OSRELEASED_DIRECTORY
+    $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id
+    $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version
+    $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/system_version
+  endif
+endif
+
+# Run fs_config while creating the target files package
+# $1: root directory
+# $2: add prefix
+define fs_config
+(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC)
+endef
+
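The define emits every path under the given root (directories with a trailing slash),
strips the leading ./, prefixes each entry, and pipes the sorted list through fs_config,
which appends ownership, mode, and SELinux information per line. Output shape
(illustrative values):

    # system/bin/sh 0 2000 0755 selabel=u:object_r:shell_exec:s0 capabilities=0x0
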
 # Depending on the various images guarantees that the underlying
 # directories are up-to-date.
 $(BUILT_TARGET_FILES_PACKAGE): \
 		$(INSTALLED_BOOTIMAGE_TARGET) \
 		$(INSTALLED_RADIOIMAGE_TARGET) \
 		$(INSTALLED_RECOVERYIMAGE_TARGET) \
-		$(INSTALLED_SYSTEMIMAGE) \
+		$(FULL_SYSTEMIMAGE_DEPS) \
 		$(INSTALLED_USERDATAIMAGE_TARGET) \
 		$(INSTALLED_CACHEIMAGE_TARGET) \
 		$(INSTALLED_VENDORIMAGE_TARGET) \
-		$(INSTALLED_SYSTEMOTHERIMAGE_TARGET) \
+		$(INTERNAL_SYSTEMOTHERIMAGE_FILES) \
 		$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
+		$(INSTALLED_KERNEL_TARGET) \
+		$(INSTALLED_2NDBOOTLOADER_TARGET) \
+		$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
+		$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
 		$(SELINUX_FC) \
 		$(APKCERTS_FILE) \
 		$(SOONG_ZIP) \
 		$(HOST_OUT_EXECUTABLES)/fs_config \
-		build/tools/releasetools/add_img_to_target_files \
+		$(HOST_OUT_EXECUTABLES)/imgdiff \
+		$(HOST_OUT_EXECUTABLES)/bsdiff \
+		$(BUILD_IMAGE_SRCS) \
 		| $(ACP)
 	@echo "Package target files: $@"
+	$(call create-system-vendor-symlink)
 	$(hide) rm -rf $@ $@.list $(zip_root)
 	$(hide) mkdir -p $(dir $@) $(zip_root)
 ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
@@ -1887,11 +2004,10 @@
 	$(hide) $(call package_files-copy-root, \
 		$(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK)
 ifdef INSTALLED_KERNEL_TARGET
-	$(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
+	$(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
 endif
 ifdef INSTALLED_2NDBOOTLOADER_TARGET
-	$(hide) $(ACP) \
-		$(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
+	$(hide) cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
 endif
 ifdef INTERNAL_KERNEL_CMDLINE
 	$(hide) echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/cmdline
@@ -1916,11 +2032,10 @@
 	@# If we are using recovery as boot, this is already done when processing recovery.
 ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
 ifdef INSTALLED_KERNEL_TARGET
-	$(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/kernel
+	$(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/kernel
 endif
 ifdef INSTALLED_2NDBOOTLOADER_TARGET
-	$(hide) $(ACP) \
-		$(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
+	$(hide) cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
 endif
 ifdef INTERNAL_KERNEL_CMDLINE
 	$(hide) echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
@@ -1934,7 +2049,7 @@
 endif # BOARD_USES_RECOVERY_AS_BOOT
 	$(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\
 	            mkdir -p $(zip_root)/RADIO; \
-	            $(ACP) $(t) $(zip_root)/RADIO/$(notdir $(t));)
+	            cp $(t) $(zip_root)/RADIO/$(notdir $(t));)
 	@# Contents of the system image
 	$(hide) $(call package_files-copy-root, \
 		$(SYSTEMIMAGE_SOURCE_DIR),$(zip_root)/SYSTEM)
@@ -1953,20 +2068,22 @@
 endif
 	@# Extra contents of the OTA package
 	$(hide) mkdir -p $(zip_root)/OTA
-	$(hide) $(ACP) $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
+	$(hide) cp $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
 ifneq ($(AB_OTA_UPDATER),true)
 ifneq ($(built_ota_tools),)
 	$(hide) mkdir -p $(zip_root)/OTA/bin
-	$(hide) $(ACP) $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
+	$(hide) cp $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
 endif
 endif
 	@# Files that do not end up in any images, but are necessary to
 	@# build them.
 	$(hide) mkdir -p $(zip_root)/META
-	$(hide) $(ACP) $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
-	$(hide) if test -e $(tool_extensions)/releasetools.py; then $(ACP) $(tool_extensions)/releasetools.py $(zip_root)/META/; fi
+	$(hide) cp $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
+ifneq ($(tool_extension),)
+	$(hide) cp $(PRIVATE_TOOL_EXTENSION) $(zip_root)/META/
+endif
 	$(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
-	$(hide) $(ACP) $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
+	$(hide) cp $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
 	$(hide) echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/misc_info.txt
 	$(hide) echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(zip_root)/META/misc_info.txt
 ifdef BOARD_FLASH_BLOCK_SIZE
@@ -1993,27 +2110,25 @@
 else
 	$(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
 endif
-	$(hide) echo "tool_extensions=$(tool_extensions)" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "tool_extensions=$(PRIVATE_TOOL_EXTENSIONS)" >> $(zip_root)/META/misc_info.txt
 	$(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(zip_root)/META/misc_info.txt
 ifdef PRODUCT_EXTRA_RECOVERY_KEYS
 	$(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(zip_root)/META/misc_info.txt
 endif
 	$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
 	$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "use_set_metadata=1" >> $(zip_root)/META/misc_info.txt
 	$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "update_rename_support=1" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "blockimgdiff_versions=1,2,3,4" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
 ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
 	# OTA scripts are only interested in fingerprint related properties
 	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
 endif
 ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),)
-	$(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
+	$(hide) cp $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
 	  $(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH))
 endif
 ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH),)
-	$(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
+	$(hide) cp $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
 	  $(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH))
 endif
 ifneq ($(strip $(SANITIZE_TARGET)),)
@@ -2047,8 +2162,8 @@
 endif
 ifeq ($(AB_OTA_UPDATER),true)
 	@# When using the A/B updater, include the updater config files in the zip.
-	$(hide) $(ACP) $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
-	$(hide) $(ACP) $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint $(zip_root)/META/zlib_fingerprint.txt
+	$(hide) cp $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
+	$(hide) cp $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint $(zip_root)/META/zlib_fingerprint.txt
 	$(hide) for part in $(AB_OTA_PARTITIONS); do \
 	  echo "$${part}" >> $(zip_root)/META/ab_partitions.txt; \
 	done
@@ -2066,12 +2181,13 @@
 	  src=$${pair1}/$${pair2}; \
 	  dest=$(zip_root)/VENDOR_IMAGES/$${pair2}; \
 	  mkdir -p $$(dirname "$${dest}"); \
-	  $(ACP) $${src} $${dest}; \
+	  cp $${src} $${dest}; \
 	done;
 endif
 ifdef OSRELEASED_DIRECTORY
-	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
-	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+	$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
+	$(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+	$(hide) cp $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/system_version $(zip_root)/META/system_version.txt
 endif
 endif
 ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
@@ -2082,28 +2198,29 @@
 	$(hide) mkdir -p $(zip_root)/IMAGES
 	$(hide) cp $(INSTALLED_VENDORIMAGE_TARGET) $(zip_root)/IMAGES/
 endif
+	@# Run fs_config on all the system, vendor, boot ramdisk,
+	@# and recovery ramdisk files in the staging directory, and save the output
+	$(hide) $(call fs_config,$(zip_root)/SYSTEM,system/) > $(zip_root)/META/filesystem_config.txt
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+	$(hide) $(call fs_config,$(zip_root)/VENDOR,vendor/) > $(zip_root)/META/vendor_filesystem_config.txt
+endif
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+	$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
+endif
+	$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
+endif
+ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
+	$(hide) $(call fs_config,$(zip_root)/SYSTEM_OTHER,system/) > $(zip_root)/META/system_other_filesystem_config.txt
+endif
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $(zip_root)
 	@# Zip everything up, preserving symlinks and placing META/ files first to
 	@# help early validation of the .zip file while uploading it.
 	$(hide) find $(zip_root)/META | sort >$@.list
-	$(hide) find $(zip_root) | grep -v "^$(zip_root)/META/" | sort >>$@.list
+	$(hide) find $(zip_root) -path $(zip_root)/META -prune -o -print | sort >>$@.list
 	$(hide) $(SOONG_ZIP) -d -o $@ -C $(zip_root) -l $@.list
-	@# Run fs_config on all the system, vendor, boot ramdisk,
-	@# and recovery ramdisk files in the zip, and save the output
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="ROOT/" } /^ROOT\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/root_filesystem_config.txt
-endif
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
-ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
-endif
-ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM_OTHER/" } /^SYSTEM_OTHER\// { print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/system_other_filesystem_config.txt
-endif
-	$(hide) (cd $(zip_root) && zip -qX ../$(notdir $@) META/*filesystem_config.txt)
-	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
-	    ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $@
 
 .PHONY: target-files-package
 target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
@@ -2141,6 +2258,7 @@
 	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
 	   ./build/tools/releasetools/ota_from_target_files -v \
 	   --block \
+	   --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
 	   -p $(HOST_OUT) \
 	   -k $(KEY_CERT_PAIR) \
 	   $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \
@@ -2195,7 +2313,7 @@
 	$(hide) rm -rf $@ $(PRIVATE_LIST_FILE)
 	$(hide) mkdir -p $(dir $@) $(TARGET_OUT_UNSTRIPPED) $(dir $(PRIVATE_LIST_FILE))
 	$(hide) find $(TARGET_OUT_UNSTRIPPED) | sort >$(PRIVATE_LIST_FILE)
-	$(hide) $(SOONG_ZIP) -d -o $@ -C . -l $(PRIVATE_LIST_FILE)
+	$(hide) $(SOONG_ZIP) -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
 # -----------------------------------------------------------------
 # A zip of the coverage directory.
 #
diff --git a/core/WINPTHREADS_COPYING b/core/WINPTHREADS_COPYING
new file mode 100644
index 0000000..3507701
--- /dev/null
+++ b/core/WINPTHREADS_COPYING
@@ -0,0 +1,57 @@
+Copyright (c) 2011 mingw-w64 project
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+
+/*
+ * Parts of this library are derived by:
+ *
+ * Posix Threads library for Microsoft Windows
+ *
+ * Use at own risk, there is no implied warranty to this code.
+ * It uses undocumented features of Microsoft Windows that can change
+ * at any time in the future.
+ *
+ * (C) 2010 Lockless Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *  * Neither the name of Lockless Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 68ac08b..ff6f5bd 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -65,6 +65,16 @@
   my_host_cross :=
 endif
 
+ifndef LOCAL_PROPRIETARY_MODULE
+  LOCAL_PROPRIETARY_MODULE := $(LOCAL_VENDOR_MODULE)
+endif
+ifndef LOCAL_VENDOR_MODULE
+  LOCAL_VENDOR_MODULE := $(LOCAL_PROPRIETARY_MODULE)
+endif
+ifneq ($(filter-out $(LOCAL_PROPRIETARY_MODULE),$(LOCAL_VENDOR_MODULE))$(filter-out $(LOCAL_VENDOR_MODULE),$(LOCAL_PROPRIETARY_MODULE)),)
+$(call pretty-error,Only one of LOCAL_PROPRIETARY_MODULE[$(LOCAL_PROPRIETARY_MODULE)] and LOCAL_VENDOR_MODULE[$(LOCAL_VENDOR_MODULE)] may be set, or they must be equal)
+endif
+
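The two flags are now aliases kept in sync: setting either implies the other, and
conflicting values are a hard error. In a module makefile this means (sketch):

    LOCAL_VENDOR_MODULE := true   # LOCAL_PROPRIETARY_MODULE becomes true as well
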
 include $(BUILD_SYSTEM)/local_vndk.mk
 
 my_module_tags := $(LOCAL_MODULE_TAGS)
@@ -121,7 +131,7 @@
 # makefiles. Anything else is either a typo or a source of unexpected
 # behaviors.
 ifneq ($(filter-out debug eng tests optional samples,$(my_module_tags)),)
-$(warning unusual tags $(my_module_tags) on $(LOCAL_MODULE) at $(LOCAL_PATH))
+$(call pretty-warning,unusual tags $(my_module_tags))
 endif
 
 # Add implicit tags.
@@ -163,24 +173,24 @@
 endif
 my_module_path := $(patsubst %/,%,$(my_module_path))
 my_module_relative_path := $(strip $(LOCAL_MODULE_RELATIVE_PATH))
+ifdef LOCAL_IS_HOST_MODULE
+  partition_tag :=
+else
+ifeq (true,$(LOCAL_VENDOR_MODULE))
+  partition_tag := _VENDOR
+else ifeq (true,$(LOCAL_OEM_MODULE))
+  partition_tag := _OEM
+else ifeq (true,$(LOCAL_ODM_MODULE))
+  partition_tag := _ODM
+else ifeq (NATIVE_TESTS,$(LOCAL_MODULE_CLASS))
+  partition_tag := _DATA
+else
+  # The definition of should-install-to-system will be different depending
+  # on which goal (e.g., sdk or just droid) is being built.
+  partition_tag := $(if $(call should-install-to-system,$(my_module_tags)),,_DATA)
+endif
+endif
 ifeq ($(my_module_path),)
-  ifdef LOCAL_IS_HOST_MODULE
-    partition_tag :=
-  else
-  ifeq (true,$(LOCAL_PROPRIETARY_MODULE))
-    partition_tag := _VENDOR
-  else ifeq (true,$(LOCAL_OEM_MODULE))
-    partition_tag := _OEM
-  else ifeq (true,$(LOCAL_ODM_MODULE))
-    partition_tag := _ODM
-  else ifeq (NATIVE_TESTS,$(LOCAL_MODULE_CLASS))
-    partition_tag := _DATA
-  else
-    # The definition of should-install-to-system will be different depending
-    # on which goal (e.g., sdk or just droid) is being built.
-    partition_tag := $(if $(call should-install-to-system,$(my_module_tags)),,_DATA)
-  endif
-  endif
   install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
   ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
     install_path_var := $(install_path_var)_PRIVILEGED
@@ -417,47 +427,62 @@
 endif
 
 ###########################################################
-## Compatibiliy suite files.
+## Compatibility suite files.
 ###########################################################
 ifdef LOCAL_COMPATIBILITY_SUITE
-ifneq ($(words $(LOCAL_COMPATIBILITY_SUITE)),1)
-$(error $(LOCAL_PATH):$(LOCAL_MODULE) LOCAL_COMPATIBILITY_SUITE can be only one name)
+
+# If we are building a native test or benchmark and its stem variants are not defined,
+# separate the multiple architectures into subdirectories of the testcase folder.
+arch_dir :=
+is_native :=
+ifeq ($(LOCAL_MODULE_CLASS),NATIVE_TESTS)
+  is_native := true
+endif
+ifeq ($(LOCAL_MODULE_CLASS),NATIVE_BENCHMARK)
+  is_native := true
+endif
+ifdef LOCAL_MULTILIB
+  is_native := true
+endif
+ifdef is_native
+  arch_dir := /$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
+  is_native :=
 endif
 
 # The module itself.
-my_compat_dist := \
-  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem)
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) := $(foreach dir, $(call compatibility_suite_dirs,$(suite),$(arch_dir)), \
+    $(LOCAL_BUILT_MODULE):$(dir)/$(my_installed_module_stem))))
 
 # Make sure we only add the files once for multilib modules.
 ifndef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files := true
 
 # LOCAL_COMPATIBILITY_SUPPORT_FILES is a list of <src>[:<dest>].
-my_compat_dist += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES),\
-  $(eval p := $(subst :,$(space),$(f)))\
-  $(eval s := $(word 1,$(p)))\
-  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(or $(word 2,$(p)),$(notdir $(word 1,$(p)))))\
-  $(s):$(d))
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES), \
+    $(eval p := $(subst :,$(space),$(f))) \
+    $(eval s := $(word 1,$(p))) \
+    $(eval n := $(or $(word 2,$(p)),$(notdir $(word 1, $(p))))) \
+    $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+      $(s):$(dir)/$(n)))))
+
 
 ifneq (,$(wildcard $(LOCAL_PATH)/AndroidTest.xml))
-my_compat_dist += \
-  $(LOCAL_PATH)/AndroidTest.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).config
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(LOCAL_PATH)/AndroidTest.xml:$(dir)/$(LOCAL_MODULE).config)))
 endif
 
 ifneq (,$(wildcard $(LOCAL_PATH)/DynamicConfig.xml))
-my_compat_dist += \
-  $(LOCAL_PATH)/DynamicConfig.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).dynamic
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(LOCAL_PATH)/DynamicConfig.xml:$(dir)/$(LOCAL_MODULE).dynamic)))
 endif
 endif # $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 
-my_compat_files := $(call copy-many-files, $(my_compat_dist))
+$(call create-suite-dependencies)
 
-COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
-  $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(my_compat_files)
-
-# Copy over the compatibility files when user runs mm/mmm.
-$(my_all_targets) : $(my_compat_files)
 endif  # LOCAL_COMPATIBILITY_SUITE
 
 ###########################################################
@@ -528,6 +553,9 @@
 endif
 ALL_MODULES.$(my_register_name).REQUIRED := \
     $(strip $(ALL_MODULES.$(my_register_name).REQUIRED) $(my_required_modules))
+ALL_MODULES.$(my_register_name).EXPLICITLY_REQUIRED := \
+    $(strip $(ALL_MODULES.$(my_register_name).EXPLICITLY_REQUIRED)\
+        $(my_required_modules))
 ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS := \
     $(ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS) $(event_log_tags)
 ALL_MODULES.$(my_register_name).MAKEFILE := \
@@ -552,6 +580,7 @@
   $(LOCAL_STATIC_LIBRARIES) \
   $(LOCAL_WHOLE_STATIC_LIBRARIES) \
   $(LOCAL_SHARED_LIBRARIES) \
+  $(LOCAL_HEADER_LIBRARIES) \
   $(LOCAL_STATIC_JAVA_LIBRARIES) \
   $(LOCAL_JAVA_LIBRARIES)\
   $(LOCAL_JNI_SHARED_LIBRARIES))
@@ -573,7 +602,7 @@
 ## umbrella targets used to verify builds
 ###########################################################
 j_or_n :=
-ifneq (,$(filter EXECUTABLES SHARED_LIBRARIES STATIC_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)))
+ifneq (,$(filter EXECUTABLES SHARED_LIBRARIES STATIC_LIBRARIES HEADER_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)))
 j_or_n := native
 else
 ifneq (,$(filter JAVA_LIBRARIES APPS,$(LOCAL_MODULE_CLASS)))
diff --git a/core/binary.mk b/core/binary.mk
index 4986c85..e28db7e 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -40,6 +40,7 @@
 my_static_libraries := $(LOCAL_STATIC_LIBRARIES)
 my_whole_static_libraries := $(LOCAL_WHOLE_STATIC_LIBRARIES)
 my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
+my_header_libraries := $(LOCAL_HEADER_LIBRARIES)
 my_cflags := $(LOCAL_CFLAGS)
 my_conlyflags := $(LOCAL_CONLYFLAGS)
 my_cppflags := $(LOCAL_CPPFLAGS)
@@ -60,10 +61,14 @@
 my_arflags :=
 
 ifneq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
+ifeq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_EXCLUDE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
   my_native_coverage := true
 else
   my_native_coverage := false
 endif
+else
+  my_native_coverage := false
+endif
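+
+COVERAGE_EXCLUDE_PATHS carves exceptions out of COVERAGE_PATHS; both are comma-separated
+path-prefix lists matched against LOCAL_PATH. For example (hypothetical paths):
+
+    COVERAGE_PATHS := frameworks/native,system/core
+    COVERAGE_EXCLUDE_PATHS := frameworks/native/libs/gui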
 
 my_allow_undefined_symbols := $(strip $(LOCAL_ALLOW_UNDEFINED_SYMBOLS))
 ifdef SANITIZE_HOST
@@ -75,9 +80,8 @@
 my_ndk_sysroot :=
 my_ndk_sysroot_include :=
 my_ndk_sysroot_lib :=
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
   ifdef LOCAL_IS_HOST_MODULE
-    # LOCAL_USE_VNDK is checked in local_vndk.mk
     $(error $(LOCAL_PATH): LOCAL_SDK_VERSION cannot be used in host module)
   endif
 
@@ -106,13 +110,9 @@
   # missing API levels to existing ones where necessary, but we're not doing
   # that for the generated libraries. Clip the API level to the minimum where
   # appropriate.
-  ifdef LOCAL_USE_VNDK
-    my_ndk_api := $(BOARD_VNDK_VERSION)
-  else
-    my_ndk_api := $(LOCAL_SDK_VERSION)
-  endif
+  my_ndk_api := $(LOCAL_SDK_VERSION)
   ifneq ($(my_ndk_api),current)
-      my_ndk_api := $(call math_max,$(my_ndk_api),$(my_min_sdk_version))
+    my_ndk_api := $(call math_max,$(my_ndk_api),$(my_min_sdk_version))
   endif
 
   my_ndk_api_def := $(my_ndk_api)
@@ -160,28 +160,25 @@
   my_built_ndk_libs := $(my_ndk_platform_dir)/usr/$(my_ndk_libdir_name)
   my_ndk_sysroot_lib := $(my_ndk_sysroot)/usr/$(my_ndk_libdir_name)
 
-  ifndef LOCAL_USE_VNDK
-    # The bionic linker now has support for packed relocations and gnu style
-    # hashes (which are much faster!), but shipping to older devices requires
-    # the old style hash. Fortunately, we can build with both and it'll work
-    # anywhere.
-    #
-    # This is not currently supported on MIPS architectures.
-    ifeq (,$(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
-      my_ldflags += -Wl,--hash-style=both
-    endif
-
-    # We don't want to expose the relocation packer to the NDK just yet.
-    LOCAL_PACK_MODULE_RELOCATIONS := false
+  # The bionic linker now has support for packed relocations and gnu style
+  # hashes (which are much faster!), but shipping to older devices requires
+  # the old style hash. Fortunately, we can build with both and it'll work
+  # anywhere.
+  #
+  # This is not currently supported on MIPS architectures.
+  ifeq (,$(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
+    my_ldflags += -Wl,--hash-style=both
   endif
 
+  # We don't want to expose the relocation packer to the NDK just yet.
+  LOCAL_PACK_MODULE_RELOCATIONS := false
+
   # Set up the NDK stl variant. Starting from NDK-r5 the c++ stl resides in a separate location.
   # See ndk/docs/CPLUSPLUS-SUPPORT.html
   my_ndk_stl_include_path :=
   my_ndk_stl_shared_lib_fullpath :=
   my_ndk_stl_static_lib :=
   my_ndk_cpp_std_version :=
-  ifndef LOCAL_USE_VNDK
   my_cpu_variant := $(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)CPU_ABI)
   ifeq (mips32r6,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH_VARIANT))
     my_cpu_variant := mips32r6
@@ -269,7 +266,10 @@
   endif
   endif
   endif
-  endif
+endif
+
+ifneq ($(LOCAL_USE_VNDK),)
+  my_cflags += -D__ANDROID_API__=__ANDROID_API_FUTURE__
 endif
 
 ifndef LOCAL_IS_HOST_MODULE
@@ -290,7 +290,7 @@
 my_ldlibs := $(filter $(my_allowed_ldlibs),$(my_ldlibs))
 endif
 
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
   my_all_ndk_libraries := \
       $(NDK_MIGRATED_LIBS) $(addprefix lib,$(NDK_PREBUILT_SHARED_LIBRARIES))
   my_ndk_shared_libraries := \
@@ -309,7 +309,7 @@
 ifneq ($(LOCAL_NO_PIC),true)
 ifneq ($($(my_prefix)OS),windows)
 ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
-my_cflags += -fpie
+my_cflags += -fPIE
 else
 my_cflags += -fPIC
 endif
@@ -320,6 +320,7 @@
 my_src_files += $(LOCAL_SRC_FILES_$($(my_prefix)OS)) $(LOCAL_SRC_FILES_$($(my_prefix)OS)_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
 my_static_libraries += $(LOCAL_STATIC_LIBRARIES_$($(my_prefix)OS))
 my_shared_libraries += $(LOCAL_SHARED_LIBRARIES_$($(my_prefix)OS))
+my_header_libraries += $(LOCAL_HEADER_LIBRARIES_$($(my_prefix)OS))
 my_cflags += $(LOCAL_CFLAGS_$($(my_prefix)OS))
 my_cppflags += $(LOCAL_CPPFLAGS_$($(my_prefix)OS))
 my_ldflags += $(LOCAL_LDFLAGS_$($(my_prefix)OS))
@@ -394,8 +395,21 @@
     my_clang := true
 endif
 
-my_c_std_version := $(DEFAULT_C_STD_VERSION)
-my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
+ifeq ($(LOCAL_C_STD),)
+    my_c_std_version := $(DEFAULT_C_STD_VERSION)
+else ifeq ($(LOCAL_C_STD),experimental)
+    my_c_std_version := $(EXPERIMENTAL_C_STD_VERSION)
+else
+    my_c_std_version := $(LOCAL_C_STD)
+endif
+
+ifeq ($(LOCAL_CPP_STD),)
+    my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
+else ifeq ($(LOCAL_CPP_STD),experimental)
+    my_cpp_std_version := $(EXPERIMENTAL_CPP_STD_VERSION)
+else
+    my_cpp_std_version := $(LOCAL_CPP_STD)
+endif
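+
+# For example, a module can pick a specific standard or opt in to the
+# experimental one (the values here are illustrative):
+#   LOCAL_C_STD := c99
+#   LOCAL_CPP_STD := experimental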
 
 ifneq ($(my_clang),true)
     # GCC uses an invalid C++14 ABI (emits calls to
@@ -430,6 +444,7 @@
 # arch-specific static libraries go first so that generic ones can depend on them
 my_static_libraries := $(LOCAL_STATIC_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_STATIC_LIBRARIES_$(my_32_64_bit_suffix)) $(my_static_libraries)
 my_whole_static_libraries := $(LOCAL_WHOLE_STATIC_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_WHOLE_STATIC_LIBRARIES_$(my_32_64_bit_suffix)) $(my_whole_static_libraries)
+my_header_libraries := $(LOCAL_HEADER_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_HEADER_LIBRARIES_$(my_32_64_bit_suffix)) $(my_header_libraries)
 
 include $(BUILD_SYSTEM)/cxx_stl_setup.mk
 
@@ -457,6 +472,11 @@
 endif
 endif
 
+# Statically link libwinpthread when cross compiling win32.
+ifeq ($($(my_prefix)OS),windows)
+  my_static_libraries += libwinpthread
+endif
+
 ifneq ($(filter ../%,$(my_src_files)),)
 my_soong_problems += dotdot_srcs
 endif
@@ -494,6 +514,36 @@
 ###########################################################
 my_asflags += -D__ASSEMBLY__
 
+###########################################################
+## When compiling against the VNDK, use LL-NDK libraries
+###########################################################
+ifneq ($(LOCAL_USE_VNDK),)
+  ####################################################
+  ## Soong modules may be built twice, once for /system
+  ## and once for /vendor. If we're using the VNDK,
+  ## switch all soong libraries over to the /vendor
+  ## variant.
+  ####################################################
+  ifeq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+    # Soong-built libraries should always use the .vendor variant
+    my_whole_static_libraries := $(addsuffix .vendor,$(my_whole_static_libraries))
+    my_static_libraries := $(addsuffix .vendor,$(my_static_libraries))
+    my_shared_libraries := $(addsuffix .vendor,$(my_shared_libraries))
+    my_system_shared_libraries := $(addsuffix .vendor,$(my_system_shared_libraries))
+    my_header_libraries := $(addsuffix .vendor,$(my_header_libraries))
+  else
+    my_whole_static_libraries := $(foreach l,$(my_whole_static_libraries),\
+      $(if $(SPLIT_VENDOR.STATIC_LIBRARIES.$(l)),$(l).vendor,$(l)))
+    my_static_libraries := $(foreach l,$(my_static_libraries),\
+      $(if $(SPLIT_VENDOR.STATIC_LIBRARIES.$(l)),$(l).vendor,$(l)))
+    my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+      $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+    my_system_shared_libraries := $(foreach l,$(my_system_shared_libraries),\
+      $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+    my_header_libraries := $(foreach l,$(my_header_libraries),\
+      $(if $(SPLIT_VENDOR.HEADER_LIBRARIES.$(l)),$(l).vendor,$(l)))
+  endif
+endif
 
 ###########################################################
 ## Define PRIVATE_ variables from global vars
@@ -503,8 +553,7 @@
 my_target_global_c_includes := \
     $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)PROJECT_INCLUDES)
 my_target_global_c_system_includes := \
-    $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)PROJECT_SYSTEM_INCLUDES) \
-    $(my_ndk_sysroot_include)
+    $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)PROJECT_SYSTEM_INCLUDES)
 else ifdef LOCAL_SDK_VERSION
 my_target_global_c_includes :=
 my_target_global_c_system_includes := $(my_ndk_stl_include_path) $(my_ndk_sysroot_include)
@@ -913,62 +962,6 @@
 endif  # $(proto_sources) non-empty
 
 ###########################################################
-## Compile the .dbus-xml files to c++ headers
-###########################################################
-dbus_definitions := $(filter %.dbus-xml,$(my_src_files))
-dbus_generated_headers :=
-ifneq ($(dbus_definitions),)
-my_soong_problems += dbus
-
-dbus_definition_paths := $(addprefix $(LOCAL_PATH)/,$(dbus_definitions))
-dbus_service_config := $(filter %dbus-service-config.json,$(my_src_files))
-dbus_service_config_path := $(addprefix $(LOCAL_PATH)/,$(dbus_service_config))
-
-# Mark these source files as not producing objects
-$(call track-src-file-obj,$(dbus_definitions) $(dbus_service_config),)
-
-dbus_gen_dir := $(generated_sources_dir)/dbus_bindings
-
-ifdef LOCAL_DBUS_PROXY_PREFIX
-dbus_header_dir := $(dbus_gen_dir)/include/$(LOCAL_DBUS_PROXY_PREFIX)
-dbus_headers := dbus-proxies.h
-else
-dbus_header_dir := $(dbus_gen_dir)
-dbus_headers := $(patsubst %.dbus-xml,%.h,$(dbus_definitions))
-endif
-dbus_generated_headers := $(addprefix $(dbus_header_dir)/,$(dbus_headers))
-
-# Ensure that we only define build rules once in multilib builds.
-ifndef $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
-$(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined := true
-
-$(dbus_generated_headers): PRIVATE_MODULE := $(LOCAL_MODULE)
-$(dbus_generated_headers): PRIVATE_DBUS_SERVICE_CONFIG := $(dbus_service_config_path)
-$(dbus_generated_headers) : $(dbus_service_config_path) $(DBUS_GENERATOR)
-ifdef LOCAL_DBUS_PROXY_PREFIX
-$(dbus_generated_headers) : $(dbus_definition_paths)
-	$(generate-dbus-proxies)
-else
-$(dbus_generated_headers) : $(dbus_header_dir)/%.h : $(LOCAL_PATH)/%.dbus-xml
-	$(generate-dbus-adaptors)
-endif  # $(LOCAL_DBUS_PROXY_PREFIX)
-endif  # $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
-
-ifdef LOCAL_DBUS_PROXY_PREFIX
-# Auto-export the generated dbus proxy directory.
-my_export_c_include_dirs += $(dbus_gen_dir)/include
-my_c_includes += $(dbus_gen_dir)/include
-else
-my_export_c_include_dirs += $(dbus_header_dir)
-my_c_includes += $(dbus_header_dir)
-endif  # $(LOCAL_DBUS_PROXY_PREFIX)
-
-my_generated_sources += $(dbus_generated_headers)
-
-endif  # $(dbus_definitions) non-empty
-
-
-###########################################################
 ## AIDL: Compile .aidl files to .cpp and .h files
 ###########################################################
 aidl_src := $(strip $(filter %.aidl,$(my_src_files)))
@@ -1357,7 +1350,7 @@
 ## they may customize their install path with LOCAL_MODULE_PATH
 ##########################################################
 # Get the list of INSTALLED libraries as module names.
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
   installed_shared_library_module_names := \
       $(my_shared_libraries)
 else
@@ -1383,7 +1376,9 @@
     $(foreach l, $(installed_shared_library_module_names), \
       $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
     $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
-      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
+    $(foreach l, $(my_header_libraries), \
+      $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
 $(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
 $(import_includes) : $(import_includes_deps)
 	@echo Import includes file: $@
@@ -1407,6 +1402,10 @@
 $(my_link_type): PRIVATE_LINK_TYPE := native:ndk
 $(my_link_type): PRIVATE_WARN_TYPES :=
 $(my_link_type): PRIVATE_ALLOWED_TYPES := native:ndk
+else ifdef LOCAL_USE_VNDK
+$(my_link_type): PRIVATE_LINK_TYPE := native:vendor
+$(my_link_type): PRIVATE_WARN_TYPES :=
+$(my_link_type): PRIVATE_ALLOWED_TYPES := native:vendor
 else
 $(my_link_type): PRIVATE_LINK_TYPE := native:platform
 $(my_link_type): PRIVATE_WARN_TYPES :=
@@ -1417,10 +1416,12 @@
    $(foreach l,$(my_whole_static_libraries) $(my_static_libraries), \
      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/link_type))
 ifneq ($(LOCAL_MODULE_CLASS),STATIC_LIBRARIES)
+ifneq ($(LOCAL_MODULE_CLASS),HEADER_LIBRARIES)
 my_link_type_deps += $(strip \
    $(foreach l,$(my_shared_libraries), \
      $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/link_type))
 endif
+endif
 $(my_link_type): PRIVATE_DEPS := $(my_link_type_deps)
 $(my_link_type): PRIVATE_MODULE := $(LOCAL_MODULE)
 $(my_link_type): PRIVATE_MAKEFILE := $(LOCAL_MODULE_MAKEFILE)
@@ -1526,7 +1527,7 @@
 so_suffix := $($(my_prefix)SHLIB_SUFFIX)
 a_suffix := $($(my_prefix)STATIC_LIB_SUFFIX)
 
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
 built_shared_libraries := \
     $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
       $(addsuffix $(so_suffix), \
@@ -1708,7 +1709,7 @@
 # One last verification check for ldlibs
 ifndef LOCAL_IS_HOST_MODULE
 my_allowed_ldlibs :=
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
   my_allowed_ldlibs := $(addprefix -l,$(NDK_PREBUILT_SHARED_LIBRARIES))
 endif
 
@@ -1765,7 +1766,14 @@
 # Export includes
 ###########################################################
 export_includes := $(intermediates)/export_includes
-$(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(my_export_c_include_dirs)
+export_cflags := $(foreach d,$(my_export_c_include_dirs),-I $(d))
+# Soong exports cflags instead of include dirs, so that -isystem can be included.
+ifeq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+export_cflags += $(LOCAL_EXPORT_CFLAGS)
+else ifdef LOCAL_EXPORT_CFLAGS
+$(call pretty-error,LOCAL_EXPORT_CFLAGS can only be used by Soong, use LOCAL_EXPORT_C_INCLUDE_DIRS instead)
+endif
+$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
 # Headers exported by whole static libraries are also exported by this library.
 export_include_deps := $(strip \
    $(foreach l,$(my_whole_static_libraries), \
@@ -1778,16 +1786,18 @@
 export_include_deps += $(strip \
    $(foreach l,$(LOCAL_EXPORT_STATIC_LIBRARY_HEADERS), \
      $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+# Re-export requested headers from header libraries.
+export_include_deps += $(strip \
+   $(foreach l,$(LOCAL_EXPORT_HEADER_LIBRARY_HEADERS), \
+     $(call intermediates-dir-for,HEADER_LIBRARIES,$(l),$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
 $(export_includes): PRIVATE_REEXPORTED_INCLUDES := $(export_include_deps)
 # By adding $(my_generated_sources) it makes sure the headers get generated
 # before any dependent source files get compiled.
-$(export_includes) : $(my_export_c_include_deps) $(my_generated_sources) $(export_include_deps)
+$(export_includes) : $(my_export_c_include_deps) $(my_generated_sources) $(export_include_deps) $(LOCAL_EXPORT_C_INCLUDE_DEPS)
 	@echo Export includes file: $< -- $@
 	$(hide) mkdir -p $(dir $@) && rm -f $@.tmp && touch $@.tmp
-ifdef my_export_c_include_dirs
-	$(hide) for d in $(PRIVATE_EXPORT_C_INCLUDE_DIRS); do \
-	        echo "-I $$d" >> $@.tmp; \
-	        done
+ifdef export_cflags
+	$(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >>$@.tmp
 endif
 ifdef export_include_deps
 	$(hide) for f in $(PRIVATE_REEXPORTED_INCLUDES); do \
@@ -1799,6 +1809,7 @@
 	else \
 	  mv $@.tmp $@ ; \
 	fi
+export_cflags :=
 
 # Kati adds restat=1 to ninja. GNU make does nothing for this.
 .KATI_RESTAT: $(export_includes)
@@ -1826,6 +1837,14 @@
 # Coverage packaging.
 ###########################################################
 ifeq ($(my_native_coverage),true)
-LOCAL_GCNO_FILES := $(patsubst %.o,%.gcno,$(all_objects))
-$(foreach f,$(all_objects),$(eval $(call gcno-touch-rule,$(f),$(f:.o=.gcno))))
+my_gcno_objects := \
+    $(cpp_objects) \
+    $(gen_cpp_objects) \
+    $(c_objects) \
+    $(gen_c_objects) \
+    $(objc_objects) \
+    $(objcpp_objects)
+
+LOCAL_GCNO_FILES := $(patsubst %.o,%.gcno,$(my_gcno_objects))
+$(foreach f,$(my_gcno_objects),$(eval $(call gcno-touch-rule,$(f),$(f:.o=.gcno))))
 endif
diff --git a/core/build-system.html b/core/build-system.html
index 95f35ce..c7938cc 100644
--- a/core/build-system.html
+++ b/core/build-system.html
@@ -592,6 +592,17 @@
 </ul>
 </p>
 
+<h4>LOCAL_ANNOTATION_PROCESSORS</h4>
+<p>Set this to a list of modules built with <code>BUILD_HOST_JAVA_LIBRARY</code>
+to have their jars passed to javac with -processorpath for use as annotation
+processors.</p>
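+<p>For example (the module name here is illustrative):</p>
+<p><code>LOCAL_ANNOTATION_PROCESSORS := my-annotation-processor</code></p>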
+
+<h4>LOCAL_ANNOTATION_PROCESSOR_CLASSES</h4>
+<p>Set this to a list of classes to be passed to javac as -processor arguments.
+This list would be unnecessary, as javac will autodetect annotation processor
+classes, except that the Grok tool that is used on the Android source code
+does not autodetect them and requires listing them manually.</p>
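+<p>For example (the class name here is illustrative):</p>
+<p><code>LOCAL_ANNOTATION_PROCESSOR_CLASSES := com.example.MyProcessor</code></p>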
+
 <h4>LOCAL_ASSET_FILES</h4>
 <p>In Android.mk files that <code>include $(BUILD_PACKAGE)</code> set this
 to the set of files you want built into your app.  Usually:</p>
@@ -707,6 +718,11 @@
 them here.  For example:</p>
 <p><code>LOCAL_JAVACFLAGS += -Xlint:deprecation</code></p>
 
+<h4>LOCAL_ERROR_PRONE_FLAGS</h4>
+<p>If you have additional flags to pass into the error prone compiler, add
+them here.  For example:</p>
+<p><code>LOCAL_ERROR_PRONE_FLAGS += -Xep:ClassCanBeStatic:ERROR</code></p>
+
 <h4>LOCAL_JAVA_LIBRARIES</h4>
 <p>When linking Java apps and libraries, <code>LOCAL_JAVA_LIBRARIES</code>
 specifies which sets of java classes to include.  Currently there are
diff --git a/core/build_rro_package.mk b/core/build_rro_package.mk
index 24cd9a3..9865b33 100644
--- a/core/build_rro_package.mk
+++ b/core/build_rro_package.mk
@@ -1,10 +1,13 @@
-#########################################################################
+#############################################################################
 ## Standard rules for installing runtime resource overlay APKs.
 ##
-## Set LOCAL_RRO_SKU to the SKU name if the package should apply only to
-## a particular SKU as set by ro.boot.vendor.overlay.sku system property.
+## Set LOCAL_RRO_THEME to the theme name if the package should apply only to
+## a particular theme as set by ro.boot.vendor.overlay.theme system property.
 ##
-#########################################################################
+## If LOCAL_RRO_THEME is not set, the package always applies, independent
+## of the selected theme.
+##
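+## For example (the theme name here is illustrative):
+##   LOCAL_RRO_THEME := MyTheme
+##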
+#############################################################################
 
 LOCAL_IS_RUNTIME_RESOURCE_OVERLAY := true
 
@@ -12,10 +15,10 @@
   $(error runtime resource overlay package should not contain sources)
 endif
 
-ifeq (S(LOCAL_RRO_SKU),)
+ifeq ($(LOCAL_RRO_THEME),)
   LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/overlay
 else
-  LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/overlay/$(LOCAL_RRO_SKU)
+  LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/overlay/$(LOCAL_RRO_THEME)
 endif
 
 include $(BUILD_SYSTEM)/package.mk
diff --git a/core/clang/config.mk b/core/clang/config.mk
index b4fe708..be43a26 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -5,16 +5,6 @@
 CLANG_TBLGEN := $(BUILD_OUT_EXECUTABLES)/clang-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 LLVM_TBLGEN := $(BUILD_OUT_EXECUTABLES)/llvm-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 
-# RenderScript-specific tools
-# These are tied to the version of LLVM directly in external/, so they might
-# trail the host prebuilts being used for the rest of the build process.
-RS_LLVM_PREBUILTS_VERSION := clang-3289846
-RS_LLVM_PREBUILTS_BASE := prebuilts/clang/host
-RS_LLVM_PREBUILTS_PATH := $(RS_LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(RS_LLVM_PREBUILTS_VERSION)/bin
-RS_CLANG := $(RS_LLVM_PREBUILTS_PATH)/clang$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_AS := $(RS_LLVM_PREBUILTS_PATH)/llvm-as$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_LINK := $(RS_LLVM_PREBUILTS_PATH)/llvm-link$(BUILD_EXECUTABLE_SUFFIX)
-
 define convert-to-clang-flags
 $(strip $(filter-out $(CLANG_CONFIG_UNKNOWN_CFLAGS),$(1)))
 endef
diff --git a/core/clang/versions.mk b/core/clang/versions.mk
index d9c8aab..c5cc690 100644
--- a/core/clang/versions.mk
+++ b/core/clang/versions.mk
@@ -1,3 +1,4 @@
 ## Clang/LLVM release versions.
 
-LLVM_PREBUILTS_VERSION ?= clang-3289846
+LLVM_PREBUILTS_VERSION ?= clang-3859424
+LLVM_PREBUILTS_BASE ?= prebuilts/clang/host
diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk
index 76f4613..fa89758 100644
--- a/core/cleanbuild.mk
+++ b/core/cleanbuild.mk
@@ -165,7 +165,7 @@
 PREVIOUS_BUILD_CONFIG := $(strip $(PREVIOUS_BUILD_CONFIG))
 
 ifdef PREVIOUS_BUILD_CONFIG
-  ifneq "$(current_build_config)" "$(PREVIOUS_BUILD_CONFIG)"
+  ifneq ($(current_build_config),$(PREVIOUS_BUILD_CONFIG))
     $(info *** Build configuration changed: "$(PREVIOUS_BUILD_CONFIG)" -> "$(current_build_config)")
     ifneq ($(DISABLE_AUTO_INSTALLCLEAN),true)
       force_installclean := true
@@ -194,22 +194,33 @@
 # installclean logic
 #
 
-# The files/dirs to delete during an installclean.  This includes the
-# non-common APPS directory, which may contain the wrong resources.
+# The files/dirs to delete during an installclean.
 #
-# Deletes all of the files that change between different build types,
-# like "make user" vs. "make sdk".  This lets you work with different
-# build types without having to do a full clean each time.  E.g.:
+# Deletes all of the installed files -- the intent is to remove files
+# that may no longer be installed, either because the user previously
+# installed them, or they were previously installed by default but no
+# longer are.
 #
-#     $ make -j8 all
-#     $ make installclean
-#     $ make -j8 user
-#     $ make installclean
-#     $ make -j8 sdk
+# This is faster than a full clean, since we're not deleting the
+# intermediates. Instead of recompiling, we can just copy the results.
 #
+# Host bin, frameworks, and lib* are intentionally omitted, since
+# otherwise we'd have to rebuild any generated files created with those
+# tools.
 installclean_files := \
 	$(HOST_OUT)/obj/NOTICE_FILES \
+	$(HOST_OUT)/obj/PACKAGING \
+	$(HOST_OUT)/coverage \
+	$(HOST_OUT)/cts \
+	$(HOST_OUT)/nativetest* \
 	$(HOST_OUT)/sdk \
+	$(HOST_OUT)/sdk_addon \
+	$(HOST_OUT)/testcases \
+	$(HOST_OUT)/vts \
+	$(HOST_CROSS_OUT)/bin \
+	$(HOST_CROSS_OUT)/coverage \
+	$(HOST_CROSS_OUT)/lib* \
+	$(HOST_CROSS_OUT)/nativetest* \
 	$(PRODUCT_OUT)/*.img \
 	$(PRODUCT_OUT)/*.ini \
 	$(PRODUCT_OUT)/*.txt \
@@ -226,7 +237,14 @@
 	$(PRODUCT_OUT)/system_other \
 	$(PRODUCT_OUT)/vendor \
 	$(PRODUCT_OUT)/oem \
-	$(PRODUCT_OUT)/obj/FAKE
+	$(PRODUCT_OUT)/obj/FAKE \
+	$(PRODUCT_OUT)/breakpad \
+	$(PRODUCT_OUT)/cache \
+	$(PRODUCT_OUT)/coverage \
+	$(PRODUCT_OUT)/installer \
+	$(PRODUCT_OUT)/odm \
+	$(PRODUCT_OUT)/sysloader \
+	$(PRODUCT_OUT)/testcases \
 
 # The files/dirs to delete during a dataclean, which removes any files
 # in the staging and emulator data partitions.
@@ -254,7 +272,7 @@
 	$(hide) rm -rf $(FILES)
 	@echo "Deleted images and staging directories."
 
-ifeq "$(force_installclean)" "true"
+ifeq ($(force_installclean),true)
   $(info *** Forcing "make installclean"...)
   $(info *** rm -rf $(dataclean_files) $(installclean_files))
   $(shell rm -rf $(dataclean_files) $(installclean_files))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 6e61d15..a4751fd 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -2,7 +2,8 @@
 ## Clear out values of all variables used by rule templates.
 ###########################################################
 
-LOCAL_32_BIT_ONLY:= # '',true
+# '',true
+LOCAL_32_BIT_ONLY:=
 LOCAL_AAPT_FLAGS:=
 LOCAL_AAPT_INCLUDE_ALL_RESOURCES:=
 LOCAL_ADDITIONAL_CERTIFICATES:=
@@ -11,6 +12,8 @@
 LOCAL_ADDITIONAL_JAVA_DIR:=
 LOCAL_AIDL_INCLUDES:=
 LOCAL_ALLOW_UNDEFINED_SYMBOLS:=
+LOCAL_ANNOTATION_PROCESSORS:=
+LOCAL_ANNOTATION_PROCESSOR_CLASSES:=
 LOCAL_APK_LIBRARIES:=
 LOCAL_ARM_MODE:=
 LOCAL_ASFLAGS:=
@@ -37,14 +40,18 @@
 LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES:=
 LOCAL_CPP_EXTENSION:=
 LOCAL_CPPFLAGS:=
+LOCAL_CPP_STD:=
+LOCAL_C_STD:=
 LOCAL_CTS_TEST_PACKAGE:=
 LOCAL_CTS_TEST_RUNNER:=
 LOCAL_CXX:=
 LOCAL_CXX_STL := default
 LOCAL_DATA_BINDING:=
-LOCAL_DBUS_PROXY_PREFIX:=
+LOCAL_DEX_PREOPT_APP_IMAGE:=
 LOCAL_DEX_PREOPT_FLAGS:=
+LOCAL_DEX_PREOPT_GENERATE_PROFILE:=
 LOCAL_DEX_PREOPT_IMAGE_LOCATION:=
+LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING:=
 LOCAL_DEX_PREOPT:= # '',true,false,nostripping
 LOCAL_DONT_CHECK_MODULE:=
 # Don't delete the META_INF dir when merging static Java libraries.
@@ -60,10 +67,14 @@
 LOCAL_DROIDDOC_STUB_OUT_DIR:=
 LOCAL_DROIDDOC_TEMPLATE_DIR:=
 LOCAL_DROIDDOC_USE_STANDARD_DOCLET:=
+LOCAL_DX_FLAGS:=
 LOCAL_EMMA_COVERAGE_FILTER:=
 LOCAL_EMMA_INSTRUMENT:=
+LOCAL_ERROR_PRONE_FLAGS:=
+LOCAL_EXPORT_CFLAGS:=
 LOCAL_EXPORT_C_INCLUDE_DEPS:=
 LOCAL_EXPORT_C_INCLUDE_DIRS:=
+LOCAL_EXPORT_HEADER_LIBRARY_HEADERS:=
 LOCAL_EXPORT_PACKAGE_RESOURCES:=
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS:=
 LOCAL_EXPORT_STATIC_LIBRARY_HEADERS:=
@@ -80,6 +91,7 @@
 LOCAL_GROUP_STATIC_LIBRARIES:=
 LOCAL_GTEST:=true
 LOCAL_HAL_STATIC_LIBRARIES:=
+LOCAL_HEADER_LIBRARIES:=
 LOCAL_INIT_RC:=
 LOCAL_INSTALLED_MODULE:=
 LOCAL_INSTALLED_MODULE_STEM:=
@@ -92,8 +104,8 @@
 LOCAL_JACK_CLASSPATH:=
 LOCAL_JACK_COVERAGE_EXCLUDE_FILTER:=
 LOCAL_JACK_COVERAGE_INCLUDE_FILTER:=
-# full or incremental
-LOCAL_JACK_ENABLED:=full
+# '' (i.e. disabled), disabled, full, incremental
+LOCAL_JACK_ENABLED:=$(DEFAULT_JACK_ENABLED)
 LOCAL_JACK_FLAGS:=
 LOCAL_JACK_PLUGIN:=
 LOCAL_JACK_PLUGIN_PATH:=
@@ -148,6 +160,7 @@
 LOCAL_NO_FPIE :=
 LOCAL_NO_LIBCOMPILER_RT:=
 LOCAL_NO_LIBGCC:=
+LOCAL_NO_NOTICE_FILE:=
 LOCAL_NO_PIC:=
 LOCAL_NOSANITIZE:=
 LOCAL_NO_STANDARD_LIBRARIES:=
@@ -162,6 +175,7 @@
 LOCAL_PICKUP_FILES:=
 LOCAL_POST_INSTALL_CMD:=
 LOCAL_POST_LINK_CMD:=
+LOCAL_PREBUILT_COVERAGE_ARCHIVE:=
 LOCAL_PREBUILT_EXECUTABLES:=
 LOCAL_PREBUILT_JAVA_LIBRARIES:=
 LOCAL_PREBUILT_JNI_LIBS:=
@@ -171,12 +185,14 @@
 LOCAL_PREBUILT_STATIC_JAVA_LIBRARIES:=
 LOCAL_PREBUILT_STRIP_COMMENTS:=
 LOCAL_PRIVILEGED_MODULE:=
-LOCAL_PROGUARD_ENABLED:= # '',full,custom,nosystem,disabled,obfuscation,optimization
+# '',full,custom,nosystem,disabled,obfuscation,optimization
+LOCAL_PROGUARD_ENABLED:=
 LOCAL_PROGUARD_FLAG_FILES:=
 LOCAL_PROGUARD_FLAGS:=
 LOCAL_PROPRIETARY_MODULE:=
 LOCAL_PROTOC_FLAGS:=
-LOCAL_PROTOC_OPTIMIZE_TYPE:= # lite(default),micro,nano,full,nanopb-c,nanopb-c-enable_malloc
+# lite(default),micro,nano,full,nanopb-c,nanopb-c-enable_malloc
+LOCAL_PROTOC_OPTIMIZE_TYPE:=
 LOCAL_PROTO_JAVA_OUTPUT_PARAMS:=
 LOCAL_RECORDED_MODULE_TYPE:=
 LOCAL_RENDERSCRIPT_CC:=
@@ -192,7 +208,7 @@
 LOCAL_RES_LIBRARIES:=
 LOCAL_RESOURCE_DIR:=
 LOCAL_RMTYPEDEFS:=
-LOCAL_RRO_SKU:=
+LOCAL_RRO_THEME:=
 LOCAL_RTTI_FLAG:=
 LOCAL_SANITIZE:=
 LOCAL_SANITIZE_DIAG:=
@@ -201,7 +217,8 @@
 LOCAL_SDK_VERSION:=
 LOCAL_SHARED_ANDROID_LIBRARIES:=
 LOCAL_SHARED_LIBRARIES:=
-LOCAL_SOURCE_FILES_ALL_GENERATED:= # '',true
+# '',true
+LOCAL_SOURCE_FILES_ALL_GENERATED:=
 LOCAL_SRC_FILES:=
 LOCAL_SRC_FILES_EXCLUDE:=
 LOCAL_STATIC_ANDROID_LIBRARIES:=
@@ -219,6 +236,7 @@
 LOCAL_UNSTRIPPED_PATH:=
 LOCAL_USE_AAPT2:=$(USE_AAPT2)
 LOCAL_USE_VNDK:=
+LOCAL_VENDOR_MODULE:=
 LOCAL_VTSC_FLAGS:=
 LOCAL_VTS_INCLUDES:=
 LOCAL_WARNINGS_ENABLE:=
@@ -237,6 +255,7 @@
 LOCAL_CLANG_$(TARGET_ARCH):=
 LOCAL_CPPFLAGS_$(TARGET_ARCH):=
 LOCAL_GENERATED_SOURCES_$(TARGET_ARCH):=
+LOCAL_HEADER_LIBRARIES_$(TARGET_ARCH):=
 LOCAL_LDFLAGS_$(TARGET_ARCH):=
 LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_ARCH):=
 LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
@@ -258,6 +277,7 @@
 LOCAL_CLANG_$(TARGET_2ND_ARCH):=
 LOCAL_CPPFLAGS_$(TARGET_2ND_ARCH):=
 LOCAL_GENERATED_SOURCES_$(TARGET_2ND_ARCH):=
+LOCAL_HEADER_LIBRARIES_$(TARGET_2ND_ARCH):=
 LOCAL_LDFLAGS_$(TARGET_2ND_ARCH):=
 LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_2ND_ARCH):=
 LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
@@ -279,6 +299,7 @@
 LOCAL_CLANG_LDFLAGS_$(HOST_ARCH):=
 LOCAL_CPPFLAGS_$(HOST_ARCH):=
 LOCAL_GENERATED_SOURCES_$(HOST_ARCH):=
+LOCAL_HEADER_LIBRARIES_$(HOST_ARCH):=
 LOCAL_LDFLAGS_$(HOST_ARCH):=
 LOCAL_REQUIRED_MODULES_$(HOST_ARCH):=
 LOCAL_SHARED_LIBRARIES_$(HOST_ARCH):=
@@ -297,6 +318,7 @@
 LOCAL_CLANG_LDFLAGS_$(HOST_2ND_ARCH):=
 LOCAL_CPPFLAGS_$(HOST_2ND_ARCH):=
 LOCAL_GENERATED_SOURCES_$(HOST_2ND_ARCH):=
+LOCAL_HEADER_LIBRARIES_$(HOST_2ND_ARCH):=
 LOCAL_LDFLAGS_$(HOST_2ND_ARCH):=
 LOCAL_REQUIRED_MODULES_$(HOST_2ND_ARCH):=
 LOCAL_SHARED_LIBRARIES_$(HOST_2ND_ARCH):=
@@ -311,6 +333,7 @@
 LOCAL_C_INCLUDES_$(HOST_OS):=
 LOCAL_CPPFLAGS_$(HOST_OS):=
 LOCAL_GENERATED_SOURCES_$(HOST_OS):=
+LOCAL_HEADER_LIBRARIES_$(HOST_OS):=
 LOCAL_LDFLAGS_$(HOST_OS):=
 LOCAL_LDLIBS_$(HOST_OS):=
 LOCAL_REQUIRED_MODULES_$(HOST_OS):=
@@ -324,6 +347,7 @@
 LOCAL_C_INCLUDES_$(HOST_CROSS_OS):=
 LOCAL_CPPFLAGS_$(HOST_CROSS_OS):=
 LOCAL_GENERATED_SOURCES_$(HOST_CROSS_OS):=
+LOCAL_HEADER_LIBRARIES_$(HOST_CROSS_OS):=
 LOCAL_LDFLAGS_$(HOST_CROSS_OS):=
 LOCAL_LDLIBS_$(HOST_CROSS_OS):=
 LOCAL_REQUIRED_MODULES_$(HOST_CROSS_OS):=
@@ -363,6 +387,8 @@
 LOCAL_CPPFLAGS_64:=
 LOCAL_GENERATED_SOURCES_32:=
 LOCAL_GENERATED_SOURCES_64:=
+LOCAL_HEADER_LIBRARIES_32:=
+LOCAL_HEADER_LIBRARIES_64:=
 LOCAL_INIT_RC_32:=
 LOCAL_INIT_RC_64:=
 LOCAL_LDFLAGS_32:=
diff --git a/core/combo/javac.mk b/core/combo/javac.mk
index 9042d83..7f66ea8 100644
--- a/core/combo/javac.mk
+++ b/core/combo/javac.mk
@@ -9,6 +9,11 @@
 #   COMMON_JAVAC -- Java compiler command with common arguments
 #
 
+ifndef ANDROID_COMPILE_WITH_JACK
+# Defines whether compilation with Jack is enabled by default.
+ANDROID_COMPILE_WITH_JACK := true
+endif
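+
+# For example, set ANDROID_COMPILE_WITH_JACK := false (in buildspec.mk or the
+# environment) to disable compiling with Jack by default.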
+
 common_jdk_flags := -Xmaxerrs 9999999
 
 # Use the indexer wrapper to index the codebase instead of the javac compiler
diff --git a/core/config.mk b/core/config.mk
index 2f43f46..884be1e 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -92,6 +92,7 @@
 BUILD_HOST_STATIC_LIBRARY:= $(BUILD_SYSTEM)/host_static_library.mk
 BUILD_HOST_SHARED_LIBRARY:= $(BUILD_SYSTEM)/host_shared_library.mk
 BUILD_STATIC_LIBRARY:= $(BUILD_SYSTEM)/static_library.mk
+BUILD_HEADER_LIBRARY:= $(BUILD_SYSTEM)/header_library.mk
 BUILD_AUX_STATIC_LIBRARY:= $(BUILD_SYSTEM)/aux_static_library.mk
 BUILD_AUX_EXECUTABLE:= $(BUILD_SYSTEM)/aux_executable.mk
 BUILD_SHARED_LIBRARY:= $(BUILD_SYSTEM)/shared_library.mk
@@ -489,12 +490,16 @@
 SIGNAPK_JNI_LIBRARY_PATH := $(HOST_OUT_SHARED_LIBRARIES)
 LLVM_RS_CC := $(HOST_OUT_EXECUTABLES)/llvm-rs-cc
 BCC_COMPAT := $(HOST_OUT_EXECUTABLES)/bcc_compat
+DEPMOD := $(HOST_OUT_EXECUTABLES)/depmod
 
 DX := $(HOST_OUT_EXECUTABLES)/dx
 MAINDEXCLASSES := $(HOST_OUT_EXECUTABLES)/mainDexClasses
 
 SOONG_ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/soong_zip
 ZIP2ZIP := $(SOONG_HOST_OUT_EXECUTABLES)/zip2zip
+FILESLIST := $(SOONG_HOST_OUT_EXECUTABLES)/fileslist
+
+SOONG_JAVAC_WRAPPER := $(SOONG_HOST_OUT_EXECUTABLES)/soong_javac_wrapper
 
 # Always use prebuilts for ckati and makeparallel
 prebuilt_build_tools := prebuilts/build-tools
@@ -563,7 +568,6 @@
                external/nanopb-c/generator/google/*.py \
                external/nanopb-c/generator/proto/*.py)
 VTSC := $(HOST_OUT_EXECUTABLES)/vtsc$(HOST_EXECUTABLE_SUFFIX)
-DBUS_GENERATOR := $(HOST_OUT_EXECUTABLES)/dbus-binding-generator
 MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
 MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
 ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
@@ -601,10 +605,16 @@
 MKTARBALL := build/tools/mktarball.sh
 TUNE2FS := $(HOST_OUT_EXECUTABLES)/tune2fs$(HOST_EXECUTABLE_SUFFIX)
 JARJAR := $(HOST_OUT_JAVA_LIBRARIES)/jarjar.jar
+DESUGAR := $(HOST_OUT_JAVA_LIBRARIES)/desugar.jar
 DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
 FAT16COPY := build/tools/fat16copy.py
 CHECK_LINK_TYPE := build/tools/check_link_type.py
 
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
+DEFAULT_JACK_ENABLED:=full
+else
+DEFAULT_JACK_ENABLED:=
+endif
 ifneq ($(ANDROID_JACK_EXTRA_ARGS),)
 JACK_DEFAULT_ARGS :=
 DEFAULT_JACK_EXTRA_ARGS := $(ANDROID_JACK_EXTRA_ARGS)
@@ -621,11 +631,12 @@
 VERITY_SIGNER := $(HOST_OUT_EXECUTABLES)/verity_signer
 BUILD_VERITY_TREE := $(HOST_OUT_EXECUTABLES)/build_verity_tree
 BOOT_SIGNER := $(HOST_OUT_EXECUTABLES)/boot_signer
-FUTILITY := prebuilts/misc/$(BUILD_OS)-$(HOST_PREBUILT_ARCH)/futility/futility
+FUTILITY := $(HOST_OUT_EXECUTABLES)/futility-host
 VBOOT_SIGNER := prebuilts/misc/scripts/vboot_signer/vboot_signer.sh
 FEC := $(HOST_OUT_EXECUTABLES)/fec
 
 DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump2$(BUILD_EXECUTABLE_SUFFIX)
+PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
 
 # relocation packer
 RELOCATION_PACKER := prebuilts/misc/$(BUILD_OS)-$(HOST_PREBUILT_ARCH)/relocation_packer/relocation_packer
@@ -701,7 +712,7 @@
 
 # allow overriding default Java libraries on a per-target basis
 ifeq ($(TARGET_DEFAULT_JAVA_LIBRARIES),)
-  TARGET_DEFAULT_JAVA_LIBRARIES := core-oj core-libart legacy-test ext framework okhttp
+  TARGET_DEFAULT_JAVA_LIBRARIES := core-oj core-libart ext framework okhttp
 endif
 
 # Flags for DEX2OAT
@@ -807,6 +818,7 @@
     art/% \
     bionic/% \
     external/fio/% \
+    hardware/interfaces/% \
 
 define find_warning_disallowed_projects
     $(filter $(ANDROID_WARNING_DISALLOWED_PROJECTS),$(1)/)
@@ -841,7 +853,7 @@
     userdataimage-nodeps userdatatarball-nodeps \
     cacheimage-nodeps \
     bptimage-nodeps \
-    vendorimage-nodeps \
+    vnod vendorimage-nodeps \
     systemotherimage-nodeps \
     ramdisk-nodeps \
     bootimage-nodeps \
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 4a35299..d735a02 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -68,6 +68,18 @@
   my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
 endif
 
+# Disable CFI for arm32 (b/35157333).
+ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
+# CFI needs gold linker, and mips toolchain does not have one.
+ifneq ($(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
 my_nosanitize = $(strip $(LOCAL_NOSANITIZE))
 ifneq ($(my_nosanitize),)
   my_sanitize := $(filter-out $(my_nosanitize),$(my_sanitize))
@@ -147,9 +159,14 @@
   # LLVM is not set up to do this on a function basis, so force Thumb on the
   # entire module.
   LOCAL_ARM_MODE := thumb
-  my_cflags += -flto -fsanitize-cfi-cross-dso -fvisibility=default
-  my_ldflags += -flto -fsanitize-cfi-cross-dso -fsanitize=cfi -Wl,-plugin-opt,O1 -Wl,-export-dynamic-symbol=__cfi_check
+  my_cflags += $(CFI_EXTRA_CFLAGS)
+  my_ldflags += $(CFI_EXTRA_LDFLAGS)
   my_arflags += --plugin $(LLVM_PREBUILTS_PATH)/../lib64/LLVMgold.so
+  # Workaround for b/33678192. CFI jumptables need Thumb2 codegen.  Revert when
+  # Clang is updated past r290384.
+  ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+    my_ldflags += -march=armv7-a
+  endif
 endif
 
 # If local or global modules need ASAN, add linker flags.
diff --git a/core/configure_local_jack.mk b/core/configure_local_jack.mk
index 446bab7..2270c88 100644
--- a/core/configure_local_jack.mk
+++ b/core/configure_local_jack.mk
@@ -21,9 +21,16 @@
 LOCAL_JACK_ENABLED := $(strip $(LOCAL_JACK_ENABLED))
 LOCAL_MODULE := $(strip $(LOCAL_MODULE))
 
-ifeq ($(filter full incremental,$(LOCAL_JACK_ENABLED)),)
+ifneq ($(LOCAL_JACK_ENABLED),full)
+ifneq ($(LOCAL_JACK_ENABLED),incremental)
+ifdef LOCAL_JACK_ENABLED
+ifneq ($(LOCAL_JACK_ENABLED),disabled)
 $(error $(LOCAL_PATH): invalid LOCAL_JACK_ENABLED "$(LOCAL_JACK_ENABLED)" for $(LOCAL_MODULE))
 endif
+endif
+LOCAL_JACK_ENABLED :=
+endif
+endif
 
 ifdef $(LOCAL_MODULE).JACK_VERSION
 LOCAL_JACK_VERSION := $($(LOCAL_MODULE).JACK_VERSION)
diff --git a/core/definitions.mk b/core/definitions.mk
index d77cea9..72c27af 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -681,7 +681,7 @@
 # $(1): library name
 # $(2): Non-empty if IS_HOST_MODULE
 define _java-lib-full-classes.jar
-$(call _java-lib-dir,$(1),$(2))/$(if $(2),javalib,classes)$(COMMON_JAVA_PACKAGE_SUFFIX)
+$(call _java-lib-dir,$(1),$(2))/classes.jar
 endef
 
 # Get the jar files (you can pass to "javac -classpath") of static or shared
@@ -704,14 +704,6 @@
 $(call java-lib-files,$(1),$(2))
 endef
 
-# Get the jar files (you can pass to "javac -classpath") of host dalvik Java libraries.
-# You can also use them as dependency files.
-# A host dalvik Java library is different from a host Java library in that
-# the java lib file is classes.jar, not javalib.jar.
-# $(1): library name list
-define host-dex-java-lib-files
-$(foreach lib,$(1),$(call _java-lib-dir,$(lib),true)/classes.jar)
-endef
 
 ###########################################################
 ## Convert "core ext framework" to "out/.../classes.jack ..."
@@ -850,18 +842,37 @@
 
 
 ###########################################################
-## Color-coded warnings and errors in build rules
-##
-## $(1): message to print
+## Color-coded warnings and errors
+## Use echo-(warning|error) in a build rule
+## Use pretty-(warning|error) instead of $(warning)/$(error)
 ###########################################################
+ESC_BOLD := \e[1m
+ESC_WARNING := \e[35m
+ESC_ERROR := \e[31m
+ESC_RESET := \e[0m
+
+# $(1): path (and optionally line) information
+# $(2): message to print
 define echo-warning
-echo -e "\e[1;35mwarning:\e[0m \e[1m" $(1) "\e[0m\n"
+echo -e "$(ESC_BOLD)$(1): $(ESC_WARNING)warning:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
 endef
 
+# $(1): path (and optionally line) information
+# $(2): message to print
 define echo-error
-echo -e "\e[1;31merror:\e[0m \e[1m" $(1) "\e[0m\n"
+echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
 endef
 
+# $(1): message to print
+define pretty-warning
+$(shell $(call echo-warning,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
+endef
+
+# $(1): message to print
+define pretty-error
+$(shell $(call echo-error,$(LOCAL_MODULE_MAKEFILE),$(LOCAL_MODULE): $(1)))
+$(error done)
+endef
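+
+# Illustrative use from a module makefile:
+#   $(call pretty-error,unsupported value for LOCAL_FOO)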
 
 ###########################################################
 ## Package filtering
@@ -1177,31 +1188,6 @@
   $(hide) mv $(basename $@).cc $@)
 endef
 
-
-######################################################################
-## Commands for generating DBus adaptors from .dbus-xml files.
-######################################################################
-define generate-dbus-adaptors
-@echo "Generating DBus adaptors for $(PRIVATE_MODULE)"
-@mkdir -p $(dir $@)
-$(hide) $(DBUS_GENERATOR) \
-	--service-config=$(PRIVATE_DBUS_SERVICE_CONFIG) \
-	--adaptor=$@ \
-	$<
-endef
-
-######################################################################
-## Commands for generating DBus proxies from .dbus-xml files.
-######################################################################
-define generate-dbus-proxies
-@echo "Generating DBus proxies for $(PRIVATE_MODULE)"
-@mkdir -p $(dir $@)
-$(hide) $(DBUS_GENERATOR) \
-	--service-config=$(PRIVATE_DBUS_SERVICE_CONFIG) \
-	--proxy=$@ \
-	$(filter %.dbus-xml,$^)
-endef
-
 ###########################################################
 ## Helper to set include paths form transform-*-to-o
 ###########################################################
@@ -1535,6 +1521,7 @@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-target-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1556,20 +1543,22 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define extract-and-include-whole-static-libs-first
 $(if $(strip $(1)),
-$(hide) cp $(1) $@)
+$(hide) cp $(1) $(2))
 endef
 
+# $(1): the full path of the destination static library.
 define extract-and-include-target-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-target-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-target-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1577,15 +1566,17 @@
 define transform-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-target-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-target-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
     $(PRIVATE_ARFLAGS) \
-    $@,$(PRIVATE_ALL_OBJECTS))
+    $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-aux-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1606,14 +1597,14 @@
         $(PRIVATE_AR) p $$lib_to_include $$f > $$ldir/$$ext$$f; \
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
-    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $@ $$filelist
+    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $(2) $$filelist
 
 endef
 
 define extract-and-include-aux-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-aux-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-aux-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1621,10 +1612,11 @@
 define transform-o-to-aux-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-aux-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-aux-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$(PRIVATE_AR) \
-    $(AUX_GLOBAL_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS))
+    $(AUX_GLOBAL_ARFLAGS) $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 define transform-o-to-aux-executable-inner
@@ -1671,6 +1663,7 @@
 ###########################################################
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-host-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1692,30 +1685,30 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 define extract-and-include-host-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-host-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-host-whole-static-lib, $(lib),$(1)))
 endef
 
 ifeq ($(HOST_OS),darwin)
 # On Darwin the host ar fails if there is nothing to add to .a at all.
 # We work around this by adding a dummy.o and then deleting it.
 define create-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $(1))dummy.o)
 endef
 
 define get-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(dir $(1))dummy.o)
 endef
 
 define delete-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $@ $(dir $@)dummy.o \
-  && rm -f $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $(1) $(dir $(1))dummy.o \
+  && rm -f $(dir $(1))dummy.o)
 endef
 endif  # HOST_OS is darwin
 
@@ -1724,13 +1717,14 @@
 define transform-host-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-host-whole-static-libs)
-$(create-dummy.o-if-no-objs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-host-whole-static-libs,$@.tmp)
+$(call create-dummy.o-if-no-objs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) \
-    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@,\
-    $(PRIVATE_ALL_OBJECTS) $(get-dummy.o-if-no-objs))
-$(delete-dummy.o-if-no-objs)
+    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@.tmp,\
+    $(PRIVATE_ALL_OBJECTS) $(call get-dummy.o-if-no-objs,$@.tmp))
+$(call delete-dummy.o-if-no-objs,$@.tmp)
+$(hide) mv -f $@.tmp $@
 endef
 
 
@@ -1832,18 +1826,21 @@
 define transform-to-stripped-keep-mini-debug-info
 @echo "$($(PRIVATE_PREFIX)DISPLAY) Strip (mini debug info): $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-$(hide) $(PRIVATE_NM) -D $< --format=posix --defined-only | awk '{ print $$1 }' | sort >$@.dynsyms
-$(hide) $(PRIVATE_NM) $< --format=posix --defined-only | awk '{ if ($$2 == "T" || $$2 == "t" || $$2 == "D") print $$1 }' | sort >$@.funcsyms
-$(hide) comm -13 $@.dynsyms $@.funcsyms >$@.keep_symbols
-$(hide) $(PRIVATE_OBJCOPY) --only-keep-debug $< $@.debug
-$(hide) $(PRIVATE_OBJCOPY) --rename-section .debug_frame=saved_debug_frame $@.debug $@.mini_debuginfo
-$(hide) $(PRIVATE_OBJCOPY) -S --remove-section .gdb_index --remove-section .comment --keep-symbols=$@.keep_symbols $@.mini_debuginfo
-$(hide) $(PRIVATE_OBJCOPY) --rename-section saved_debug_frame=.debug_frame $@.mini_debuginfo
-$(hide) $(PRIVATE_STRIP) --strip-all -R .comment $< -o $@
-$(hide) rm -f $@.mini_debuginfo.xz
-$(hide) xz $@.mini_debuginfo
-$(hide) $(PRIVATE_OBJCOPY) --add-section .gnu_debugdata=$@.mini_debuginfo.xz $@
-$(hide) rm -f $@.dynsyms $@.funcsyms $@.keep_symbols $@.debug $@.mini_debuginfo.xz
+$(hide) rm -f $@ $@.dynsyms $@.funcsyms $@.keep_symbols $@.debug $@.mini_debuginfo.xz
+if $(PRIVATE_STRIP) --strip-all -R .comment $< -o $@; then \
+  $(PRIVATE_OBJCOPY) --only-keep-debug $< $@.debug && \
+  $(PRIVATE_NM) -D $< --format=posix --defined-only | awk '{ print $$1 }' | sort >$@.dynsyms && \
+  $(PRIVATE_NM) $< --format=posix --defined-only | awk '{ if ($$2 == "T" || $$2 == "t" || $$2 == "D") print $$1 }' | sort >$@.funcsyms && \
+  comm -13 $@.dynsyms $@.funcsyms >$@.keep_symbols && \
+  $(PRIVATE_OBJCOPY) --rename-section .debug_frame=saved_debug_frame $@.debug $@.mini_debuginfo && \
+  $(PRIVATE_OBJCOPY) -S --remove-section .gdb_index --remove-section .comment --keep-symbols=$@.keep_symbols $@.mini_debuginfo && \
+  $(PRIVATE_OBJCOPY) --rename-section saved_debug_frame=.debug_frame $@.mini_debuginfo && \
+  rm -f $@.mini_debuginfo.xz && \
+  xz $@.mini_debuginfo && \
+  $(PRIVATE_OBJCOPY) --add-section .gnu_debugdata=$@.mini_debuginfo.xz $@; \
+else \
+  cp -f $< $@; \
+fi
 endef
 
 define transform-to-stripped-keep-symbols
@@ -2194,9 +2191,9 @@
 # $(2): bootclasspath
 define compile-java
 $(hide) rm -f $@
-$(hide) rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR)
+$(hide) rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) $(PRIVATE_ANNO_INTERMEDIATES_DIR)
 $(hide) mkdir -p $(dir $@)
-$(hide) mkdir -p $(PRIVATE_CLASS_INTERMEDIATES_DIR)
+$(hide) mkdir -p $(PRIVATE_CLASS_INTERMEDIATES_DIR) $(PRIVATE_ANNO_INTERMEDIATES_DIR)
 $(call unzip-jar-files,$(PRIVATE_STATIC_JAVA_LIBRARIES),$(PRIVATE_CLASS_INTERMEDIATES_DIR))
 $(call dump-words-to-file,$(PRIVATE_JAVA_SOURCES),$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list)
 $(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
@@ -2209,13 +2206,13 @@
 $(hide) tr ' ' '\n' < $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list \
     | $(NORMALIZE_PATH) | sort -u > $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq
 $(hide) if [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq ] ; then \
-    $(1) -encoding UTF-8 \
+    $(SOONG_JAVAC_WRAPPER) $(1) -encoding UTF-8 \
     $(if $(findstring true,$(PRIVATE_WARNINGS_ENABLE)),$(xlint_unchecked),) \
     $(2) \
     $(addprefix -classpath ,$(strip \
         $(call normalize-path-list,$(PRIVATE_ALL_JAVA_LIBRARIES)))) \
     $(if $(findstring true,$(PRIVATE_WARNINGS_ENABLE)),$(xlint_unchecked),) \
-    -extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR) \
+    -extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR) -s $(PRIVATE_ANNO_INTERMEDIATES_DIR) \
     $(PRIVATE_JAVACFLAGS) \
     \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq \
     || ( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 ) \
@@ -2345,8 +2342,9 @@
 $(hide) tr ' ' '\n' < $@.java-source-list \
     | sort -u > $@.java-source-list-uniq
 $(hide) if [ -s $@.java-source-list-uniq ] ; then \
-	$(call call-jack) \
+	$(call call-jack,$(PRIVATE_JACK_EXTRA_ARGS)) \
 	    $(strip $(PRIVATE_JACK_FLAGS)) \
+	    $(strip $(PRIVATE_JACK_DEBUG_FLAGS)) \
 	    $(addprefix --classpath ,$(strip \
 	        $(call normalize-path-list,$(call reverse-list,$(PRIVATE_STATIC_JACK_LIBRARIES)) $(PRIVATE_JACK_SHARED_LIBRARIES)))) \
 	    -D jack.import.resource.policy=keep-first \
@@ -2506,6 +2504,52 @@
     $(addprefix -ix , $(PRIVATE_EMMA_COVERAGE_FILTER))
 endef
 
+define desugar-bootclasspath
+$(filter-out -classpath -bootclasspath "",$(subst :,$(space),$(1)))
+endef
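+# For example (illustrative), "a.jar:b.jar" expands to "a.jar b.jar", with
+# bare -classpath / -bootclasspath / "" tokens filtered out.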
+
+# Takes an SDK version that might be PLATFORM_VERSION_CODENAME (for example P);
+# returns a number greater than the highest existing SDK version if it is,
+# or the input unchanged if it is not.
+define codename-or-sdk-to-sdk
+$(if $(filter $(1),$(PLATFORM_VERSION_CODENAME)),10000,$(1))
+endef
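+# For example (illustrative): $(call codename-or-sdk-to-sdk,25) yields 25,
+# while $(call codename-or-sdk-to-sdk,$(PLATFORM_VERSION_CODENAME)) yields 10000.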
+
+define desugar-classes-jar
+@echo Desugar: $@
+@mkdir -p $(dir $@)
+$(hide) rm -f $@ $@.tmp
+$(hide) java -jar $(DESUGAR) \
+    $(addprefix --bootclasspath_entry ,$(call desugar-bootclasspath,$(PRIVATE_BOOTCLASSPATH))) \
+    $(addprefix --classpath_entry ,$(PRIVATE_ALL_JAVA_LIBRARIES)) \
+    --min_sdk_version $(call codename-or-sdk-to-sdk,$(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
+    --allow_empty_bootclasspath \
+    $(if $(filter --core-library,$(PRIVATE_DX_FLAGS)),--core_library) \
+    -i $< -o $@.tmp
+$(hide) mv $@.tmp $@
+endef
+
+
+#TODO: use a smaller -Xmx value for most libraries;
+#      only core.jar and framework.jar need a heap this big.
+define transform-classes.jar-to-dex
+@echo "target Dex: $(PRIVATE_MODULE)"
+@mkdir -p $(dir $@)
+$(hide) rm -f $(dir $@)classes*.dex
+$(hide) $(DX) \
+    -JXms16M -JXmx2048M \
+    --dex --output=$(dir $@) \
+    --min-sdk-version=$(call codename-or-sdk-to-sdk,$(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
+    $(if $(NO_OPTIMIZE_DX), \
+        --no-optimize) \
+    $(if $(GENERATE_DEX_DEBUG), \
+	    --debug --verbose \
+	    --dump-to=$(@:.dex=.lst) \
+	    --dump-width=1000) \
+    $(PRIVATE_DX_FLAGS) \
+    $<
+endef
+
 # Create a mostly-empty .jar file that we'll add to later.
 # The MacOS jar tool doesn't like creating empty jar files,
 # so we need to give it something.
@@ -2525,6 +2569,17 @@
 $(call create-empty-package-at,$@)
 endef
 
+# Copy an archive file and delete any class files and empty folders inside.
+# $(1): the source archive file.
+# $(2): the destination archive file.
+define initialize-package-file
+@mkdir -p $(dir $(2))
+$(hide) cp -f $(1) $(2)
+$(hide) zip -qd $(2) "*.class" \
+    $(if $(strip $(PRIVATE_DONT_DELETE_JAR_DIRS)),,"*/") \
+    || true # Ignore the error when there is nothing to delete.
+endef
+
 #TODO: we kinda want to build different asset packages for
 #      different configurations, then combine them later (or something).
 #      Per-locale, etc.
@@ -2743,13 +2798,6 @@
 $(hide) cp -p $< $@
 endef
 
-# The same as copy-file-to-target, but use the zipalign tool to do so.
-define copy-file-to-target-with-zipalign
-@mkdir -p $(dir $@)
-$(hide) rm -f $@
-$(hide) $(ZIPALIGN) -f 4 $< $@
-endef
-
 # The same as copy-file-to-target, but strip out "# comment"-style
 # comments (for config files and such).
 define copy-file-to-target-strip-comments
@@ -2780,12 +2828,6 @@
 $(copy-file-to-target)
 endef
 
-# Copy a prebuilt file to a target location, using zipalign on it.
-define transform-prebuilt-to-target-with-zipalign
-@echo "$($(PRIVATE_PREFIX)DISPLAY) Prebuilt APK: $(PRIVATE_MODULE) ($@)"
-$(copy-file-to-target-with-zipalign)
-endef
-
 # Copy a prebuilt file to a target location, stripping "# comment" comments.
 define transform-prebuilt-to-target-strip-comments
 @echo "$($(PRIVATE_PREFIX)DISPLAY) Prebuilt: $(PRIVATE_MODULE) ($@)"
@@ -2826,7 +2868,8 @@
 ###########################################################
 define transform-jar-to-proguard
 @echo Proguard: $@
-$(hide) $(PROGUARD) -injars $< -outjars $@ $(PRIVATE_PROGUARD_FLAGS) \
+$(hide) $(PROGUARD) -injars '$<$(PRIVATE_PROGUARD_INJAR_FILTERS)' \
+    -outjars $@ $(PRIVATE_PROGUARD_FLAGS) \
     $(addprefix -injars , $(PRIVATE_EXTRA_INPUT_JAR))
 endef
 
@@ -3078,7 +3121,8 @@
   NOTICE_FILE \
   HOST_DALVIK_JAVA_LIBRARY \
   HOST_DALVIK_STATIC_JAVA_LIBRARY \
-  base_rules
+  base_rules \
+  HEADER_LIBRARY
 
 $(foreach $(s),$(STATS.MODULE_TYPE),$(eval STATS.MODULE_TYPE.$(s) :=))
 define record-module-type
@@ -3177,6 +3221,44 @@
 #$(warning 42 == $(call math_max,5,42))
 #$(warning 42 == $(call math_max,42,5))
 
+define math_gt_or_eq
+$(if $(filter $(1),$(call math_max,$(1),$(2))),true)
+endef
+
+#$(warning $(call math_gt_or_eq, 2, 1))
+#$(warning $(call math_gt_or_eq, 1, 1))
+#$(warning $(if $(call math_gt_or_eq, 1, 2),false,true))
+
+# $1 is the variable name to increment
+define inc_and_print
+$(strip $(eval $(1) := $($(1)) .)$(words $($(1))))
+endef
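+
+# Illustrative usage; the first call prints 1, the second 2:
+#$(warning $(call inc_and_print,my_counter))
+#$(warning $(call inc_and_print,my_counter))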
+
+###########################################################
+## Compatibility suite tools
+###########################################################
+
+# Return a list of output directories for a given suite and the current LOCAL_MODULE.
+# Can be passed a subdirectory to use for the common testcase directory.
+define compatibility_suite_dirs
+  $(strip \
+    $(COMPATIBILITY_TESTCASES_OUT_$(1)) \
+    $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)$(2))
+endef
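+
+# For example (illustrative), $(call compatibility_suite_dirs,cts) expands to
+# $(COMPATIBILITY_TESTCASES_OUT_cts) plus $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE).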
+
+# For each suite:
+# 1. Copy the files to each suite's output directories.
+# 2. Add all the files to each suite's dependent files list.
+# 3. Make my_all_targets depend on the copied files.
+# Requires my_compat_dist_$(suite) to be defined for each suite.
+define create-suite-dependencies
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_files_$(suite) := $(call copy-many-files, $(my_compat_dist_$(suite)))) \
+  $(eval COMPATIBILITY.$(suite).FILES := \
+    $(COMPATIBILITY.$(suite).FILES) $(my_compat_files_$(suite))) \
+  $(eval $(my_all_targets) : $(my_compat_files_$(suite))))
+endef
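+
+# Illustrative setup for one suite (the destination path is hypothetical):
+#   my_compat_dist_cts := $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_cts)/$(LOCAL_MODULE).apk
+#   $(eval $(call create-suite-dependencies))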
+
 ###########################################################
 ## Other includes
 ###########################################################
@@ -3220,3 +3302,40 @@
 #	  sed -e 's/#.*//' -e 's/^[^:]*: *//' -e 's/ *\\$$//' \
 #	      -e '/^$$/ d' -e 's/$$/ :/' < $*.d >> $*.P; \
 #	  rm -f $*.d
+
+
+###########################################################
+# Append the information to generate an RRO package for the
+# source module.
+#
+#  $(1): Source module name.
+#  $(2): Whether $(3) is a manifest package name or not.
+#  $(3): Manifest package name if $(2) is true.
+#        Otherwise, android manifest file path of the
+#        source module.
+#  $(4): Whether LOCAL_EXPORT_PACKAGE_RESOURCES is set or
+#        not for the source module.
+#  $(5): Resource overlay list.
+###########################################################
+define append_enforce_rro_sources
+  $(eval ENFORCE_RRO_SOURCES += \
+      $(strip $(1))||$(strip $(2))||$(strip $(3))||$(strip $(4))||$(call normalize-path-list, $(strip $(5))))
+endef
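+
+# Illustrative call (all values are hypothetical):
+#   $(call append_enforce_rro_sources,MyApp,false,$(LOCAL_PATH)/AndroidManifest.xml,true,vendor/acme/overlay)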
+
+###########################################################
+# Generate all RRO packages for source modules stored in
+# ENFORCE_RRO_SOURCES
+###########################################################
+define generate_all_enforce_rro_packages
+$(foreach source,$(ENFORCE_RRO_SOURCES), \
+  $(eval _o := $(subst ||,$(space),$(source))) \
+  $(eval enforce_rro_source_module := $(word 1,$(_o))) \
+  $(eval enforce_rro_source_is_manifest_package_name := $(word 2,$(_o))) \
+  $(eval enforce_rro_source_manifest_package_info := $(word 3,$(_o))) \
+  $(eval enforce_rro_use_res_lib := $(word 4,$(_o))) \
+  $(eval enforce_rro_source_overlays := $(subst :, ,$(word 5,$(_o)))) \
+  $(eval enforce_rro_module := $(enforce_rro_source_module)__auto_generated_rro) \
+  $(eval include $(BUILD_SYSTEM)/generate_enforce_rro.mk) \
+  $(eval ALL_MODULES.$(enforce_rro_source_module).REQUIRED += $(enforce_rro_module)) \
+)
+endef
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 3cff9c8..ffb888c 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -119,10 +119,12 @@
 	--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
 	--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 	--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
-	--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
+	--runtime-arg -Xnorelocate --compile-pic \
 	--no-generate-debug-info --generate-build-id \
 	--abort-on-hard-verifier-error \
 	--no-inline-from=core-oj.jar \
 	$(PRIVATE_DEX_PREOPT_FLAGS) \
+	$(PRIVATE_ART_FILE_PREOPT_FLAGS) \
+	$(PRIVATE_PROFILE_PREOPT_FLAGS) \
 	$(GLOBAL_DEXPREOPT_FLAGS)
 endef
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index 54211a4..860a66a 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -72,7 +72,7 @@
 		--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 		--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
 		--android-root=$(PRODUCT_OUT)/system \
-		--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
+		--runtime-arg -Xnorelocate --compile-pic \
 		--no-generate-debug-info --generate-build-id \
 		--multi-image --no-inline-from=core-oj.jar \
 		$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(COMPILED_CLASSES_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS)
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 52a67fe..68c46f5 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -50,10 +50,14 @@
 
 built_odex :=
 built_vdex :=
+built_art :=
 installed_odex :=
 installed_vdex :=
+installed_art :=
 built_installed_odex :=
 built_installed_vdex :=
+built_installed_art :=
+
 ifdef LOCAL_DEX_PREOPT
 dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
 ifdef dexpreopt_boot_jar_module
@@ -103,33 +107,69 @@
 
 built_odex := $(strip $(built_odex))
 built_vdex := $(strip $(built_vdex))
+built_art := $(strip $(built_art))
 installed_odex := $(strip $(installed_odex))
 installed_vdex := $(strip $(installed_vdex))
+installed_art := $(strip $(installed_art))
 
 ifdef built_odex
+
+ifndef LOCAL_DEX_PREOPT_GENERATE_PROFILE
+ifeq (true,$(WITH_DEX_PREOPT_GENERATE_PROFILE))
+  LOCAL_DEX_PREOPT_GENERATE_PROFILE := true
+endif
+endif
+
+ifeq (true,$(LOCAL_DEX_PREOPT_GENERATE_PROFILE))
+ifndef LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING
+$(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING))
+endif
+my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
+my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
+$(built_odex): $(my_built_profile)
+$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
+$(my_built_profile): PRIVATE_BUILT_MODULE := $(LOCAL_BUILT_MODULE)
+$(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
+$(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+$(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+$(my_built_profile): $(PROFMAN)
+$(my_built_profile): $(LOCAL_BUILT_MODULE)
+$(my_built_profile):
+	$(hide) mkdir -p $(dir $@)
+	ANDROID_LOG_TAGS="*:e" $(PROFMAN) \
+		--create-profile-from=$(PRIVATE_SOURCE_CLASSES) \
+		--apk=$(PRIVATE_BUILT_MODULE) \
+		--dex-location=$(PRIVATE_DEX_LOCATION) \
+		--reference-profile-file=$@
+else
+$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
+endif
+
 ifndef LOCAL_DEX_PREOPT_FLAGS
 LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
 ifndef LOCAL_DEX_PREOPT_FLAGS
 LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
 endif
 endif
-
 $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
 $(built_vdex): $(built_odex)
+$(built_art): $(built_odex)
 endif
 
 # Add the installed_odex to the list of installed files for this module.
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
 
 # Record dex-preopt config.
 DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
 DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
 DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
 DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).PROPRIETARY_MODULE := $(LOCAL_PROPRIETARY_MODULE)
+DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
 DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
 DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
 DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
@@ -138,6 +178,6 @@
 
 
 # Make sure to install the .odex and .vdex when you run "make <module_name>"
-$(my_all_targets): $(installed_odex) $(installed_vdex)
+$(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
 
 endif # LOCAL_DEX_PREOPT
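
Note on usage: the profile rules above are opt-in, either globally through
WITH_DEX_PREOPT_GENERATE_PROFILE or per module. A minimal sketch of a module
opting in, using only the variables introduced in this hunk (the listing path
is an illustrative placeholder):

    LOCAL_DEX_PREOPT_GENERATE_PROFILE := true
    # Text file naming the classes to compile ahead of time; required
    # whenever profile generation is enabled, or pretty-error fires.
    LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING := $(LOCAL_PATH)/profile-classes.txt
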
diff --git a/core/dpi_specific_apk.mk b/core/dpi_specific_apk.mk
index bcc5c18..1b0be07 100644
--- a/core/dpi_specific_apk.mk
+++ b/core/dpi_specific_apk.mk
@@ -31,9 +31,15 @@
 $(built_dpi_apk): $(additional_certificates)
 $(built_dpi_apk): PRIVATE_ADDITIONAL_CERTIFICATES := $(additional_certificates)
 
+$(built_dpi_apk): PRIVATE_SOURCE_ARCHIVE :=
 ifneq ($(full_classes_jar),)
-$(built_dpi_apk): PRIVATE_JACK_INTERMEDIATES_DIR := $(intermediates.COMMON)/jack-rsc
 $(built_dpi_apk): PRIVATE_DEX_FILE := $(built_dex)
+ifndef LOCAL_JACK_ENABLED
+# Use the jarjar-processed archive as the initial package file.
+$(built_dpi_apk): PRIVATE_SOURCE_ARCHIVE := $(full_classes_pre_proguard_jar)
+else
+$(built_dpi_apk): PRIVATE_JACK_INTERMEDIATES_DIR := $(intermediates.COMMON)/jack-rsc
+endif # LOCAL_JACK_ENABLED
 $(built_dpi_apk): $(built_dex)
 else
 $(built_dpi_apk): PRIVATE_DEX_FILE :=
@@ -46,7 +52,9 @@
 $(built_dpi_apk) : $(AAPT)
 $(built_dpi_apk) : $(all_res_assets) $(jni_shared_libraries) $(full_android_manifest)
 	@echo "target Package: $(PRIVATE_MODULE) ($@)"
-	$(create-empty-package)
+	$(if $(PRIVATE_SOURCE_ARCHIVE),\
+	  $(call initialize-package-file,$(PRIVATE_SOURCE_ARCHIVE),$@),\
+	  $(create-empty-package))
 	$(add-assets-to-package)
 ifneq ($(jni_shared_libraries),)
 	$(add-jni-shared-libs-to-package)
@@ -56,8 +64,10 @@
 	$(if $(PRIVATE_EXTRA_JAR_ARGS),$(call add-java-resources-to,$@))
 else
 	$(add-dex-to-package)
+ifdef LOCAL_JACK_ENABLED
 	$(add-carried-jack-resources)
 endif
+endif
 	$(sign-package)
 
 # Set up global variables to register this apk to the higher-level dependency graph.
diff --git a/core/dumpvar.mk b/core/dumpvar.mk
index 74ea3ff..acae48e 100644
--- a/core/dumpvar.mk
+++ b/core/dumpvar.mk
@@ -6,6 +6,7 @@
   TARGET_PRODUCT \
   TARGET_BUILD_VARIANT \
   TARGET_BUILD_TYPE \
+  TARGET_PLATFORM_VERSION \
   TARGET_BUILD_APPS \
   TARGET_ARCH \
   TARGET_ARCH_VARIANT \
diff --git a/core/envsetup.mk b/core/envsetup.mk
index fedd3f3..67ac751 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -11,16 +11,72 @@
 #         This can be useful if you set OUT_DIR to be a different directory
 #         than other outputs of your build system.
 
+# Returns all words in $1 up to and including $2
+define find_and_earlier
+  $(strip $(if $(1),
+    $(firstword $(1))
+    $(if $(filter $(firstword $(1)),$(2)),,
+      $(call find_and_earlier,$(wordlist 2,$(words $(1)),$(1)),$(2)))))
+endef
+
+#$(warning $(call find_and_earlier,A B C,A))
+#$(warning $(call find_and_earlier,A B C,B))
+#$(warning $(call find_and_earlier,A B C,C))
+#$(warning $(call find_and_earlier,A B C,D))
+
+define version-list
+$(1)PR1 $(1)PD1 $(1)PD2 $(1)PM1 $(1)PM2
+endef
+
+ALL_VERSIONS := O P Q R S T U V W X Y Z
+ALL_VERSIONS := $(foreach v,$(ALL_VERSIONS),$(call version-list,$(v)))
+
+# Filters ALL_VERSIONS down to the range [$1, $2], and errors if $1 > $2 or $3 is
+# not in [$1, $2]
+# $(1): min platform version
+# $(2): max platform version
+# $(3): default platform version
+define allowed-platform-versions
+$(strip \
+  $(if $(filter $(ALL_VERSIONS),$(1)),,
+    $(error Invalid MIN_PLATFORM_VERSION '$(1)'))
+  $(if $(filter $(ALL_VERSIONS),$(2)),,
+    $(error Invalid MAX_PLATFORM_VERSION '$(2)'))
+  $(if $(filter $(ALL_VERSIONS),$(3)),,
+    $(error Invalid DEFAULT_PLATFORM_VERSION '$(3)'))
+
+  $(eval allowed_versions_ := $(call find_and_earlier,$(ALL_VERSIONS),$(2)))
+
+  $(if $(filter $(allowed_versions_),$(1)),,
+    $(error MIN_PLATFORM_VERSION '$(1)' must be before MAX_PLATFORM_VERSION '$(2)'))
+
+  $(eval allowed_versions_ := $(1) \
+    $(filter-out $(call find_and_earlier,$(allowed_versions_),$(1)),$(allowed_versions_)))
+
+  $(if $(filter $(allowed_versions_),$(3)),,
+    $(error DEFAULT_PLATFORM_VERSION '$(3)' must be between MIN_PLATFORM_VERSION '$(1)' and MAX_PLATFORM_VERSION '$(2)'))
+
+  $(allowed_versions_))
+endef
+
+#$(warning $(call allowed-platform-versions,OPR1,PPR1,OPR1))
+#$(warning $(call allowed-platform-versions,OPM1,PPR1,OPR1))
+
 # Set up version information.
 include $(BUILD_SYSTEM)/version_defaults.mk
 
+ENABLED_VERSIONS := $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
+
+$(foreach v,$(ENABLED_VERSIONS), \
+  $(eval IS_AT_LEAST_$(v) := true))
+
 # ---------------------------------------------------------------
 # If you update the build system such that the environment setup
 # or buildspec.mk need to be updated, increment this number, and
 # people who haven't re-run those will have to do so before they
 # can build.  Make sure to also update the corresponding value in
 # buildspec.mk.default and envsetup.sh.
-CORRECT_BUILD_ENV_SEQUENCE_NUMBER := 12
+CORRECT_BUILD_ENV_SEQUENCE_NUMBER := 13
 
 # ---------------------------------------------------------------
 # The product defaults to generic on hardware
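
With the machinery above, TARGET_PLATFORM_VERSION picks one entry out of
ALL_VERSIONS, and every version up to and including it gets an
IS_AT_LEAST_<version> flag. A sketch of the intended downstream use (the
guarded flag and define are illustrative, not part of this change):

    # Only build version-dependent code once the O release train is targeted.
    ifdef IS_AT_LEAST_OPR1
      LOCAL_CFLAGS += -DHAS_O_APIS
    endif
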
@@ -112,6 +168,7 @@
 TARGET_COPY_OUT_SYSTEM := system
 TARGET_COPY_OUT_SYSTEM_OTHER := system_other
 TARGET_COPY_OUT_DATA := data
+TARGET_COPY_OUT_ASAN := $(TARGET_COPY_OUT_DATA)/asan
 TARGET_COPY_OUT_OEM := oem
 TARGET_COPY_OUT_ODM := odm
 TARGET_COPY_OUT_ROOT := root
@@ -198,6 +255,13 @@
   endif
 endif
 
+# Check BOARD_VNDK_VERSION
+ifdef BOARD_VNDK_VERSION
+  ifneq ($(BOARD_VNDK_VERSION),current)
+    $(error BOARD_VNDK_VERSION: Only "current" is implemented)
+  endif
+endif
+
 # ---------------------------------------------------------------
 # Set up configuration for target machine.
 # The following must be set:
@@ -276,6 +340,7 @@
 HOST_CROSS_OUT_SHARED_LIBRARIES := $(HOST_CROSS_OUT)/lib
 HOST_CROSS_OUT_NATIVE_TESTS := $(HOST_CROSS_OUT)/nativetest
 HOST_CROSS_OUT_COVERAGE := $(HOST_CROSS_OUT)/coverage
+HOST_OUT_TESTCASES := $(HOST_OUT)/testcases
 
 HOST_OUT_INTERMEDIATES := $(HOST_OUT)/obj
 HOST_OUT_INTERMEDIATE_LIBRARIES := $(HOST_OUT_INTERMEDIATES)/lib
@@ -304,6 +369,7 @@
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES := $(HOST_OUT_EXECUTABLES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT_JAVA_LIBRARIES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_NATIVE_TESTS := $(HOST_OUT)/nativetest
+$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_TESTCASES := $(HOST_OUT_TESTCASES)
 
 # The default host library path.
 # It always points to the path where we build libraries in the default bitness.
@@ -328,7 +394,7 @@
 
 TARGET_OUT := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM)
 ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_DATA)
+target_out_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/system
 else
 target_out_shared_libraries_base := $(TARGET_OUT)
 endif
@@ -351,6 +417,7 @@
 TARGET_OUT_ETC := $(TARGET_OUT)/etc
 TARGET_OUT_NOTICE_FILES := $(TARGET_OUT_INTERMEDIATES)/NOTICE_FILES
 TARGET_OUT_FAKE := $(PRODUCT_OUT)/fake_packages
+TARGET_OUT_TESTCASES := $(PRODUCT_OUT)/testcases
 
 TARGET_OUT_SYSTEM_OTHER := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_OTHER)
 
@@ -373,6 +440,7 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS := $(TARGET_OUT_APPS)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS_PRIVILEGED := $(TARGET_OUT_APPS_PRIVILEGED)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_TESTCASES := $(TARGET_OUT_TESTCASES)
 
 TARGET_OUT_DATA := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_DATA)
 TARGET_OUT_DATA_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
@@ -406,7 +474,7 @@
 
 TARGET_OUT_VENDOR := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)
 ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_DATA)/vendor
+target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
 else
 target_out_vendor_shared_libraries_base := $(TARGET_OUT_VENDOR)
 endif
@@ -418,6 +486,7 @@
 else
 TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
 endif
+TARGET_OUT_VENDOR_RENDERSCRIPT_BITCODE := $(TARGET_OUT_VENDOR_SHARED_LIBRARIES)
 TARGET_OUT_VENDOR_JAVA_LIBRARIES := $(TARGET_OUT_VENDOR)/framework
 TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR)/app
 TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR)/priv-app
@@ -429,6 +498,7 @@
 else
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
 endif
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_RENDERSCRIPT_BITCODE := $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR_APPS)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR_APPS_PRIVILEGED)
 
@@ -500,14 +570,14 @@
 TARGET_INSTALLER_SYSTEM_OUT := $(TARGET_INSTALLER_OUT)/root/system
 
 COMMON_MODULE_CLASSES := TARGET-NOTICE_FILES HOST-NOTICE_FILES HOST-JAVA_LIBRARIES
-PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS
+PER_ARCH_MODULE_CLASSES := SHARED_LIBRARIES STATIC_LIBRARIES EXECUTABLES GYP RENDERSCRIPT_BITCODE NATIVE_TESTS HEADER_LIBRARIES
 
 ifeq (,$(strip $(DIST_DIR)))
   DIST_DIR := $(OUT_DIR)/dist
 endif
 
-ifeq ($(PRINT_BUILD_CONFIG),)
-PRINT_BUILD_CONFIG := true
+ifndef KATI
+PRINT_BUILD_CONFIG ?= true
 endif
 
 ifeq ($(USE_CLANG_PLATFORM_BUILD),)
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index f51ddb8..3509bd2 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -44,12 +44,16 @@
 my_target_crtbegin_dynamic_o :=
 my_target_crtbegin_static_o :=
 my_target_crtend_o :=
+else ifdef LOCAL_USE_VNDK
+my_target_crtbegin_dynamic_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_dynamic.vendor.o
+my_target_crtbegin_static_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_static.vendor.o
+my_target_crtend_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_android.vendor.o
 else
 my_target_crtbegin_dynamic_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_dynamic.o
 my_target_crtbegin_static_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_static.o
 my_target_crtend_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_android.o
 endif
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
 my_target_crtbegin_dynamic_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_dynamic.o)
 my_target_crtbegin_static_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_static.o)
 my_target_crtend_o := $(wildcard $(my_ndk_sysroot_lib)/crtend_android.o)
diff --git a/core/generate_enforce_rro.mk b/core/generate_enforce_rro.mk
new file mode 100644
index 0000000..579089c
--- /dev/null
+++ b/core/generate_enforce_rro.mk
@@ -0,0 +1,30 @@
+include $(CLEAR_VARS)
+
+LOCAL_PACKAGE_NAME := $(enforce_rro_module)
+
+intermediates := $(call intermediates-dir-for,APPS,$(LOCAL_PACKAGE_NAME),,COMMON)
+rro_android_manifest_file := $(intermediates)/AndroidManifest.xml
+
+ifeq (true,$(enforce_rro_source_is_manifest_package_name))
+$(rro_android_manifest_file): PRIVATE_PACKAGE_NAME := $(enforce_rro_source_manifest_package_info)
+$(rro_android_manifest_file): build/tools/generate-enforce-rro-android-manifest.py
+	$(hide) build/tools/generate-enforce-rro-android-manifest.py -u -p $(PRIVATE_PACKAGE_NAME) -o $@
+else
+$(rro_android_manifest_file): PRIVATE_SOURCE_MANIFEST_FILE := $(enforce_rro_source_manifest_package_info)
+$(rro_android_manifest_file): $(enforce_rro_source_manifest_package_info) build/tools/generate-enforce-rro-android-manifest.py
+	$(hide) build/tools/generate-enforce-rro-android-manifest.py -p $(PRIVATE_SOURCE_MANIFEST_FILE) -o $@
+endif
+
+LOCAL_PATH:= $(intermediates)
+
+ifeq ($(enforce_rro_use_res_lib),true)
+LOCAL_RES_LIBRARIES := $(enforce_rro_source_module)
+endif
+
+LOCAL_FULL_MANIFEST_FILE := $(rro_android_manifest_file)
+LOCAL_CERTIFICATE := platform
+
+LOCAL_AAPT_FLAGS += --auto-add-overlay
+LOCAL_RESOURCE_DIR := $(enforce_rro_source_overlays)
+
+include $(BUILD_RRO_PACKAGE)
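
This template is instantiated once per overlaid package by
generate_all_enforce_rro_packages (the define whose tail opens this change;
see also the main.mk hook below). A product enables the mechanism roughly as
follows -- the target name shown is illustrative:

    # Illustrative product-makefile fragment: auto-generate RRO packages
    # for the static overlays that apply to framework-res.
    PRODUCT_ENFORCE_RRO_TARGETS := framework-res
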
diff --git a/core/header_library.mk b/core/header_library.mk
new file mode 100644
index 0000000..5144679
--- /dev/null
+++ b/core/header_library.mk
@@ -0,0 +1,72 @@
+$(call record-module-type,HEADER_LIBRARY)
+ifdef LOCAL_IS_HOST_MODULE
+  my_prefix := HOST_
+  LOCAL_HOST_PREFIX :=
+else
+  my_prefix := TARGET_
+endif
+include $(BUILD_SYSTEM)/multilib.mk
+
+ifndef my_module_multilib
+  # libraries default to building for both architectures
+  my_module_multilib := both
+endif
+
+LOCAL_2ND_ARCH_VAR_PREFIX :=
+include $(BUILD_SYSTEM)/module_arch_supported.mk
+
+ifeq ($(my_module_arch_supported),true)
+  include $(BUILD_SYSTEM)/header_library_internal.mk
+endif
+
+ifdef $(my_prefix)2ND_ARCH
+  LOCAL_2ND_ARCH_VAR_PREFIX := $($(my_prefix)2ND_ARCH_VAR_PREFIX)
+  include $(BUILD_SYSTEM)/module_arch_supported.mk
+
+  ifeq ($(my_module_arch_supported),true)
+    # Build for 2ND_ARCH
+    OVERRIDE_BUILT_MODULE_PATH :=
+    LOCAL_BUILT_MODULE :=
+    LOCAL_INSTALLED_MODULE :=
+    LOCAL_INTERMEDIATE_TARGETS :=
+    include $(BUILD_SYSTEM)/header_library_internal.mk
+  endif
+  LOCAL_2ND_ARCH_VAR_PREFIX :=
+endif # 2ND_ARCH
+
+ifdef LOCAL_IS_HOST_MODULE
+  ifdef HOST_CROSS_OS
+    my_prefix := HOST_CROSS_
+    LOCAL_HOST_PREFIX := $(my_prefix)
+
+    include $(BUILD_SYSTEM)/module_arch_supported.mk
+
+    ifeq ($(my_module_arch_supported),true)
+      # Build for HOST_CROSS_OS
+      OVERRIDE_BUILT_MODULE_PATH :=
+      LOCAL_BUILT_MODULE :=
+      LOCAL_INSTALLED_MODULE :=
+      LOCAL_INTERMEDIATE_TARGETS :=
+      include $(BUILD_SYSTEM)/header_library_internal.mk
+    endif
+
+    ifdef HOST_CROSS_2ND_ARCH
+      LOCAL_2ND_ARCH_VAR_PREFIX := $(HOST_CROSS_2ND_ARCH_VAR_PREFIX)
+      include $(BUILD_SYSTEM)/module_arch_supported.mk
+
+      ifeq ($(my_module_arch_supported),true)
+        # Build for HOST_CROSS_2ND_ARCH
+        OVERRIDE_BUILT_MODULE_PATH :=
+        LOCAL_BUILT_MODULE :=
+        LOCAL_INSTALLED_MODULE :=
+        LOCAL_INTERMEDIATE_TARGETS :=
+        include $(BUILD_SYSTEM)/header_library_internal.mk
+      endif
+      LOCAL_2ND_ARCH_VAR_PREFIX :=
+    endif
+
+    LOCAL_HOST_PREFIX :=
+  endif
+endif
+
+my_module_arch_supported :=
diff --git a/core/header_library_internal.mk b/core/header_library_internal.mk
new file mode 100644
index 0000000..35ee1bc
--- /dev/null
+++ b/core/header_library_internal.mk
@@ -0,0 +1,21 @@
+###########################################################
+## Standard rules for building a header library.
+##
+## Additional inputs from base_rules.make:
+## None.
+###########################################################
+
+LOCAL_MODULE_CLASS := HEADER_LIBRARIES
+LOCAL_UNINSTALLABLE_MODULE := true
+ifneq ($(strip $(LOCAL_MODULE_STEM)$(LOCAL_BUILT_MODULE_STEM)),)
+$(error $(LOCAL_PATH): Cannot set module stem for a library)
+endif
+
+include $(BUILD_SYSTEM)/binary.mk
+
+ifneq ($(strip $(all_objects)),)
+$(call pretty-error,Header libraries may not have any sources)
+endif
+
+$(LOCAL_BUILT_MODULE):
+	$(hide) touch $@
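
Assuming these files are wired to a BUILD_HEADER_LIBRARY entry point
elsewhere in the build system (not shown in this change), a header-only
module would look roughly like the sketch below; the module name and include
path are illustrative:

    include $(CLEAR_VARS)
    LOCAL_MODULE := libfoo_headers
    # Header libraries may not list sources (binary.mk must produce no
    # objects); the built module is just a touched stamp file.
    LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
    include $(BUILD_HEADER_LIBRARY)
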
diff --git a/core/help.mk b/core/help.mk
index 6e0b2c0..c034e79 100644
--- a/core/help.mk
+++ b/core/help.mk
@@ -22,6 +22,7 @@
 	@echo "droid                   Default target"
 	@echo "clean                   (aka clobber) equivalent to rm -rf out/"
 	@echo "snod                    Quickly rebuild the system image from built packages"
+	@echo "vnod                    Quickly rebuild the vendor image from built packages"
 	@echo "offline-sdk-docs        Generate the HTML for the developer SDK docs"
 	@echo "doc-comment-check-docs  Check HTML doc links & validity, without generating HTML"
 	@echo "libandroid_runtime      All the JNI framework stuff"
diff --git a/core/host_dalvik_java_library.mk b/core/host_dalvik_java_library.mk
index 64b610e..7101229 100644
--- a/core/host_dalvik_java_library.mk
+++ b/core/host_dalvik_java_library.mk
@@ -31,27 +31,45 @@
 #######################################
 include $(BUILD_SYSTEM)/host_java_library_common.mk
 #######################################
+ifdef LOCAL_JACK_ENABLED
 ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
   # For static library, $(LOCAL_BUILT_MODULE) is $(full_classes_jack).
   LOCAL_BUILT_MODULE_STEM := classes.jack
 endif
+endif
 
 ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
   LOCAL_JAVA_LIBRARIES :=  core-oj-hostdex core-libart-hostdex $(LOCAL_JAVA_LIBRARIES)
 endif
 
+full_classes_compiled_jar := $(intermediates.COMMON)/classes-full-debug.jar
+full_classes_desugar_jar := $(intermediates.COMMON)/desugar.classes.jar
+full_classes_jarjar_jar := $(intermediates.COMMON)/classes-jarjar.jar
+full_classes_jar := $(intermediates.COMMON)/classes.jar
 full_classes_jack := $(intermediates.COMMON)/classes.jack
 jack_check_timestamp := $(intermediates.COMMON)/jack.check.timestamp
 built_dex := $(intermediates.COMMON)/classes.dex
 
 LOCAL_INTERMEDIATE_TARGETS += \
+    $(full_classes_compiled_jar) \
+    $(full_classes_desugar_jar) \
+    $(full_classes_jarjar_jar) \
     $(full_classes_jack) \
+    $(full_classes_jar) \
     $(jack_check_timestamp) \
     $(built_dex)
 
 # See comment in java.mk
 ifndef LOCAL_CHECKED_MODULE
+ifdef LOCAL_JACK_ENABLED
 LOCAL_CHECKED_MODULE := $(jack_check_timestamp)
+else
+ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
+LOCAL_CHECKED_MODULE := $(full_classes_compiled_jar)
+else
+LOCAL_CHECKED_MODULE := $(built_dex)
+endif
+endif
 endif
 
 #######################################
@@ -65,6 +83,81 @@
 
 $(cleantarget): PRIVATE_CLEAN_FILES += $(intermediates.COMMON)
 
+ifndef LOCAL_JACK_ENABLED
+
+$(full_classes_compiled_jar): PRIVATE_JAVA_LAYERS_FILE := $(layers_file)
+$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
+$(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_FILES :=
+$(full_classes_compiled_jar): PRIVATE_JAR_PACKAGES :=
+$(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_PACKAGES :=
+$(full_classes_compiled_jar): \
+        $(java_sources) \
+        $(java_resource_sources) \
+        $(full_java_lib_deps) \
+        $(jar_manifest_file) \
+        $(proto_java_sources_file_stamp) \
+        $(annotation_processor_deps) \
+        $(NORMALIZE_PATH) \
+        $(LOCAL_ADDITIONAL_DEPENDENCIES) \
+        | $(SOONG_JAVAC_WRAPPER)
+	$(transform-host-java-to-package)
+
+my_desugaring :=
+ifeq ($(LOCAL_JAVA_LANGUAGE_VERSION),1.8)
+my_desugaring := true
+$(full_classes_desugar_jar): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
+$(full_classes_desugar_jar): $(full_classes_compiled_jar) $(DESUGAR)
+	$(desugar-classes-jar)
+endif
+
+ifndef my_desugaring
+full_classes_desugar_jar := $(full_classes_compiled_jar)
+endif
+
+# Run jarjar if necessary, otherwise just copy the file.
+ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
+$(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
+$(full_classes_jarjar_jar): $(full_classes_desugar_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
+	@echo JarJar: $@
+	$(hide) java -jar $(JARJAR) process $(PRIVATE_JARJAR_RULES) $< $@
+else
+full_classes_jarjar_jar := $(full_classes_desugar_jar)
+endif
+
+$(eval $(call copy-one-file,$(full_classes_jarjar_jar),$(full_classes_jar)))
+
+ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
+# No dex; all we want are the .class files with resources.
+$(LOCAL_BUILT_MODULE) : $(java_resource_sources)
+$(LOCAL_BUILT_MODULE) : $(full_classes_jar)
+	@echo "host Static Jar: $(PRIVATE_MODULE) ($@)"
+	$(copy-file-to-target)
+
+else # !LOCAL_IS_STATIC_JAVA_LIBRARY
+$(built_dex): PRIVATE_INTERMEDIATES_DIR := $(intermediates.COMMON)
+$(built_dex): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
+$(built_dex): $(full_classes_jar) $(DX)
+	$(transform-classes.jar-to-dex)
+
+$(LOCAL_BUILT_MODULE): PRIVATE_DEX_FILE := $(built_dex)
+$(LOCAL_BUILT_MODULE): PRIVATE_SOURCE_ARCHIVE := $(full_classes_jarjar_jar)
+$(LOCAL_BUILT_MODULE): PRIVATE_DONT_DELETE_JAR_DIRS := $(LOCAL_DONT_DELETE_JAR_DIRS)
+$(LOCAL_BUILT_MODULE): $(built_dex) $(java_resource_sources)
+	@echo "Host Jar: $(PRIVATE_MODULE) ($@)"
+	$(call initialize-package-file,$(PRIVATE_SOURCE_ARCHIVE),$@)
+	$(add-dex-to-package)
+
+endif # !LOCAL_IS_STATIC_JAVA_LIBRARY
+
+ifneq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(LOCAL_SDK_VERSION)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SDK_VERSION := $(LOCAL_SDK_VERSION)
+else
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(DEFAULT_APP_TARGET_SDK)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SDK_VERSION := $(PLATFORM_SDK_VERSION)
+endif
+
+else # LOCAL_JACK_ENABLED
 $(LOCAL_INTERMEDIATE_TARGETS): \
   PRIVATE_JACK_INTERMEDIATES_DIR := $(intermediates.COMMON)/jack-rsc
 
@@ -116,6 +209,7 @@
 $(jack_check_timestamp): $(jack_all_deps) | setup-jack-server
 	@echo Checking build with Jack: $@
 	$(jack-check-java)
+endif # LOCAL_JACK_ENABLED
 
 USE_CORE_LIB_BOOTCLASSPATH :=
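
For reference, the non-Jack path added above chains its intermediates in
this order (a summary of the rules in this file, not new behavior):

    # javac              -> classes-full-debug.jar  (full_classes_compiled_jar)
    # desugar (1.8 only) -> desugar.classes.jar     (full_classes_desugar_jar)
    # jarjar (if rules)  -> classes-jarjar.jar      (full_classes_jarjar_jar)
    # copy               -> classes.jar             (full_classes_jar)
    # dx (non-static)    -> classes.dex             (built_dex)
    # package            -> javalib.jar             (LOCAL_BUILT_MODULE)
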
 
diff --git a/core/host_java_library.mk b/core/host_java_library.mk
index cc26d7c..35dece4 100644
--- a/core/host_java_library.mk
+++ b/core/host_java_library.mk
@@ -37,6 +37,7 @@
 # emma is hardcoded to use the leaf name of its input for the output file --
 # only the output directory can be changed
 full_classes_emma_jar := $(emma_intermediates_dir)/lib/$(notdir $(full_classes_jarjar_jar))
+full_classes_jar := $(intermediates.COMMON)/classes.jar
 
 LOCAL_INTERMEDIATE_TARGETS += \
     $(full_classes_compiled_jar) \
@@ -57,8 +58,13 @@
 # Run build/tools/java-layers.py for more details.
 layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE))
 
+# If error prone is enabled then add LOCAL_ERROR_PRONE_FLAGS to LOCAL_JAVACFLAGS
+ifeq ($(RUN_ERROR_PRONE),true)
+LOCAL_JAVACFLAGS += $(LOCAL_ERROR_PRONE_FLAGS)
+endif
+
 $(full_classes_compiled_jar): PRIVATE_JAVA_LAYERS_FILE := $(layers_file)
-$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JAVACFLAGS)
+$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
 $(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_FILES :=
 $(full_classes_compiled_jar): PRIVATE_JAR_PACKAGES :=
 $(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_PACKAGES :=
@@ -68,8 +74,10 @@
         $(full_java_lib_deps) \
         $(jar_manifest_file) \
         $(proto_java_sources_file_stamp) \
+        $(annotation_processor_deps) \
         $(NORMALIZE_PATH) \
-        $(LOCAL_ADDITIONAL_DEPENDENCIES)
+        $(LOCAL_ADDITIONAL_DEPENDENCIES) \
+        | $(SOONG_JAVAC_WRAPPER)
 	$(transform-host-java-to-package)
 
 javac-check : $(full_classes_compiled_jar)
@@ -82,9 +90,7 @@
 	@echo JarJar: $@
 	$(hide) java -jar $(JARJAR) process $(PRIVATE_JARJAR_RULES) $< $@
 else
-$(full_classes_jarjar_jar): $(full_classes_compiled_jar) | $(ACP)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
+full_classes_jarjar_jar := $(full_classes_compiled_jar)
 endif
 
 ifeq (true,$(LOCAL_EMMA_INSTRUMENT))
@@ -101,13 +107,9 @@
 # $(full_classes_emma_jar)
 $(full_classes_emma_jar) : $(full_classes_jarjar_jar) | $(EMMA_JAR)
 	$(transform-classes.jar-to-emma)
-
-$(LOCAL_BUILT_MODULE) : $(full_classes_emma_jar)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
-
 else # LOCAL_EMMA_INSTRUMENT
-$(LOCAL_BUILT_MODULE) : $(full_classes_jarjar_jar) | $(ACP)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
+full_classes_emma_jar := $(full_classes_jarjar_jar)
 endif # LOCAL_EMMA_INSTRUMENT
+
+$(eval $(call copy-one-file,$(full_classes_emma_jar),$(LOCAL_BUILT_MODULE)))
+$(eval $(call copy-one-file,$(full_classes_emma_jar),$(full_classes_jar)))
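
The repeated $(eval $(call copy-one-file,src,dst)) idiom above replaces the
open-coded ACP copy rules. copy-one-file lives in core/definitions.mk and is
not part of this diff; it behaves roughly like this sketch:

    # Rough sketch only -- see core/definitions.mk for the real definition.
    # $(1): source file, $(2): destination file
    define copy-one-file
    $(2): $(1)
    	@echo "Copy: $$@"
    	$$(copy-file-to-target)
    endef
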
diff --git a/core/jack-default.args b/core/jack-default.args
index 0232301..433bc53 100644
--- a/core/jack-default.args
+++ b/core/jack-default.args
@@ -5,3 +5,5 @@
 -D jack.reporter.level.file=error=--,warning=-
 --verbose error
 -D jack.jayce.cache=false
+-D jack.lambda.grouping-scope=package
+-D jack.lambda.simplify-stateless=true
diff --git a/core/java.mk b/core/java.mk
index baf097b..78c8ba3 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -96,14 +96,29 @@
 intermediates := $(call local-intermediates-dir)
 intermediates.COMMON := $(call local-intermediates-dir,COMMON)
 
+# Choose leaf name for the compiled jar file.
+ifeq ($(LOCAL_EMMA_INSTRUMENT),true)
+full_classes_compiled_jar_leaf := classes-no-debug-var.jar
+built_dex_intermediate_leaf := no-local
+else
+full_classes_compiled_jar_leaf := classes-full-debug.jar
+built_dex_intermediate_leaf := with-local
+endif
+
 ifeq ($(LOCAL_PROGUARD_ENABLED),disabled)
 LOCAL_PROGUARD_ENABLED :=
 endif
 
-full_classes_compiled_jar := $(intermediates.COMMON)/classes-full-debug.jar
-full_classes_jarjar_jar := $(intermediates.COMMON)/classes-jarjar.jar
-full_classes_proguard_jar := $(intermediates.COMMON)/proguard.classes.jar
-built_dex_intermediate := $(intermediates.COMMON)/dex-dir/classes.dex
+full_classes_compiled_jar := $(intermediates.COMMON)/$(full_classes_compiled_jar_leaf)
+full_classes_desugar_jar := $(intermediates.COMMON)/classes-desugar.jar
+jarjar_leaf := classes-jarjar.jar
+full_classes_jarjar_jar := $(intermediates.COMMON)/$(jarjar_leaf)
+emma_intermediates_dir := $(intermediates.COMMON)/emma_out
+# emma is hardcoded to use the leaf name of its input for the output file --
+# only the output directory can be changed
+full_classes_emma_jar := $(emma_intermediates_dir)/lib/$(jarjar_leaf)
+full_classes_proguard_jar := $(intermediates.COMMON)/classes-proguard.jar
+built_dex_intermediate := $(intermediates.COMMON)/$(built_dex_intermediate_leaf)/classes.dex
 full_classes_stubs_jar := $(intermediates.COMMON)/stubs.jar
 
 ifeq ($(LOCAL_MODULE_CLASS)$(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),APPS)
@@ -122,7 +137,9 @@
 
 LOCAL_INTERMEDIATE_TARGETS += \
     $(full_classes_compiled_jar) \
+    $(full_classes_desugar_jar) \
     $(full_classes_jarjar_jar) \
+    $(full_classes_emma_jar) \
     $(full_classes_jar) \
     $(full_classes_proguard_jar) \
     $(built_dex_intermediate) \
@@ -322,7 +339,15 @@
 # command line.
 ifndef LOCAL_CHECKED_MODULE
 ifdef full_classes_jar
+ifdef LOCAL_JACK_ENABLED
 LOCAL_CHECKED_MODULE := $(jack_check_timestamp)
+else
+ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
+LOCAL_CHECKED_MODULE := $(full_classes_compiled_jar)
+else
+LOCAL_CHECKED_MODULE := $(built_dex)
+endif
+endif
 endif
 endif
 
@@ -380,15 +405,7 @@
 # Droiddoc isn't currently able to generate stubs for modules, so we're just
 # allowing it to use the classes.jar as the "stubs" that would be used to link
 # against, for the cases where someone needs the jar to link against.
-# - Use the classes.jar instead of the handful of other intermediates that
-#   we have, because it's the most processed, but still hasn't had dex run on
-#   it, so it's closest to what's on the device.
-# - This extra copy, with the dependency on LOCAL_BUILT_MODULE allows the
-#   PRIVATE_ vars to be preserved.
-$(full_classes_stubs_jar): PRIVATE_SOURCE_FILE := $(full_classes_jar)
-$(full_classes_stubs_jar) : $(full_classes_jar) | $(ACP)
-	@echo Copying $(PRIVATE_SOURCE_FILE)
-	$(hide) $(ACP) -fp $(PRIVATE_SOURCE_FILE) $@
+$(eval $(call copy-one-file,$(full_classes_jar),$(full_classes_stubs_jar)))
 ALL_MODULES.$(LOCAL_MODULE).STUBS := $(full_classes_stubs_jar)
 
 # The layers file allows you to enforce a layering between java packages.
@@ -401,7 +418,13 @@
 # This intentionally depends on java_sources, not all_java_sources.
 # Deps for generated source files must be handled separately,
 # via deps on the target that generates the sources.
-$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JAVACFLAGS)
+
+# If error prone is enabled then add LOCAL_ERROR_PRONE_FLAGS to LOCAL_JAVACFLAGS
+ifeq ($(RUN_ERROR_PRONE),true)
+LOCAL_JAVACFLAGS += $(LOCAL_ERROR_PRONE_FLAGS)
+endif
+
+$(full_classes_compiled_jar): PRIVATE_JAVACFLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JAVACFLAGS) $(annotation_processor_flags)
 $(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_FILES := $(LOCAL_JAR_EXCLUDE_FILES)
 $(full_classes_compiled_jar): PRIVATE_JAR_PACKAGES := $(LOCAL_JAR_PACKAGES)
 $(full_classes_compiled_jar): PRIVATE_JAR_EXCLUDE_PACKAGES := $(LOCAL_JAR_EXCLUDE_PACKAGES)
@@ -414,33 +437,67 @@
         $(layers_file) \
         $(RenderScript_file_stamp) \
         $(proto_java_sources_file_stamp) \
+        $(annotation_processor_deps) \
         $(NORMALIZE_PATH) \
-        $(LOCAL_ADDITIONAL_DEPENDENCIES)
+        $(LOCAL_ADDITIONAL_DEPENDENCIES) \
+        | $(SOONG_JAVAC_WRAPPER)
 	$(transform-java-to-classes.jar)
 
 javac-check : $(full_classes_compiled_jar)
 javac-check-$(LOCAL_MODULE) : $(full_classes_compiled_jar)
 
-# Run jarjar if necessary, otherwise just copy the file.
+my_desugaring :=
+ifndef LOCAL_JACK_ENABLED
+ifndef LOCAL_IS_STATIC_JAVA_LIBRARY
+my_desugaring := true
+$(full_classes_desugar_jar): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
+$(full_classes_desugar_jar): $(full_classes_compiled_jar) $(DESUGAR)
+	$(desugar-classes-jar)
+endif
+endif
+
+ifndef my_desugaring
+full_classes_desugar_jar := $(full_classes_compiled_jar)
+endif
+
+# Run jarjar if necessary
 ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
 $(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
-$(full_classes_jarjar_jar): $(full_classes_compiled_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
+$(full_classes_jarjar_jar): $(full_classes_desugar_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
 	@echo JarJar: $@
 	$(hide) java -jar $(JARJAR) process $(PRIVATE_JARJAR_RULES) $< $@
 else
-$(full_classes_jarjar_jar): $(full_classes_compiled_jar) | $(ACP)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
+full_classes_jarjar_jar := $(full_classes_desugar_jar)
 endif
 
+ifeq ($(LOCAL_EMMA_INSTRUMENT),true)
+$(full_classes_emma_jar): PRIVATE_EMMA_COVERAGE_FILE := $(intermediates.COMMON)/coverage.emma.ignore
+$(full_classes_emma_jar): PRIVATE_EMMA_INTERMEDIATES_DIR := $(emma_intermediates_dir)
+# module level coverage filter can be defined using LOCAL_EMMA_COVERAGE_FILTER
+# in Android.mk
+ifdef LOCAL_EMMA_COVERAGE_FILTER
+$(full_classes_emma_jar): PRIVATE_EMMA_COVERAGE_FILTER := $(LOCAL_EMMA_COVERAGE_FILTER)
+else
+# By default, avoid applying emma instrumentation to the emma classes
+# themselves; otherwise exceptions will be thrown.
+$(full_classes_emma_jar): PRIVATE_EMMA_COVERAGE_FILTER := *,-emma,-emmarun,-com.vladium.*
+endif
+# this rule will generate both $(PRIVATE_EMMA_COVERAGE_FILE) and
+# $(full_classes_emma_jar)
+$(full_classes_emma_jar): $(full_classes_jarjar_jar) | $(EMMA_JAR)
+	$(transform-classes.jar-to-emma)
+
+else
+full_classes_emma_jar := $(full_classes_jarjar_jar)
+endif
+
+# TODO: this should depend on full_classes_emma_jar once coverage works again
+full_classes_pre_proguard_jar := $(full_classes_jarjar_jar)
+
 # Keep a copy of the jar just before proguard processing.
-$(full_classes_jar): $(full_classes_jarjar_jar) | $(ACP)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
+$(eval $(call copy-one-file,$(full_classes_pre_proguard_jar),$(intermediates.COMMON)/classes-pre-proguard.jar))
 
-$(call define-jar-to-toc-rule, $(full_classes_jar))
-
-# Run proguard if necessary, otherwise just copy the file.
+# Run proguard if necessary
 ifdef LOCAL_PROGUARD_ENABLED
 ifneq ($(filter-out full custom nosystem obfuscation optimization shrinktests,$(LOCAL_PROGUARD_ENABLED)),)
     $(warning while processing: $(LOCAL_MODULE))
@@ -470,7 +527,8 @@
 endif
 
 # jack already has the libraries in its classpath and doesn't support jars
-legacy_proguard_flags := $(addprefix -libraryjars ,$(my_support_library_sdk_raise) $(full_shared_java_libs))
+legacy_proguard_flags := $(addprefix -libraryjars ,$(my_support_library_sdk_raise) \
+  $(filter-out $(my_support_library_sdk_raise),$(full_shared_java_libs)))
 
 legacy_proguard_flags += -printmapping $(proguard_dictionary)
 jack_proguard_flags := -printmapping $(jack_dictionary)
@@ -481,7 +539,11 @@
 ifeq ($(filter nosystem,$(LOCAL_PROGUARD_ENABLED)),)
 common_proguard_flag_files += $(BUILD_SYSTEM)/proguard.flags
 ifeq ($(LOCAL_EMMA_INSTRUMENT),true)
+ifdef LOCAL_JACK_ENABLED
 common_proguard_flag_files += $(BUILD_SYSTEM)/proguard.jacoco.flags
+else
+common_proguard_flags += -include $(BUILD_SYSTEM)/proguard.emma.flags
+endif # LOCAL_JACK_ENABLED
 endif
 # If this is a test package, add proguard keep flags for tests.
 ifneq ($(LOCAL_INSTRUMENTATION_FOR)$(filter tests,$(LOCAL_MODULE_TAGS)),)
@@ -521,9 +583,10 @@
     -applymapping $(link_instr_intermediates_dir.COMMON)/proguard_dictionary \
     -verbose \
     $(legacy_proguard_flags)
-
+ifdef LOCAL_JACK_ENABLED
 jack_proguard_flags += -applymapping $(link_instr_intermediates_dir.COMMON)/jack_dictionary
 full_jack_deps += $(link_instr_intermediates_dir.COMMON)/jack_dictionary
+endif
 
 # Sometimes (test + main app) uses different keep rules from the main app -
 # apply the main app's dictionary anyway.
@@ -544,29 +607,65 @@
 else
 extra_input_jar :=
 endif
+
+# If not using jack and building against the current SDK version, filter the
+# junit and android.test classes out of the application: they are slated for
+# removal from the Android API under b/30188076 but are still present in it.
+# This allows the build to statically include those classes in the
+# application without simultaneously removing them from the API.
+proguard_injar_filters :=
+ifndef LOCAL_JACK_ENABLED
+ifdef LOCAL_SDK_VERSION
+ifeq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
+proguard_injar_filters := (!junit/framework/**,!junit/runner/**,!android/test/**)
+endif
+endif
+endif
+
+$(full_classes_proguard_jar): PRIVATE_PROGUARD_INJAR_FILTERS := $(proguard_injar_filters)
 $(full_classes_proguard_jar): PRIVATE_EXTRA_INPUT_JAR := $(extra_input_jar)
 $(full_classes_proguard_jar): PRIVATE_PROGUARD_FLAGS := $(legacy_proguard_flags) $(common_proguard_flags) $(LOCAL_PROGUARD_FLAGS)
-$(full_classes_proguard_jar) : $(full_classes_jar) $(extra_input_jar) $(my_support_library_sdk_raise) $(common_proguard_flag_files) $(proguard_flag_files) | $(PROGUARD)
+$(full_classes_proguard_jar) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_support_library_sdk_raise) $(common_proguard_flag_files) $(proguard_flag_files) | $(PROGUARD)
 	$(call transform-jar-to-proguard)
 
 else  # LOCAL_PROGUARD_ENABLED not defined
-$(full_classes_proguard_jar) : $(full_classes_jar) | $(ACP)
-	@echo Copying: $@
-	$(hide) $(ACP) -fp $< $@
-
+full_classes_proguard_jar := $(full_classes_pre_proguard_jar)
 endif # LOCAL_PROGUARD_ENABLED defined
 
-$(built_dex): $(built_dex_intermediate) | $(ACP)
+$(eval $(call copy-one-file,$(full_classes_proguard_jar),$(full_classes_jar)))
+
+$(call define-jar-to-toc-rule, $(full_classes_jar))
+
+ifneq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
+ifndef LOCAL_JACK_ENABLED
+$(built_dex_intermediate): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
+# If you instrument class files that have local variable debug information in
+# them, emma does not correctly maintain the local variable table.
+# This will cause an error when you try to convert the class files for Android.
+# The workaround is to build a different dex file based on the emma switch and
+# later copy it into classes.dex. When emma is on, dx is run with the
+# --no-locals option to remove local variable information.
+ifeq ($(LOCAL_EMMA_INSTRUMENT),true)
+$(built_dex_intermediate): PRIVATE_DX_FLAGS += --no-locals
+endif
+$(built_dex_intermediate): $(full_classes_jar) $(DX)
+	$(transform-classes.jar-to-dex)
+endif # LOCAL_JACK_ENABLED is disabled
+
+$(built_dex): $(built_dex_intermediate)
 	@echo Copying: $@
 	$(hide) mkdir -p $(dir $@)
 	$(hide) rm -f $(dir $@)/classes*.dex
-	$(hide) $(ACP) -fp $(dir $<)/classes*.dex $(dir $@)
+	$(hide) cp -fp $(dir $<)/classes*.dex $(dir $@)
+
+endif # !LOCAL_IS_STATIC_JAVA_LIBRARY
 
 findbugs_xml := $(intermediates.COMMON)/findbugs.xml
 $(findbugs_xml): PRIVATE_AUXCLASSPATH := $(addprefix -auxclasspath ,$(strip \
     $(call normalize-path-list,$(filter %.jar,$(full_java_libs)))))
 $(findbugs_xml): PRIVATE_FINDBUGS_FLAGS := $(LOCAL_FINDBUGS_FLAGS)
-$(findbugs_xml) : $(full_classes_jar) $(filter %.xml, $(LOCAL_FINDBUGS_FLAGS))
+$(findbugs_xml) : $(full_classes_pre_proguard_jar) $(filter %.xml, $(LOCAL_FINDBUGS_FLAGS))
 	@echo Findbugs: $@
 	$(hide) $(FINDBUGS) -textui -effort:min -xml:withMessages \
 		$(PRIVATE_AUXCLASSPATH) $(PRIVATE_FINDBUGS_FLAGS) \
@@ -588,6 +687,15 @@
 
 endif  # full_classes_jar is defined
 
+ifneq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(LOCAL_SDK_VERSION)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SDK_VERSION := $(LOCAL_SDK_VERSION)
+else
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(DEFAULT_APP_TARGET_SDK)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SDK_VERSION := $(PLATFORM_SDK_VERSION)
+endif
+
+ifdef LOCAL_JACK_ENABLED
 $(LOCAL_INTERMEDIATE_TARGETS): \
 	PRIVATE_JACK_INTERMEDIATES_DIR := $(intermediates.COMMON)/jack-rsc
 ifeq ($(LOCAL_JACK_ENABLED),incremental)
@@ -624,13 +732,14 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JACK_PROGUARD_FLAGS :=
 endif # LOCAL_PROGUARD_ENABLED defined
 
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JACK_FLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JACK_FLAGS)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JACK_FLAGS := $(GLOBAL_JAVAC_DEBUG_FLAGS) $(LOCAL_JACK_FLAGS) $(annotation_processor_flags)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JACK_VERSION := $(LOCAL_JACK_VERSION)
 
 jack_all_deps := $(java_sources) $(java_resource_sources) $(full_jack_deps) \
         $(jar_manifest_file) $(layers_file) $(RenderScript_file_stamp) \
         $(common_proguard_flag_files) $(proguard_flag_files) \
-        $(proto_java_sources_file_stamp) $(LOCAL_ADDITIONAL_DEPENDENCIES) $(LOCAL_JARJAR_RULES) \
+        $(proto_java_sources_file_stamp) $(annotation_processor_deps) \
+        $(LOCAL_ADDITIONAL_DEPENDENCIES) $(LOCAL_JARJAR_RULES) \
         $(NORMALIZE_PATH) $(JACK_DEFAULT_ARGS) $(JACK)
 
 $(jack_check_timestamp): $(jack_all_deps) | setup-jack-server
@@ -689,3 +798,4 @@
 	@echo Building with Jack: $@
 	$(java-to-jack)
 endif  # full_classes_jar is defined
+endif # LOCAL_JACK_ENABLED
diff --git a/core/java_common.mk b/core/java_common.mk
index dbdea26..600208a 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -148,10 +148,24 @@
 need_compile_java := $(strip $(all_java_sources)$(all_res_assets)$(java_resource_sources))$(LOCAL_STATIC_JAVA_LIBRARIES)$(filter true,$(LOCAL_SOURCE_FILES_ALL_GENERATED))
 ifdef need_compile_java
 
+annotation_processor_flags :=
+annotation_processor_deps :=
+
+ifdef LOCAL_ANNOTATION_PROCESSORS
+  annotation_processor_jars := $(call java-lib-deps,$(LOCAL_ANNOTATION_PROCESSORS),true)
+  annotation_processor_flags += -processorpath $(call normalize-path-list,$(annotation_processor_jars))
+  annotation_processor_deps += $(annotation_processor_jars)
+
+  # b/25860419: annotation processors must be explicitly specified for grok
+  annotation_processor_flags += $(foreach class,$(LOCAL_ANNOTATION_PROCESSOR_CLASSES),-processor $(class))
+
+  annotation_processor_jars :=
+endif
+
 full_static_java_libs := \
     $(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
       $(call intermediates-dir-for, \
-        JAVA_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE),COMMON)/javalib.jar)
+        JAVA_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE),COMMON)/classes.jar)
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_STATIC_JAVA_LIBRARIES := $(full_static_java_libs)
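
A module would use the new annotation-processor hooks along these lines; the
processor module and class names below are examples, not part of this change:

    # Illustrative Android.mk fragment: run an annotation processor at
    # javac time. LOCAL_ANNOTATION_PROCESSORS names java library modules
    # whose jars form the -processorpath.
    LOCAL_ANNOTATION_PROCESSORS := auto-value-processor
    LOCAL_ANNOTATION_PROCESSOR_CLASSES := com.google.auto.value.processor.AutoValueProcessor
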
 
@@ -159,6 +173,7 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ASSET_DIR := $(LOCAL_ASSET_DIR)
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CLASS_INTERMEDIATES_DIR := $(intermediates.COMMON)/classes
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ANNO_INTERMEDIATES_DIR := $(intermediates.COMMON)/anno
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/src
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HAS_PROTO_SOURCES := $(if $(proto_sources),true)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/proto
@@ -216,11 +231,11 @@
 ifeq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
 my_bootclasspath := ""
 else
-my_bootclasspath := $(call normalize-path-list,$(call host-dex-java-lib-files,core-oj-hostdex core-libart-hostdex))
+my_bootclasspath := $(call normalize-path-list,$(call java-lib-files,core-oj-hostdex core-libart-hostdex,true))
 endif
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_BOOTCLASSPATH := -bootclasspath $(my_bootclasspath)
 
-full_shared_java_libs := $(call host-dex-java-lib-files,$(LOCAL_JAVA_LIBRARIES))
+full_shared_java_libs := $(call java-lib-files,$(LOCAL_JAVA_LIBRARIES),true)
 full_java_lib_deps := $(full_shared_java_libs)
 else # !USE_CORE_LIB_BOOTCLASSPATH
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_BOOTCLASSPATH :=
@@ -242,7 +257,7 @@
   link_apk_libraries := \
       $(foreach lib,$(apk_libraries), \
         $(call intermediates-dir-for, \
-              APPS,$(lib),,COMMON)/classes.jar)
+              APPS,$(lib),,COMMON)/classes-pre-proguard.jar)
 
   # link against the jar with full original names (before proguard processing).
   full_shared_java_libs += $(link_apk_libraries)
@@ -263,7 +278,7 @@
   link_instr_intermediates_dir.COMMON := $(call intermediates-dir-for, \
       APPS,$(LOCAL_INSTRUMENTATION_FOR),,COMMON)
   # link against the jar with full original names (before proguard processing).
-  link_instr_classes_jar := $(link_instr_intermediates_dir.COMMON)/classes.jar
+  link_instr_classes_jar := $(link_instr_intermediates_dir.COMMON)/classes-pre-proguard.jar
   full_java_libs += $(link_instr_classes_jar)
   full_java_lib_deps += $(link_instr_classes_jar)
 endif  # LOCAL_INSTRUMENTATION_FOR
@@ -320,6 +335,7 @@
 ###########################################################
 # JACK
 ###########################################################
+ifdef LOCAL_JACK_ENABLED
 ifdef need_compile_java
 
 LOCAL_JACK_FLAGS += -D jack.java.source.version=$(LOCAL_JAVA_LANGUAGE_VERSION)
@@ -373,6 +389,7 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
 
 endif  # need_compile_java
+endif # LOCAL_JACK_ENABLED
 
 
 ###########################################################
diff --git a/core/java_library.mk b/core/java_library.mk
index b132fa6..9db587d 100644
--- a/core/java_library.mk
+++ b/core/java_library.mk
@@ -29,13 +29,20 @@
 include $(BUILD_SYSTEM)/configure_local_jack.mk
 #################################
 
+ifdef LOCAL_JACK_ENABLED
 ifdef LOCAL_IS_STATIC_JAVA_LIBRARY
 LOCAL_BUILT_MODULE_STEM := classes.jack
 endif
+endif
 
+# For non-static java libraries, other modules should depend on
+# out/target/common/obj/JAVA_LIBRARIES/.../javalib.jar (for jack)
+# or out/target/common/obj/JAVA_LIBRARIES/.../classes.jar (for javac).
+# For static java libraries, other modules should depend on
+# out/target/common/obj/JAVA_LIBRARIES/.../classes.jar
+# There are some dependencies outside the build system that assume static
+# java libraries produce javalib.jar, so we will copy classes.jar there too.
 intermediates.COMMON := $(call local-intermediates-dir,COMMON)
-
-# This file will be the one that other modules should depend on.
 common_javalib.jar := $(intermediates.COMMON)/javalib.jar
 LOCAL_INTERMEDIATE_TARGETS += $(common_javalib.jar)
 
@@ -46,8 +53,12 @@
 ifeq (true,$(EMMA_INSTRUMENT))
 ifeq (true,$(LOCAL_EMMA_INSTRUMENT))
 ifeq (true,$(EMMA_INSTRUMENT_STATIC))
+ifdef LOCAL_JACK_ENABLED
 # Jack supports coverage with Jacoco
 LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
+else
+LOCAL_STATIC_JAVA_LIBRARIES += emma
+endif # LOCAL_JACK_ENABLED
 endif # LOCAL_EMMA_INSTRUMENT
 endif # EMMA_INSTRUMENT_STATIC
 else
@@ -59,31 +70,39 @@
 #################################
 
 ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
-# No dex; all we want are the .class files with resources.
-$(common_javalib.jar) : $(java_resource_sources)
-$(common_javalib.jar) : $(full_classes_jar)
-	@echo "target Static Jar: $(PRIVATE_MODULE) ($@)"
-	$(copy-file-to-target)
+# There are some dependencies outside the build system that assume classes.jar
+# is available as javalib.jar so copy it there too.
+$(eval $(call copy-one-file,$(full_classes_jar),$(common_javalib.jar)))
 
-$(LOCAL_BUILT_MODULE) : $(full_classes_jack)
-	$(copy-file-to-target)
+ifdef LOCAL_JACK_ENABLED
+$(eval $(call copy-one-file,$(full_classes_jack),$(LOCAL_BUILT_MODULE)))
+else
+$(eval $(call copy-one-file,$(full_classes_jar),$(LOCAL_BUILT_MODULE)))
+endif
 
 else # !LOCAL_IS_STATIC_JAVA_LIBRARY
 
 $(common_javalib.jar): PRIVATE_DEX_FILE := $(built_dex)
+$(common_javalib.jar): PRIVATE_SOURCE_ARCHIVE := $(full_classes_pre_proguard_jar)
+$(common_javalib.jar): PRIVATE_DONT_DELETE_JAR_DIRS := $(LOCAL_DONT_DELETE_JAR_DIRS)
 $(common_javalib.jar) : $(built_dex) $(java_resource_sources) | $(ZIPTIME)
 	@echo "target Jar: $(PRIVATE_MODULE) ($@)"
+ifdef LOCAL_JACK_ENABLED
 	$(create-empty-package)
+else
+	$(call initialize-package-file,$(PRIVATE_SOURCE_ARCHIVE),$@)
+endif
 	$(add-dex-to-package)
+ifdef LOCAL_JACK_ENABLED
 	$(add-carried-jack-resources)
+endif
 	$(remove-timestamps-from-package)
 
 ifdef LOCAL_DEX_PREOPT
 ifneq ($(dexpreopt_boot_jar_module),) # boot jar
 # boot jar's rules are defined in dex_preopt.mk
 dexpreopted_boot_jar := $(DEXPREOPT_BOOT_JAR_DIR_FULL_PATH)/$(dexpreopt_boot_jar_module)_nodex.jar
-$(LOCAL_BUILT_MODULE) : $(dexpreopted_boot_jar)
-	$(call copy-file-to-target)
+$(eval $(call copy-one-file,$(dexpreopted_boot_jar),$(LOCAL_BUILT_MODULE)))
 
 # For libart boot jars, we don't have .odex files.
 else # ! boot jar
@@ -93,8 +112,7 @@
 	@echo "Dexpreopt Jar: $(PRIVATE_MODULE) ($@)"
 	$(call dexpreopt-one-file,$<,$@)
 
-$(LOCAL_BUILT_MODULE) : $(common_javalib.jar)
-	$(call copy-file-to-target)
+$(eval $(call copy-one-file,$(common_javalib.jar),$(LOCAL_BUILT_MODULE)))
 ifneq (nostripping,$(LOCAL_DEX_PREOPT))
 	$(call dexpreopt-remove-classes.dex,$@)
 endif
@@ -102,8 +120,7 @@
 endif # ! boot jar
 
 else # LOCAL_DEX_PREOPT
-$(LOCAL_BUILT_MODULE) : $(common_javalib.jar)
-	$(call copy-file-to-target)
+$(eval $(call copy-one-file,$(common_javalib.jar),$(LOCAL_BUILT_MODULE)))
 
 endif # LOCAL_DEX_PREOPT
 endif # !LOCAL_IS_STATIC_JAVA_LIBRARY
diff --git a/core/local_vndk.mk b/core/local_vndk.mk
index f7970f0..5ac5f26 100644
--- a/core/local_vndk.mk
+++ b/core/local_vndk.mk
@@ -3,7 +3,7 @@
 #If LOCAL_SDK_VERSION is set, that's a more restrictive set, so they don't need LOCAL_USE_VNDK
 ifndef LOCAL_IS_HOST_MODULE
 ifndef LOCAL_SDK_VERSION
-  ifneq (,$(filter true,$(LOCAL_PROPRIETARY_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE)))
+  ifneq (,$(filter true,$(LOCAL_VENDOR_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE)))
     LOCAL_USE_VNDK:=true
   else
     ifneq (,$(filter $(TARGET_OUT_VENDOR)%,$(LOCAL_MODULE_PATH) $(LOCAL_MODULE_PATH_32) $(LOCAL_MODULE_PATH_64)))
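
With the rename above, a module installed to /vendor opts into the VNDK rules
via LOCAL_VENDOR_MODULE rather than LOCAL_PROPRIETARY_MODULE. Illustrative
fragment:

    # Setting this makes local_vndk.mk imply LOCAL_USE_VNDK := true
    # (unless LOCAL_SDK_VERSION is set or this is a host module).
    LOCAL_VENDOR_MODULE := true
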
diff --git a/core/main.mk b/core/main.mk
index 391f239..a36251d 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -9,6 +9,23 @@
 SHELL := /bin/bash
 endif
 
+ifndef KATI
+
+host_prebuilts := linux-x86
+ifeq ($(shell uname),Darwin)
+host_prebuilts := darwin-x86
+endif
+
+.PHONY: run_soong_ui
+run_soong_ui:
+	+@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
+
+.PHONY: $(MAKECMDGOALS)
+$(sort $(MAKECMDGOALS)) : run_soong_ui
+	@#empty
+
+else # KATI
+
 # Absolute path of the present working directory.
 # This overrides the shell variable $PWD, which does not necessarily point to
 # the top of the source tree, for example when "make -C" is used in m/mm/mmm.
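
The ifndef KATI block above turns the top-level make invocation into a thin
launcher: every requested goal becomes phony and is forwarded to soong_ui.
Effectively, a plain "make droid" at the top of the tree now runs (host
prebuilt directory resolved by uname as shown):

    prebuilts/build-tools/linux-x86/bin/makeparallel --ninja \
        build/soong/soong_ui.bash --make-mode droid
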
@@ -27,23 +44,10 @@
 .PHONY: droid_targets
 droid_targets:
 
-# Targets that provide quick help on the build system.
-include $(BUILD_SYSTEM)/help.mk
-
 # Set up various standard variables based on configuration
 # and host information.
 include $(BUILD_SYSTEM)/config.mk
 
-ifndef KATI
-ifdef USE_NINJA
-$(warning USE_NINJA is ignored. Ninja is always used.)
-endif
-
-# Mark this is a ninja build.
-$(shell mkdir -p $(OUT_DIR) && touch $(OUT_DIR)/ninja_build)
-include build/core/ninja.mk
-else # KATI
-
 ifneq ($(filter $(dont_bother_goals), $(MAKECMDGOALS)),)
 dont_bother := true
 endif
@@ -70,6 +74,10 @@
 -include cts/build/config.mk
 # VTS-specific config.
 -include test/vts/tools/vts-tradefed/build/config.mk
+# device-tests-specific config.
+-include tools/tradefederation/build/suites/device-tests/config.mk
+# general-tests-specific config.
+-include tools/tradefederation/build/suites/general-tests/config.mk
 
 # This allows us to force a clean build - included after the config.mk
 # environment setup is done, but before we generate any dependencies.  This
@@ -230,6 +238,31 @@
 EMMA_INSTRUMENT := true
 endif
 
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_DEFAULT_PROPERTIES.
+ifneq ($(ADDITIONAL_DEFAULT_PROPERTIES),)
+$(error ADDITIONAL_DEFAULT_PROPERTIES must not be set before here: $(ADDITIONAL_DEFAULT_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_BUILD_PROPERTIES.
+ifneq ($(ADDITIONAL_BUILD_PROPERTIES),)
+$(error ADDITIONAL_BUILD_PROPERTIES must not be set before here: $(ADDITIONAL_BUILD_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Add the product-defined properties to the build properties.
+ifdef PRODUCT_SHIPPING_API_LEVEL
+ADDITIONAL_BUILD_PROPERTIES += \
+  ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
+endif
+ADDITIONAL_BUILD_PROPERTIES := \
+  $(ADDITIONAL_BUILD_PROPERTIES) \
+  $(PRODUCT_PROPERTY_OVERRIDES)
+
 # Bring in standard build system definitions.
 include $(BUILD_SYSTEM)/definitions.mk
 
@@ -294,6 +327,16 @@
   ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1
 endif
 
+# Boolean variable determining if Treble is fully enabled
+PRODUCT_FULL_TREBLE := false
+ifeq ($(PRODUCT_FULL_TREBLE_OVERRIDE),true)
+  PRODUCT_FULL_TREBLE := true
+else ifeq ($(PRODUCT_SHIPPING_API_LEVEL),)
+  #$(warning no product shipping level defined)
+else ifneq ($(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),26),)
+  PRODUCT_FULL_TREBLE := true
+endif
+
 # -----------------------------------------------------------------
 ###
 ### In this section we set up the things that are different
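
The PRODUCT_FULL_TREBLE decision above reads as a small truth table, assuming
math_gt_or_eq returns non-empty when its first argument is >= its second:

    # PRODUCT_FULL_TREBLE_OVERRIDE := true      -> PRODUCT_FULL_TREBLE := true
    # PRODUCT_SHIPPING_API_LEVEL unset          -> stays false
    # PRODUCT_SHIPPING_API_LEVEL := 26 or above -> PRODUCT_FULL_TREBLE := true
    # PRODUCT_SHIPPING_API_LEVEL := 25          -> stays false
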
@@ -447,8 +490,16 @@
 FULL_BUILD := true
 
 # Before we go and include all of the module makefiles, mark the PRODUCT_*
-# values readonly so that they won't be modified.
+# and ADDITIONAL*PROPERTIES values readonly so that they won't be modified.
 $(call readonly-product-vars)
+ADDITIONAL_DEFAULT_PROPERTIES := $(strip $(ADDITIONAL_DEFAULT_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_DEFAULT_PROPERTIES
+ADDITIONAL_BUILD_PROPERTIES := $(strip $(ADDITIONAL_BUILD_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_BUILD_PROPERTIES
+
+ifneq ($(PRODUCT_ENFORCE_RRO_TARGETS),)
+ENFORCE_RRO_SOURCES :=
+endif
 
 ifneq ($(ONE_SHOT_MAKEFILE),)
 # We've probably been invoked by the "mm" shell function
@@ -498,8 +549,10 @@
 #
 
 subdir_makefiles := $(SOONG_ANDROID_MK) $(call first-makefiles-under,$(TOP))
+subdir_makefiles_total := $(words $(subdir_makefiles))
+.KATI_READONLY := subdir_makefiles_total
 
-$(foreach mk,$(subdir_makefiles),$(info including $(mk) ...)$(eval include $(mk)))
+$(foreach mk,$(subdir_makefiles),$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] including $(mk) ...)$(eval include $(mk)))
 
 ifdef PDK_FUSION_PLATFORM_ZIP
 # Bring in the PDK platform.zip modules.
@@ -514,6 +567,13 @@
 # All module makefiles have been included at this point.
 # -------------------------------------------------------------------
 
+# -------------------------------------------------------------------
+# Enforce to generate all RRO packages for modules having resource
+# overlays.
+# -------------------------------------------------------------------
+ifneq ($(PRODUCT_ENFORCE_RRO_TARGETS),)
+$(call generate_all_enforce_rro_packages)
+endif
 
 # -------------------------------------------------------------------
 # Fix up CUSTOM_MODULES to refer to installed files rather than
@@ -751,7 +811,7 @@
 ifdef overridden_packages
 #  old_modules_to_install := $(modules_to_install)
   modules_to_install := \
-      $(filter-out $(foreach p,$(overridden_packages),$(p) %/$(p).apk %/$(p).odex), \
+      $(filter-out $(foreach p,$(overridden_packages),$(p) %/$(p).apk %/$(p).odex %/$(p).vdex), \
           $(modules_to_install))
 endif
 #$(error filtered out
@@ -1029,15 +1089,6 @@
 # Phony target to run all java compilations that use javac instead of jack.
 .PHONY: javac-check
 
-# To catch more build breakage, check build tests modules in eng and userdebug builds.
-ifneq ($(ANDROID_NO_TEST_CHECK),true)
-ifneq ($(TARGET_BUILD_PDK),true)
-ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
-droidcore : target-tests host-tests
-endif
-endif
-endif
-
 ifneq (,$(filter samplecode, $(MAKECMDGOALS)))
 .PHONY: samplecode
 sample_MODULES := $(sort $(call get-tagged-modules,samples))
diff --git a/core/ninja.mk b/core/ninja.mk
deleted file mode 100644
index af2ede0..0000000
--- a/core/ninja.mk
+++ /dev/null
@@ -1,103 +0,0 @@
-include $(BUILD_SYSTEM)/soong.mk
-
-# Modifier goals we don't need to pass to Ninja.
-.PHONY : $(NINJA_EXCLUDE_GOALS)
-
-define replace_space_and_slash
-$(subst /,_,$(subst $(space),_,$(sort $1)))
-endef
-
-KATI_NINJA_SUFFIX := -$(TARGET_PRODUCT)
-ifneq ($(KATI_GOALS),)
-KATI_NINJA_SUFFIX := $(KATI_NINJA_SUFFIX)-$(call replace_space_and_slash,$(KATI_GOALS))
-endif
-ifneq ($(ONE_SHOT_MAKEFILE),)
-KATI_NINJA_SUFFIX := $(KATI_NINJA_SUFFIX)-mmm-$(call replace_space_and_slash,$(ONE_SHOT_MAKEFILE))
-endif
-
-my_checksum_suffix :=
-my_ninja_suffix_too_long := $(filter 1, $(shell v='$(KATI_NINJA_SUFFIX)' && echo $$(($${$(pound)v} > 64))))
-ifneq ($(my_ninja_suffix_too_long),)
-# Replace the suffix with a checksum if it gets too long.
-my_checksum_suffix := $(KATI_NINJA_SUFFIX)
-KATI_NINJA_SUFFIX := -$(word 1, $(shell echo $(my_checksum_suffix) | $(MD5SUM)))
-endif
-
-KATI_BUILD_NINJA := $(OUT_DIR)/build$(KATI_NINJA_SUFFIX).ninja
-KATI_ENV_SH := $(OUT_DIR)/env$(KATI_NINJA_SUFFIX).sh
-
-# Write out a file mapping checksum to the real suffix.
-ifneq ($(my_checksum_suffix),)
-my_ninja_suffix_file := $(basename $(KATI_BUILD_NINJA)).suf
-$(shell mkdir -p $(dir $(my_ninja_suffix_file)) && \
-    echo $(my_checksum_suffix) > $(my_ninja_suffix_file))
-endif
-
-ifeq (,$(NINJA_STATUS))
-NINJA_STATUS := [%p %f/%t]$(space)
-endif
-
-NINJA_EXTRA_ARGS :=
-
-ifneq (,$(filter showcommands,$(ORIGINAL_MAKECMDGOALS)))
-NINJA_EXTRA_ARGS += "-v"
-endif
-
-# Make multiple rules to generate the same target an error instead of
-# proceeding with undefined behavior.
-NINJA_EXTRA_ARGS += -w dupbuild=err
-
-ifneq ($(filter-out false,$(USE_GOMA)),)
-KATI_MAKEPARALLEL := $(MAKEPARALLEL)
-# Ninja runs remote jobs (i.e., commands which contain gomacc) with
-# this parallelism. Note the parallelism of all other jobs is still
-# limited by the -j flag passed to GNU make.
-NINJA_REMOTE_NUM_JOBS ?= 500
-NINJA_EXTRA_ARGS += -j$(NINJA_REMOTE_NUM_JOBS)
-else
-NINJA_MAKEPARALLEL := $(MAKEPARALLEL) --ninja
-
-# We never want Kati to see MAKEFLAGS, as forcefully overriding variables is
-# terrible. The variables in MAKEFLAGS are still available in the environment,
-# so if part of the build wants input from the user, it should be explicitly
-# checking for an environment variable or using ?=
-#
-# makeparallel already clears MAKEFLAGS, so it's not necessary in the GOMA case
-KATI_MAKEPARALLEL := MAKEFLAGS=
-endif
-
-NINJA_ARGS += $(NINJA_EXTRA_ARGS)
-
-COMBINED_BUILD_NINJA := $(OUT_DIR)/combined$(KATI_NINJA_SUFFIX).ninja
-
-$(COMBINED_BUILD_NINJA): $(KATI_BUILD_NINJA) FORCE
-	$(hide) echo "builddir = $(OUT_DIR)" > $(COMBINED_BUILD_NINJA)
-	$(hide) echo "include $(KATI_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
-	$(hide) echo "include $(SOONG_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
-	$(hide) echo "build $(COMBINED_BUILD_NINJA): phony $(SOONG_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
-
-$(sort $(DEFAULT_GOAL) $(ANDROID_GOALS)) : ninja_wrapper
-	@#empty
-
-.PHONY: ninja_wrapper
-ninja_wrapper: $(COMBINED_BUILD_NINJA) $(MAKEPARALLEL)
-	@echo Starting build with ninja
-	+$(hide) export NINJA_STATUS="$(NINJA_STATUS)" && source $(KATI_ENV_SH) && exec $(NINJA_MAKEPARALLEL) $(NINJA) -d keepdepfile $(NINJA_GOALS) -C $(TOP) -f $(COMBINED_BUILD_NINJA) $(NINJA_ARGS)
-
-# Dummy Android.mk and CleanSpec.mk files so that kati won't recurse into the
-# out directory
-DUMMY_OUT_MKS := $(OUT_DIR)/Android.mk $(OUT_DIR)/CleanSpec.mk
-$(DUMMY_OUT_MKS):
-	@mkdir -p $(dir $@)
-	$(hide) echo '# This file prevents findleaves.py from traversing this directory further' >$@
-
-KATI_FIND_EMULATOR := --use_find_emulator
-ifeq ($(KATI_EMULATE_FIND),false)
-  KATI_FIND_EMULATOR :=
-endif
-$(KATI_BUILD_NINJA): $(CKATI) $(MAKEPARALLEL) $(DUMMY_OUT_MKS) run_soong FORCE
-	@echo Running kati to generate build$(KATI_NINJA_SUFFIX).ninja...
-	+$(hide) $(KATI_MAKEPARALLEL) $(CKATI) --ninja --ninja_dir=$(OUT_DIR) --ninja_suffix=$(KATI_NINJA_SUFFIX) --regen --ignore_optional_include=$(OUT_DIR)/%.P --detect_android_echo $(KATI_FIND_EMULATOR) -f build/core/main.mk $(KATI_GOALS) --gen_all_targets BUILDING_WITH_NINJA=true SOONG_ANDROID_MK=$(SOONG_ANDROID_MK) SOONG_MAKEVARS_MK=$(SOONG_MAKEVARS_MK)
-
-.PHONY: FORCE
-FORCE:
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 8a0ae6f..f850fff 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -14,6 +14,14 @@
   notice_file :=
 endif
 
+# Soong generates stub libraries that don't need NOTICE files
+ifdef LOCAL_NO_NOTICE_FILE
+  ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+    $(call pretty-error,LOCAL_NO_NOTICE_FILE should not be used by Android.mk files)
+  endif
+  notice_file :=
+endif
+
 ifeq ($(LOCAL_MODULE_CLASS),NOTICE_FILES)
 # If this is a NOTICE-only module, we don't include base_rule.mk,
 # so my_prefix is not set at this point.
@@ -36,7 +44,7 @@
   module_installed_filename := $(patsubst $(PRODUCT_OUT)/%,%,$(LOCAL_INSTALLED_MODULE))
 else
   # This module isn't installable
-  ifeq ($(LOCAL_MODULE_CLASS),STATIC_LIBRARIES)
+  ifneq ($(filter STATIC_LIBRARIES HEADER_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
     # Stick the static libraries with the dynamic libraries.
     # We can't use xxx_OUT_STATIC_LIBRARIES because it points into
     # device-obj or host-obj.
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 5dd021c..e512211 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -100,7 +100,34 @@
     $(wildcard $(foreach dir, $(DEVICE_PACKAGE_OVERLAYS), \
       $(addprefix $(dir)/, $(LOCAL_RESOURCE_DIR)))))
 
+enforce_rro_enabled :=
+ifneq ($(PRODUCT_ENFORCE_RRO_TARGETS),)
+  ifneq ($(package_resource_overlays),)
+    ifeq ($(PRODUCT_ENFORCE_RRO_TARGETS),*)
+      enforce_rro_enabled := true
+    else ifneq (,$(filter $(LOCAL_PACKAGE_NAME), $(PRODUCT_ENFORCE_RRO_TARGETS)))
+      enforce_rro_enabled := true
+    endif
+  endif
+
+  ifdef enforce_rro_enabled
+    ifeq (,$(LOCAL_MODULE_PATH))
+      ifeq (true,$(LOCAL_PROPRIETARY_MODULE))
+        enforce_rro_enabled :=
+      else ifeq (true,$(LOCAL_OEM_MODULE))
+        enforce_rro_enabled :=
+      else ifeq (true,$(LOCAL_ODM_MODULE))
+        enforce_rro_enabled :=
+      endif
+    else ifeq ($(filter $(TARGET_OUT)/%,$(LOCAL_MODULE_PATH)),)
+      enforce_rro_enabled :=
+    endif
+  endif
+endif
+
+ifndef enforce_rro_enabled
 LOCAL_RESOURCE_DIR := $(package_resource_overlays) $(LOCAL_RESOURCE_DIR)
+endif
 
 all_assets := $(strip \
     $(foreach dir, $(LOCAL_ASSET_DIR), \
@@ -209,10 +236,12 @@
 endif # !custom
 LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
 
+ifdef LOCAL_JACK_ENABLED
 ifndef LOCAL_JACK_PROGUARD_FLAGS
     LOCAL_JACK_PROGUARD_FLAGS := $(LOCAL_PROGUARD_FLAGS)
 endif
 LOCAL_JACK_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_JACK_PROGUARD_FLAGS)
+endif # LOCAL_JACK_ENABLED
 
 ifeq (true,$(EMMA_INSTRUMENT))
 ifndef LOCAL_EMMA_INSTRUMENT
@@ -225,22 +254,35 @@
 LOCAL_EMMA_INSTRUMENT := false
 endif # EMMA_INSTRUMENT is true
 
-ifneq ($(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),)
-# Only add jacocoagent if the package contains some java code
 ifeq (true,$(LOCAL_EMMA_INSTRUMENT))
 ifeq (true,$(EMMA_INSTRUMENT_STATIC))
+ifdef LOCAL_JACK_ENABLED
 # Jack supports coverage with Jacoco
+ifneq ($(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),)
+# Only add jacocoagent if the package contains some java code
 LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
-else  # ! EMMA_INSTRUMENT_STATIC
+endif # Contains java code
+else
+LOCAL_STATIC_JAVA_LIBRARIES += emma
+endif # LOCAL_JACK_ENABLED
+else
 ifdef LOCAL_SDK_VERSION
 ifdef TARGET_BUILD_APPS
 # In unbundled build, merge the coverage library into the apk.
+ifdef LOCAL_JACK_ENABLED
 # Jack supports coverage with Jacoco
+ifneq ($(LOCAL_SRC_FILES)$(LOCAL_STATIC_JAVA_LIBRARIES)$(LOCAL_SOURCE_FILES_ALL_GENERATED),)
+# Only add jacocoagent if the package contains some java code
 LOCAL_STATIC_JAVA_LIBRARIES += jacocoagent
 # Exclude jacoco classes from proguard
 LOCAL_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
 LOCAL_JACK_PROGUARD_FLAGS += -include $(BUILD_SYSTEM)/proguard.jacoco.flags
-else # ! TARGET_BUILD_APPS
+endif # Contains java code
+else  # ! LOCAL_JACK_ENABLED
+LOCAL_STATIC_JAVA_LIBRARIES += emma
+endif # ! LOCAL_JACK_ENABLED
+else  # ! TARGET_BUILD_APPS
+ifdef LOCAL_JACK_ENABLED
 # If building against the SDK in a full build, core.jar is not used,
 # so coverage classes are not present.
 # Jack needs jacoco on the classpath but we do not want it to be in
@@ -249,11 +291,16 @@
 # Note: we have nothing to do for proguard since jacoco will be
 # on the classpath only, thus not modified during the compilation.
 LOCAL_JAVA_LIBRARIES += jacocoagent
-endif # TARGET_BUILD_APPS
+else  # ! LOCAL_JACK_ENABLED
+# If building against the SDK in a full build, core.jar is not used,
+# so we have to use the prebuilt emma.jar to keep Proguard happy;
+# otherwise emma classes are included in core.jar.
+LOCAL_PROGUARD_FLAGS += -libraryjars $(EMMA_JAR)
+endif # ! LOCAL_JACK_ENABLED
+endif # ! TARGET_BUILD_APPS
 endif # LOCAL_SDK_VERSION
-endif # ! EMMA_INSTRUMENT_STATIC
+endif # EMMA_INSTRUMENT_STATIC
 endif # LOCAL_EMMA_INSTRUMENT
-endif # Contains java code
 
 rs_compatibility_jni_libs :=
 
@@ -288,13 +335,6 @@
 
 $(LOCAL_INTERMEDIATE_TARGETS): \
     PRIVATE_ANDROID_MANIFEST := $(full_android_manifest)
-ifneq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
-$(LOCAL_INTERMEDIATE_TARGETS): \
-    PRIVATE_DEFAULT_APP_TARGET_SDK := $(LOCAL_SDK_VERSION)
-else
-$(LOCAL_INTERMEDIATE_TARGETS): \
-    PRIVATE_DEFAULT_APP_TARGET_SDK := $(DEFAULT_APP_TARGET_SDK)
-endif
 
 ifeq ($(LOCAL_DATA_BINDING),true)
 data_binding_stamp := $(data_binding_intermediates)/data-binding.stamp
@@ -374,13 +414,30 @@
 
 endif  # LOCAL_USE_AAPT2
 
-# Make sure to generate R.java before compiling.
 # Other modules should depend on the BUILT module if
 # they want to use this module's R.java file.
-$(LOCAL_BUILT_MODULE) \
-$(full_classes_compiled_jar) \
-$(built_dex_intermediate) $(noshrob_classes_jack) $(full_classes_jack) $(jack_check_timestamp) \
-  :  $(R_file_stamp)
+$(LOCAL_BUILT_MODULE): $(R_file_stamp)
+
+ifdef LOCAL_JACK_ENABLED
+ifneq ($(built_dex_intermediate),)
+$(built_dex_intermediate): $(R_file_stamp)
+endif
+ifneq ($(noshrob_classes_jack),)
+$(noshrob_classes_jack): $(R_file_stamp)
+endif
+ifneq ($(full_classes_jack),)
+$(full_classes_jack): $(R_file_stamp)
+$(jack_check_timestamp): $(R_file_stamp)
+endif
+endif # LOCAL_JACK_ENABLED
+
+ifneq ($(full_classes_jar),)
+# If full_classes_jar is non-empty, we're building sources.
+# If we're building sources, the initial javac step (which
+# produces full_classes_compiled_jar) needs to ensure the
+# R.java and Manifest.java files have been generated first.
+$(full_classes_compiled_jar): $(R_file_stamp)
+endif
 
 endif  # need_compile_res
 
@@ -426,9 +483,12 @@
 
 ifneq ($(full_classes_jar),)
 $(LOCAL_BUILT_MODULE): PRIVATE_DEX_FILE := $(built_dex)
+# Use the jarjar-processed archive as the initial package file.
+$(LOCAL_BUILT_MODULE): PRIVATE_SOURCE_ARCHIVE := $(full_classes_pre_proguard_jar)
 $(LOCAL_BUILT_MODULE): $(built_dex)
 else
 $(LOCAL_BUILT_MODULE): PRIVATE_DEX_FILE :=
+$(LOCAL_BUILT_MODULE): PRIVATE_SOURCE_ARCHIVE :=
 endif # full_classes_jar
 
 include $(BUILD_SYSTEM)/install_jni_libs.mk
@@ -487,6 +547,7 @@
     $(my_res_package) $(LOCAL_BUILT_MODULE): PRIVATE_PRODUCT_AAPT_PREF_CONFIG := $(PRODUCT_AAPT_PREF_CONFIG)
 endif
 endif
+$(LOCAL_BUILT_MODULE): PRIVATE_DONT_DELETE_JAR_DIRS := $(LOCAL_DONT_DELETE_JAR_DIRS)
 $(LOCAL_BUILT_MODULE) : $(jni_shared_libraries)
 ifdef LOCAL_USE_AAPT2
 $(LOCAL_BUILT_MODULE): PRIVATE_RES_PACKAGE := $(my_res_package)
@@ -498,7 +559,13 @@
 ifdef LOCAL_USE_AAPT2
 	$(call copy-file-to-new-target)
 else  # ! LOCAL_USE_AAPT2
+ifdef LOCAL_JACK_ENABLED
 	$(create-empty-package)
+else
+	$(if $(PRIVATE_SOURCE_ARCHIVE),\
+	  $(call initialize-package-file,$(PRIVATE_SOURCE_ARCHIVE),$@),\
+	  $(create-empty-package))
+endif
 	$(add-assets-to-package)
 endif  # LOCAL_USE_AAPT2
 ifneq ($(jni_shared_libraries),)
@@ -510,7 +577,9 @@
 else  # full_classes_jar
 	$(add-dex-to-package)
 endif  # full_classes_jar
+ifdef LOCAL_JACK_ENABLED
 	$(add-carried-jack-resources)
+endif
 ifdef LOCAL_DEX_PREOPT
 ifneq ($(BUILD_PLATFORM_ZIP),)
 	@# Keep a copy of apk with classes.dex unstripped
@@ -582,15 +651,14 @@
 $(my_all_targets): $(installed_apk_splits)
 
 ifdef LOCAL_COMPATIBILITY_SUITE
-cts_testcase_file := $(foreach s,$(my_split_suffixes),$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_$(s).apk)
-$(cts_testcase_file) : $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk | $(ACP)
-	$(copy-file-to-new-target)
 
-COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
-  $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(cts_testcase_file)
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) := $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(foreach s,$(my_split_suffixes),\
+      $(built_module_path)/package_$(s).apk:$(dir)/$(LOCAL_MODULE)_$(s).apk))))
 
-$(my_all_targets) : $(cts_testcase_file)
+$(call create-suite-dependencies)
+
 endif # LOCAL_COMPATIBILITY_SUITE
 endif # LOCAL_PACKAGE_SPLITS
 
@@ -604,3 +672,27 @@
 
 # Reset internal variables.
 all_res_assets :=
+
+ifdef enforce_rro_enabled
+  ifdef LOCAL_EXPORT_PACKAGE_RESOURCES
+    enforce_rro_use_res_lib := true
+  else
+    enforce_rro_use_res_lib := false
+  endif
+
+  ifdef LOCAL_MANIFEST_PACKAGE_NAME
+    enforce_rro_is_manifest_package_name := true
+    enforce_rro_manifest_package_info := $(LOCAL_MANIFEST_PACKAGE_NAME)
+  else
+    enforce_rro_is_manifest_package_name := false
+    enforce_rro_manifest_package_info := $(full_android_manifest)
+  endif
+
+$(call append_enforce_rro_sources, \
+    $(my_register_name), \
+    $(enforce_rro_is_manifest_package_name), \
+    $(enforce_rro_manifest_package_info), \
+    $(enforce_rro_use_res_lib), \
+    $(package_resource_overlays) \
+    )
+endif  # enforce_rro_enabled
diff --git a/core/pdk_fusion_modules.mk b/core/pdk_fusion_modules.mk
index 49b30dc..0c03f37 100644
--- a/core/pdk_fusion_modules.mk
+++ b/core/pdk_fusion_modules.mk
@@ -23,7 +23,7 @@
 LOCAL_BUILT_MODULE_STEM:=$(7)
 LOCAL_MODULE_SUFFIX:=$(suffix $(7))
 LOCAL_PRIVILEGED_MODULE:=$(8)
-LOCAL_PROPRIETARY_MODULE:=$(9)
+LOCAL_VENDOR_MODULE:=$(9)
 LOCAL_MODULE_TARGET_ARCH:=$(10)
 LOCAL_REPLACE_PREBUILT_APK_INSTALLED:=$(11)
 LOCAL_CERTIFICATE:=PRESIGNED
@@ -72,7 +72,7 @@
     $(PDK.DEXPREOPT.$(a).DEX_PREOPT_FLAGS),\
     package.apk,\
     $(PDK.DEXPREOPT.$(a).PRIVILEGED_MODULE),\
-    $(PDK.DEXPREOPT.$(a).PROPRIETARY_MODULE),\
+    $(PDK.DEXPREOPT.$(a).VENDOR_MODULE),\
     $(PDK.DEXPREOPT.$(a).TARGET_ARCH),\
     $(_pdk_fusion_intermediates)/$(PDK.DEXPREOPT.$(a).STRIPPED_SRC),\
     )))
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index 10512ff..48ec340 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -79,9 +79,13 @@
 endif
 
 ifeq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
-ifeq ($(prebuilt_module_is_a_library),true)
-SOONG_ALREADY_CONV := $(SOONG_ALREADY_CONV) $(LOCAL_MODULE)
-endif
+  ifeq ($(prebuilt_module_is_a_library),true)
+    SOONG_ALREADY_CONV := $(SOONG_ALREADY_CONV) $(LOCAL_MODULE)
+  endif
+
+  ifdef LOCAL_USE_VNDK
+    SPLIT_VENDOR.$(LOCAL_MODULE_CLASS).$(patsubst %.vendor,%,$(LOCAL_MODULE)) := 1
+  endif
 endif
 
 # Don't install static libraries by default.
@@ -126,20 +130,32 @@
 
 ifdef prebuilt_module_is_a_library
 export_includes := $(intermediates)/export_includes
-$(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(LOCAL_EXPORT_C_INCLUDE_DIRS)
+export_cflags := $(foreach d,$(LOCAL_EXPORT_C_INCLUDE_DIRS),-I $(d))
+# Soong exports cflags instead of include dirs, so that -isystem can be included.
+ifeq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+export_cflags += $(LOCAL_EXPORT_CFLAGS)
+else ifdef LOCAL_EXPORT_CFLAGS
+$(call pretty-error,LOCAL_EXPORT_CFLAGS can only be used by Soong, use LOCAL_EXPORT_C_INCLUDE_DIRS instead)
+endif
+$(export_includes): PRIVATE_EXPORT_CFLAGS := $(export_cflags)
 $(export_includes): $(LOCAL_EXPORT_C_INCLUDE_DEPS)
 	@echo Export includes file: $< -- $@
 	$(hide) mkdir -p $(dir $@) && rm -f $@
-ifdef LOCAL_EXPORT_C_INCLUDE_DIRS
-	$(hide) for d in $(PRIVATE_EXPORT_C_INCLUDE_DIRS); do \
-	        echo "-I $$d" >> $@; \
-	        done
+ifdef export_cflags
+	$(hide) echo "$(PRIVATE_EXPORT_CFLAGS)" >$@
 else
 	$(hide) touch $@
 endif
+export_cflags :=
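An illustration of the resulting export_includes contents, assuming only the two variables above feed it:

# Illustration: a prebuilt with LOCAL_EXPORT_C_INCLUDE_DIRS := external/foo/include
# now gets a single cflags line in its export_includes file:
#   -I external/foo/include
# and a Soong prebuilt may add flags such as -isystem <dir> via LOCAL_EXPORT_CFLAGS.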
 
 my_link_type := $(intermediates)/link_type
-$(my_link_type): PRIVATE_LINK_TYPE := native:$(if $(LOCAL_SDK_VERSION),ndk,platform)
+ifdef LOCAL_SDK_VERSION
+$(my_link_type): PRIVATE_LINK_TYPE := native:ndk
+else ifdef LOCAL_USE_VNDK
+$(my_link_type): PRIVATE_LINK_TYPE := native:vendor
+else
+$(my_link_type): PRIVATE_LINK_TYPE := native:platform
+endif
 $(eval $(call link-type-partitions,$(my_link_type)))
 $(my_link_type):
 	@echo Check module type: $@
@@ -156,6 +172,14 @@
 my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
 # Extra shared libraries introduced by LOCAL_CXX_STL.
 include $(BUILD_SYSTEM)/cxx_stl_setup.mk
+ifdef LOCAL_USE_VNDK
+  ifeq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
+    my_shared_libraries := $(addsuffix .vendor,$(my_shared_libraries))
+  else
+    my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+      $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+  endif
+endif
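An illustration of the .vendor suffix rewrite (libhypothetical is a made-up name):

# Illustration: with LOCAL_USE_VNDK set and
#   LOCAL_SHARED_LIBRARIES := libcutils libhypothetical
# a Soong prebuilt depends on libcutils.vendor libhypothetical.vendor, while
# an Android.mk prebuilt rewrites only the libraries registered above via
# SPLIT_VENDOR.SHARED_LIBRARIES.<name>.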
 $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
   $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
 
@@ -174,6 +198,19 @@
 endif  # my_strip_module not true
 
 ifeq ($(NATIVE_COVERAGE),true)
+ifneq (,$(strip $(LOCAL_PREBUILT_COVERAGE_ARCHIVE)))
+  $(eval $(call copy-one-file,$(LOCAL_PREBUILT_COVERAGE_ARCHIVE),$(intermediates)/$(LOCAL_MODULE).gcnodir))
+  ifneq ($(LOCAL_UNINSTALLABLE_MODULE),true)
+    ifdef LOCAL_IS_HOST_MODULE
+      my_coverage_path := $($(my_prefix)OUT_COVERAGE)/$(patsubst $($(my_prefix)OUT)/%,%,$(my_module_path))
+    else
+      my_coverage_path := $(TARGET_OUT_COVERAGE)/$(patsubst $(PRODUCT_OUT)/%,%,$(my_module_path))
+    endif
+    my_coverage_path := $(my_coverage_path)/$(basename $(my_installed_module_stem)).gcnodir
+    $(eval $(call copy-one-file,$(LOCAL_PREBUILT_COVERAGE_ARCHIVE),$(my_coverage_path)))
+    $(LOCAL_BUILT_MODULE): $(my_coverage_path)
+  endif
+else
 # Coverage information is needed when static lib is a dependency of another
 # coverage-enabled module.
 ifeq (STATIC_LIBRARIES, $(LOCAL_MODULE_CLASS))
@@ -186,6 +223,7 @@
 	$(transform-o-to-static-lib)
 endif
 endif
+endif
 
 ifeq ($(LOCAL_MODULE_CLASS),APPS)
 PACKAGES.$(LOCAL_MODULE).OVERRIDES := $(strip $(LOCAL_OVERRIDES_PACKAGES))
@@ -413,13 +451,27 @@
 
 ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
 my_src_jar := $(my_prebuilt_src_file)
-ifeq ($(LOCAL_IS_HOST_MODULE),)
+
+ifdef LOCAL_IS_HOST_MODULE
+# For host java libraries, deps should be in the common dir, so we make a
+# copy in the common dir.
+common_classes_jar := $(intermediates.COMMON)/classes.jar
+
+$(common_classes_jar): PRIVATE_MODULE := $(LOCAL_MODULE)
+$(common_classes_jar): PRIVATE_PREFIX := $(my_prefix)
+
+$(common_classes_jar) : $(my_src_jar)
+	$(transform-prebuilt-to-target)
+
+else # !LOCAL_IS_HOST_MODULE
 # for target java libraries, the LOCAL_BUILT_MODULE is in a product-specific dir,
 # while the deps should be in the common dir, so we make a copy in the common dir.
 common_classes_jar := $(intermediates.COMMON)/classes.jar
+common_classes_pre_proguard_jar := $(intermediates.COMMON)/classes-pre-proguard.jar
 common_javalib_jar := $(intermediates.COMMON)/javalib.jar
 
-$(common_classes_jar) $(common_javalib_jar): PRIVATE_MODULE := $(LOCAL_MODULE)
+$(common_classes_jar) $(common_classes_pre_proguard_jar) $(common_javalib_jar): PRIVATE_MODULE := $(LOCAL_MODULE)
+$(common_classes_jar) $(common_classes_pre_proguard_jar) $(common_javalib_jar): PRIVATE_PREFIX := $(my_prefix)
 
 my_link_type := $(intermediates.COMMON)/link_type
 ifeq ($(LOCAL_SDK_VERSION),system_current)
@@ -459,6 +511,9 @@
 $(common_classes_jar) : $(my_src_jar)
 	$(transform-prebuilt-to-target)
 
+$(common_classes_pre_proguard_jar) : $(my_src_jar)
+	$(transform-prebuilt-to-target)
+
 $(common_javalib_jar) : $(common_classes_jar)
 	$(transform-prebuilt-to-target)
 
@@ -502,6 +557,7 @@
 
 ifneq ($(prebuilt_module_is_dex_javalib),true)
 
+ifdef LOCAL_JACK_ENABLED
 # We may be building classes.jack from a host jar for host dalvik Java library.
 $(intermediates.COMMON)/classes.jack : PRIVATE_JACK_FLAGS:=$(LOCAL_JACK_FLAGS)
 $(intermediates.COMMON)/classes.jack : PRIVATE_JACK_MIN_SDK_VERSION := $(if $(strip $(LOCAL_MIN_SDK_VERSION)),$(LOCAL_MIN_SDK_VERSION),1)
@@ -516,7 +572,7 @@
 # always rebuilt.
 $(intermediates.COMMON)/classes.dex.toc: $(intermediates.COMMON)/classes.jack
 	touch $@
-
+endif # LOCAL_JACK_ENABLED
 endif # ! prebuilt_module_is_dex_javalib
 endif # JAVA_LIBRARIES
 
diff --git a/core/product.mk b/core/product.mk
index 93d42fd..3d20168 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -94,6 +94,7 @@
     PRODUCT_EXTRA_RECOVERY_KEYS \
     PRODUCT_PACKAGE_OVERLAYS \
     DEVICE_PACKAGE_OVERLAYS \
+    PRODUCT_ENFORCE_RRO_TARGETS \
     PRODUCT_SDK_ATREE_FILES \
     PRODUCT_SDK_ADDON_NAME \
     PRODUCT_SDK_ADDON_COPY_FILES \
@@ -128,6 +129,7 @@
     VENDOR_EXCEPTION_MODULES \
     VENDOR_EXCEPTION_PATHS \
     PRODUCT_ART_USE_READ_BARRIER \
+    PRODUCT_IOT \
 
 
 
@@ -289,7 +291,8 @@
 _product_stash_var_list += \
 	DEFAULT_SYSTEM_DEV_CERTIFICATE \
 	WITH_DEXPREOPT \
-	WITH_DEXPREOPT_BOOT_IMG_ONLY
+	WITH_DEXPREOPT_BOOT_IMG_ONLY \
+	WITH_DEXPREOPT_APP_IMAGE
 
 #
 # Mark the variables in _product_stash_var_list as readonly
diff --git a/core/product_config.mk b/core/product_config.mk
index 295e263..8246c14 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -204,7 +204,7 @@
     $(eval _cpm_word2 := $(word 2,$(_cpm_words)))\
     $(if $(_cpm_word2),\
         $(eval all_product_makefiles += $(_cpm_word2))\
-        $(eval all_named_products += $(_cpm_word2))\
+        $(eval all_named_products += $(_cpm_word1))\
         $(if $(filter $(TARGET_PRODUCT),$(_cpm_word1)),\
             $(eval current_product_makefile += $(_cpm_word2)),),\
         $(eval all_product_makefiles += $(f))\
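For clarity on the one-word fix above (the entry below is hypothetical):

# Illustration: for a PRODUCT_MAKEFILES entry of the form name:path, e.g.
#   aosp_foo:device/generic/foo/aosp_foo.mk
# _cpm_word1 is the product name and _cpm_word2 the makefile path, so
# all_named_products must accumulate _cpm_word1; the old line collected paths.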
@@ -354,18 +354,16 @@
 # whitespace characters on either side of the '='.
 PRODUCT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_PROPERTY_OVERRIDES
 
 PRODUCT_SHIPPING_API_LEVEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SHIPPING_API_LEVEL))
-ifdef PRODUCT_SHIPPING_API_LEVEL
-ADDITIONAL_BUILD_PROPERTIES += \
-    ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
-endif
 
 # A list of property assignments, like "key = value", with zero or more
 # whitespace characters on either side of the '='.
 # used for adding properties to default.prop
 PRODUCT_DEFAULT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_DEFAULT_PROPERTY_OVERRIDES
 
 # Should we use the default resources or add any product specific overlays
 PRODUCT_PACKAGE_OVERLAYS := \
@@ -377,11 +375,6 @@
 PRODUCT_VENDOR_KERNEL_HEADERS := \
     $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_KERNEL_HEADERS)
 
-# Add the product-defined properties to the build properties.
-ADDITIONAL_BUILD_PROPERTIES := \
-    $(ADDITIONAL_BUILD_PROPERTIES) \
-    $(PRODUCT_PROPERTY_OVERRIDES)
-
 # The OTA key(s) specified by the product config, if any.  The names
 # of these keys are stored in the target-files zip so that post-build
 # signing tools can substitute them for the test key embedded by
@@ -427,3 +420,11 @@
 # Make this art variable visible to soong_config.mk.
 PRODUCT_ART_USE_READ_BARRIER := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ART_USE_READ_BARRIER))
+
+# Whether the product is an Android Things variant.
+PRODUCT_IOT := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_IOT))
+
+# Packages for which RRO generation is enforced.
+PRODUCT_ENFORCE_RRO_TARGETS := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ENFORCE_RRO_TARGETS))
diff --git a/core/setup_one_odex.mk b/core/setup_one_odex.mk
index 37aeb60..f0ffe81 100644
--- a/core/setup_one_odex.mk
+++ b/core/setup_one_odex.mk
@@ -36,6 +36,24 @@
 
 my_built_vdex := $(patsubst %.odex,%.vdex,$(my_built_odex))
 my_installed_vdex := $(patsubst %.odex,%.vdex,$(my_installed_odex))
+my_installed_art := $(patsubst %.odex,%.art,$(my_installed_odex))
+
+ifndef LOCAL_DEX_PREOPT_APP_IMAGE
+# Local override not defined, use the global one.
+ifeq (true,$(WITH_DEXPREOPT_APP_IMAGE))
+  LOCAL_DEX_PREOPT_APP_IMAGE := true
+endif
+endif
+
+ifeq (true,$(LOCAL_DEX_PREOPT_APP_IMAGE))
+my_built_art := $(patsubst %.odex,%.art,$(my_built_odex))
+$(my_built_odex): PRIVATE_ART_FILE_PREOPT_FLAGS := --app-image-file=$(my_built_art) \
+    --image-format=lz4
+$(eval $(call copy-one-file,$(my_built_art),$(my_installed_art)))
+built_art += $(my_built_art)
+installed_art += $(my_installed_art)
+built_installed_art += $(my_built_art):$(my_installed_art)
+endif
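A hedged usage sketch for the new app-image knobs:

# Sketch: enable app images for all preopted packages, e.g. in a buildspec:
WITH_DEXPREOPT_APP_IMAGE := true
# or for a single package, in its Android.mk:
LOCAL_DEX_PREOPT_APP_IMAGE := true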
 
 $(eval $(call copy-one-file,$(my_built_odex),$(my_installed_odex)))
 $(eval $(call copy-one-file,$(my_built_vdex),$(my_installed_vdex)))
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index 6b914c9..687536b 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -48,11 +48,14 @@
 ifeq ($(LOCAL_NO_CRT),true)
 my_target_crtbegin_so_o :=
 my_target_crtend_so_o :=
+else ifdef LOCAL_USE_VNDK
+my_target_crtbegin_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_so.vendor.o
+my_target_crtend_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_so.vendor.o
 else
 my_target_crtbegin_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtbegin_so.o
 my_target_crtend_so_o := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/crtend_so.o
 endif
-ifneq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
+ifneq ($(LOCAL_SDK_VERSION),)
 my_target_crtbegin_so_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_so.o)
 my_target_crtend_so_o := $(wildcard $(my_ndk_sysroot_lib)/crtend_so.o)
 endif
@@ -96,6 +99,7 @@
 
 $(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_ALL_OBJECTS := $(strip $(LOCAL_GCNO_FILES))
 $(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_ALL_WHOLE_STATIC_LIBRARIES := $(strip $(built_whole_gcno_libraries)) $(strip $(built_static_gcno_libraries))
+$(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_INTERMEDIATES_DIR := $(intermediates)
 $(intermediates)/$(GCNO_ARCHIVE) : $(LOCAL_GCNO_FILES) $(built_whole_gcno_libraries) $(built_static_gcno_libraries)
 	$(transform-o-to-static-lib)
 
diff --git a/core/soong.mk b/core/soong.mk
deleted file mode 100644
index 4a74f2e..0000000
--- a/core/soong.mk
+++ /dev/null
@@ -1,29 +0,0 @@
-# We need to rebootstrap soong if SOONG_OUT_DIR or the reverse path from
-# SOONG_OUT_DIR to TOP changes
-SOONG_NEEDS_REBOOTSTRAP :=
-ifneq ($(wildcard $(SOONG_BOOTSTRAP)),)
-  ifneq ($(SOONG_OUT_DIR),$(strip $(shell source $(SOONG_BOOTSTRAP); echo $$BUILDDIR)))
-    SOONG_NEEDS_REBOOTSTRAP := FORCE
-    $(warning soong_out_dir changed)
-  endif
-  ifneq ($(strip $(shell build/soong/scripts/reverse_path.py $(SOONG_OUT_DIR))),$(strip $(shell source $(SOONG_BOOTSTRAP); echo $$SRCDIR_FROM_BUILDDIR)))
-    SOONG_NEEDS_REBOOTSTRAP := FORCE
-    $(warning reverse path changed)
-  endif
-endif
-
-# Bootstrap soong.
-$(SOONG_BOOTSTRAP): bootstrap.bash $(SOONG_NEEDS_REBOOTSTRAP)
-	$(hide) mkdir -p $(dir $@)
-	$(hide) BUILDDIR=$(SOONG_OUT_DIR) ./bootstrap.bash
-
-# Tell soong that it is embedded in make
-$(SOONG_IN_MAKE):
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-# Run Soong, this implicitly create an Android.mk listing all soong outputs as
-# prebuilts.
-.PHONY: run_soong
-run_soong: $(SOONG_BOOTSTRAP) $(SOONG_VARIABLES) $(SOONG_IN_MAKE) FORCE
-	$(hide) SKIP_NINJA=true $(SOONG)
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 7b1be47..cba9d84 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -13,6 +13,17 @@
 endif
 endif
 
+# Converts a list to a JSON list.
+# $1: List separator.
+# $2: List.
+_json_list = [$(if $(2),"$(subst $(1),"$(comma)",$(2))")]
+
+# Converts a space-separated list to a JSON list.
+json_list = $(call _json_list,$(space),$(1))
+
+# Converts a comma-separated list to a JSON list.
+csv_to_json_list = $(call _json_list,$(comma),$(1))
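A quick illustration of the helpers above:

# Illustration: given SANITIZE_TARGET := address cfi,
#   $(call json_list,$(SANITIZE_TARGET))  ->  ["address","cfi"]
#   $(call json_list,)                    ->  []
# csv_to_json_list does the same for comma-separated input, e.g. O,P -> ["O","P"].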
+
 # Create soong.variables with copies of makefile settings.  Runs every build,
 # but only updates soong.variables if it changes
 SOONG_VARIABLES_TMP := $(SOONG_VARIABLES).$$$$
@@ -23,16 +34,15 @@
 	echo '    "Make_suffix": "-$(TARGET_PRODUCT)",'; \
 	echo ''; \
 	echo '    "Platform_sdk_version": $(PLATFORM_SDK_VERSION),'; \
+	echo '    "Platform_version_all_codenames": $(call csv_to_json_list,$(PLATFORM_VERSION_ALL_CODENAMES)),'; \
 	echo '    "Unbundled_build": $(if $(TARGET_BUILD_APPS),true,false),'; \
 	echo '    "Brillo": $(if $(BRILLO),true,false),'; \
 	echo '    "Malloc_not_svelte": $(if $(filter true,$(MALLOC_SVELTE)),false,true),'; \
 	echo '    "Allow_missing_dependencies": $(if $(TARGET_BUILD_APPS)$(filter true,$(SOONG_ALLOW_MISSING_DEPENDENCIES)),true,false),'; \
-	echo '    "SanitizeHost": [$(if $(SANITIZE_HOST),"$(subst $(space),"$(comma)",$(SANITIZE_HOST))")],'; \
-	echo '    "SanitizeDevice": [$(if $(SANITIZE_TARGET),"$(subst $(space),"$(comma)",$(SANITIZE_TARGET))")],'; \
-	echo '    "SanitizeDeviceArch": [$(if $(SANITIZE_TARGET_ARCH),"$(subst $(space),"$(comma)",$(SANITIZE_TARGET_ARCH))")],'; \
+	echo '    "SanitizeHost": $(call json_list,$(SANITIZE_HOST)),'; \
+	echo '    "SanitizeDevice": $(call json_list,$(SANITIZE_TARGET)),'; \
+	echo '    "SanitizeDeviceArch": $(call json_list,$(SANITIZE_TARGET_ARCH)),'; \
 	echo '    "HostStaticBinaries": $(if $(strip $(BUILD_HOST_static)),true,false),'; \
-	echo '    "Cpusets": $(if $(strip $(ENABLE_CPUSETS)),true,false),'; \
-	echo '    "Schedboost": $(if $(strip $(ENABLE_SCHEDBOOST)),true,false),'; \
 	echo '    "Binder32bit": $(if $(BINDER32BIT),true,false),'; \
 	echo '    "DevicePrefer32BitExecutables": $(if $(filter true,$(TARGET_PREFER_32_BIT_EXECUTABLES)),true,false),'; \
 	echo '    "UseGoma": $(if $(filter-out false,$(USE_GOMA)),true,false),'; \
@@ -43,6 +53,10 @@
 	echo '    "ClangTidy": $(if $(filter 1 true,$(WITH_TIDY)),true,false),'; \
 	echo '    "TidyChecks": "$(WITH_TIDY_CHECKS)",'; \
 	echo ''; \
+	echo '    "NativeCoverage": $(if $(filter true,$(NATIVE_COVERAGE)),true,false),'; \
+	echo '    "CoveragePaths": $(call csv_to_json_list,$(COVERAGE_PATHS)),'; \
+	echo '    "CoverageExcludePaths": $(call csv_to_json_list,$(COVERAGE_EXCLUDE_PATHS)),'; \
+	echo ''; \
 	echo '    "DeviceName": "$(TARGET_DEVICE)",'; \
 	echo '    "DeviceArch": "$(TARGET_ARCH)",'; \
 	echo '    "DeviceArchVariant": "$(TARGET_ARCH_VARIANT)",'; \
@@ -63,13 +77,11 @@
 	echo '    "CrossHostArch": "$(HOST_CROSS_ARCH)",'; \
 	echo '    "CrossHostSecondaryArch": "$(HOST_CROSS_2ND_ARCH)",'; \
 	echo '    "Safestack": $(if $(filter true,$(USE_SAFESTACK)),true,false),'; \
-	echo '    "EnableCFI": $(if $(filter true,$(ENABLE_CFI)),true,false),'; \
+	echo '    "EnableCFI": $(if $(filter false,$(ENABLE_CFI)),false,true),'; \
 	echo ''; \
-	echo '    "ArtUseReadBarrier": $(if $(filter true,$(PRODUCT_ART_USE_READ_BARRIER)),true,false),'; \
+	echo '    "ArtUseReadBarrier": $(if $(filter false,$(PRODUCT_ART_USE_READ_BARRIER)),false,true),'; \
 	echo ''; \
-	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)",'; \
-	echo '    "BtHcilpIncluded": "$(BOARD_BLUETOOTH_BDROID_HCILP_INCLUDED)",'; \
-	echo '    "BtHciUseMct": $(if $(filter true,$(BLUETOOTH_HCI_USE_MCT)),true,false)'; \
+	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)"'; \
 	echo '}') > $(SOONG_VARIABLES_TMP); \
 	if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
diff --git a/core/static_java_library.mk b/core/static_java_library.mk
index 258c5f8..bf72a82 100644
--- a/core/static_java_library.mk
+++ b/core/static_java_library.mk
@@ -75,10 +75,12 @@
 
 LOCAL_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_PROGUARD_FLAGS)
 
+ifdef LOCAL_JACK_ENABLED
 ifndef LOCAL_JACK_PROGUARD_FLAGS
     LOCAL_JACK_PROGUARD_FLAGS := $(LOCAL_PROGUARD_FLAGS)
 endif
 LOCAL_JACK_PROGUARD_FLAGS := $(addprefix -include ,$(proguard_options_file)) $(LOCAL_JACK_PROGUARD_FLAGS)
+endif # LOCAL_JACK_ENABLED
 
 R_file_stamp := $(intermediates.COMMON)/src/R.stamp
 LOCAL_INTERMEDIATE_TARGETS += $(R_file_stamp)
@@ -139,12 +141,6 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RESOURCE_DIR := $(LOCAL_RESOURCE_DIR)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_AAPT_INCLUDES := $(framework_res_package_export)
 
-ifneq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(LOCAL_SDK_VERSION)
-else
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEFAULT_APP_TARGET_SDK := $(DEFAULT_APP_TARGET_SDK)
-endif
-
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ASSET_DIR :=
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_PROGUARD_OPTIONS_FILE := $(proguard_options_file)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_MANIFEST_PACKAGE_NAME :=
@@ -164,10 +160,13 @@
 	$(hide) find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name R.java | xargs cat > $@
 endif  # LOCAL_USE_AAPT2
 
-$(LOCAL_BUILT_MODULE) \
-$(full_classes_compiled_jar) \
-$(noshrob_classes_jack) $(full_classes_jack) $(jack_check_timestamp) \
-  : $(R_file_stamp)
+$(LOCAL_BUILT_MODULE): $(R_file_stamp)
+ifdef LOCAL_JACK_ENABLED
+$(noshrob_classes_jack): $(R_file_stamp)
+$(full_classes_jack): $(R_file_stamp)
+$(jack_check_timestamp): $(R_file_stamp)
+endif # LOCAL_JACK_ENABLED
+$(full_classes_compiled_jar): $(R_file_stamp)
 
 # Rule to build AAR, archive including classes.jar, resource, etc.
 built_aar := $(intermediates.COMMON)/javalib.aar
diff --git a/core/static_library_internal.mk b/core/static_library_internal.mk
index 6133ea2..6b4d22f 100644
--- a/core/static_library_internal.mk
+++ b/core/static_library_internal.mk
@@ -39,6 +39,7 @@
 $(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_ALL_WHOLE_STATIC_LIBRARIES := $(strip $(built_whole_gcno_libraries))
 $(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_PREFIX := $(my_prefix)
 $(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_2ND_ARCH_VAR_PREFIX := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+$(intermediates)/$(GCNO_ARCHIVE) : PRIVATE_INTERMEDIATES_DIR := $(intermediates)
 $(intermediates)/$(GCNO_ARCHIVE) : $(LOCAL_GCNO_FILES) $(built_whole_gcno_libraries)
 	$(transform-o-to-static-lib)
 endif
diff --git a/core/tasks/device-tests.mk b/core/tasks/device-tests.mk
new file mode 100644
index 0000000..084353b
--- /dev/null
+++ b/core/tasks/device-tests.mk
@@ -0,0 +1,29 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.PHONY: device-tests
+
+device-tests-zip := $(PRODUCT_OUT)/device-tests.zip
+$(device-tests-zip): $(COMPATIBILITY.device-tests.FILES) $(SOONG_ZIP)
+	echo $(COMPATIBILITY.device-tests.FILES) > $@.list
+	sed -i -e 's/\s\+/\n/g' $@.list
+	grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+	grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+	$(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+
+device-tests: $(device-tests-zip)
+$(call dist-for-goals, device-tests, $(device-tests-zip))
+
+tests: device-tests
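A hedged invocation sketch for the new goal:

# Sketch: build the suite and pick up the artifact (paths assume a lunched shell):
#   $ make device-tests
#   $ ls $ANDROID_PRODUCT_OUT/device-tests.zip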
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
new file mode 100644
index 0000000..9629bfa
--- /dev/null
+++ b/core/tasks/general-tests.mk
@@ -0,0 +1,26 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: general-tests
+
+general-tests-zip := $(PRODUCT_OUT)/general-tests.zip
+$(general-tests-zip): $(COMPATIBILITY.general-tests.FILES) $(SOONG_ZIP)
+	echo $(COMPATIBILITY.general-tests.FILES) > $@.list
+	sed -i -e 's/\s\+/\n/g' $@.list
+	grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+	grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+	$(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+
+general-tests: $(general-tests-zip)
+$(call dist-for-goals, general-tests, $(general-tests-zip))
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index d8f900e..1455a44 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -37,13 +37,14 @@
 $(compatibility_zip): PRIVATE_TOOLS := $(test_tools)
 $(compatibility_zip): PRIVATE_SUITE_NAME := $(test_suite_name)
 $(compatibility_zip): PRIVATE_DYNAMIC_CONFIG := $(test_suite_dynamic_config)
-$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) | $(ADB) $(ACP)
+$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) $(SOONG_ZIP) | $(ADB) $(ACP)
 # Make dir structure
 	$(hide) mkdir -p $(PRIVATE_OUT_DIR)/tools $(PRIVATE_OUT_DIR)/testcases
 # Copy tools
 	$(hide) $(ACP) -fp $(PRIVATE_TOOLS) $(PRIVATE_OUT_DIR)/tools
 	$(if $(PRIVATE_DYNAMIC_CONFIG),$(hide) $(ACP) -fp $(PRIVATE_DYNAMIC_CONFIG) $(PRIVATE_OUT_DIR)/testcases/$(PRIVATE_SUITE_NAME).dynamic)
-	$(hide) cd $(dir $@) && zip -rq $(notdir $@) $(PRIVATE_NAME)
+	$(hide) find $(dir $@)/$(PRIVATE_NAME) | sort >$@.list
+	$(hide) $(SOONG_ZIP) -d -o $@ -C $(dir $@) -l $@.list
 
 # Reset all input variables
 test_suite_name :=
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index 24a7608..4dde9fd 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -9,21 +9,32 @@
 #
 #
 
+my_makefile := $(lastword $(filter-out $(lastword $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
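How the my_makefile trick works, for reference:

# Illustration: MAKEFILE_LIST ends with this file itself; filtering out that
# last word and taking the remaining last word yields the makefile that
# included us, which the warning below uses to attribute the message.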
 my_staging_dir := $(call intermediates-dir-for,PACKAGING,$(my_package_name))
 my_built_modules :=
 my_copy_pairs :=
 my_pickup_files :=
 
+# Iterate over the modules and include their direct dependencies stated in
+# LOCAL_REQUIRED_MODULES.
+my_modules_and_deps := $(my_modules)
+$(foreach m,$(my_modules),\
+  $(eval _explicitly_required := \
+    $(strip $(ALL_MODULES.$(m).EXPLICITLY_REQUIRED)\
+    $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).EXPLICITLY_REQUIRED)))\
+  $(eval my_modules_and_deps += $(_explicitly_required))\
+)
+
 # Iterate over modules' built files and installed files;
 # Calculate the dest files in the output zip file.
 
-$(foreach m,$(my_modules),\
+$(foreach m,$(my_modules_and_deps),\
   $(eval _pickup_files := $(strip $(ALL_MODULES.$(m).PICKUP_FILES)\
     $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).PICKUP_FILES)))\
   $(eval _built_files := $(strip $(ALL_MODULES.$(m).BUILT_INSTALLED)\
     $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).BUILT_INSTALLED)))\
   $(if $(_pickup_files)$(_built_files),,\
-    $(warning Unknown installed file for module '$(m)'))\
+    $(shell $(call echo-warning,$(my_makefile),$(my_package_name): Unknown installed file for module '$(m)')))\
   $(eval my_pickup_files += $(_pickup_files))\
   $(foreach i, $(_built_files),\
     $(eval bui_ins := $(subst :,$(space),$(i)))\
@@ -37,26 +48,16 @@
       $(eval my_copy_pairs += $(bui):$(my_staging_dir)/$(my_copy_dest)))\
   ))
 
-define copy-tests-in-batch
-$(hide) $(foreach p, $(1),\
-  $(eval pair := $(subst :,$(space),$(p)))\
-  mkdir -p $(dir $(word 2,$(pair)));\
-  cp -Rf $(word 1,$(pair)) $(word 2,$(pair));)
-endef
-
 my_package_zip := $(my_staging_dir)/$(my_package_name).zip
 $(my_package_zip): PRIVATE_COPY_PAIRS := $(my_copy_pairs)
 $(my_package_zip): PRIVATE_PICKUP_FILES := $(my_pickup_files)
 $(my_package_zip) : $(my_built_modules)
 	@echo "Package $@"
 	@rm -rf $(dir $@) && mkdir -p $(dir $@)
-	$(call copy-tests-in-batch,$(wordlist 1,200,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 201,400,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 401,600,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 601,800,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 801,1000,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 1001,1200,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 1201,9999,$(PRIVATE_COPY_PAIRS)))
+	$(foreach p, $(PRIVATE_COPY_PAIRS),\
+	  $(eval pair := $(subst :,$(space),$(p)))\
+	  mkdir -p $(dir $(word 2,$(pair))) && \
+	  cp -Rf $(word 1,$(pair)) $(word 2,$(pair)) && ) true
 	$(hide) $(foreach f, $(PRIVATE_PICKUP_FILES),\
-	  cp -RfL $(f) $(dir $@);)
+	  cp -RfL $(f) $(dir $@) && ) true
 	$(hide) cd $(dir $@) && zip -rqX $(notdir $@) *
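What the new copy recipe expands to, for reference:

# Illustration: the $(foreach ...) above expands at parse time into one shell
# line of the form
#   mkdir -p d1 && cp -Rf s1 d1 && mkdir -p d2 && cp -Rf s2 d2 && true
# the trailing "true" closes the final "&&" and keeps the recipe valid when
# the pair list is empty.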
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 2449ea9..ba07200 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -34,19 +34,60 @@
 # if the file exists.
 #
 INTERNAL_BUILD_ID_MAKEFILE := $(wildcard $(BUILD_SYSTEM)/build_id.mk)
-ifneq "" "$(INTERNAL_BUILD_ID_MAKEFILE)"
+ifdef INTERNAL_BUILD_ID_MAKEFILE
   include $(INTERNAL_BUILD_ID_MAKEFILE)
 endif
 
-ifeq "" "$(PLATFORM_VERSION)"
-  # This is the canonical definition of the platform version,
-  # which is the version that we reveal to the end user.
-  # Update this value when the platform version changes (rather
-  # than overriding it somewhere else).  Can be an arbitrary string.
-  PLATFORM_VERSION := 7.1.1
+DEFAULT_PLATFORM_VERSION := OPR1
+MIN_PLATFORM_VERSION := OPR1
+MAX_PLATFORM_VERSION := PPR1
+
+ALLOWED_VERSIONS := $(call allowed-platform-versions,\
+  $(MIN_PLATFORM_VERSION),\
+  $(MAX_PLATFORM_VERSION),\
+  $(DEFAULT_PLATFORM_VERSION))
+
+ifndef TARGET_PLATFORM_VERSION
+  TARGET_PLATFORM_VERSION := $(DEFAULT_PLATFORM_VERSION)
 endif
 
-ifeq "" "$(PLATFORM_SDK_VERSION)"
+ifeq (,$(filter $(ALLOWED_VERSIONS), $(TARGET_PLATFORM_VERSION)))
+  $(warning Invalid TARGET_PLATFORM_VERSION '$(TARGET_PLATFORM_VERSION)', must be one of)
+  $(error $(ALLOWED_VERSIONS))
+endif
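An illustration, assuming allowed-platform-versions (defined elsewhere, not in this diff) yields the versions in ALL_VERSIONS between its first two arguments:

# Illustration: with the bounds above the build accepts any version from OPR1
# up to PPR1, and
#   TARGET_PLATFORM_VERSION=XYZ1 make
# aborts with the "Invalid TARGET_PLATFORM_VERSION" message listing the
# allowed choices.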
+
+# Default versions for each TARGET_PLATFORM_VERSION
+# TODO: PLATFORM_VERSION, PLATFORM_SDK_VERSION, etc. should be conditional
+# on this
+
+# This is the canonical definition of the platform version,
+# which is the version that we reveal to the end user.
+# Update this value when the platform version changes (rather
+# than overriding it somewhere else).  Can be an arbitrary string.
+
+# When you add a new PLATFORM_VERSION which will result in a new
+# PLATFORM_SDK_VERSION please ensure you add a corresponding isAtLeast*
+# method in the following java file:
+# frameworks/support/compat/gingerbread/android/support/v4/os/BuildCompat.java
+
+# When you change PLATFORM_VERSION for a given PLATFORM_SDK_VERSION
+# please add that PLATFORM_VERSION to the following text file:
+# cts/tests/tests/os/assets/platform_versions.txt
+PLATFORM_VERSION.OPR1 := O
+
+# These are the current development codenames.
+PLATFORM_VERSION_CODENAME.OPR1 := O
+PLATFORM_VERSION_CODENAME.PPR1 := P
+
+ifndef PLATFORM_VERSION
+  PLATFORM_VERSION := $(PLATFORM_VERSION.$(TARGET_PLATFORM_VERSION))
+  ifndef PLATFORM_VERSION
+    # PLATFORM_VERSION falls back to TARGET_PLATFORM_VERSION
+    PLATFORM_VERSION := $(TARGET_PLATFORM_VERSION)
+  endif
+endif
+
+ifndef PLATFORM_SDK_VERSION
   # This is the canonical definition of the SDK version, which defines
   # the set of APIs and functionality available in the platform.  It
   # is a single integer that increases monotonically as updates to
@@ -55,10 +96,18 @@
   # intermediate builds).  During development, this number remains at the
   # SDK version the branch is based on and PLATFORM_VERSION_CODENAME holds
   # the code-name of the new development work.
+
+  # When you change PLATFORM_SDK_VERSION please ensure you also update the
+  # corresponding methods for isAtLeast* in the following java file:
+  # frameworks/support/compat/gingerbread/android/support/v4/os/BuildCompat.java
+
+  # When you increment the PLATFORM_SDK_VERSION please ensure you also
+  # clear out the following text file of all older PLATFORM_VERSIONs:
+  # cts/tests/tests/os/assets/platform_versions.txt
   PLATFORM_SDK_VERSION := 25
 endif
 
-ifeq "" "$(PLATFORM_JACK_MIN_SDK_VERSION)"
+ifndef PLATFORM_JACK_MIN_SDK_VERSION
   # This is definition of the min SDK version given to Jack for the current
   # platform. For released version it should be the same as
   # PLATFORM_SDK_VERSION. During development, this number may be incremented
@@ -67,21 +116,38 @@
   PLATFORM_JACK_MIN_SDK_VERSION := o-b1
 endif
 
-ifeq "" "$(PLATFORM_VERSION_CODENAME)"
-  # This is the current development code-name, if the build is not a final
-  # release build.  If this is a final release build, it is simply "REL".
-  PLATFORM_VERSION_CODENAME := REL
+ifndef PLATFORM_VERSION_CODENAME
+  PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
+  ifndef PLATFORM_VERSION_CODENAME
+    # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
+    PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
+  endif
 
   # This is all of the development codenames that are active.  Should be either
   # the same as PLATFORM_VERSION_CODENAME or a comma-separated list of additional
   # codenames after PLATFORM_VERSION_CODENAME.
-  PLATFORM_VERSION_ALL_CODENAMES := $(PLATFORM_VERSION_CODENAME)
+  PLATFORM_VERSION_ALL_CODENAMES :=
+
+  # Build a list of all possible code names. Avoid duplicates, and stop when
+  # we reach a codename that matches PLATFORM_VERSION_CODENAME (anything
+  # beyond that is not included in our build).
+  _versions_in_target := \
+    $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
+  $(foreach version,$(_versions_in_target),\
+    $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
+    $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
+      $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
+
+  # And convert from space separated to comma separated.
+  PLATFORM_VERSION_ALL_CODENAMES := \
+    $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
+
 endif
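A worked example of the codename accumulation:

# Illustration: with TARGET_PLATFORM_VERSION := PPR1 the loop visits OPR1 and
# PPR1, collects the codenames O and P once each, and the comma-join yields
#   PLATFORM_VERSION_ALL_CODENAMES := O,P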
 
-ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+ifeq (REL,$(PLATFORM_VERSION_CODENAME))
   PLATFORM_PREVIEW_SDK_VERSION := 0
 else
-  ifeq "" "$(PLATFORM_PREVIEW_SDK_VERSION)"
+  ifndef PLATFORM_PREVIEW_SDK_VERSION
     # This is the definition of a preview SDK version over and above the current
     # platform SDK version. Unlike the platform SDK version, a higher value
     # for preview SDK version does NOT mean that all prior preview APIs are
@@ -95,29 +161,29 @@
   endif
 endif
 
-ifeq "" "$(DEFAULT_APP_TARGET_SDK)"
+ifndef DEFAULT_APP_TARGET_SDK
   # This is the default minSdkVersion and targetSdkVersion to use for
   # all .apks created by the build system.  It can be overridden by explicitly
   # setting these in the .apk's AndroidManifest.xml.  It is either the code
   # name of the development build or, if this is a release build, the official
   # SDK version of this release.
-  ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+  ifeq (REL,$(PLATFORM_VERSION_CODENAME))
     DEFAULT_APP_TARGET_SDK := $(PLATFORM_SDK_VERSION)
   else
     DEFAULT_APP_TARGET_SDK := $(PLATFORM_VERSION_CODENAME)
   endif
 endif
 
-ifeq "" "$(PLATFORM_SECURITY_PATCH)"
+ifndef PLATFORM_SECURITY_PATCH
     #  Used to indicate the security patch that has been applied to the device.
     #  It must signify that the build includes all security patches issued up through the designated Android Public Security Bulletin.
     #  It must be of the form "YYYY-MM-DD" on production devices.
     #  It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
     #  If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
-      PLATFORM_SECURITY_PATCH := 2016-11-05
+      PLATFORM_SECURITY_PATCH := 2017-04-05
 endif
 
-ifeq "" "$(PLATFORM_BASE_OS)"
+ifndef PLATFORM_BASE_OS
   # Used to indicate the base os applied to the device.
   # Can be an arbitrary string, but must be a single word.
   #
@@ -125,7 +191,7 @@
   PLATFORM_BASE_OS :=
 endif
 
-ifeq "" "$(BUILD_ID)"
+ifndef BUILD_ID
   # Used to signify special builds.  E.g., branches and/or releases,
   # like "M5-RC7".  Can be an arbitrary string, but must be a single
   # word and a valid file name.
@@ -134,7 +200,7 @@
   BUILD_ID := UNKNOWN
 endif
 
-ifeq "" "$(BUILD_DATETIME)"
+ifndef BUILD_DATETIME
   # Used to reproduce builds by setting the same time. Must be the number
   # of seconds since the Epoch.
   BUILD_DATETIME := $(shell date +%s)
@@ -146,7 +212,7 @@
 DATE := date -d @$(BUILD_DATETIME)
 endif
 
-ifeq "" "$(BUILD_NUMBER)"
+ifndef BUILD_NUMBER
   # BUILD_NUMBER should be set to the source control value that
   # represents the current state of the source code.  E.g., a
   # perforce changelist number or a git hash.  Can be an arbitrary string
diff --git a/envsetup.sh b/envsetup.sh
index 710b28f..03fdf89 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -28,9 +28,9 @@
 
 Look at the source to view more functions. The complete list is:
 EOF
-    T=$(gettop)
-    local A
-    A=""
+    local T=$(gettop)
+    local A=""
+    local i
     for i in `cat $T/build/envsetup.sh | sed -n "/^[[:blank:]]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do
       A="$A $i"
     done
@@ -40,7 +40,7 @@
 # Get all the build variables needed by this script in a single call to the build system.
 function build_build_var_cache()
 {
-    T=$(gettop)
+    local T=$(gettop)
     # Grep out the variable names from the script.
     cached_vars=`cat $T/build/envsetup.sh | tr '()' '  ' | awk '{for(i=1;i<=NF;i++) if($i~/get_build_var/) print $(i+1)}' | sort -u | tr '\n' ' '`
     cached_abs_vars=`cat $T/build/envsetup.sh | tr '()' '  ' | awk '{for(i=1;i<=NF;i++) if($i~/get_abs_build_var/) print $(i+1)}' | sort -u | tr '\n' ' '`
@@ -74,6 +74,7 @@
 function destroy_build_var_cache()
 {
     unset BUILD_VAR_CACHE_READY
+    local v
     for v in $cached_vars; do
       unset var_cache_$v
     done
@@ -93,7 +94,7 @@
     return
     fi
 
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -111,7 +112,7 @@
     return
     fi
 
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -123,7 +124,7 @@
 # check to see if the supplied product is one we can build
 function check_product()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -141,6 +142,7 @@
 # check to see if the supplied variant is valid
 function check_variant()
 {
+    local v
     for v in ${VARIANT_CHOICES[@]}
     do
         if [ "$v" = "$1" ]
@@ -153,7 +155,7 @@
 
 function setpaths()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP."
         return
@@ -184,18 +186,19 @@
     fi
 
     # and in with the new
-    prebuiltdir=$(getprebuilt)
-    gccprebuiltdir=$(get_abs_build_var ANDROID_GCC_PREBUILTS)
+    local prebuiltdir=$(getprebuilt)
+    local gccprebuiltdir=$(get_abs_build_var ANDROID_GCC_PREBUILTS)
 
     # defined in core/config.mk
-    targetgccversion=$(get_build_var TARGET_GCC_VERSION)
-    targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
+    local targetgccversion=$(get_build_var TARGET_GCC_VERSION)
+    local targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
     export TARGET_GCC_VERSION=$targetgccversion
 
     # The gcc toolchain does not exist for windows/cygwin. In this case, do not reference it.
     export ANDROID_TOOLCHAIN=
     export ANDROID_TOOLCHAIN_2ND_ARCH=
     local ARCH=$(get_build_var TARGET_ARCH)
+    local toolchaindir toolchaindir2=
     case $ARCH in
         x86) toolchaindir=x86/x86_64-linux-android-$targetgccversion/bin
             ;;
@@ -217,7 +220,7 @@
         export ANDROID_TOOLCHAIN=$gccprebuiltdir/$toolchaindir
     fi
 
-    if [ -d "$gccprebuiltdir/$toolchaindir2" ]; then
+    if [ "$toolchaindir2" -a -d "$gccprebuiltdir/$toolchaindir2" ]; then
         export ANDROID_TOOLCHAIN_2ND_ARCH=$gccprebuiltdir/$toolchaindir2
     fi
 
@@ -260,6 +263,12 @@
     unset ANDROID_HOST_OUT
     export ANDROID_HOST_OUT=$(get_abs_build_var HOST_OUT)
 
+    unset ANDROID_HOST_OUT_TESTCASES
+    export ANDROID_HOST_OUT_TESTCASES=$(get_abs_build_var HOST_OUT_TESTCASES)
+
+    unset ANDROID_TARGET_OUT_TESTCASES
+    export ANDROID_TARGET_OUT_TESTCASES=$(get_abs_build_var TARGET_OUT_TESTCASES)
+
     # needed for building linux on MacOS
     # TODO: fix the path
     #export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include
@@ -267,7 +276,7 @@
 
 function printconfig()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -290,7 +299,7 @@
 
 function set_sequence_number()
 {
-    export BUILD_ENV_SEQUENCE_NUMBER=12
+    export BUILD_ENV_SEQUENCE_NUMBER=13
 }
 
 function settitle()
@@ -393,6 +402,7 @@
 #
 function chooseproduct()
 {
+    local default_value
     if [ "x$TARGET_PRODUCT" != x ] ; then
         default_value=$TARGET_PRODUCT
     else
@@ -563,50 +573,42 @@
         then
             selection=${LUNCH_MENU_CHOICES[$(($answer-1))]}
         fi
-    elif (echo -n $answer | grep -q -e "^[^\-][^\-]*-[^\-][^\-]*$")
-    then
+    else
         selection=$answer
     fi
 
-    if [ -z "$selection" ]
-    then
-        echo
-        echo "Invalid lunch combo: $answer"
-        return 1
-    fi
-
     export TARGET_BUILD_APPS=
 
-    local variant=$(echo -n $selection | sed -e "s/^[^\-]*-//")
-    check_variant $variant
-    if [ $? -ne 0 ]
-    then
-        echo
-        echo "** Invalid variant: '$variant'"
-        echo "** Must be one of ${VARIANT_CHOICES[@]}"
-        variant=
+    local product variant_and_version variant version
+
+    product=${selection%%-*} # Trim everything after first dash
+    variant_and_version=${selection#*-} # Trim everything up to first dash
+    if [ "$variant_and_version" != "$selection" ]; then
+        variant=${variant_and_version%%-*}
+        if [ "$variant" != "$variant_and_version" ]; then
+            version=${variant_and_version#*-}
+        fi
     fi
 
-    local product=$(echo -n $selection | sed -e "s/-.*$//")
-    TARGET_PRODUCT=$product \
-    TARGET_BUILD_VARIANT=$variant \
-    build_build_var_cache
-    if [ $? -ne 0 ]
+    if [ -z "$product" ]
     then
         echo
-        echo "** Don't have a product spec for: '$product'"
-        echo "** Do you have the right repo manifest?"
-        product=
-    fi
-
-    if [ -z "$product" -o -z "$variant" ]
-    then
-        echo
+        echo "Invalid lunch combo: $selection"
         return 1
     fi
 
-    export TARGET_PRODUCT=$product
-    export TARGET_BUILD_VARIANT=$variant
+    TARGET_PRODUCT=$product \
+    TARGET_BUILD_VARIANT=$variant \
+    TARGET_PLATFORM_VERSION=$version \
+    build_build_var_cache
+    if [ $? -ne 0 ]
+    then
+        return 1
+    fi
+
+    export TARGET_PRODUCT=$(get_build_var TARGET_PRODUCT)
+    export TARGET_BUILD_VARIANT=$(get_build_var TARGET_BUILD_VARIANT)
+    export TARGET_PLATFORM_VERSION=$(get_build_var TARGET_PLATFORM_VERSION)
     export TARGET_BUILD_TYPE=release
 
     echo
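Illustration of the new combo parsing:

# Illustration: the combo now carries an optional platform version:
#   lunch aosp_arm-userdebug-OPR1  -> product=aosp_arm variant=userdebug version=OPR1
#   lunch aosp_arm-userdebug       -> version stays empty and version_defaults.mk
#                                     falls back to DEFAULT_PLATFORM_VERSION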
@@ -696,7 +698,7 @@
             PWD= /bin/pwd
         else
             local HERE=$PWD
-            T=
+            local T=
             while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
                 \cd ..
                 T=`PWD= /bin/pwd -P`
@@ -744,9 +746,9 @@
 
 function findmakefile()
 {
-    TOPFILE=build/core/envsetup.mk
+    local TOPFILE=build/core/envsetup.mk
     local HERE=$PWD
-    T=
+    local T=
     while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
         T=`PWD= /bin/pwd`
         if [ -f "$T/Android.mk" -o -f "$T/Android.bp" ]; then
@@ -782,6 +784,7 @@
             echo "Couldn't locate a makefile from the current directory."
             return 1
         else
+            local ARG
             for ARG in $@; do
                 case $ARG in
                   GET-INSTALL-PATH) GET_INSTALL_PATH=$ARG;;
@@ -939,7 +942,7 @@
 
 function croot()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ "$T" ]; then
         if [ "$1" ]; then
             \cd $(gettop)/$1
@@ -953,9 +956,9 @@
 
 function cproj()
 {
-    TOPFILE=build/core/envsetup.mk
+    local TOPFILE=build/core/envsetup.mk
     local HERE=$PWD
-    T=
+    local T=
     while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
         T=$PWD
         if [ -f "$T/Android.mk" ]; then
@@ -1142,8 +1145,7 @@
             adb shell cat $TMP
         else
             # Dump stacks of native process
-            local USE64BIT="$(is64bit $PID)"
-            adb shell debuggerd$USE64BIT -b $PID
+            adb shell debuggerd -b $PID
         fi
     fi
 }
@@ -1207,6 +1209,7 @@
 
 function resgrep()
 {
+    local dir
     for dir in `find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -name res -type d`; do
         find $dir -type f -name '*\.xml' -exec grep --color -n "$@" {} +
     done
@@ -1268,7 +1271,7 @@
 
 function tracedmdump()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP."
         return
@@ -1445,7 +1448,7 @@
         echo "Couldn't locate output files.  Try running 'lunch' first." >&2
         return
     fi
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -1462,7 +1465,7 @@
 # simple shortcut to the runtest command
 function runtest()
 {
-    T=$(gettop)
+    local T=$(gettop)
     if [ ! "$T" ]; then
         echo "Couldn't locate the top of the tree.  Try setting TOP." >&2
         return
@@ -1475,7 +1478,8 @@
         echo "Usage: godir <regex>"
         return
     fi
-    T=$(gettop)
+    local T=$(gettop)
+    local FILELIST
     if [ ! "$OUT_DIR" = "" ]; then
         mkdir -p $OUT_DIR
         FILELIST=$OUT_DIR/filelist
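
The rewritten lunch parsing above replaces the old sed pipelines with
POSIX parameter expansion. A minimal sketch of the same trimming, using
a product-variant-version combo of the form exercised by the tests
added below:

    selection=aosp_arm64-userdebug-PPR1
    product=${selection%%-*}             # aosp_arm64
    variant_and_version=${selection#*-}  # userdebug-PPR1
    variant=${variant_and_version%%-*}   # userdebug
    version=${variant_and_version#*-}    # PPR1

When a component is absent, the expansion leaves the string unchanged,
which is exactly what the "$variant_and_version" != "$selection" style
guards detect.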
diff --git a/target/board/generic/sepolicy/file_contexts b/target/board/generic/sepolicy/file_contexts
index e8d32f7..e9502d9 100644
--- a/target/board/generic/sepolicy/file_contexts
+++ b/target/board/generic/sepolicy/file_contexts
@@ -9,6 +9,7 @@
 /dev/block/vdc               u:object_r:userdata_block_device:s0
 
 /dev/goldfish_pipe           u:object_r:qemu_device:s0
+/dev/goldfish_sync           u:object_r:qemu_device:s0
 /dev/qemu_.*                 u:object_r:qemu_device:s0
 /dev/socket/qemud            u:object_r:qemud_socket:s0
 /dev/ttyGF[0-9]*             u:object_r:serial_device:s0
diff --git a/target/board/generic/sepolicy/logpersist.te b/target/board/generic/sepolicy/logpersist.te
index 0c52986..3fc0250 100644
--- a/target/board/generic/sepolicy/logpersist.te
+++ b/target/board/generic/sepolicy/logpersist.te
@@ -10,3 +10,4 @@
 
 # Write to /dev/ttyS2 and /dev/ttyGF2.
 allow logpersist serial_device:chr_file { write open };
+get_prop(logpersist, qemu_cmdline)
diff --git a/target/board/generic/sepolicy/property.te b/target/board/generic/sepolicy/property.te
index 22d580a..a486702 100644
--- a/target/board/generic/sepolicy/property.te
+++ b/target/board/generic/sepolicy/property.te
@@ -1,3 +1,4 @@
 type qemu_prop, property_type;
+type qemu_cmdline, property_type;
 type radio_noril_prop, property_type;
 type opengles_prop, property_type;
diff --git a/target/board/generic/sepolicy/property_contexts b/target/board/generic/sepolicy/property_contexts
index 142b062..c66a85f 100644
--- a/target/board/generic/sepolicy/property_contexts
+++ b/target/board/generic/sepolicy/property_contexts
@@ -1,4 +1,5 @@
 qemu.                   u:object_r:qemu_prop:s0
+qemu.cmdline            u:object_r:qemu_cmdline:s0
 ro.emu.                 u:object_r:qemu_prop:s0
 ro.emulator.            u:object_r:qemu_prop:s0
 ro.radio.noril          u:object_r:radio_noril_prop:s0
diff --git a/target/board/generic/sepolicy/qemu_props.te b/target/board/generic/sepolicy/qemu_props.te
index 6768ce7..95174d6 100644
--- a/target/board/generic/sepolicy/qemu_props.te
+++ b/target/board/generic/sepolicy/qemu_props.te
@@ -9,3 +9,4 @@
 set_prop(qemu_props, dalvik_prop)
 set_prop(qemu_props, config_prop)
 set_prop(qemu_props, opengles_prop)
+set_prop(qemu_props, qemu_cmdline)
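
Taken together, these sepolicy changes give the qemu.cmdline property
its own type, allow qemu_props to set it, and allow logpersist to read
it. As a rough sanity check on an emulator build carrying these
policies:

    adb shell getprop qemu.cmdline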
diff --git a/target/product/core.mk b/target/product/core.mk
index 10b2c9e..c4c7cab 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -25,6 +25,7 @@
     BlockedNumberProvider \
     BookmarkProvider \
     Browser2 \
+    BuiltInPrintService \
     Calendar \
     CalendarProvider \
     CaptivePortalLogin \
diff --git a/target/product/core_minimal.mk b/target/product/core_minimal.mk
index fe1a382..701a69c 100644
--- a/target/product/core_minimal.mk
+++ b/target/product/core_minimal.mk
@@ -125,6 +125,9 @@
 PRODUCT_COPY_FILES += \
     system/core/rootdir/etc/public.libraries.android.txt:system/etc/public.libraries.txt
 
+PRODUCT_COPY_FILES += \
+    system/core/rootdir/etc/ld.config.txt:system/etc/ld.config.txt
+
 # Different dexopt types for different package update/install times.
 # On eng builds, make "boot" reasons do pure JIT for faster turnaround.
 ifeq (eng,$(TARGET_BUILD_VARIANT))
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 9585eb0..e04731c 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -20,7 +20,8 @@
 PRODUCT_PACKAGES += \
     adb \
     adbd \
-    android.hidl.memory@1.0-service \
+    android.hardware.configstore@1.0-service \
+    android.hidl.allocator@1.0-service \
     android.hidl.memory@1.0-impl \
     atrace \
     bootanimation \
@@ -32,8 +33,6 @@
     dumpsys \
     fastboot \
     gralloc.default \
-    grep \
-    gzip \
     healthd \
     hwservicemanager \
     init \
@@ -69,32 +68,26 @@
     lmkd \
     logcat \
     logwrapper \
-    mkshrc \
-    reboot \
+    lshal \
     recovery \
     service \
     servicemanager \
-    sh \
+    shell_and_utilities \
     surfaceflinger \
     tombstoned \
-    toolbox \
-    toybox \
     tzdatacheck \
 
 # SELinux packages
 PRODUCT_PACKAGES += \
-    file_contexts.bin \
-    nonplat_file_contexts \
     nonplat_mac_permissions.xml \
+    nonplat_property_contexts \
     nonplat_seapp_contexts \
     nonplat_service_contexts \
-    plat_file_contexts \
     plat_mac_permissions.xml \
+    plat_property_contexts \
     plat_seapp_contexts \
     plat_service_contexts \
-    property_contexts \
-    selinux_version \
-    sepolicy
+    selinux_policy
 
 # AID Generation for
 # <pwd.h> and <grp.h>
@@ -114,3 +107,7 @@
     system/core/rootdir/init.usb.configfs.rc:root/init.usb.configfs.rc \
     system/core/rootdir/ueventd.rc:root/ueventd.rc \
     system/core/rootdir/etc/hosts:system/etc/hosts
+
+# Framework Manifest
+PRODUCT_COPY_FILES += \
+    system/libhidl/manifest.xml:system/manifest.xml
diff --git a/target/product/emulator.mk b/target/product/emulator.mk
index b08a28a..afa8389 100644
--- a/target/product/emulator.mk
+++ b/target/product/emulator.mk
@@ -61,6 +61,7 @@
     device/generic/goldfish/init.ranchu.rc:root/init.ranchu.rc \
     device/generic/goldfish/fstab.ranchu:root/fstab.ranchu \
     device/generic/goldfish/ueventd.ranchu.rc:root/ueventd.ranchu.rc \
+    device/generic/goldfish/input/goldfish_rotary.idc:system/usr/idc/goldfish_rotary.idc \
     frameworks/native/data/etc/android.hardware.usb.accessory.xml:system/etc/permissions/android.hardware.usb.accessory.xml
 
 PRODUCT_PACKAGE_OVERLAYS := device/generic/goldfish/overlay
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 3dd505f..fb52d67 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -53,6 +53,7 @@
 PRODUCT_PACKAGES += \
     dalvikvm \
     dex2oat \
+    dexoptanalyzer \
     libart \
     libart_fake \
     libopenjdkjvmti \
@@ -79,4 +80,5 @@
     ro.dalvik.vm.native.bridge=0 \
     dalvik.vm.usejit=true \
     dalvik.vm.usejitprofiles=true \
+    dalvik.vm.dexopt.secondary=true \
     dalvik.vm.appimageformat=lz4
diff --git a/tests/envsetup_tests.sh b/tests/envsetup_tests.sh
new file mode 100755
index 0000000..4aae255
--- /dev/null
+++ b/tests/envsetup_tests.sh
@@ -0,0 +1,36 @@
+#!/bin/bash -e
+
+source $(dirname $0)/../envsetup.sh
+
+unset TARGET_PRODUCT TARGET_BUILD_VARIANT TARGET_PLATFORM_VERSION
+
+function check_lunch
+(
+    echo lunch $1
+    set +e
+    lunch $1 > /dev/null 2> /dev/null
+    set -e
+    [ "$TARGET_PRODUCT" = "$2" ] || ( echo "lunch $1: expected TARGET_PRODUCT='$2', got '$TARGET_PRODUCT'" && exit 1 )
+    [ "$TARGET_BUILD_VARIANT" = "$3" ] || ( echo "lunch $1: expected TARGET_BUILD_VARIANT='$3', got '$TARGET_BUILD_VARIANT'" && exit 1 )
+    [ "$TARGET_PLATFORM_VERSION" = "$4" ] || ( echo "lunch $1: expected TARGET_PLATFORM_VERSION='$4', got '$TARGET_PLATFORM_VERSION'" && exit 1 )
+)
+
+default_version=$(get_build_var DEFAULT_PLATFORM_VERSION)
+valid_version=PPR1
+
+# lunch tests
+check_lunch "aosp_arm64"                                "aosp_arm64" "eng"       "$default_version"
+check_lunch "aosp_arm64-userdebug"                      "aosp_arm64" "userdebug" "$default_version"
+check_lunch "aosp_arm64-userdebug-$valid_version"       "aosp_arm64" "userdebug" "$valid_version"
+check_lunch "abc"                                       "" "" ""
+check_lunch "aosp_arm64-abc"                            "" "" ""
+check_lunch "aosp_arm64-userdebug-abc"                  "" "" ""
+check_lunch "aosp_arm64-abc-$valid_version"             "" "" ""
+check_lunch "abc-userdebug-$valid_version"              "" "" ""
+check_lunch "-"                                         "" "" ""
+check_lunch "--"                                        "" "" ""
+check_lunch "-userdebug"                                "" "" ""
+check_lunch "-userdebug-"                               "" "" ""
+check_lunch "-userdebug-$valid_version"                 "" "" ""
+check_lunch "aosp_arm64-userdebug-$valid_version-"      "" "" ""
+check_lunch "aosp_arm64-userdebug-$valid_version-abc"   "" "" ""
diff --git a/tools/checkowners.py b/tools/checkowners.py
new file mode 100755
index 0000000..b874955
--- /dev/null
+++ b/tools/checkowners.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+
+"""Parse and check syntax errors of a given OWNERS file."""
+
+import argparse
+import re
+import sys
+import urllib
+import urllib2
+
+parser = argparse.ArgumentParser(description='Check OWNERS file syntax')
+parser.add_argument('-v', '--verbose', dest='verbose',
+                    action='store_true', default=False,
+                    help='Verbose output to debug')
+parser.add_argument('-c', '--check_address', dest='check_address',
+                    action='store_true', default=False,
+                    help='Check email addresses')
+parser.add_argument(dest='owners', metavar='OWNERS', nargs='+',
+                    help='Path to OWNERS file')
+args = parser.parse_args()
+
+gerrit_server = 'https://android-review.googlesource.com'
+checked_addresses = {}
+
+
+def echo(msg):
+  if args.verbose:
+    print msg
+
+
+def find_address(address):
+  if address not in checked_addresses:
+    request = (gerrit_server + '/accounts/?n=1&o=ALL_EMAILS&q=email:'
+               + urllib.quote(address))
+    echo('Checking email address: ' + address)
+    result = urllib2.urlopen(request).read()
+    expected = '"email": "' + address + '"'
+    checked_addresses[address] = (result.find(expected) >= 0)
+  return checked_addresses[address]
+
+
+def main():
+  # One regular expression to check all valid lines.
+  noparent = 'set +noparent'
+  email = '([^@ ]+@[^ @]+|\\*)'
+  directive = '(%s|%s)' % (email, noparent)
+  glob = '[a-zA-Z0-9_\\.\\-\\*\\?]+'
+  perfile = 'per-file +' + glob + ' *= *' + directive
+  pats = '(|%s|%s|%s)$' % (noparent, email, perfile)
+  patterns = re.compile(pats)
+
+  # One pattern to capture email address.
+  email_address = '.*(@| |=|^)([^@ =]+@[^ @]+)'
+  address_pattern = re.compile(email_address)
+
+  error = 0
+  for fname in args.owners:
+    echo('Checking file: ' + fname)
+    num = 0
+    for line in open(fname, 'r'):
+      num += 1
+      stripped_line = re.sub('#.*$', '', line).strip()
+      if not patterns.match(stripped_line):
+        error = 1
+        print('%s:%d: ERROR: unknown line [%s]'
+              % (fname, num, line.strip()))
+      elif args.check_address and address_pattern.match(stripped_line):
+        address = address_pattern.match(stripped_line).group(2)
+        if find_address(address):
+          echo('Found email address: ' + address)
+        else:
+          error = 1
+          print('%s:%d: ERROR: unknown email address: %s'
+                % (fname, num, address))
+  sys.exit(error)
+
+if __name__ == '__main__':
+  main()
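
The grammar above accepts blank lines, "set noparent", a bare email
address or "*", and per-file glob assignments; anything else is
reported as an error. An illustrative session (user@example.com is a
placeholder; -c resolves addresses against the AOSP Gerrit, so it only
passes for registered accounts):

    cat > OWNERS <<'EOF'
    set noparent
    user@example.com
    *
    per-file *.mk = user@example.com
    EOF
    tools/checkowners.py -v OWNERS      # syntax check only
    tools/checkowners.py -c -v OWNERS   # also verify addresses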
diff --git a/tools/fat16copy.py b/tools/fat16copy.py
index af8bd83..c20930a 100755
--- a/tools/fat16copy.py
+++ b/tools/fat16copy.py
@@ -234,11 +234,16 @@
     data.seek(0)
     data_file.write(data.read())
 
-  def new_subdirectory(self, name):
+  def open_subdirectory(self, name):
     """
-    Create a new subdirectory of this directory with the given name.
+    Open a subdirectory of this directory with the given name. If the
+    subdirectory doesn't exist, a new one is created instead.
     Returns a fat_dir().
     """
+    for dent in self.dentries:
+      if dent.longname == name:
+        return dent.open_directory()
+
     chunk = self.backing.fs.allocate(1)
     (shortname, ext) = self.make_short_name(name)
     new_dentry = self.add_dentry(ATTRIBUTE_SUBDIRECTORY, shortname,
@@ -751,7 +756,7 @@
     base = os.path.basename(item)
     if len(base) == 0:
       base = os.path.basename(item[:-1])
-    sub = directory.new_subdirectory(base)
+    sub = directory.open_subdirectory(base)
     for next_item in sorted(os.listdir(item)):
       add_item(sub, os.path.join(item, next_item))
   else:
diff --git a/tools/fileslist.py b/tools/fileslist.py
deleted file mode 100755
index b9e7350..0000000
--- a/tools/fileslist.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import json, hashlib, operator, os, sys
-
-def get_file_size(path):
-  st = os.lstat(path)
-  return st.st_size;
-
-def get_file_digest(path):
-  if os.path.isfile(path) == False:
-    return "----------------------------------------------------------------"
-  digest = hashlib.sha256()
-  with open(path, 'rb') as f:
-    while True:
-      buf = f.read(1024*1024)
-      if not buf:
-        break
-      digest.update(buf)
-  return digest.hexdigest();
-
-def main(argv):
-  output = []
-  roots = argv[1:]
-  for root in roots:
-    base = len(root[:root.rfind(os.path.sep)])
-    for dir, dirs, files in os.walk(root):
-      relative = dir[base:]
-      for f in files:
-        try:
-          path = os.path.sep.join((dir, f))
-          row = {
-              "Size": get_file_size(path),
-              "Name": os.path.sep.join((relative, f)),
-              "SHA256": get_file_digest(path),
-            }
-          output.append(row)
-        except os.error:
-          pass
-  output.sort(key=operator.itemgetter("Size", "Name"), reverse=True)
-  print json.dumps(output, indent=2, separators=(',',': '))
-
-if __name__ == '__main__':
-  main(sys.argv)
diff --git a/tools/fs_config/README b/tools/fs_config/README
index d884e32..9919131 100644
--- a/tools/fs_config/README
+++ b/tools/fs_config/README
@@ -3,8 +3,7 @@
 |  _  <|   __||  _  ||  |  ||  \/  ||   __|
 \__|\_/\_____/\__|__/|_____/\__ \__/\_____/
 
-
-Generating the android_filesystem_config.h
+Generating the android_filesystem_config.h:
 
 To generate the android_filesystem_config.h file, one can choose from
 one of two methods. The first method, is to declare
@@ -140,3 +139,26 @@
 
 To add new tests, simply add a test_<xxx> method to the test class. It will automatically
 get picked up and added to the test suite.
+
+Using the android_filesystem_config.h:
+
+The tool fs_config_generate is built as a dependency of the
+fs_config_dirs and fs_config_files host targets. It #includes the
+android_filesystem_config.h file supplied or generated as above, and
+can be instructed to generate the binary data that lands in the device
+target locations /system/etc/fs_config_dirs and
+/system/etc/fs_config_files, and in the host's ${OUT} locations
+${OUT}/target/product/<device>/system/etc/fs_config_dirs and
+${OUT}/target/product/<device>/system/etc/fs_config_files. The binary
+files are interpreted by the libcutils fs_config() function, along
+with the built-in defaults, to serve as overrides that complete the
+results. The target files are
+used by filesystem and adb tools to ensure that the file and directory
+properties are preserved during runtime operations. The host files in the
+${OUT} directory are used in the final stages when building the filesystem
+images to set the file and directory properties.
+
+fs_config_generate --help reports:
+
+Generate binary content for fs_config_dirs (-D) and fs_config_files (-F)
+from device-specific android_filesystem_config.h override
+
+Usage: fs_config_generate -D|-F [-o output-file]
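
Concretely, producing the two host-side files described above would
look roughly like:

    fs_config_generate -D -o ${OUT}/target/product/<device>/system/etc/fs_config_dirs
    fs_config_generate -F -o ${OUT}/target/product/<device>/system/etc/fs_config_files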
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 2cf2fd8..c8d1dd3 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -709,7 +709,7 @@
                 int(cap, 0)
                 tmp.append('(' + cap + ')')
             except ValueError:
-                tmp.append('(1ULL << CAP_' + cap.upper() + ')')
+                tmp.append('CAP_MASK_LONG(CAP_' + cap.upper() + ')')
 
         caps = tmp
 
diff --git a/tools/generate-enforce-rro-android-manifest.py b/tools/generate-enforce-rro-android-manifest.py
new file mode 100755
index 0000000..68331cf
--- /dev/null
+++ b/tools/generate-enforce-rro-android-manifest.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility to generate the Android manifest file of the runtime resource
+overlay package for a source module.
+"""
+from xml.dom.minidom import parseString
+import argparse
+import os
+import sys
+
+ANDROID_MANIFEST_TEMPLATE="""<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="%s.auto_generated_rro__"
+    android:versionCode="1"
+    android:versionName="1.0">
+    <overlay android:targetPackage="%s" android:priority="0" android:isStatic="true"/>
+</manifest>
+"""
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-u', '--use-package-name', action='store_true',
+        help='Indicate that --package-info is a package name.')
+    parser.add_argument(
+        '-p', '--package-info', required=True,
+        help='Manifest package name or manifest file path of source module.')
+    parser.add_argument(
+        '-o', '--output', required=True,
+        help='Output manifest file path.')
+    return parser.parse_args()
+
+
+def main(argv):
+  args = get_args()
+
+  package_name = args.package_info
+  if not args.use_package_name:
+    with open(args.package_info) as f:
+      data = f.read()
+      dom = parseString(data)
+      package_name = dom.documentElement.getAttribute('package')
+
+  with open(args.output, 'w+') as f:
+    f.write(ANDROID_MANIFEST_TEMPLATE % (package_name, package_name))
+
+
+if __name__ == "__main__":
+  main(sys.argv)
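
For example, generating the overlay manifest directly from a package
name rather than a source manifest (package name illustrative):

    tools/generate-enforce-rro-android-manifest.py \
        -u -p com.example.app -o AndroidManifest.xml

This instantiates the template above as a static, priority-0 overlay:
package com.example.app.auto_generated_rro__ targeting com.example.app.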
diff --git a/tools/kati_all_products.sh b/tools/kati_all_products.sh
deleted file mode 100755
index 4567dbd..0000000
--- a/tools/kati_all_products.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash -e
-
-cd $ANDROID_BUILD_TOP
-mkdir -p out.kati
-source build/envsetup.sh
-
-get_build_var all_named_products | sed "s/ /\n/g" | parallel "$@" --progress "(source build/envsetup.sh; lunch {}-eng && m -j OUT_DIR=out.kati/{} out.kati/{}/build-{}.ninja) >out.kati/log.{} 2>&1"
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
index 0e1e45c..66babdf 100644
--- a/tools/makeparallel/makeparallel.cpp
+++ b/tools/makeparallel/makeparallel.cpp
@@ -357,12 +357,13 @@
 
   static pid_t pid;
 
-  // Set up signal handlers to forward SIGHUP, SIGINT, SIGQUIT, SIGTERM, and
-  // SIGALRM to child
+  // Set up signal handlers to forward SIGTERM to child.
+  // Assume that all other signals are sent to the entire process group,
+  // and that we'll wait for our child to exit instead of handling them.
   struct sigaction action = {};
-  action.sa_flags = SA_SIGINFO | SA_RESTART,
-  action.sa_sigaction = [](int signal, siginfo_t*, void*) {
-    if (pid > 0) {
+  action.sa_flags = SA_RESTART;
+  action.sa_handler = [](int signal) {
+    if (signal == SIGTERM && pid > 0) {
       kill(pid, signal);
     }
   };
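
The narrowed handler encodes the assumption spelled out in the new
comment: only SIGTERM needs explicit forwarding, since interactive
signals already reach the whole process group. Sketched with an
illustrative pid variable:

    kill -TERM "$makeparallel_pid"  # forwarded to the child explicitly
    # Ctrl-C (SIGINT) is delivered to the entire foreground process
    # group, so the child make receives it with no forwarding needed.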
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 9dcaadf..295f8f6 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -19,10 +19,9 @@
 # Usage: post_process_props.py file.prop [blacklist_key, ...]
 # Blacklisted keys are removed from the property file, if present
 
-# See PROP_NAME_MAX and PROP_VALUE_MAX system_properties.h.
-# The constants in system_properties.h includes the termination NUL,
-# so we decrease the values by 1 here.
-PROP_NAME_MAX = 31
+# See PROP_VALUE_MAX in system_properties.h.
+# The constant in system_properties.h includes the terminating NUL,
+# so we decrease the value by 1 here.
 PROP_VALUE_MAX = 91
 
 # Put the modifications that you need to make into the /system/build.prop into this
@@ -59,11 +58,6 @@
   buildprops = prop.to_dict()
   for key, value in buildprops.iteritems():
     # Check build properties' length.
-    if len(key) > PROP_NAME_MAX:
-      check_pass = False
-      sys.stderr.write("error: %s cannot exceed %d bytes: " %
-                       (key, PROP_NAME_MAX))
-      sys.stderr.write("%s (%d)\n" % (key, len(key)))
     if len(value) > PROP_VALUE_MAX:
       check_pass = False
       sys.stderr.write("error: %s cannot exceed %d bytes: " %
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index c80d4bd..7c3679c 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -62,6 +62,7 @@
 
 import build_image
 import common
+import rangelib
 import sparse_img
 
 OPTIONS = common.OPTIONS
@@ -72,16 +73,43 @@
 OPTIONS.replace_verity_private_key = False
 OPTIONS.is_signing = False
 
+
+class OutputFile(object):
+  def __init__(self, output_zip, input_dir, prefix, name):
+    self._output_zip = output_zip
+    self.input_name = os.path.join(input_dir, prefix, name)
+
+    if self._output_zip:
+      self._zip_name = os.path.join(prefix, name)
+
+      root, suffix = os.path.splitext(name)
+      self.name = common.MakeTempFile(prefix=root + '-', suffix=suffix)
+    else:
+      self.name = self.input_name
+
+  def Write(self):
+    if self._output_zip:
+      common.ZipWrite(self._output_zip, self.name, self._zip_name)
+
+
 def GetCareMap(which, imgname):
   """Generate care_map of system (or vendor) partition"""
 
   assert which in ("system", "vendor")
-  _, blk_device = common.GetTypeAndDevice("/" + which, OPTIONS.info_dict)
 
   simg = sparse_img.SparseImage(imgname)
   care_map_list = []
-  care_map_list.append(blk_device)
-  care_map_list.append(simg.care_map.to_string_raw())
+  care_map_list.append(which)
+
+  care_map_ranges = simg.care_map
+  key = which + "_adjusted_partition_size"
+  adjusted_blocks = OPTIONS.info_dict.get(key)
+  if adjusted_blocks:
+    assert adjusted_blocks > 0, "blocks should be positive for " + which
+    care_map_ranges = care_map_ranges.intersect(rangelib.RangeSet(
+        "0-%d" % (adjusted_blocks,)))
+
+  care_map_list.append(care_map_ranges.to_string_raw())
   return care_map_list
 
 
@@ -89,10 +117,10 @@
   """Turn the contents of SYSTEM into a system image and store it in
   output_zip. Returns the name of the system image file."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.img")
+  if os.path.exists(img.input_name):
     print("system.img already exists in %s, no need to rebuild..." % (prefix,))
-    return prebuilt_path
+    return img.input_name
 
   def output_sink(fn, data):
     ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w")
@@ -104,74 +132,52 @@
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
-  imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
-                        block_list=block_list)
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.map")
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
+              block_list=block_list)
 
-  common.ZipWrite(output_zip, imgname, prefix + "system.img")
-  common.ZipWrite(output_zip, block_list, prefix + "system.map")
-  return imgname
-
-
-def BuildSystem(input_dir, info_dict, block_list=None):
-  """Build the (sparse) system image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "system", block_list=block_list)
+  return img.name
 
 
 def AddSystemOther(output_zip, prefix="IMAGES/"):
   """Turn the contents of SYSTEM_OTHER into a system_other image
   and store it in output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system_other.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system_other.img")
+  if os.path.exists(img.input_name):
     print("system_other.img already exists in %s, no need to rebuild..." % (
         prefix,))
     return
 
-  imgname = BuildSystemOther(OPTIONS.input_tmp, OPTIONS.info_dict)
-  common.ZipWrite(output_zip, imgname, prefix + "system_other.img")
-
-def BuildSystemOther(input_dir, info_dict):
-  """Build the (sparse) system_other image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "system_other", block_list=None)
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
 
 
 def AddVendor(output_zip, prefix="IMAGES/"):
   """Turn the contents of VENDOR into a vendor image and store in it
   output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.img")
+  if os.path.exists(img.input_name):
     print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
-    return prebuilt_path
+    return img.input_name
 
-  block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
-  imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict,
-                        block_list=block_list)
-  common.ZipWrite(output_zip, imgname, prefix + "vendor.img")
-  common.ZipWrite(output_zip, block_list, prefix + "vendor.map")
-  return imgname
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.map")
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
+              block_list=block_list)
+  return img.name
 
 
-def BuildVendor(input_dir, info_dict, block_list=None):
-  """Build the (sparse) vendor image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "vendor", block_list=block_list)
-
-
-def CreateImage(input_dir, info_dict, what, block_list=None):
+def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
   print("creating " + what + ".img...")
 
-  img = common.MakeTempFile(prefix=what + "-", suffix=".img")
-
   # The name of the directory it is making an image out of matters to
   # mkyaffs2image.  It wants "system" but we have a directory named
   # "SYSTEM", so create a symlink.
+  temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   try:
     os.symlink(os.path.join(input_dir, what.upper()),
-               os.path.join(input_dir, what))
+               os.path.join(temp_dir, what))
   except OSError as e:
     # bogus error on my mac version?
     #   File "./build/tools/releasetools/img_from_target_files"
@@ -206,13 +212,23 @@
   if fs_config:
     image_props["fs_config"] = fs_config
   if block_list:
-    image_props["block_list"] = block_list
+    image_props["block_list"] = block_list.name
 
-  succ = build_image.BuildImage(os.path.join(input_dir, what),
-                                image_props, img)
+  succ = build_image.BuildImage(os.path.join(temp_dir, what),
+                                image_props, output_file.name)
   assert succ, "build " + what + ".img image failed"
 
-  return img
+  output_file.Write()
+  if block_list:
+    block_list.Write()
+
+  is_verity_partition = "verity_block_device" in image_props
+  verity_supported = image_props.get("verity") == "true"
+  if is_verity_partition and verity_supported:
+    adjusted_blocks_value = image_props.get("partition_size")
+    if adjusted_blocks_value:
+      adjusted_blocks_key = what + "_adjusted_partition_size"
+      info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
 
 
 def AddUserdata(output_zip, prefix="IMAGES/"):
@@ -224,8 +240,8 @@
   in OPTIONS.info_dict.
   """
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "userdata.img")
+  if os.path.exists(img.input_name):
     print("userdata.img already exists in %s, no need to rebuild..." % (
         prefix,))
     return
@@ -248,6 +264,7 @@
   # empty dir named "data", or a symlink to the DATA dir,
   # and build the image from that.
   temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   user_dir = os.path.join(temp_dir, "data")
   empty = (OPTIONS.info_dict.get("userdata_img_with_data") != "true")
   if empty:
@@ -258,8 +275,6 @@
     os.symlink(os.path.join(OPTIONS.input_tmp, "DATA"),
                user_dir)
 
-  img = tempfile.NamedTemporaryFile()
-
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
     image_props["fs_type"] = fstab["/data"].fs_type
@@ -267,20 +282,22 @@
   assert succ, "build userdata.img image failed"
 
   common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
-  common.ZipWrite(output_zip, img.name, prefix + "userdata.img")
-  img.close()
-  shutil.rmtree(temp_dir)
+  img.Write()
 
 
-def AddVBMeta(output_zip, boot_img_path, system_img_path, prefix="IMAGES/"):
+def AddVBMeta(output_zip, boot_img_path, system_img_path, vendor_img_path,
+              prefix="IMAGES/"):
   """Create a VBMeta image and store it in output_zip."""
-  _, img_file_name = tempfile.mkstemp()
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vbmeta.img")
   avbtool = os.getenv('AVBTOOL') or "avbtool"
   cmd = [avbtool, "make_vbmeta_image",
-         "--output", img_file_name,
+         "--output", img.name,
          "--include_descriptors_from_image", boot_img_path,
-         "--include_descriptors_from_image", system_img_path,
-         "--generate_dm_verity_cmdline_from_hashtree", system_img_path]
+         "--include_descriptors_from_image", system_img_path]
+  if vendor_img_path is not None:
+    cmd.extend(["--include_descriptors_from_image", vendor_img_path])
+  if OPTIONS.info_dict.get("system_root_image", None) == "true":
+    cmd.extend(["--setup_rootfs_from_kernel", system_img_path])
   common.AppendAVBSigningArgs(cmd)
   args = OPTIONS.info_dict.get("board_avb_make_vbmeta_image_args", None)
   if args and args.strip():
@@ -288,19 +305,19 @@
   p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   p.communicate()
   assert p.returncode == 0, "avbtool make_vbmeta_image failed"
-  common.ZipWrite(output_zip, img_file_name, prefix + "vbmeta.img")
+  img.Write()
 
 
 def AddPartitionTable(output_zip, prefix="IMAGES/"):
   """Create a partition table image and store it in output_zip."""
 
-  _, img_file_name = tempfile.mkstemp()
-  _, bpt_file_name = tempfile.mkstemp()
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.img")
+  bpt = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.bpt")
 
   # use BPTTOOL from environ, or "bpttool" if empty or not set.
   bpttool = os.getenv("BPTTOOL") or "bpttool"
-  cmd = [bpttool, "make_table", "--output_json", bpt_file_name,
-         "--output_gpt", img_file_name]
+  cmd = [bpttool, "make_table", "--output_json", bpt.name,
+         "--output_gpt", img.name]
   input_files_str = OPTIONS.info_dict["board_bpt_input_files"]
   input_files = input_files_str.split(" ")
   for i in input_files:
@@ -316,15 +333,15 @@
   p.communicate()
   assert p.returncode == 0, "bpttool make_table failed"
 
-  common.ZipWrite(output_zip, img_file_name, prefix + "partition-table.img")
-  common.ZipWrite(output_zip, bpt_file_name, prefix + "partition-table.bpt")
+  img.Write()
+  bpt.Write()
 
 
 def AddCache(output_zip, prefix="IMAGES/"):
   """Create an empty cache image and store it in output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "cache.img")
+  if os.path.exists(img.input_name):
     print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
     return
 
@@ -345,9 +362,9 @@
   # mkyaffs2image.  So we create a temp dir, and within it we create an
   # empty dir named "cache", and build the image from that.
   temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   user_dir = os.path.join(temp_dir, "cache")
   os.mkdir(user_dir)
-  img = tempfile.NamedTemporaryFile()
 
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
@@ -356,38 +373,41 @@
   assert succ, "build cache.img image failed"
 
   common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
-  common.ZipWrite(output_zip, img.name, prefix + "cache.img")
-  img.close()
-  os.rmdir(user_dir)
-  os.rmdir(temp_dir)
+  img.Write()
 
 
 def AddImagesToTargetFiles(filename):
-  OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
+  if os.path.isdir(filename):
+    OPTIONS.input_tmp = os.path.abspath(filename)
+    input_zip = None
+  else:
+    OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
 
   if not OPTIONS.add_missing:
-    for n in input_zip.namelist():
-      if n.startswith("IMAGES/"):
-        print("target_files appears to already contain images.")
-        sys.exit(1)
+    if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
+      print("target_files appears to already contain images.")
+      sys.exit(1)
 
-  try:
-    input_zip.getinfo("VENDOR/")
-    has_vendor = True
-  except KeyError:
-    has_vendor = False
+  has_vendor = os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR"))
+  has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
+                                                "SYSTEM_OTHER"))
 
-  has_system_other = "SYSTEM_OTHER/" in input_zip.namelist()
+  if input_zip:
+    OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
 
-  OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
-
-  common.ZipClose(input_zip)
-  output_zip = zipfile.ZipFile(filename, "a",
-                               compression=zipfile.ZIP_DEFLATED,
-                               allowZip64=True)
+    common.ZipClose(input_zip)
+    output_zip = zipfile.ZipFile(filename, "a",
+                                 compression=zipfile.ZIP_DEFLATED,
+                                 allowZip64=True)
+  else:
+    OPTIONS.info_dict = common.LoadInfoDict(filename, filename)
+    output_zip = None
+    images_dir = os.path.join(OPTIONS.input_tmp, "IMAGES")
+    if not os.path.isdir(images_dir):
+      os.makedirs(images_dir)
+    images_dir = None
 
   has_recovery = (OPTIONS.info_dict.get("no_recovery") != "true")
-  system_root_image = (OPTIONS.info_dict.get("system_root_image", None) == "true")
 
   def banner(s):
     print("\n\n++++ " + s + " ++++\n\n")
@@ -405,7 +425,10 @@
     boot_image = common.GetBootableImage(
         "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
     if boot_image:
-      boot_image.AddToZip(output_zip)
+      if output_zip:
+        boot_image.AddToZip(output_zip)
+      else:
+        boot_image.WriteToDir(OPTIONS.input_tmp)
 
   recovery_image = None
   if has_recovery:
@@ -421,7 +444,10 @@
       recovery_image = common.GetBootableImage(
           "IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
       if recovery_image:
-        recovery_image.AddToZip(output_zip)
+        if output_zip:
+          recovery_image.AddToZip(output_zip)
+        else:
+          recovery_image.WriteToDir(OPTIONS.input_tmp)
 
       banner("recovery (two-step image)")
       # The special recovery.img for two-step package use.
@@ -429,7 +455,10 @@
           "IMAGES/recovery-two-step.img", "recovery-two-step.img",
           OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
       if recovery_two_step_image:
-        recovery_two_step_image.AddToZip(output_zip)
+        if output_zip:
+          recovery_two_step_image.AddToZip(output_zip)
+        else:
+          recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
 
   banner("system")
   system_img_path = AddSystem(
@@ -452,7 +481,7 @@
   if OPTIONS.info_dict.get("board_avb_enable", None) == "true":
     banner("vbmeta")
     boot_contents = boot_image.WriteToTemp()
-    AddVBMeta(output_zip, boot_contents.name, system_img_path)
+    AddVBMeta(output_zip, boot_contents.name, system_img_path, vendor_img_path)
 
   # For devices using A/B update, copy over images from RADIO/ and/or
   # VENDOR_IMAGES/ to IMAGES/ and make sure we have all the needed
@@ -485,24 +514,39 @@
       img_vendor_dir = os.path.join(
         OPTIONS.input_tmp, "VENDOR_IMAGES")
       if os.path.exists(img_radio_path):
-        common.ZipWrite(output_zip, img_radio_path,
-                        os.path.join("IMAGES", img_name))
+        if output_zip:
+          common.ZipWrite(output_zip, img_radio_path,
+                          os.path.join("IMAGES", img_name))
+        else:
+          shutil.copy(img_radio_path, prebuilt_path)
       else:
         for root, _, files in os.walk(img_vendor_dir):
           if img_name in files:
-            common.ZipWrite(output_zip, os.path.join(root, img_name),
-              os.path.join("IMAGES", img_name))
+            if output_zip:
+              common.ZipWrite(output_zip, os.path.join(root, img_name),
+                os.path.join("IMAGES", img_name))
+            else:
+              shutil.copy(os.path.join(root, img_name), prebuilt_path)
             break
 
-      # Zip spec says: All slashes MUST be forward slashes.
-      img_path = 'IMAGES/' + img_name
-      assert img_path in output_zip.namelist(), "cannot find " + img_name
+      if output_zip:
+        # Zip spec says: All slashes MUST be forward slashes.
+        img_path = 'IMAGES/' + img_name
+        assert img_path in output_zip.namelist(), "cannot find " + img_name
+      else:
+        img_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
+        assert os.path.exists(img_path), "cannot find " + img_name
 
     if care_map_list:
       file_path = "META/care_map.txt"
-      common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+      if output_zip:
+        common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+      else:
+        with open(os.path.join(OPTIONS.input_tmp, file_path), 'w') as fp:
+          fp.write('\n'.join(care_map_list))
 
-  common.ZipClose(output_zip)
+  if output_zip:
+    common.ZipClose(output_zip)
 
 def main(argv):
   def option_handler(o, a):
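
With OutputFile and the directory checks above, the script now accepts
either a target_files zip (images are appended to the archive) or an
already-extracted directory (images are written under IMAGES/). Paths
illustrative:

    ./build/tools/releasetools/add_img_to_target_files.py dist/target_files.zip
    ./build/tools/releasetools/add_img_to_target_files.py out/extracted_target_files/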
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 433a010..e385866 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -24,8 +24,8 @@
 import os.path
 import re
 import subprocess
+import sys
 import threading
-import tempfile
 
 from collections import deque, OrderedDict
 from hashlib import sha1
@@ -35,69 +35,65 @@
 __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
 
 
-def compute_patch(src, tgt, imgdiff=False):
-  srcfd, srcfile = tempfile.mkstemp(prefix="src-")
-  tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
-  patchfd, patchfile = tempfile.mkstemp(prefix="patch-")
-  os.close(patchfd)
+def compute_patch(srcfile, tgtfile, imgdiff=False):
+  patchfile = common.MakeTempFile(prefix='patch-')
 
-  try:
-    with os.fdopen(srcfd, "wb") as f_src:
-      for p in src:
-        f_src.write(p)
+  cmd = ['imgdiff', '-z'] if imgdiff else ['bsdiff']
+  cmd.extend([srcfile, tgtfile, patchfile])
 
-    with os.fdopen(tgtfd, "wb") as f_tgt:
-      for p in tgt:
-        f_tgt.write(p)
-    try:
-      os.unlink(patchfile)
-    except OSError:
-      pass
-    if imgdiff:
-      p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile],
-                          stdout=open("/dev/null", "a"),
-                          stderr=subprocess.STDOUT)
-    else:
-      p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile])
+  # Not using common.Run(), which would otherwise dump all the bsdiff/imgdiff
+  # commands when OPTIONS.verbose is True - not useful for the case here, since
+  # they contain temp filenames only.
+  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  output, _ = p.communicate()
 
-    if p:
-      raise ValueError("diff failed: " + str(p))
+  if p.returncode != 0:
+    raise ValueError(output)
 
-    with open(patchfile, "rb") as f:
-      return f.read()
-  finally:
-    try:
-      os.unlink(srcfile)
-      os.unlink(tgtfile)
-      os.unlink(patchfile)
-    except OSError:
-      pass
+  with open(patchfile, 'rb') as f:
+    return f.read()
 
 
 class Image(object):
+  def RangeSha1(self, ranges):
+    raise NotImplementedError
+
   def ReadRangeSet(self, ranges):
     raise NotImplementedError
 
   def TotalSha1(self, include_clobbered_blocks=False):
     raise NotImplementedError
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise NotImplementedError
+
 
 class EmptyImage(Image):
   """A zero-length image."""
-  blocksize = 4096
-  care_map = RangeSet()
-  clobbered_blocks = RangeSet()
-  extended = RangeSet()
-  total_blocks = 0
-  file_map = {}
+
+  def __init__(self):
+    self.blocksize = 4096
+    self.care_map = RangeSet()
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+    self.total_blocks = 0
+    self.file_map = {}
+
+  def RangeSha1(self, ranges):
+    return sha1().hexdigest()
+
   def ReadRangeSet(self, ranges):
     return ()
+
   def TotalSha1(self, include_clobbered_blocks=False):
     # EmptyImage always carries empty clobbered_blocks, so
     # include_clobbered_blocks can be ignored.
     assert self.clobbered_blocks.size() == 0
     return sha1().hexdigest()
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise ValueError("Can't write data from EmptyImage to file")
+
 
 class DataImage(Image):
   """An image wrapped around a single string of data."""
@@ -160,23 +156,39 @@
     if clobbered_blocks:
       self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
 
+  def _GetRangeData(self, ranges):
+    for s, e in ranges:
+      yield self.data[s*self.blocksize:e*self.blocksize]
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
   def ReadRangeSet(self, ranges):
-    return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
+    return [self._GetRangeData(ranges)]
 
   def TotalSha1(self, include_clobbered_blocks=False):
     if not include_clobbered_blocks:
-      ranges = self.care_map.subtract(self.clobbered_blocks)
-      return sha1(self.ReadRangeSet(ranges)).hexdigest()
+      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
     else:
       return sha1(self.data).hexdigest()
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
+
 
 class Transfer(object):
-  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id):
+  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
+               src_sha1, style, by_id):
     self.tgt_name = tgt_name
     self.src_name = src_name
     self.tgt_ranges = tgt_ranges
     self.src_ranges = src_ranges
+    self.tgt_sha1 = tgt_sha1
+    self.src_sha1 = src_sha1
     self.style = style
     self.intact = (getattr(tgt_ranges, "monotonic", False) and
                    getattr(src_ranges, "monotonic", False))
@@ -251,6 +263,9 @@
 #      Implementations are free to break up the data into list/tuple
 #      elements in any way that is convenient.
 #
+#    RangeSha1(): a function that returns (as a hex string) the SHA-1
+#      hash of all the data in the specified range.
+#
 #    TotalSha1(): a function that returns (as a hex string) the SHA-1
 #      hash of all the data in the image (ie, all the blocks in the
 #      care_map minus clobbered_blocks, or including the clobbered
@@ -277,7 +292,7 @@
     self.touched_src_sha1 = None
     self.disable_imgdiff = disable_imgdiff
 
-    assert version in (1, 2, 3, 4)
+    assert version in (3, 4)
 
     self.tgt = tgt
     if src is None:
@@ -316,14 +331,11 @@
     self.FindVertexSequence()
     # Fix up the ordering dependencies that the sequence didn't
     # satisfy.
-    if self.version == 1:
-      self.RemoveBackwardEdges()
-    else:
-      self.ReverseBackwardEdges()
-      self.ImproveVertexSequence()
+    self.ReverseBackwardEdges()
+    self.ImproveVertexSequence()
 
     # Ensure the runtime stash size is under the limit.
-    if self.version >= 2 and common.OPTIONS.cache_size is not None:
+    if common.OPTIONS.cache_size is not None:
       self.ReviseStashSize()
 
     # Double-check our work.
@@ -332,15 +344,6 @@
     self.ComputePatches(prefix)
     self.WriteTransfers(prefix)
 
-  def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
-    data = source.ReadRangeSet(ranges)
-    ctx = sha1()
-
-    for p in data:
-      ctx.update(p)
-
-    return ctx.hexdigest()
-
   def WriteTransfers(self, prefix):
     def WriteSplitTransfers(out, style, target_blocks):
       """Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
@@ -361,13 +364,6 @@
     out = []
     total = 0
 
-    # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
-    # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
-    # be freed immediately after its use. So unlike 'stash_raw_id' (which
-    # uniquely identifies each pair of stashed blocks), the same 'stash_id'
-    # may be reused during the life cycle of an update (maintained by
-    # 'free_stash_ids' heap and 'next_stash_id').
-    #
     # In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
     # id. 'stashes' records the map from 'hash' to the ref count. The stash
     # will be freed only if the count decrements to zero.
@@ -375,36 +371,17 @@
     stashed_blocks = 0
     max_stashed_blocks = 0
 
-    if self.version == 2:
-      free_stash_ids = []
-      next_stash_id = 0
-
     for xf in self.transfers:
 
-      if self.version < 2:
-        assert not xf.stash_before
-        assert not xf.use_stash
-
-      for stash_raw_id, sr in xf.stash_before:
-        if self.version == 2:
-          assert stash_raw_id not in stashes
-          if free_stash_ids:
-            sid = heapq.heappop(free_stash_ids)
-          else:
-            sid = next_stash_id
-            next_stash_id += 1
-          stashes[stash_raw_id] = sid
-          stashed_blocks += sr.size()
-          out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+      for _, sr in xf.stash_before:
+        sh = self.src.RangeSha1(sr)
+        if sh in stashes:
+          stashes[sh] += 1
         else:
-          sh = self.HashBlocks(self.src, sr)
-          if sh in stashes:
-            stashes[sh] += 1
-          else:
-            stashes[sh] = 1
-            stashed_blocks += sr.size()
-            self.touched_src_ranges = self.touched_src_ranges.union(sr)
-            out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
+          stashes[sh] = 1
+          stashed_blocks += sr.size()
+          self.touched_src_ranges = self.touched_src_ranges.union(sr)
+          out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
 
       if stashed_blocks > max_stashed_blocks:
         max_stashed_blocks = stashed_blocks
@@ -412,75 +389,47 @@
       free_string = []
       free_size = 0
 
-      if self.version == 1:
-        src_str = xf.src_ranges.to_string_raw() if xf.src_ranges else ""
-      elif self.version >= 2:
+      #   <# blocks> <src ranges>
+      #     OR
+      #   <# blocks> <src ranges> <src locs> <stash refs...>
+      #     OR
+      #   <# blocks> - <stash refs...>
 
-        #   <# blocks> <src ranges>
-        #     OR
-        #   <# blocks> <src ranges> <src locs> <stash refs...>
-        #     OR
-        #   <# blocks> - <stash refs...>
+      size = xf.src_ranges.size()
+      src_str = [str(size)]
 
-        size = xf.src_ranges.size()
-        src_str = [str(size)]
+      unstashed_src_ranges = xf.src_ranges
+      mapped_stashes = []
+      for _, sr in xf.use_stash:
+        unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+        sh = self.src.RangeSha1(sr)
+        sr = xf.src_ranges.map_within(sr)
+        mapped_stashes.append(sr)
+        assert sh in stashes
+        src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
+          free_string.append("free %s\n" % (sh,))
+          free_size += sr.size()
+          stashes.pop(sh)
 
-        unstashed_src_ranges = xf.src_ranges
-        mapped_stashes = []
-        for stash_raw_id, sr in xf.use_stash:
-          unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
-          sh = self.HashBlocks(self.src, sr)
-          sr = xf.src_ranges.map_within(sr)
-          mapped_stashes.append(sr)
-          if self.version == 2:
-            sid = stashes.pop(stash_raw_id)
-            src_str.append("%d:%s" % (sid, sr.to_string_raw()))
-            # A stash will be used only once. We need to free the stash
-            # immediately after the use, instead of waiting for the automatic
-            # clean-up at the end. Because otherwise it may take up extra space
-            # and lead to OTA failures.
-            # Bug: 23119955
-            free_string.append("free %d\n" % (sid,))
-            free_size += sr.size()
-            heapq.heappush(free_stash_ids, sid)
-          else:
-            assert sh in stashes
-            src_str.append("%s:%s" % (sh, sr.to_string_raw()))
-            stashes[sh] -= 1
-            if stashes[sh] == 0:
-              free_string.append("free %s\n" % (sh,))
-              free_size += sr.size()
-              stashes.pop(sh)
-
-        if unstashed_src_ranges:
-          src_str.insert(1, unstashed_src_ranges.to_string_raw())
-          if xf.use_stash:
-            mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
-            src_str.insert(2, mapped_unstashed.to_string_raw())
-            mapped_stashes.append(mapped_unstashed)
-            self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
-        else:
-          src_str.insert(1, "-")
+      if unstashed_src_ranges:
+        src_str.insert(1, unstashed_src_ranges.to_string_raw())
+        if xf.use_stash:
+          mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+          src_str.insert(2, mapped_unstashed.to_string_raw())
+          mapped_stashes.append(mapped_unstashed)
           self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+      else:
+        src_str.insert(1, "-")
+        self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
 
-        src_str = " ".join(src_str)
+      src_str = " ".join(src_str)
 
-      # all versions:
+      # version 3+:
       #   zero <rangeset>
       #   new <rangeset>
       #   erase <rangeset>
-      #
-      # version 1:
-      #   bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
-      #   imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
-      #   move <src rangeset> <tgt rangeset>
-      #
-      # version 2:
-      #   bsdiff patchstart patchlen <tgt rangeset> <src_str>
-      #   imgdiff patchstart patchlen <tgt rangeset> <src_str>
-      #   move <tgt rangeset> <src_str>
-      #
-      # version 3:
       #   bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
       #   imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
       #   move hash <tgt rangeset> <src_str>
@@ -495,41 +444,6 @@
         assert xf.tgt_ranges
         assert xf.src_ranges.size() == tgt_size
         if xf.src_ranges != xf.tgt_ranges:
-          if self.version == 1:
-            out.append("%s %s %s\n" % (
-                xf.style,
-                xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
-          elif self.version == 2:
-            out.append("%s %s %s\n" % (
-                xf.style,
-                xf.tgt_ranges.to_string_raw(), src_str))
-          elif self.version >= 3:
-            # take into account automatic stashing of overlapping blocks
-            if xf.src_ranges.overlaps(xf.tgt_ranges):
-              temp_stash_usage = stashed_blocks + xf.src_ranges.size()
-              if temp_stash_usage > max_stashed_blocks:
-                max_stashed_blocks = temp_stash_usage
-
-            self.touched_src_ranges = self.touched_src_ranges.union(
-                xf.src_ranges)
-
-            out.append("%s %s %s %s\n" % (
-                xf.style,
-                self.HashBlocks(self.tgt, xf.tgt_ranges),
-                xf.tgt_ranges.to_string_raw(), src_str))
-          total += tgt_size
-      elif xf.style in ("bsdiff", "imgdiff"):
-        assert xf.tgt_ranges
-        assert xf.src_ranges
-        if self.version == 1:
-          out.append("%s %d %d %s %s\n" % (
-              xf.style, xf.patch_start, xf.patch_len,
-              xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
-        elif self.version == 2:
-          out.append("%s %d %d %s %s\n" % (
-              xf.style, xf.patch_start, xf.patch_len,
-              xf.tgt_ranges.to_string_raw(), src_str))
-        elif self.version >= 3:
           # take into account automatic stashing of overlapping blocks
           if xf.src_ranges.overlaps(xf.tgt_ranges):
             temp_stash_usage = stashed_blocks + xf.src_ranges.size()
@@ -539,12 +453,28 @@
           self.touched_src_ranges = self.touched_src_ranges.union(
               xf.src_ranges)
 
-          out.append("%s %d %d %s %s %s %s\n" % (
+          out.append("%s %s %s %s\n" % (
               xf.style,
-              xf.patch_start, xf.patch_len,
-              self.HashBlocks(self.src, xf.src_ranges),
-              self.HashBlocks(self.tgt, xf.tgt_ranges),
+              xf.tgt_sha1,
               xf.tgt_ranges.to_string_raw(), src_str))
+          total += tgt_size
+      elif xf.style in ("bsdiff", "imgdiff"):
+        assert xf.tgt_ranges
+        assert xf.src_ranges
+        # take into account automatic stashing of overlapping blocks
+        if xf.src_ranges.overlaps(xf.tgt_ranges):
+          temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+          if temp_stash_usage > max_stashed_blocks:
+            max_stashed_blocks = temp_stash_usage
+
+        self.touched_src_ranges = self.touched_src_ranges.union(xf.src_ranges)
+
+        out.append("%s %d %d %s %s %s %s\n" % (
+            xf.style,
+            xf.patch_start, xf.patch_len,
+            xf.src_sha1,
+            xf.tgt_sha1,
+            xf.tgt_ranges.to_string_raw(), src_str))
         total += tgt_size
       elif xf.style == "zero":
         assert xf.tgt_ranges
@@ -558,7 +488,7 @@
         out.append("".join(free_string))
         stashed_blocks -= free_size
 
-      if self.version >= 2 and common.OPTIONS.cache_size is not None:
+      if common.OPTIONS.cache_size is not None:
         # Sanity check: abort if we're going to need more stash space than
         # the allowed size (cache_size * threshold). There are two purposes
         # of having a threshold here. a) Part of the cache may have been
@@ -567,15 +497,13 @@
         cache_size = common.OPTIONS.cache_size
         stash_threshold = common.OPTIONS.stash_threshold
         max_allowed = cache_size * stash_threshold
-        assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
+        assert max_stashed_blocks * self.tgt.blocksize <= max_allowed, \
                'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
                    max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
                    self.tgt.blocksize, max_allowed, cache_size,
                    stash_threshold)
 
-    if self.version >= 3:
-      self.touched_src_sha1 = self.HashBlocks(
-          self.src, self.touched_src_ranges)
+    self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
 
     # Zero out extended blocks as a workaround for bug 20881595.
     if self.tgt.extended:
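
A worked example for the limit check above, with invented numbers: given a
cache_size of 400 MiB, a stash_threshold of 0.8 and a 4096-byte block size,
max_allowed is 320 MiB, so the plan may stash at most
320 * 1024 * 1024 / 4096 = 81,920 blocks before the assertion fires.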
@@ -603,39 +531,32 @@
 
     out.insert(0, "%d\n" % (self.version,))   # format version number
     out.insert(1, "%d\n" % (total,))
-    if self.version == 2:
-      # v2 only: after the total block count, we give the number of stash slots
-      # needed, and the maximum size needed (in blocks).
-      out.insert(2, str(next_stash_id) + "\n")
-      out.insert(3, str(max_stashed_blocks) + "\n")
-    elif self.version >= 3:
-      # v3+: the number of stash slots is unused.
-      out.insert(2, "0\n")
-      out.insert(3, str(max_stashed_blocks) + "\n")
+    # v3+: the number of stash slots is unused.
+    out.insert(2, "0\n")
+    out.insert(3, str(max_stashed_blocks) + "\n")
 
     with open(prefix + ".transfer.list", "wb") as f:
       for i in out:
         f.write(i)
 
-    if self.version >= 2:
-      self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
-      OPTIONS = common.OPTIONS
-      if OPTIONS.cache_size is not None:
-        max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
-        print("max stashed blocks: %d  (%d bytes), "
-              "limit: %d bytes (%.2f%%)\n" % (
-              max_stashed_blocks, self._max_stashed_size, max_allowed,
-              self._max_stashed_size * 100.0 / max_allowed))
-      else:
-        print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-              max_stashed_blocks, self._max_stashed_size))
+    self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+    OPTIONS = common.OPTIONS
+    if OPTIONS.cache_size is not None:
+      max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+      print("max stashed blocks: %d  (%d bytes), "
+            "limit: %d bytes (%.2f%%)\n" % (
+            max_stashed_blocks, self._max_stashed_size, max_allowed,
+            self._max_stashed_size * 100.0 / max_allowed))
+    else:
+      print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
+            max_stashed_blocks, self._max_stashed_size))
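
With the v2 branch gone, the header written by the out.insert() calls above
always has the same shape. For a hypothetical version-4 package updating
393,216 blocks with a peak stash of 81,920 blocks, the first four lines of
system.transfer.list would read:

    4
    393216
    0
    81920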
 
   def ReviseStashSize(self):
     print("Revising stash size...")
     stash_map = {}
 
     # Create the map between a stash and its def/use points. For example, for a
-    # given stash of (raw_id, sr), stashes[raw_id] = (sr, def_cmd, use_cmd).
+    # given stash of (raw_id, sr), stash_map[raw_id] = (sr, def_cmd, use_cmd).
     for xf in self.transfers:
       # Command xf defines (stores) all the stashes in stash_before.
       for stash_raw_id, sr in xf.stash_before:
@@ -656,10 +577,6 @@
     stashed_blocks = 0
     new_blocks = 0
 
-    if self.version == 2:
-      free_stash_ids = []
-      next_stash_id = 0
-
     # Now go through all the commands. Compute the required stash size on the
     # fly. If a command requires more stash than is available, it deletes the
     # stash by replacing the command that uses the stash with a "new" command
@@ -671,22 +588,9 @@
       for stash_raw_id, sr in xf.stash_before:
         # Check the post-command stashed_blocks.
         stashed_blocks_after = stashed_blocks
-        if self.version == 2:
-          assert stash_raw_id not in stashes
-          if free_stash_ids:
-            sid = heapq.heappop(free_stash_ids)
-          else:
-            sid = next_stash_id
-            next_stash_id += 1
-          stashes[stash_raw_id] = sid
+        sh = self.src.RangeSha1(sr)
+        if sh not in stashes:
           stashed_blocks_after += sr.size()
-        else:
-          sh = self.HashBlocks(self.src, sr)
-          if sh in stashes:
-            stashes[sh] += 1
-          else:
-            stashes[sh] = 1
-            stashed_blocks_after += sr.size()
 
         if stashed_blocks_after > max_allowed:
           # We cannot stash this one for a later command. Find out the command
@@ -695,11 +599,16 @@
           replaced_cmds.append(use_cmd)
           print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
         else:
+          # Update the stashes map.
+          if sh in stashes:
+            stashes[sh] += 1
+          else:
+            stashes[sh] = 1
           stashed_blocks = stashed_blocks_after
 
       # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
       # ComputePatches(), they both have the style of "diff".
-      if xf.style == "diff" and self.version >= 3:
+      if xf.style == "diff":
         assert xf.tgt_ranges and xf.src_ranges
         if xf.src_ranges.overlaps(xf.tgt_ranges):
           if stashed_blocks + xf.src_ranges.size() > max_allowed:
@@ -721,18 +630,13 @@
         cmd.ConvertToNew()
 
       # xf.use_stash may generate free commands.
-      for stash_raw_id, sr in xf.use_stash:
-        if self.version == 2:
-          sid = stashes.pop(stash_raw_id)
+      for _, sr in xf.use_stash:
+        sh = self.src.RangeSha1(sr)
+        assert sh in stashes
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
           stashed_blocks -= sr.size()
-          heapq.heappush(free_stash_ids, sid)
-        else:
-          sh = self.HashBlocks(self.src, sr)
-          assert sh in stashes
-          stashes[sh] -= 1
-          if stashes[sh] == 0:
-            stashed_blocks -= sr.size()
-            stashes.pop(sh)
+          stashes.pop(sh)
 
     num_of_bytes = new_blocks * self.tgt.blocksize
     print("  Total %d blocks (%d bytes) are packed as new blocks due to "
@@ -741,10 +645,10 @@
 
   def ComputePatches(self, prefix):
     print("Reticulating splines...")
-    diff_q = []
+    diff_queue = []
     patch_num = 0
     with open(prefix + ".new.dat", "wb") as new_f:
-      for xf in self.transfers:
+      for index, xf in enumerate(self.transfers):
         if xf.style == "zero":
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
           print("%10d %10d (%6.2f%%) %7s %s %s" % (
@@ -752,17 +656,13 @@
               str(xf.tgt_ranges)))
 
         elif xf.style == "new":
-          for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
-            new_f.write(piece)
+          self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
           print("%10d %10d (%6.2f%%) %7s %s %s" % (
               tgt_size, tgt_size, 100.0, xf.style,
               xf.tgt_name, str(xf.tgt_ranges)))
 
         elif xf.style == "diff":
-          src = self.src.ReadRangeSet(xf.src_ranges)
-          tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)
-
           # We can't compare src and tgt directly because they may have
           # the same content but be broken up into blocks differently, eg:
           #
@@ -771,20 +671,11 @@
           # We want those to compare equal, ideally without having to
           # actually concatenate the strings (these may be tens of
           # megabytes).
-
-          src_sha1 = sha1()
-          for p in src:
-            src_sha1.update(p)
-          tgt_sha1 = sha1()
-          tgt_size = 0
-          for p in tgt:
-            tgt_sha1.update(p)
-            tgt_size += len(p)
-
-          if src_sha1.digest() == tgt_sha1.digest():
+          if xf.src_sha1 == xf.tgt_sha1:
             # These are identical; we don't need to generate a patch,
             # just issue copy commands on the device.
             xf.style = "move"
+            tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
             if xf.src_ranges != xf.tgt_ranges:
               print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
                   tgt_size, tgt_size, 100.0, xf.style,
@@ -811,38 +702,74 @@
                        xf.tgt_name.split(".")[-1].lower()
                        in ("apk", "jar", "zip"))
             xf.style = "imgdiff" if imgdiff else "bsdiff"
-            diff_q.append((tgt_size, src, tgt, xf, patch_num))
+            diff_queue.append((index, imgdiff, patch_num))
             patch_num += 1
 
         else:
           assert False, "unknown style " + xf.style
 
-    if diff_q:
+    if diff_queue:
       if self.threads > 1:
         print("Computing patches (using %d threads)..." % (self.threads,))
       else:
         print("Computing patches...")
-      diff_q.sort()
 
-      patches = [None] * patch_num
+      diff_total = len(diff_queue)
+      patches = [None] * diff_total
+      error_messages = []
+      if sys.stdout.isatty():
+        global diff_done
+        diff_done = 0
 
-      # TODO: Rewrite with multiprocessing.ThreadPool?
+      # Using multiprocessing doesn't give additional benefits, due to the
+      # pattern of the code. The diffing work is done by subprocess.call, which
+      # already runs in a separate process (not affected much by the GIL -
+      # Global Interpreter Lock). Using multiprocessing would also require
+      # either a) writing the diff input files in the main process before
+      # forking, or b) reopening the image file (SparseImage) in the worker
+      # processes. Neither of these would further improve the performance.
       lock = threading.Lock()
       def diff_worker():
         while True:
           with lock:
-            if not diff_q:
+            if not diff_queue:
               return
-            tgt_size, src, tgt, xf, patchnum = diff_q.pop()
-          patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
-          size = len(patch)
+            xf_index, imgdiff, patch_index = diff_queue.pop()
+
+          xf = self.transfers[xf_index]
+          src_ranges = xf.src_ranges
+          tgt_ranges = xf.tgt_ranges
+
+          # Needs lock since WriteRangeDataToFd() is stateful (calling seek).
           with lock:
-            patches[patchnum] = (patch, xf)
-            print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
-                size, tgt_size, size * 100.0 / tgt_size, xf.style,
-                xf.tgt_name if xf.tgt_name == xf.src_name else (
-                    xf.tgt_name + " (from " + xf.src_name + ")"),
-                str(xf.tgt_ranges), str(xf.src_ranges)))
+            src_file = common.MakeTempFile(prefix="src-")
+            with open(src_file, "wb") as fd:
+              self.src.WriteRangeDataToFd(src_ranges, fd)
+
+            tgt_file = common.MakeTempFile(prefix="tgt-")
+            with open(tgt_file, "wb") as fd:
+              self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
+
+          try:
+            patch = compute_patch(src_file, tgt_file, imgdiff)
+          except ValueError as e:
+            with lock:
+              error_messages.append(
+                  "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
+                      "imgdiff" if imgdiff else "bsdiff",
+                      xf.tgt_name if xf.tgt_name == xf.src_name else
+                          xf.tgt_name + " (from " + xf.src_name + ")",
+                      xf.tgt_ranges, xf.src_ranges, e.message))
+            # 'patch' is undefined on failure; skip the bookkeeping below.
+            continue
+
+          with lock:
+            patches[patch_index] = (xf_index, patch)
+            if sys.stdout.isatty():
+              global diff_done
+              diff_done += 1
+              progress = diff_done * 100 / diff_total
+              # '\033[K' is to clear to EOL.
+              print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+              sys.stdout.flush()
 
       threads = [threading.Thread(target=diff_worker)
                  for _ in range(self.threads)]
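
A minimal sketch of the worker pattern above, assuming (as the comment
notes) that the expensive work happens in a child process, so plain threads
are not serialized by the GIL while they block on it ("true" is a stand-in
for the bsdiff/imgdiff invocation and assumes a POSIX system):

    import subprocess
    import threading

    work_queue = list(range(8))   # invented work items
    lock = threading.Lock()

    def worker():
      while True:
        with lock:                # only the queue manipulation needs the lock
          if not work_queue:
            return
          work_queue.pop()
        # The GIL is released while this thread waits on the child process.
        subprocess.call(["true"])

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for th in threads:
      th.start()
    while threads:
      threads.pop().join()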
@@ -850,16 +777,33 @@
         th.start()
       while threads:
         threads.pop().join()
+
+      if sys.stdout.isatty():
+        print('\n')
+
+      if error_messages:
+        print('\n'.join(error_messages))
+        sys.exit(1)
     else:
       patches = []
 
-    p = 0
-    with open(prefix + ".patch.dat", "wb") as patch_f:
-      for patch, xf in patches:
-        xf.patch_start = p
+    offset = 0
+    with open(prefix + ".patch.dat", "wb") as patch_fd:
+      for index, patch in patches:
+        xf = self.transfers[index]
         xf.patch_len = len(patch)
-        patch_f.write(patch)
-        p += len(patch)
+        xf.patch_start = offset
+        offset += xf.patch_len
+        patch_fd.write(patch)
+
+        if common.OPTIONS.verbose:
+          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+          print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
+                xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+                xf.style,
+                xf.tgt_name if xf.tgt_name == xf.src_name else (
+                    xf.tgt_name + " (from " + xf.src_name + ")"),
+                xf.tgt_ranges, xf.src_ranges))
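
Concretely (invented sizes): three patches of 100, 250 and 80 bytes written
in this order get (patch_start, patch_len) of (0, 100), (100, 250) and
(350, 80), so system.patch.dat is a simple concatenation and the transfer
list addresses each patch by offset and length.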
 
   def AssertSequenceGood(self):
     # Simulate the sequences of transfers we will output, and check that:
@@ -874,9 +818,8 @@
       # Check that the input blocks for this transfer haven't yet been touched.
 
       x = xf.src_ranges
-      if self.version >= 2:
-        for _, sr in xf.use_stash:
-          x = x.subtract(sr)
+      for _, sr in xf.use_stash:
+        x = x.subtract(sr)
 
       for s, e in x:
         # Source image could be larger. Don't check the blocks that are in the
@@ -1207,7 +1150,9 @@
       # Change nothing for small files.
       if (tgt_ranges.size() <= max_blocks_per_transfer and
           src_ranges.size() <= max_blocks_per_transfer):
-        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
         return
 
       while (tgt_ranges.size() > max_blocks_per_transfer and
@@ -1217,8 +1162,9 @@
         tgt_first = tgt_ranges.first(max_blocks_per_transfer)
         src_first = src_ranges.first(max_blocks_per_transfer)
 
-        Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
-                 by_id)
+        Transfer(tgt_split_name, src_split_name, tgt_first, src_first,
+                 self.tgt.RangeSha1(tgt_first), self.src.RangeSha1(src_first),
+                 style, by_id)
 
         tgt_ranges = tgt_ranges.subtract(tgt_first)
         src_ranges = src_ranges.subtract(src_first)
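
A worked example of this splitting, with an invented limit: if
max_blocks_per_transfer were 1,024, a 2,500-block file system/app/Foo.apk
would be emitted as three transfers of 1,024, 1,024 and 452 blocks, named
along the lines of system/app/Foo.apk-0, -1 and -2, each carrying the SHA-1
of its own target and source slice.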
@@ -1230,8 +1176,9 @@
         assert tgt_ranges.size() and src_ranges.size()
         tgt_split_name = "%s-%d" % (tgt_name, pieces)
         src_split_name = "%s-%d" % (src_name, pieces)
-        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
-                 by_id)
+        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
 
     def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
                     split=False):
@@ -1240,7 +1187,9 @@
       # We specialize diff transfers only (which covers bsdiff/imgdiff/move);
       # otherwise add the Transfer() as is.
       if style != "diff" or not split:
-        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
         return
 
       # Handle .odex files specially to analyze the block-wise difference. If
@@ -1321,7 +1270,7 @@
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       b = os.path.basename(tgt_fn)
@@ -1329,7 +1278,7 @@
         # Look for an exact basename match in the source.
         src_fn = self.src_basenames[b]
         AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       b = re.sub("[0-9]+", "#", b)
@@ -1340,7 +1289,7 @@
         # that get bumped.)
         src_fn = self.src_numpatterns[b]
         AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 73cd07e..16c8018 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -25,7 +25,6 @@
 import re
 import subprocess
 import sys
-import commands
 import common
 import shlex
 import shutil
@@ -52,29 +51,24 @@
   return (output, p.returncode)
 
 def GetVerityFECSize(partition_size):
-  cmd = "fec -s %d" % partition_size
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["fec", "-s", str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
 
 def GetVerityTreeSize(partition_size):
-  cmd = "build_verity_tree -s %d"
-  cmd %= partition_size
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["build_verity_tree", "-s", str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
 
 def GetVerityMetadataSize(partition_size):
-  cmd = "system/extras/verity/build_verity_metadata.py size %d"
-  cmd %= partition_size
-
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["system/extras/verity/build_verity_metadata.py", "size",
+         str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
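
These helpers all route through the module's RunCommand() (its tail is
visible at the top of this file's diff). A minimal compatible sketch,
assuming only the (output, exit code) contract used above:

    import subprocess

    def RunCommand(cmd):
      """Runs an argument list; returns (combined output, exit code).

      Taking a list with shell=False avoids the quoting pitfalls of the
      old commands.getstatusoutput() string-based path.
      """
      p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
      output, _ = p.communicate()
      return output, p.returncode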
 
@@ -191,21 +185,19 @@
 
 def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
                    padding_size):
-  cmd = "fec -e -p %d %s %s %s" % (padding_size, sparse_image_path,
-                                   verity_path, verity_fec_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+         verity_path, verity_fec_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build FEC data! Error: %s" % output
     return False
   return True
 
 def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
-  cmd = "build_verity_tree -A %s %s %s" % (
-      FIXED_SALT, sparse_image_path, verity_image_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+         verity_image_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build verity tree! Error: %s" % output
     return False
   root, salt = output.split()
@@ -215,16 +207,13 @@
 
 def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                         block_device, signer_path, key, signer_args):
-  cmd_template = (
-      "system/extras/verity/build_verity_metadata.py build " +
-      "%s %s %s %s %s %s %s")
-  cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
-                        block_device, signer_path, key)
+  cmd = ["system/extras/verity/build_verity_metadata.py", "build",
+         str(image_size), verity_metadata_path, root_hash, salt, block_device,
+         signer_path, key]
   if signer_args:
-    cmd += " --signer_args=\"%s\"" % (' '.join(signer_args),)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+    cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build verity metadata! Error: %s" % output
     return False
   return True
@@ -238,22 +227,19 @@
   Returns:
     True on success, False on failure.
   """
-  cmd = "append2simg %s %s"
-  cmd %= (sparse_image_path, unsparse_image_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "%s: %s" % (error_message, output)
     return False
   return True
 
 def Append(target, file_to_append, error_message):
-  cmd = 'cat %s >> %s' % (file_to_append, target)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print "%s: %s" % (error_message, output)
-    return False
+  print "appending %s to %s" % (file_to_append, target)
+  with open(target, "a") as out_file:
+    with open(file_to_append, "r") as input_file:
+      for line in input_file:
+        out_file.write(line)
   return True
 
 def BuildVerifiedImage(data_image_path, verity_image_path,
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 0da61b1..548b619 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -104,7 +104,7 @@
 
   # Get the signature from the input package.
   signature = package_bytes[signature_start:-6]
-  sig_file = common.MakeTempFile(prefix='sig-', suffix='')
+  sig_file = common.MakeTempFile(prefix='sig-')
   with open(sig_file, 'wb') as f:
     f.write(signature)
 
@@ -116,12 +116,12 @@
 
   digest_line = sig.strip().split('\n')[-1]
   digest_string = digest_line.split(':')[3]
-  digest_file = common.MakeTempFile(prefix='digest-', suffix='')
+  digest_file = common.MakeTempFile(prefix='digest-')
   with open(digest_file, 'wb') as f:
     f.write(digest_string.decode('hex'))
 
   # Verify the digest by outputting the decrypted result in ASN.1 structure.
-  decrypted_file = common.MakeTempFile(prefix='decrypted-', suffix='')
+  decrypted_file = common.MakeTempFile(prefix='decrypted-')
   cmd = ['openssl', 'rsautl', '-verify', '-certin', '-inkey', cert,
          '-in', digest_file, '-out', decrypted_file]
   p1 = common.Run(cmd, stdout=subprocess.PIPE)
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 3048488..f9aa4fa 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -235,7 +235,7 @@
     self.certmap = None
 
   def LoadZipFile(self, filename):
-    d, z = common.UnzipTemp(filename, '*.apk')
+    d, z = common.UnzipTemp(filename, ['*.apk'])
     try:
       self.apks = {}
       self.apks_by_basename = {}
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 7b3e9ba..e200f9f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -146,33 +146,14 @@
       except IOError as e:
         if e.errno == errno.ENOENT:
           raise KeyError(fn)
-  d = {}
+
   try:
     d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
   except KeyError:
-    # ok if misc_info.txt doesn't exist
-    pass
+    raise ValueError("can't find META/misc_info.txt in input target-files")
 
-  # backwards compatibility: These values used to be in their own
-  # files.  Look for them, in case we're processing an old
-  # target_files zip.
-
-  if "recovery_api_version" not in d:
-    try:
-      d["recovery_api_version"] = read_helper(
-          "META/recovery-api-version.txt").strip()
-    except KeyError:
-      raise ValueError("can't find recovery API version in input target-files")
-
-  if "tool_extensions" not in d:
-    try:
-      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
-    except KeyError:
-      # ok if extensions don't exist
-      pass
-
-  if "fstab_version" not in d:
-    d["fstab_version"] = "1"
+  assert "recovery_api_version" in d
+  assert "fstab_version" in d
 
   # A few properties are stored as links to the files in the out/ directory.
   # It works fine with the build system. However, they are no longer available
@@ -268,6 +249,7 @@
   d["build.prop"] = LoadBuildProp(read_helper)
   return d
 
+
 def LoadBuildProp(read_helper):
   try:
     data = read_helper("SYSTEM/build.prop")
@@ -276,6 +258,7 @@
     data = ""
   return LoadDictionaryFromLines(data.split("\n"))
 
+
 def LoadDictionaryFromLines(lines):
   d = {}
   for line in lines:
@@ -287,15 +270,15 @@
       d[name] = value
   return d
 
+
 def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                       system_root_image=False):
   class Partition(object):
-    def __init__(self, mount_point, fs_type, device, length, device2, context):
+    def __init__(self, mount_point, fs_type, device, length, context):
       self.mount_point = mount_point
       self.fs_type = fs_type
       self.device = device
       self.length = length
-      self.device2 = device2
       self.context = context
 
   try:
@@ -304,81 +287,44 @@
     print("Warning: could not find {}".format(recovery_fstab_path))
     data = ""
 
-  if fstab_version == 1:
-    d = {}
-    for line in data.split("\n"):
-      line = line.strip()
-      if not line or line.startswith("#"):
-        continue
-      pieces = line.split()
-      if not 3 <= len(pieces) <= 4:
-        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
-      options = None
-      if len(pieces) >= 4:
-        if pieces[3].startswith("/"):
-          device2 = pieces[3]
-          if len(pieces) >= 5:
-            options = pieces[4]
-        else:
-          device2 = None
-          options = pieces[3]
+  assert fstab_version == 2
+
+  d = {}
+  for line in data.split("\n"):
+    line = line.strip()
+    if not line or line.startswith("#"):
+      continue
+
+    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
+    pieces = line.split()
+    if len(pieces) != 5:
+      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
+
+    # Ignore entries that are managed by vold.
+    options = pieces[4]
+    if "voldmanaged=" in options:
+      continue
+
+    # It's a good line, parse it.
+    length = 0
+    options = options.split(",")
+    for i in options:
+      if i.startswith("length="):
+        length = int(i[7:])
       else:
-        device2 = None
-
-      mount_point = pieces[0]
-      length = 0
-      if options:
-        options = options.split(",")
-        for i in options:
-          if i.startswith("length="):
-            length = int(i[7:])
-          else:
-            print("%s: unknown option \"%s\"" % (mount_point, i))
-
-      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
-                                 device=pieces[2], length=length,
-                                 device2=device2)
-
-  elif fstab_version == 2:
-    d = {}
-    for line in data.split("\n"):
-      line = line.strip()
-      if not line or line.startswith("#"):
-        continue
-      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
-      pieces = line.split()
-      if len(pieces) != 5:
-        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
-
-      # Ignore entries that are managed by vold
-      options = pieces[4]
-      if "voldmanaged=" in options:
+        # Ignore all unknown options in the unified fstab.
         continue
 
-      # It's a good line, parse it
-      length = 0
-      options = options.split(",")
-      for i in options:
-        if i.startswith("length="):
-          length = int(i[7:])
-        else:
-          # Ignore all unknown options in the unified fstab
-          continue
+    mount_flags = pieces[3]
+    # Honor the SELinux context if present.
+    context = None
+    for i in mount_flags.split(","):
+      if i.startswith("context="):
+        context = i
 
-      mount_flags = pieces[3]
-      # Honor the SELinux context if present.
-      context = None
-      for i in mount_flags.split(","):
-        if i.startswith("context="):
-          context = i
-
-      mount_point = pieces[1]
-      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
-                                 device=pieces[0], length=length,
-                                 device2=None, context=context)
-
-  else:
-    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
+    mount_point = pieces[1]
+    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
+                               device=pieces[0], length=length, context=context)
 
   # / is used for the system mount point when the root directory is included in
   # system. Other areas assume system is always at "/system" so point /system
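
For illustration, a hypothetical v2 recovery.fstab entry such as

    /dev/block/platform/soc/by-name/system  /system  ext4  ro,context=u:object_r:system_file:s0  wait,length=-32768

parses to a Partition with mount_point="/system", fs_type="ext4",
device="/dev/block/platform/soc/by-name/system", length=-32768 (from the
fs_mgr flags) and context="context=u:object_r:system_file:s0" (from the
mount flags); an entry with voldmanaged= among its fs_mgr flags is skipped
entirely.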
@@ -518,7 +464,13 @@
   elif info_dict.get("vboot", None):
     path = "/" + os.path.basename(sourcedir).lower()
     img_keyblock = tempfile.NamedTemporaryFile()
-    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
+    # We have switched from the prebuilt futility binary to the tool
+    # (futility-host) built from source. Override the setting in the old
+    # TF.zip.
+    futility = info_dict["futility"]
+    if futility.startswith("prebuilts/"):
+      futility = "futility-host"
+    cmd = [info_dict["vboot_signer_cmd"], futility,
            img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
            info_dict["vboot_key"] + ".vbprivk",
            info_dict["vboot_subkey"] + ".vbprivk",
@@ -612,7 +564,7 @@
   def unzip_to_dir(filename, dirname):
     cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
     if pattern is not None:
-      cmd.append(pattern)
+      cmd.extend(pattern)
     p = Run(cmd, stdout=subprocess.PIPE)
     p.communicate()
     if p.returncode != 0:
@@ -920,7 +872,7 @@
   return args
 
 
-def MakeTempFile(prefix=None, suffix=None):
+def MakeTempFile(prefix='tmp', suffix=''):
   """Make a temp file and add it to the list of things to be deleted
   when Cleanup() is called.  Return the filename."""
   fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
@@ -1239,6 +1191,10 @@
     t.flush()
     return t
 
+  def WriteToDir(self, d):
+    with open(os.path.join(d, self.name), "wb") as fp:
+      fp.write(self.data)
+
   def AddToZip(self, z, compression=None):
     ZipWriteStr(z, self.name, self.data, compress_type=compression)
 
@@ -1384,6 +1340,7 @@
         version = max(
             int(i) for i in
             OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    assert version >= 3
     self.version = version
 
     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
@@ -1448,7 +1405,7 @@
 
     # incremental OTA
     else:
-      if touched_blocks_only and self.version >= 3:
+      if touched_blocks_only:
         ranges = self.touched_src_ranges
         expected_sha1 = self.touched_src_sha1
       else:
@@ -1460,23 +1417,12 @@
         return
 
       ranges_str = ranges.to_string_raw()
-      if self.version >= 4:
-        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
-                            'block_image_verify("%s", '
-                            'package_extract_file("%s.transfer.list"), '
-                            '"%s.new.dat", "%s.patch.dat")) then') % (
-                            self.device, ranges_str, expected_sha1,
-                            self.device, partition, partition, partition))
-      elif self.version == 3:
-        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
-                            'block_image_verify("%s", '
-                            'package_extract_file("%s.transfer.list"), '
-                            '"%s.new.dat", "%s.patch.dat")) then') % (
-                            self.device, ranges_str, expected_sha1,
-                            self.device, partition, partition, partition))
-      else:
-        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
-                           self.device, ranges_str, self.src.TotalSha1()))
+      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
+                          'block_image_verify("%s", '
+                          'package_extract_file("%s.transfer.list"), '
+                          '"%s.new.dat", "%s.patch.dat")) then') % (
+                          self.device, ranges_str, expected_sha1,
+                          self.device, partition, partition, partition))
       script.Print('Verified %s image...' % (partition,))
       script.AppendExtra('else')
 
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 3028b2a..2a9a417 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -77,26 +77,28 @@
     with temporary=True) to this one."""
     self.script.extend(other.script)
 
-  def AssertOemProperty(self, name, value):
-    """Assert that a property on the OEM paritition matches a value."""
+  def AssertOemProperty(self, name, values):
+    """Assert that a property on the OEM paritition matches allowed values."""
     if not name:
       raise ValueError("must specify an OEM property")
-    if not value:
+    if not values:
       raise ValueError("must specify the OEM value")
+    get_prop_command = None
     if common.OPTIONS.oem_no_mount:
-      cmd = ('getprop("{name}") == "{value}" || '
-             'abort("E{code}: This package expects the value \\"{value}\\" for '
-             '\\"{name}\\"; this has value \\"" + '
-             'getprop("{name}") + "\\".");').format(
-                 code=common.ErrorCode.OEM_PROP_MISMATCH,
-                 name=name, value=value)
+      get_prop_command = 'getprop("%s")' % name
     else:
-      cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
-             'abort("E{code}: This package expects the value \\"{value}\\" for '
-             '\\"{name}\\" on the OEM partition; this has value \\"" + '
-             'file_getprop("/oem/oem.prop", "{name}") + "\\".");').format(
-                 code=common.ErrorCode.OEM_PROP_MISMATCH,
-                 name=name, value=value)
+      get_prop_command = 'file_getprop("/oem/oem.prop", "%s")' % name
+
+    cmd = ''
+    for value in values:
+      cmd += '%s == "%s" || ' % (get_prop_command, value)
+    cmd += (
+        'abort("E{code}: This package expects the value \\"{values}\\" for '
+        '\\"{name}\\"; this has value \\"" + '
+        '{get_prop_command} + "\\".");').format(
+            code=common.ErrorCode.OEM_PROP_MISMATCH,
+            get_prop_command=get_prop_command, name=name,
+            values='\\" or \\"'.join(values))
     self.script.append(cmd)
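
For two allowed values (invented here) of a property ro.oem.sku, the
generated edify fragment reads (shown wrapped; the script emits it as a
single statement, and the oem_no_mount variant swaps in
file_getprop("/oem/oem.prop", ...)):

    getprop("ro.oem.sku") == "sku1" || getprop("ro.oem.sku") == "sku2" ||
    abort("E<code>: This package expects the value \"sku1\" or \"sku2\" for
    \"ro.oem.sku\"; this has value \"" + getprop("ro.oem.sku") + "\".");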
 
   def AssertSomeFingerprint(self, *fp):
@@ -275,36 +277,6 @@
 
     self.script.append('wipe_block_device("%s", %s);' % (device, size))
 
-  def DeleteFiles(self, file_list):
-    """Delete all files in file_list."""
-    if not file_list:
-      return
-    cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
-    self.script.append(self.WordWrap(cmd))
-
-  def DeleteFilesIfNotMatching(self, file_list):
-    """Delete the file in file_list if not matching the checksum."""
-    if not file_list:
-      return
-    for name, sha1 in file_list:
-      cmd = ('sha1_check(read_file("{name}"), "{sha1}") || '
-             'delete("{name}");'.format(name=name, sha1=sha1))
-      self.script.append(self.WordWrap(cmd))
-
-  def RenameFile(self, srcfile, tgtfile):
-    """Moves a file from one location to another."""
-    if self.info.get("update_rename_support", False):
-      self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
-    else:
-      raise ValueError("Rename not supported by update binary")
-
-  def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
-    """Prepend an action with an apply_patch_check in order to
-       skip the action if the file exists.  Used when a patch
-       is later renamed."""
-    cmd = ('sha1_check(read_file("%s"), %s) ||' % (tgtfile, tgtsha1))
-    self.script.append(self.WordWrap(cmd))
-
   def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
     """Apply binary patches (in *patchpairs) to the given srcfile to
     produce tgtfile (which may be "-" to indicate overwriting the
@@ -341,48 +313,6 @@
         raise ValueError(
             "don't know how to write \"%s\" partitions" % p.fs_type)
 
-  def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
-    """Set file ownership and permissions."""
-    if not self.info.get("use_set_metadata", False):
-      self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
-    else:
-      if capabilities is None:
-        capabilities = "0x0"
-      cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
-          '"capabilities", %s' % (fn, uid, gid, mode, capabilities)
-      if selabel is not None:
-        cmd += ', "selabel", "%s"' % selabel
-      cmd += ');'
-      self.script.append(cmd)
-
-  def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
-                              capabilities):
-    """Recursively set path ownership and permissions."""
-    if not self.info.get("use_set_metadata", False):
-      self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
-                         % (uid, gid, dmode, fmode, fn))
-    else:
-      if capabilities is None:
-        capabilities = "0x0"
-      cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
-          '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
-          % (fn, uid, gid, dmode, fmode, capabilities)
-      if selabel is not None:
-        cmd += ', "selabel", "%s"' % selabel
-      cmd += ');'
-      self.script.append(cmd)
-
-  def MakeSymlinks(self, symlink_list):
-    """Create symlinks, given a list of (dest, link) pairs."""
-    by_dest = {}
-    for d, l in symlink_list:
-      by_dest.setdefault(d, []).append(l)
-
-    for dest, links in sorted(by_dest.iteritems()):
-      cmd = ('symlink("%s", ' % (dest,) +
-             ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
-      self.script.append(self.WordWrap(cmd))
-
   def AppendExtra(self, extra):
     """Append text verbatim to the output script."""
     self.script.append(extra)
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index bad3f4c..f75bb96 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -50,9 +50,10 @@
       Remount and verify the checksums of the files written to the
       system and vendor (if used) partitions.  Incremental builds only.
 
-  -o  (--oem_settings)  <file>
-      Use the file to specify the expected OEM-specific properties
-      on the OEM partition of the intended device.
+  -o  (--oem_settings)  <main_file[,additional_files...]>
+      Comma-separated list of files used to specify the expected OEM-specific
+      properties on the OEM partition of the intended device. Multiple
+      expected values can be supplied by providing multiple files.
 
   --oem_no_mount
       For devices with OEM-specific properties but without an OEM partition,
@@ -70,7 +71,19 @@
       will be replaced by "ota-downgrade=yes" in the metadata file. A data
       wipe will always be enforced, so "ota-wipe=yes" will also be included in
       the metadata file. The update-binary in the source build will be used in
-      the OTA package, unless --binary flag is specified.
+      the OTA package, unless the --binary flag is specified. Please also see
+      the notes on --override_timestamp below.
+
+  --override_timestamp
+      Intentionally generate an incremental OTA that updates from a newer
+      build to an older one (based on timestamp comparison), by overriding the
+      timestamp in package metadata. This differs from the --downgrade flag:
+      we know for sure this is NOT an actual downgrade, but the two builds
+      were cut in reverse order. A legitimate use case is that we cut a new
+      build C (after having A and B), but want to enforce an update path of
+      A -> C -> B. Specifying --downgrade may not help, since that would
+      enforce a data wipe for the C -> B update. The value of "post-timestamp"
+      will be set to the newer timestamp plus one, so that the package can be
+      pushed and applied.
 
   -e  (--extra_script)  <file>
       Insert the contents of file at the end of the update script.
@@ -81,9 +94,10 @@
       using the new recovery (new kernel, etc.).
 
   --block
-      Generate a block-based OTA if possible.  Will fall back to a
-      file-based OTA if the target_files is older and doesn't support
-      block-based OTAs.
+      Generate a block-based OTA for a non-A/B device. Support for file-based
+      OTAs has been deprecated since O; block-based OTAs are used by default
+      for all non-A/B devices. This flag is kept only to avoid breaking
+      existing callers.
 
   -b  (--binary)  <file>
       Use the given binary as the update-binary in the output package,
@@ -126,8 +140,9 @@
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
+import copy
 import multiprocessing
-import os
+import os.path
 import subprocess
 import shlex
 import tempfile
@@ -141,18 +156,17 @@
 OPTIONS.package_key = None
 OPTIONS.incremental_source = None
 OPTIONS.verify = False
-OPTIONS.require_verbatim = set()
-OPTIONS.prohibit_verbatim = set(("system/build.prop",))
 OPTIONS.patch_threshold = 0.95
 OPTIONS.wipe_user_data = False
 OPTIONS.downgrade = False
+OPTIONS.timestamp = False
 OPTIONS.extra_script = None
 OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
 if OPTIONS.worker_threads == 0:
   OPTIONS.worker_threads = 1
 OPTIONS.two_step = False
 OPTIONS.no_signing = False
-OPTIONS.block_based = False
+OPTIONS.block_based = True
 OPTIONS.updater_binary = None
 OPTIONS.oem_source = None
 OPTIONS.oem_no_mount = False
@@ -166,273 +180,10 @@
 OPTIONS.log_diff = None
 OPTIONS.payload_signer = None
 OPTIONS.payload_signer_args = []
+OPTIONS.extracted_input = None
 
-def MostPopularKey(d, default):
-  """Given a dict, return the key corresponding to the largest
-  value.  Returns 'default' if the dict is empty."""
-  x = [(v, k) for (k, v) in d.iteritems()]
-  if not x:
-    return default
-  x.sort()
-  return x[-1][1]
-
-
-def IsSymlink(info):
-  """Return true if the zipfile.ZipInfo object passed in represents a
-  symlink."""
-  return (info.external_attr >> 16) & 0o770000 == 0o120000
-
-def IsRegular(info):
-  """Return true if the zipfile.ZipInfo object passed in represents a
-  regular file."""
-  return (info.external_attr >> 16) & 0o770000 == 0o100000
-
-def ClosestFileMatch(src, tgtfiles, existing):
-  """Returns the closest file match between a source file and list
-     of potential matches.  The exact filename match is preferred,
-     then the sha1 is searched for, and finally a file with the same
-     basename is evaluated.  Rename support in the updater-binary is
-     required for the latter checks to be used."""
-
-  result = tgtfiles.get("path:" + src.name)
-  if result is not None:
-    return result
-
-  if not OPTIONS.target_info_dict.get("update_rename_support", False):
-    return None
-
-  if src.size < 1000:
-    return None
-
-  result = tgtfiles.get("sha1:" + src.sha1)
-  if result is not None and existing.get(result.name) is None:
-    return result
-  result = tgtfiles.get("file:" + src.name.split("/")[-1])
-  if result is not None and existing.get(result.name) is None:
-    return result
-  return None
-
-class ItemSet(object):
-  def __init__(self, partition, fs_config):
-    self.partition = partition
-    self.fs_config = fs_config
-    self.ITEMS = {}
-
-  def Get(self, name, is_dir=False):
-    if name not in self.ITEMS:
-      self.ITEMS[name] = Item(self, name, is_dir=is_dir)
-    return self.ITEMS[name]
-
-  def GetMetadata(self, input_zip):
-    # The target_files contains a record of what the uid,
-    # gid, and mode are supposed to be.
-    output = input_zip.read(self.fs_config)
-
-    for line in output.split("\n"):
-      if not line:
-        continue
-      columns = line.split()
-      name, uid, gid, mode = columns[:4]
-      selabel = None
-      capabilities = None
-
-      # After the first 4 columns, there are a series of key=value
-      # pairs. Extract out the fields we care about.
-      for element in columns[4:]:
-        key, value = element.split("=")
-        if key == "selabel":
-          selabel = value
-        if key == "capabilities":
-          capabilities = value
-
-      i = self.ITEMS.get(name, None)
-      if i is not None:
-        i.uid = int(uid)
-        i.gid = int(gid)
-        i.mode = int(mode, 8)
-        i.selabel = selabel
-        i.capabilities = capabilities
-        if i.is_dir:
-          i.children.sort(key=lambda i: i.name)
-
-    # Set metadata for the files generated by this script. For full recovery
-    # image at system/etc/recovery.img, it will be taken care by fs_config.
-    i = self.ITEMS.get("system/recovery-from-boot.p", None)
-    if i:
-      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
-    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
-    if i:
-      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None
-
-
-class Item(object):
-  """Items represent the metadata (user, group, mode) of files and
-  directories in the system image."""
-  def __init__(self, itemset, name, is_dir=False):
-    self.itemset = itemset
-    self.name = name
-    self.uid = None
-    self.gid = None
-    self.mode = None
-    self.selabel = None
-    self.capabilities = None
-    self.is_dir = is_dir
-    self.descendants = None
-    self.best_subtree = None
-
-    if name:
-      self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
-      self.parent.children.append(self)
-    else:
-      self.parent = None
-    if self.is_dir:
-      self.children = []
-
-  def Dump(self, indent=0):
-    if self.uid is not None:
-      print("%s%s %d %d %o" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode))
-    else:
-      print("%s%s %s %s %s" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode))
-    if self.is_dir:
-      print("%s%s" % ("  " * indent, self.descendants))
-      print("%s%s" % ("  " * indent, self.best_subtree))
-      for i in self.children:
-        i.Dump(indent=indent+1)
-
-  def CountChildMetadata(self):
-    """Count up the (uid, gid, mode, selabel, capabilities) tuples for
-    all children and determine the best strategy for using set_perm_recursive
-    and set_perm to correctly chown/chmod all the files to their desired
-    values.  Recursively calls itself for all descendants.
-
-    Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
-    counting up all descendants of this node.  (dmode or fmode may be None.)
-    Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
-    fmode, selabel, capabilities) tuple that will match the most descendants of
-    that Item.
-    """
-
-    assert self.is_dir
-    key = (self.uid, self.gid, self.mode, None, self.selabel,
-           self.capabilities)
-    self.descendants = {key: 1}
-    d = self.descendants
-    for i in self.children:
-      if i.is_dir:
-        for k, v in i.CountChildMetadata().iteritems():
-          d[k] = d.get(k, 0) + v
-      else:
-        k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
-        d[k] = d.get(k, 0) + 1
-
-    # Find the (uid, gid, dmode, fmode, selabel, capabilities)
-    # tuple that matches the most descendants.
-
-    # First, find the (uid, gid) pair that matches the most
-    # descendants.
-    ug = {}
-    for (uid, gid, _, _, _, _), count in d.iteritems():
-      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
-    ug = MostPopularKey(ug, (0, 0))
-
-    # Now find the dmode, fmode, selabel, and capabilities that match
-    # the most descendants with that (uid, gid), and choose those.
-    best_dmode = (0, 0o755)
-    best_fmode = (0, 0o644)
-    best_selabel = (0, None)
-    best_capabilities = (0, None)
-    for k, count in d.iteritems():
-      if k[:2] != ug:
-        continue
-      if k[2] is not None and count >= best_dmode[0]:
-        best_dmode = (count, k[2])
-      if k[3] is not None and count >= best_fmode[0]:
-        best_fmode = (count, k[3])
-      if k[4] is not None and count >= best_selabel[0]:
-        best_selabel = (count, k[4])
-      if k[5] is not None and count >= best_capabilities[0]:
-        best_capabilities = (count, k[5])
-    self.best_subtree = ug + (
-        best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])
-
-    return d
-
-  def SetPermissions(self, script):
-    """Append set_perm/set_perm_recursive commands to 'script' to
-    set all permissions, users, and groups for the tree of files
-    rooted at 'self'."""
-
-    self.CountChildMetadata()
-
-    def recurse(item, current):
-      # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
-      # that the current item (and all its children) have already been set to.
-      # We only need to issue set_perm/set_perm_recursive commands if we're
-      # supposed to be something different.
-      if item.is_dir:
-        if current != item.best_subtree:
-          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
-          current = item.best_subtree
-
-        if item.uid != current[0] or item.gid != current[1] or \
-           item.mode != current[2] or item.selabel != current[4] or \
-           item.capabilities != current[5]:
-          script.SetPermissions("/"+item.name, item.uid, item.gid,
-                                item.mode, item.selabel, item.capabilities)
-
-        for i in item.children:
-          recurse(i, current)
-      else:
-        if item.uid != current[0] or item.gid != current[1] or \
-               item.mode != current[3] or item.selabel != current[4] or \
-               item.capabilities != current[5]:
-          script.SetPermissions("/"+item.name, item.uid, item.gid,
-                                item.mode, item.selabel, item.capabilities)
-
-    recurse(self, (-1, -1, -1, -1, None, None))
-
-
-def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
-  """Copies files for the partition in the input zip to the output
-  zip.  Populates the Item class with their metadata, and returns a
-  list of symlinks.  output_zip may be None, in which case the copy is
-  skipped (but the other side effects still happen).  substitute is an
-  optional dict of {output filename: contents} to be output instead of
-  certain input files.
-  """
-
-  symlinks = []
-
-  partition = itemset.partition
-
-  for info in input_zip.infolist():
-    prefix = partition.upper() + "/"
-    if info.filename.startswith(prefix):
-      basefilename = info.filename[len(prefix):]
-      if IsSymlink(info):
-        symlinks.append((input_zip.read(info.filename),
-                         "/" + partition + "/" + basefilename))
-      else:
-        import copy
-        info2 = copy.copy(info)
-        fn = info2.filename = partition + "/" + basefilename
-        if substitute and fn in substitute and substitute[fn] is None:
-          continue
-        if output_zip is not None:
-          if substitute and fn in substitute:
-            data = substitute[fn]
-          else:
-            data = input_zip.read(info.filename)
-          common.ZipWriteStr(output_zip, info2, data)
-        if fn.endswith("/"):
-          itemset.Get(fn[:-1], is_dir=True)
-        else:
-          itemset.Get(fn)
-
-  symlinks.sort()
-  return symlinks
+METADATA_NAME = 'META-INF/com/android/metadata'
+UNZIP_PATTERN = ['IMAGES/*', 'META/*']
 
 
 def SignOutput(temp_zip_name, output_zip_name):
@@ -443,20 +194,38 @@
                   whole_file=True)
 
 
-def AppendAssertions(script, info_dict, oem_dict=None):
+def AppendAssertions(script, info_dict, oem_dicts=None):
   oem_props = info_dict.get("oem_fingerprint_properties")
   if not oem_props:
     device = GetBuildProp("ro.product.device", info_dict)
     script.AssertDevice(device)
   else:
-    if oem_dict is None:
+    if not oem_dicts:
       raise common.ExternalError(
           "No OEM file provided to answer expected assertions")
     for prop in oem_props.split():
-      if oem_dict.get(prop) is None:
+      values = []
+      for oem_dict in oem_dicts:
+        if oem_dict.get(prop):
+          values.append(oem_dict[prop])
+      if not values:
         raise common.ExternalError(
             "The OEM file is missing the property %s" % prop)
-      script.AssertOemProperty(prop, oem_dict.get(prop))
+      script.AssertOemProperty(prop, values)
+
+
+def _LoadOemDicts(script, recovery_mount_options=None):
+  """Returns the list of loaded OEM properties dict."""
+  oem_dicts = None
+  if OPTIONS.oem_source is None:
+    raise common.ExternalError("OEM source required for this build")
+  if not OPTIONS.oem_no_mount and script:
+    script.Mount("/oem", recovery_mount_options)
+  oem_dicts = []
+  for oem_file in OPTIONS.oem_source:
+    oem_dicts.append(common.LoadDictionaryFromLines(
+        open(oem_file).readlines()))
+  return oem_dicts
 
 
 def _WriteRecoveryImageToBoot(script, output_zip):
@@ -497,6 +266,7 @@
   return ("SYSTEM/recovery-from-boot.p" in namelist or
           "SYSTEM/etc/recovery.img" in namelist)
 
+
 def HasVendorPartition(target_files_zip):
   try:
     target_files_zip.getinfo("VENDOR/")
@@ -504,6 +274,7 @@
   except KeyError:
     return False
 
+
 def GetOemProperty(name, oem_props, oem_dict, info_dict):
   if oem_props is not None and name in oem_props:
     return oem_dict[name]
@@ -520,36 +291,21 @@
       GetBuildProp("ro.build.thumbprint", info_dict))
 
 
-def GetImage(which, tmpdir, info_dict):
-  # Return an image object (suitable for passing to BlockImageDiff)
-  # for the 'which' partition (most be "system" or "vendor").  If a
-  # prebuilt image and file map are found in tmpdir they are used,
-  # otherwise they are reconstructed from the individual files.
+def GetImage(which, tmpdir):
+  """Returns an image object suitable for passing to BlockImageDiff.
+
+  'which' partition must be "system" or "vendor". A prebuilt image and file
+  map must already exist in tmpdir.
+  """
 
   assert which in ("system", "vendor")
 
   path = os.path.join(tmpdir, "IMAGES", which + ".img")
   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
-  if os.path.exists(path) and os.path.exists(mappath):
-    print("using %s.img from target-files" % (which,))
-    # This is a 'new' target-files, which already has the image in it.
 
-  else:
-    print("building %s.img from target-files" % (which,))
-
-    # This is an 'old' target-files, which does not contain images
-    # already built.  Build them.
-
-    mappath = tempfile.mkstemp()[1]
-    OPTIONS.tempfiles.append(mappath)
-
-    import add_img_to_target_files
-    if which == "system":
-      path = add_img_to_target_files.BuildSystem(
-          tmpdir, info_dict, block_list=mappath)
-    elif which == "vendor":
-      path = add_img_to_target_files.BuildVendor(
-          tmpdir, info_dict, block_list=mappath)
+  # The image and map files must have been created prior to calling
+  # ota_from_target_files.py (since LMP).
+  assert os.path.exists(path) and os.path.exists(mappath)
 
   # Bug: http://b/20939131
   # In ext4 filesystems, block 0 might be changed even being mounted
@@ -569,19 +325,16 @@
 
   recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
   oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
-  oem_dict = None
+  oem_dicts = None
   if oem_props:
-    if OPTIONS.oem_source is None:
-      raise common.ExternalError("OEM source required for this build")
-    if not OPTIONS.oem_no_mount:
-      script.Mount("/oem", recovery_mount_options)
-    oem_dict = common.LoadDictionaryFromLines(
-        open(OPTIONS.oem_source).readlines())
+    oem_dicts = _LoadOemDicts(script, recovery_mount_options)
 
-  target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.info_dict)
+  target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
+                                   OPTIONS.info_dict)
   metadata = {
       "post-build": target_fp,
-      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+      "pre-device": GetOemProperty("ro.product.device", oem_props,
+                                   oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
   }
@@ -595,16 +348,15 @@
       metadata=metadata,
       info_dict=OPTIONS.info_dict)
 
-  has_recovery_patch = HasRecoveryPatch(input_zip)
-  block_based = OPTIONS.block_based and has_recovery_patch
+  assert HasRecoveryPatch(input_zip)
 
-  metadata["ota-type"] = "BLOCK" if block_based else "FILE"
+  metadata["ota-type"] = "BLOCK"
 
   ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
   ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
   script.AssertOlderBuild(ts, ts_text)
 
-  AppendAssertions(script, OPTIONS.info_dict, oem_dict)
+  AppendAssertions(script, OPTIONS.info_dict, oem_dicts)
   device_specific.FullOTA_Assertions()
 
   # Two-step package strategy (in chronological order, which is *not*
@@ -672,61 +424,27 @@
 
   recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
 
-  system_items = ItemSet("system", "META/filesystem_config.txt")
   script.ShowProgress(system_progress, 0)
 
-  if block_based:
-    # Full OTA is done as an "incremental" against an empty source
-    # image.  This has the effect of writing new data from the package
-    # to the entire partition, but lets us reuse the updater code that
-    # writes incrementals to do it.
-    system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
-    system_tgt.ResetFileMap()
-    system_diff = common.BlockDifference("system", system_tgt, src=None)
-    system_diff.WriteScript(script, output_zip)
-  else:
-    script.FormatPartition("/system")
-    script.Mount("/system", recovery_mount_options)
-    if not has_recovery_patch:
-      script.UnpackPackageDir("recovery", "/system")
-    script.UnpackPackageDir("system", "/system")
-
-    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
-    script.MakeSymlinks(symlinks)
+  # Full OTA is done as an "incremental" against an empty source image. This
+  # has the effect of writing new data from the package to the entire
+  # partition, but lets us reuse the updater code that writes incrementals to
+  # do it.
+  system_tgt = GetImage("system", OPTIONS.input_tmp)
+  system_tgt.ResetFileMap()
+  system_diff = common.BlockDifference("system", system_tgt, src=None)
+  system_diff.WriteScript(script, output_zip)
 
   boot_img = common.GetBootableImage(
       "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
 
-  if not block_based:
-    def output_sink(fn, data):
-      common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-      system_items.Get("system/" + fn)
-
-    common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
-                             recovery_img, boot_img)
-
-    system_items.GetMetadata(input_zip)
-    system_items.Get("system").SetPermissions(script)
-
   if HasVendorPartition(input_zip):
-    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
     script.ShowProgress(0.1, 0)
 
-    if block_based:
-      vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
-      vendor_tgt.ResetFileMap()
-      vendor_diff = common.BlockDifference("vendor", vendor_tgt)
-      vendor_diff.WriteScript(script, output_zip)
-    else:
-      script.FormatPartition("/vendor")
-      script.Mount("/vendor", recovery_mount_options)
-      script.UnpackPackageDir("vendor", "/vendor")
-
-      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
-      script.MakeSymlinks(symlinks)
-
-      vendor_items.GetMetadata(input_zip)
-      vendor_items.Get("vendor").SetPermissions(script)
+    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
+    vendor_tgt.ResetFileMap()
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt)
+    vendor_diff.WriteScript(script, output_zip)
 
   common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
   common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
@@ -774,23 +492,9 @@
 
 
 def WriteMetadata(metadata, output_zip):
-  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
-                     "".join(["%s=%s\n" % kv
-                              for kv in sorted(metadata.iteritems())]))
-
-
-def LoadPartitionFiles(z, partition):
-  """Load all the files from the given partition in a given target-files
-  ZipFile, and return a dict of {filename: File object}."""
-  out = {}
-  prefix = partition.upper() + "/"
-  for info in z.infolist():
-    if info.filename.startswith(prefix) and not IsSymlink(info):
-      basefilename = info.filename[len(prefix):]
-      fn = partition + "/" + basefilename
-      data = z.read(info.filename)
-      out[fn] = common.File(fn, data, info.compress_size)
-  return out
+  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
+  common.ZipWriteStr(output_zip, METADATA_NAME, value,
+                     compress_type=zipfile.ZIP_STORED)
 
 
 def GetBuildProp(prop, info_dict):
@@ -801,21 +505,34 @@
     raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
 
 
-def AddToKnownPaths(filename, known_paths):
-  if filename[-1] == "/":
-    return
-  dirs = filename.split("/")[:-1]
-  while len(dirs) > 0:
-    path = "/".join(dirs)
-    if path in known_paths:
-      break
-    known_paths.add(path)
-    dirs.pop()
+def HandleDowngradeMetadata(metadata):
+  # Only incremental OTAs are allowed to reach here.
+  assert OPTIONS.incremental_source is not None
+
+  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
+  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
+  is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+  if OPTIONS.downgrade:
+    if not is_downgrade:
+      raise RuntimeError("--downgrade specified but no downgrade detected: "
+                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["ota-downgrade"] = "yes"
+  elif OPTIONS.timestamp:
+    if not is_downgrade:
+      raise RuntimeError("--timestamp specified but no timestamp hack needed: "
+                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["post-timestamp"] = str(long(pre_timestamp) + 1)
+  else:
+    if is_downgrade:
+      raise RuntimeError("Downgrade detected based on timestamp check: "
+                         "pre: %s, post: %s. Need to specify --timestamp OR "
+                         "--downgrade to allow building the incremental." % (
+                             pre_timestamp, post_timestamp))
+    metadata["post-timestamp"] = post_timestamp
 
 
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
-  # TODO(tbao): We should factor out the common parts between
-  # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
   source_version = OPTIONS.source_info_dict["recovery_api_version"]
   target_version = OPTIONS.target_info_dict["recovery_api_version"]
 
@@ -830,41 +547,18 @@
       "recovery_mount_options")
   source_oem_props = OPTIONS.source_info_dict.get("oem_fingerprint_properties")
   target_oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
-  oem_dict = None
-  if source_oem_props or target_oem_props:
-    if OPTIONS.oem_source is None:
-      raise common.ExternalError("OEM source required for this build")
-    if not OPTIONS.oem_no_mount:
-      script.Mount("/oem", recovery_mount_options)
-    oem_dict = common.LoadDictionaryFromLines(
-        open(OPTIONS.oem_source).readlines())
+  oem_dicts = None
+  if source_oem_props and target_oem_props:
+    oem_dicts = _LoadOemDicts(script, recovery_mount_options)
 
   metadata = {
       "pre-device": GetOemProperty("ro.product.device", source_oem_props,
-                                   oem_dict, OPTIONS.source_info_dict),
+                                   oem_dicts and oem_dicts[0],
+                                   OPTIONS.source_info_dict),
       "ota-type": "BLOCK",
   }
 
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
+  HandleDowngradeMetadata(metadata)
 
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
@@ -876,9 +570,9 @@
       metadata=metadata,
       info_dict=OPTIONS.source_info_dict)
 
-  source_fp = CalculateFingerprint(source_oem_props, oem_dict,
+  source_fp = CalculateFingerprint(source_oem_props, oem_dicts and oem_dicts[0],
                                    OPTIONS.source_info_dict)
-  target_fp = CalculateFingerprint(target_oem_props, oem_dict,
+  target_fp = CalculateFingerprint(target_oem_props, oem_dicts and oem_dicts[0],
                                    OPTIONS.target_info_dict)
   metadata["pre-build"] = source_fp
   metadata["post-build"] = target_fp
@@ -898,8 +592,8 @@
   target_recovery = common.GetBootableImage(
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
 
-  system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
-  system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)
+  system_src = GetImage("system", OPTIONS.source_tmp)
+  system_tgt = GetImage("system", OPTIONS.target_tmp)
 
   blockimgdiff_version = 1
   if OPTIONS.info_dict:
@@ -926,10 +620,8 @@
   if HasVendorPartition(target_zip):
     if not HasVendorPartition(source_zip):
       raise RuntimeError("can't generate incremental that adds /vendor")
-    vendor_src = GetImage("vendor", OPTIONS.source_tmp,
-                          OPTIONS.source_info_dict)
-    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
-                          OPTIONS.target_info_dict)
+    vendor_src = GetImage("vendor", OPTIONS.source_tmp)
+    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp)
 
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
@@ -943,7 +635,7 @@
   else:
     vendor_diff = None
 
-  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
+  AppendAssertions(script, OPTIONS.target_info_dict, oem_dicts)
   device_specific.IncrementalOTA_Assertions()
 
   # Two-step incremental package strategy (in chronological order,
@@ -1153,19 +845,16 @@
   oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
   recovery_mount_options = OPTIONS.info_dict.get(
       "recovery_mount_options")
-  oem_dict = None
+  oem_dicts = None
   if oem_props:
-    if OPTIONS.oem_source is None:
-      raise common.ExternalError("OEM source required for this build")
-    if not OPTIONS.oem_no_mount:
-      script.Mount("/oem", recovery_mount_options)
-    oem_dict = common.LoadDictionaryFromLines(
-        open(OPTIONS.oem_source).readlines())
+    oem_dicts = _LoadOemDicts(script, recovery_mount_options)
 
-  target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.info_dict)
+  target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
+                                   OPTIONS.info_dict)
   metadata = {
       "post-build": target_fp,
-      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+      "pre-device": GetOemProperty("ro.product.device", oem_props,
+                                   oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
   }
@@ -1179,7 +868,7 @@
       metadata=metadata,
       info_dict=OPTIONS.info_dict)
 
-  AppendAssertions(script, OPTIONS.info_dict, oem_dict)
+  AppendAssertions(script, OPTIONS.info_dict, oem_dicts)
 
   script.Print("Verifying device images against %s..." % target_fp)
   script.AppendExtra("")
@@ -1202,13 +891,13 @@
       recovery_type, recovery_device, recovery_img.size, recovery_img.sha1))
   script.AppendExtra("")
 
-  system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
+  system_tgt = GetImage("system", OPTIONS.input_tmp)
   system_tgt.ResetFileMap()
   system_diff = common.BlockDifference("system", system_tgt, src=None)
   system_diff.WriteStrictVerifyScript(script)
 
   if HasVendorPartition(input_zip):
-    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
+    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
     vendor_tgt.ResetFileMap()
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, src=None)
     vendor_diff.WriteStrictVerifyScript(script)
@@ -1226,6 +915,53 @@
                                       source_file=None):
   """Generate an Android OTA package that has A/B update payload."""
 
+  def ComputeStreamingMetadata(zip_file, reserve_space=False,
+                               expected_length=None):
+    """Compute the streaming metadata for a given zip.
+
+    When 'reserve_space' is True, we reserve extra space for the offset and
+    length of the metadata entry itself; we don't know their final values
+    until the package gets signed. This function is called again after
+    signing, at which point we write the actual values, padding the string
+    to the length reserved earlier. Note that we can't use the actual length
+    of the metadata entry on the second run; otherwise the offsets for the
+    other entries would shift again.
+    """
+
+    def ComputeEntryOffsetSize(name):
+      """Compute the zip entry offset and size."""
+      info = zip_file.getinfo(name)
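+      # ZipInfo.header_offset points at the entry's local file header; the
+      # entry data starts right after that variable-length header.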
+      offset = info.header_offset + len(info.FileHeader())
+      size = info.file_size
+      return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+    # payload.bin and payload_properties.txt must exist.
+    offsets = [ComputeEntryOffsetSize('payload.bin'),
+               ComputeEntryOffsetSize('payload_properties.txt')]
+
+    # care_map.txt is available only if dm-verity is enabled.
+    if 'care_map.txt' in zip_file.namelist():
+      offsets.append(ComputeEntryOffsetSize('care_map.txt'))
+
+    # 'META-INF/com/android/metadata' is required. We don't yet know its
+    # actual offset and length (nor the final values for the other entries).
+    # So we reserve 10 bytes as a placeholder, which is enough to cover the
+    # metadata entry itself ('xx:xxx', since it's ZIP_STORED and appears at
+    # the beginning of the zip), as well as possible value changes in the
+    # other entries.
+    if reserve_space:
+      offsets.append('metadata:' + ' ' * 10)
+    else:
+      offsets.append(ComputeEntryOffsetSize(METADATA_NAME))
+
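+    # The result is a comma-separated list of "name:offset:size" entries, e.g.
+    # "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:134"
+    # (offsets and sizes made up for illustration).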
+    value = ','.join(offsets)
+    if expected_length is not None:
+      assert len(value) <= expected_length, \
+          'Insufficient reserved space: reserved=%d, actual=%d' % (
+              expected_length, len(value))
+      value += ' ' * (expected_length - len(value))
+    return value
+
   # The place where the output from the subprocess should go.
   log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
 
@@ -1254,31 +990,34 @@
 
   # Metadata to comply with Android OTA package format.
   oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties", None)
-  oem_dict = None
+  oem_dicts = None
   if oem_props:
-    if OPTIONS.oem_source is None:
-      raise common.ExternalError("OEM source required for this build")
-    oem_dict = common.LoadDictionaryFromLines(
-        open(OPTIONS.oem_source).readlines())
+    oem_dicts = _LoadOemDicts(None)
 
   metadata = {
-      "post-build": CalculateFingerprint(oem_props, oem_dict,
+      "post-build": CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
                                          OPTIONS.info_dict),
       "post-build-incremental" : GetBuildProp("ro.build.version.incremental",
                                               OPTIONS.info_dict),
-      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+      "pre-device": GetOemProperty("ro.product.device", oem_props,
+                                   oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict),
-      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
       "ota-required-cache": "0",
       "ota-type": "AB",
   }
 
   if source_file is not None:
-    metadata["pre-build"] = CalculateFingerprint(oem_props, oem_dict,
+    metadata["pre-build"] = CalculateFingerprint(oem_props,
+                                                 oem_dicts and oem_dicts[0],
                                                  OPTIONS.source_info_dict)
     metadata["pre-build-incremental"] = GetBuildProp(
         "ro.build.version.incremental", OPTIONS.source_info_dict)
 
+    HandleDowngradeMetadata(metadata)
+  else:
+    metadata["post-timestamp"] = GetBuildProp(
+        "ro.build.date.utc", OPTIONS.info_dict)
+
   # 1. Generate payload.
   payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
   cmd = ["brillo_update_payload", "generate",
@@ -1363,11 +1102,15 @@
       f.write("POWERWASH=1\n")
     metadata["ota-wipe"] = "yes"
 
-  # Add the signed payload file and properties into the zip.
-  common.ZipWrite(output_zip, properties_file, arcname="payload_properties.txt")
+  # Add the signed payload file and properties into the zip. In order to
+  # support streaming, we pack payload.bin, payload_properties.txt and
+  # care_map.txt as ZIP_STORED, so that these entries can be read directly
+  # with their offset and length pairs.
   common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin",
                   compress_type=zipfile.ZIP_STORED)
-  WriteMetadata(metadata, output_zip)
+  common.ZipWrite(output_zip, properties_file,
+                  arcname="payload_properties.txt",
+                  compress_type=zipfile.ZIP_STORED)
 
   # If dm-verity is supported for the device, copy contents of care_map
   # into A/B OTA package.
@@ -1377,588 +1120,62 @@
     namelist = target_zip.namelist()
     if care_map_path in namelist:
       care_map_data = target_zip.read(care_map_path)
-      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data)
+      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+                         compress_type=zipfile.ZIP_STORED)
     else:
       print("Warning: cannot find care map file in target_file package")
     common.ZipClose(target_zip)
 
-  # Sign the whole package to comply with the Android OTA package format.
-  common.ZipClose(output_zip)
-  SignOutput(temp_zip_file.name, output_file)
-  temp_zip_file.close()
-
-
-class FileDifference(object):
-  def __init__(self, partition, source_zip, target_zip, output_zip):
-    self.deferred_patch_list = None
-    print("Loading target...")
-    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
-    print("Loading source...")
-    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
-
-    self.verbatim_targets = verbatim_targets = []
-    self.patch_list = patch_list = []
-    diffs = []
-    self.renames = renames = {}
-    known_paths = set()
-    largest_source_size = 0
-
-    matching_file_cache = {}
-    for fn, sf in source_data.items():
-      assert fn == sf.name
-      matching_file_cache["path:" + fn] = sf
-      if fn in target_data.keys():
-        AddToKnownPaths(fn, known_paths)
-      # Only allow eligibility for filename/sha matching
-      # if there isn't a perfect path match.
-      if target_data.get(sf.name) is None:
-        matching_file_cache["file:" + fn.split("/")[-1]] = sf
-        matching_file_cache["sha:" + sf.sha1] = sf
-
-    for fn in sorted(target_data.keys()):
-      tf = target_data[fn]
-      assert fn == tf.name
-      sf = ClosestFileMatch(tf, matching_file_cache, renames)
-      if sf is not None and sf.name != tf.name:
-        print("File has moved from " + sf.name + " to " + tf.name)
-        renames[sf.name] = tf
-
-      if sf is None or fn in OPTIONS.require_verbatim:
-        # This file should be included verbatim
-        if fn in OPTIONS.prohibit_verbatim:
-          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-        print("send", fn, "verbatim")
-        tf.AddToZip(output_zip)
-        verbatim_targets.append((fn, tf.size, tf.sha1))
-        if fn in target_data.keys():
-          AddToKnownPaths(fn, known_paths)
-      elif tf.sha1 != sf.sha1:
-        # File is different; consider sending as a patch
-        diffs.append(common.Difference(tf, sf))
-      else:
-        # Target file data identical to source (may still be renamed)
-        pass
-
-    common.ComputeDifferences(diffs)
-
-    for diff in diffs:
-      tf, sf, d = diff.GetPatch()
-      path = "/".join(tf.name.split("/")[:-1])
-      if d is None or len(d) > tf.compress_size * OPTIONS.patch_threshold or \
-          path not in known_paths:
-        # patch is almost as big as the file; don't bother patching
-        # or a patch + rename cannot take place due to the target
-        # directory not existing
-        tf.AddToZip(output_zip)
-        verbatim_targets.append((tf.name, tf.size, tf.sha1))
-        if sf.name in renames:
-          del renames[sf.name]
-        AddToKnownPaths(tf.name, known_paths)
-      else:
-        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
-        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
-        largest_source_size = max(largest_source_size, sf.size)
-
-    self.largest_source_size = largest_source_size
-
-  def EmitVerification(self, script):
-    so_far = 0
-    for tf, sf, _, _ in self.patch_list:
-      if tf.name != sf.name:
-        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
-      so_far += sf.size
-    return so_far
-
-  def EmitExplicitTargetVerification(self, script):
-    for fn, _, sha1 in self.verbatim_targets:
-      if fn[-1] != "/":
-        script.FileCheck("/"+fn, sha1)
-    for tf, _, _, _ in self.patch_list:
-      script.FileCheck(tf.name, tf.sha1)
-
-  def RemoveUnneededFiles(self, script, extras=()):
-    file_list = ["/" + i[0] for i in self.verbatim_targets]
-    file_list += ["/" + i for i in self.source_data
-                  if i not in self.target_data and i not in self.renames]
-    file_list += list(extras)
-    # Sort the list in descending order, which removes all the files first
-    # before attempting to remove the folder. (Bug: 22960996)
-    script.DeleteFiles(sorted(file_list, reverse=True))
-
-  def TotalPatchSize(self):
-    return sum(i[1].size for i in self.patch_list)
-
-  def EmitPatches(self, script, total_patch_size, so_far):
-    self.deferred_patch_list = deferred_patch_list = []
-    for item in self.patch_list:
-      tf, sf, _, _ = item
-      if tf.name == "system/build.prop":
-        deferred_patch_list.append(item)
-        continue
-      if sf.name != tf.name:
-        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-      script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
-                        "patch/" + sf.name + ".p")
-      so_far += tf.size
-      script.SetProgress(so_far / total_patch_size)
-    return so_far
-
-  def EmitDeferredPatches(self, script):
-    for item in self.deferred_patch_list:
-      tf, sf, _, _ = item
-      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
-                        "patch/" + sf.name + ".p")
-    script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)
-
-  def EmitRenames(self, script):
-    if len(self.renames) > 0:
-      script.Print("Renaming files...")
-      for src, tgt in self.renames.iteritems():
-        print("Renaming " + src + " to " + tgt.name)
-        script.RenameFile(src, tgt.name)
-
-
-def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
-  target_has_recovery_patch = HasRecoveryPatch(target_zip)
-  source_has_recovery_patch = HasRecoveryPatch(source_zip)
-
-  if (OPTIONS.block_based and
-      target_has_recovery_patch and
-      source_has_recovery_patch):
-    return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip)
-
-  source_version = OPTIONS.source_info_dict["recovery_api_version"]
-  target_version = OPTIONS.target_info_dict["recovery_api_version"]
-
-  if source_version == 0:
-    print("WARNING: generating edify script for a source that "
-          "can't install it.")
-  script = edify_generator.EdifyGenerator(
-      source_version, OPTIONS.target_info_dict,
-      fstab=OPTIONS.source_info_dict["fstab"])
-
-  recovery_mount_options = OPTIONS.source_info_dict.get(
-      "recovery_mount_options")
-  source_oem_props = OPTIONS.source_info_dict.get("oem_fingerprint_properties")
-  target_oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
-  oem_dict = None
-  if source_oem_props or target_oem_props:
-    if OPTIONS.oem_source is None:
-      raise common.ExternalError("OEM source required for this build")
-    if not OPTIONS.oem_no_mount:
-      script.Mount("/oem", recovery_mount_options)
-    oem_dict = common.LoadDictionaryFromLines(
-        open(OPTIONS.oem_source).readlines())
-
-  metadata = {
-      "pre-device": GetOemProperty("ro.product.device", source_oem_props,
-                                   oem_dict, OPTIONS.source_info_dict),
-      "ota-type": "FILE",
-  }
-
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
-
-  device_specific = common.DeviceSpecificParams(
-      source_zip=source_zip,
-      source_version=source_version,
-      target_zip=target_zip,
-      target_version=target_version,
-      output_zip=output_zip,
-      script=script,
-      metadata=metadata,
-      info_dict=OPTIONS.source_info_dict)
-
-  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
-  script.Mount("/system", recovery_mount_options)
-  if HasVendorPartition(target_zip):
-    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
-    script.Mount("/vendor", recovery_mount_options)
-  else:
-    vendor_diff = None
-
-  target_fp = CalculateFingerprint(target_oem_props, oem_dict,
-                                   OPTIONS.target_info_dict)
-  source_fp = CalculateFingerprint(source_oem_props, oem_dict,
-                                   OPTIONS.source_info_dict)
-
-  if source_oem_props is None and target_oem_props is None:
-    script.AssertSomeFingerprint(source_fp, target_fp)
-  elif source_oem_props is not None and target_oem_props is not None:
-    script.AssertSomeThumbprint(
-        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
-        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
-  elif source_oem_props is None and target_oem_props is not None:
-    script.AssertFingerprintOrThumbprint(
-        source_fp,
-        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict))
-  else:
-    script.AssertFingerprintOrThumbprint(
-        target_fp,
-        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
-
-  metadata["pre-build"] = source_fp
-  metadata["post-build"] = target_fp
-  metadata["pre-build-incremental"] = GetBuildProp(
-      "ro.build.version.incremental", OPTIONS.source_info_dict)
-  metadata["post-build-incremental"] = GetBuildProp(
-      "ro.build.version.incremental", OPTIONS.target_info_dict)
-
-  source_boot = common.GetBootableImage(
-      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
-      OPTIONS.source_info_dict)
-  target_boot = common.GetBootableImage(
-      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
-  updating_boot = (not OPTIONS.two_step and
-                   (source_boot.data != target_boot.data))
-
-  source_recovery = common.GetBootableImage(
-      "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
-      OPTIONS.source_info_dict)
-  target_recovery = common.GetBootableImage(
-      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
-  updating_recovery = (source_recovery.data != target_recovery.data)
-
-  # Here's how we divide up the progress bar:
-  #  0.1 for verifying the start state (PatchCheck calls)
-  #  0.8 for applying patches (ApplyPatch calls)
-  #  0.1 for unpacking verbatim files, symlinking, and doing the
-  #      device-specific commands.
-
-  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
-  device_specific.IncrementalOTA_Assertions()
-
-  # Two-step incremental package strategy (in chronological order,
-  # which is *not* the order in which the generated script has
-  # things):
-  #
-  # if stage is not "2/3" or "3/3":
-  #    do verification on current system
-  #    write recovery image to boot partition
-  #    set stage to "2/3"
-  #    reboot to boot partition and restart recovery
-  # else if stage is "2/3":
-  #    write recovery image to recovery partition
-  #    set stage to "3/3"
-  #    reboot to recovery partition and restart recovery
-  # else:
-  #    (stage must be "3/3")
-  #    perform update:
-  #       patch system files, etc.
-  #       force full install of new boot image
-  #       set up system to update recovery partition on first boot
-  #    complete script normally
-  #    (allow recovery to mark itself finished and reboot)
-
-  if OPTIONS.two_step:
-    if not OPTIONS.source_info_dict.get("multistage_support", None):
-      assert False, "two-step packages not supported by this build"
-    fs = OPTIONS.source_info_dict["fstab"]["/misc"]
-    assert fs.fs_type.upper() == "EMMC", \
-        "two-step packages only supported on devices with EMMC /misc partitions"
-    bcb_dev = {"bcb_dev": fs.device}
-    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
-    script.AppendExtra("""
-if get_stage("%(bcb_dev)s") == "2/3" then
-""" % bcb_dev)
-
-    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
-    script.Comment("Stage 2/3")
-    script.AppendExtra("sleep(20);\n")
-    script.WriteRawImage("/recovery", "recovery.img")
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "3/3");
-reboot_now("%(bcb_dev)s", "recovery");
-else if get_stage("%(bcb_dev)s") != "3/3" then
-""" % bcb_dev)
-
-    # Stage 1/3: (a) Verify the current system.
-    script.Comment("Stage 1/3")
-
-  # Dump fingerprints
-  script.Print("Source: %s" % (source_fp,))
-  script.Print("Target: %s" % (target_fp,))
-
-  script.Print("Verifying current system...")
-
-  device_specific.IncrementalOTA_VerifyBegin()
-
-  script.ShowProgress(0.1, 0)
-  so_far = system_diff.EmitVerification(script)
-  if vendor_diff:
-    so_far += vendor_diff.EmitVerification(script)
-
-  size = []
-  if system_diff.patch_list:
-    size.append(system_diff.largest_source_size)
-  if vendor_diff:
-    if vendor_diff.patch_list:
-      size.append(vendor_diff.largest_source_size)
-
-  if updating_boot:
-    d = common.Difference(target_boot, source_boot)
-    _, _, d = d.ComputePatch()
-    print("boot      target: %d  source: %d  diff: %d" % (
-        target_boot.size, source_boot.size, len(d)))
-
-    common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
-
-    boot_type, boot_device = common.GetTypeAndDevice(
-        "/boot", OPTIONS.source_info_dict)
-
-    script.PatchCheck("%s:%s:%d:%s:%d:%s" %
-                      (boot_type, boot_device,
-                       source_boot.size, source_boot.sha1,
-                       target_boot.size, target_boot.sha1))
-    so_far += source_boot.size
-    size.append(target_boot.size)
-
-  if size:
-    script.CacheFreeSpaceCheck(max(size))
-
-  device_specific.IncrementalOTA_VerifyEnd()
-
-  if OPTIONS.two_step:
-    # Stage 1/3: (b) Write recovery image to /boot.
-    _WriteRecoveryImageToBoot(script, output_zip)
-
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "2/3");
-reboot_now("%(bcb_dev)s", "");
-else
-""" % bcb_dev)
-
-    # Stage 3/3: Make changes.
-    script.Comment("Stage 3/3")
-
-  script.Comment("---- start making changes here ----")
-
-  device_specific.IncrementalOTA_InstallBegin()
-
-  if OPTIONS.two_step:
-    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
-    script.WriteRawImage("/boot", "boot.img")
-    print("writing full boot image (forced by two-step mode)")
-
-  script.Print("Removing unneeded files...")
-  system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
-  if vendor_diff:
-    vendor_diff.RemoveUnneededFiles(script)
-
-  script.ShowProgress(0.8, 0)
-  total_patch_size = 1.0 + system_diff.TotalPatchSize()
-  if vendor_diff:
-    total_patch_size += vendor_diff.TotalPatchSize()
-  if updating_boot:
-    total_patch_size += target_boot.size
-
-  script.Print("Patching system files...")
-  so_far = system_diff.EmitPatches(script, total_patch_size, 0)
-  if vendor_diff:
-    script.Print("Patching vendor files...")
-    so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
-
-  if not OPTIONS.two_step:
-    if updating_boot:
-      # Produce the boot image by applying a patch to the current
-      # contents of the boot partition, and write it back to the
-      # partition.
-      script.Print("Patching boot image...")
-      script.ApplyPatch("%s:%s:%d:%s:%d:%s"
-                        % (boot_type, boot_device,
-                           source_boot.size, source_boot.sha1,
-                           target_boot.size, target_boot.sha1),
-                        "-",
-                        target_boot.size, target_boot.sha1,
-                        source_boot.sha1, "patch/boot.img.p")
-      so_far += target_boot.size
-      script.SetProgress(so_far / total_patch_size)
-      print("boot image changed; including.")
-    else:
-      print("boot image unchanged; skipping.")
-
-  system_items = ItemSet("system", "META/filesystem_config.txt")
-  if vendor_diff:
-    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
-
-  if updating_recovery:
-    # Recovery is generated as a patch using both the boot image
-    # (which contains the same linux kernel as recovery) and the file
-    # /system/etc/recovery-resource.dat (which contains all the images
-    # used in the recovery UI) as sources.  This lets us minimize the
-    # size of the patch, which must be included in every OTA package.
-    #
-    # For older builds where recovery-resource.dat is not present, we
-    # use only the boot image as the source.
-
-    if not target_has_recovery_patch:
-      def output_sink(fn, data):
-        common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-        system_items.Get("system/" + fn)
-
-      common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
-                               target_recovery, target_boot)
-      script.DeleteFiles(["/system/recovery-from-boot.p",
-                          "/system/etc/recovery.img",
-                          "/system/etc/install-recovery.sh"])
-    print("recovery image changed; including as patch from boot.")
-  else:
-    print("recovery image unchanged; skipping.")
-
-  script.ShowProgress(0.1, 10)
-
-  target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
-  if vendor_diff:
-    target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
-
-  temp_script = script.MakeTemporary()
-  system_items.GetMetadata(target_zip)
-  system_items.Get("system").SetPermissions(temp_script)
-  if vendor_diff:
-    vendor_items.GetMetadata(target_zip)
-    vendor_items.Get("vendor").SetPermissions(temp_script)
-
-  # Note that this call will mess up the trees of Items, so make sure
-  # we're done with them.
-  source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
-  if vendor_diff:
-    source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
-
-  target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
-  source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
-
-  # Delete all the symlinks in source that aren't in target.  This
-  # needs to happen before verbatim files are unpacked, in case a
-  # symlink in the source is replaced by a real file in the target.
-
-  # If a symlink in the source will be replaced by a regular file, we cannot
-  # delete the symlink/file in case the package gets applied again. For such
-  # a symlink, we prepend a sha1_check() to detect if it has been updated.
-  # (Bug: 23646151)
-  replaced_symlinks = dict()
-  if system_diff:
-    for i in system_diff.verbatim_targets:
-      replaced_symlinks["/%s" % (i[0],)] = i[2]
-  if vendor_diff:
-    for i in vendor_diff.verbatim_targets:
-      replaced_symlinks["/%s" % (i[0],)] = i[2]
-
-  if system_diff:
-    for tf in system_diff.renames.values():
-      replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
-  if vendor_diff:
-    for tf in vendor_diff.renames.values():
-      replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
-
-  always_delete = []
-  may_delete = []
-  for dest, link in source_symlinks:
-    if link not in target_symlinks_d:
-      if link in replaced_symlinks:
-        may_delete.append((link, replaced_symlinks[link]))
-      else:
-        always_delete.append(link)
-  script.DeleteFiles(always_delete)
-  script.DeleteFilesIfNotMatching(may_delete)
-
-  if system_diff.verbatim_targets:
-    script.Print("Unpacking new system files...")
-    script.UnpackPackageDir("system", "/system")
-  if vendor_diff and vendor_diff.verbatim_targets:
-    script.Print("Unpacking new vendor files...")
-    script.UnpackPackageDir("vendor", "/vendor")
-
-  if updating_recovery and not target_has_recovery_patch:
-    script.Print("Unpacking new recovery...")
-    script.UnpackPackageDir("recovery", "/system")
-
-  system_diff.EmitRenames(script)
-  if vendor_diff:
-    vendor_diff.EmitRenames(script)
-
-  script.Print("Symlinks and permissions...")
-
-  # Create all the symlinks that don't already exist, or point to
-  # somewhere different than what we want.  Delete each symlink before
-  # creating it, since the 'symlink' command won't overwrite.
-  to_create = []
-  for dest, link in target_symlinks:
-    if link in source_symlinks_d:
-      if dest != source_symlinks_d[link]:
-        to_create.append((dest, link))
-    else:
-      to_create.append((dest, link))
-  script.DeleteFiles([i[1] for i in to_create])
-  script.MakeSymlinks(to_create)
-
-  # Now that the symlinks are created, we can set all the
-  # permissions.
-  script.AppendScript(temp_script)
-
-  # Do device-specific installation (eg, write radio image).
-  device_specific.IncrementalOTA_InstallEnd()
-
-  if OPTIONS.extra_script is not None:
-    script.AppendExtra(OPTIONS.extra_script)
-
-  # Patch the build.prop file last, so if something fails but the
-  # device can still come up, it appears to be the old build and will
-  # get set the OTA package again to retry.
-  script.Print("Patching remaining system files...")
-  system_diff.EmitDeferredPatches(script)
-
-  if OPTIONS.wipe_user_data:
-    script.Print("Erasing user data...")
-    script.FormatPartition("/data")
-    metadata["ota-wipe"] = "yes"
-
-  if OPTIONS.two_step:
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "");
-endif;
-endif;
-""" % bcb_dev)
-
-  if OPTIONS.verify and system_diff:
-    script.Print("Remounting and verifying system partition files...")
-    script.Unmount("/system")
-    script.Mount("/system", recovery_mount_options)
-    system_diff.EmitExplicitTargetVerification(script)
-
-  if OPTIONS.verify and vendor_diff:
-    script.Print("Remounting and verifying vendor partition files...")
-    script.Unmount("/vendor")
-    script.Mount("/vendor", recovery_mount_options)
-    vendor_diff.EmitExplicitTargetVerification(script)
-
-  # For downgrade OTAs, we prefer to use the update-binary in the source
-  # build that is actually newer than the one in the target build.
-  if OPTIONS.downgrade:
-    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
-  else:
-    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
-
-  metadata["ota-required-cache"] = str(script.required_cache)
+  # Write the current metadata entry with placeholders.
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      output_zip, reserve_space=True)
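+  # With these offset/size pairs in place, a streaming client can fetch
+  # individual entries such as payload.bin directly (e.g. via HTTP range
+  # requests) without downloading the whole package first.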
   WriteMetadata(metadata, output_zip)
+  common.ZipClose(output_zip)
+
+  # SignOutput(), which in turn calls signapk.jar, may reorder the zip
+  # entries as well as pad the entry headers. We do a preliminary signing
+  # (with an incomplete metadata entry) to let that happen, then compute the
+  # zip entry offsets, write back the final metadata and do the final
+  # signing.
+  prelim_signing = tempfile.NamedTemporaryFile()
+  SignOutput(temp_zip_file.name, prelim_signing.name)
+  common.ZipClose(temp_zip_file)
+
+  # Open the signed zip. Compute the final metadata that's needed for streaming.
+  prelim_zip = zipfile.ZipFile(prelim_signing, "r",
+                               compression=zipfile.ZIP_DEFLATED)
+  expected_length = len(metadata['ota-streaming-property-files'])
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      prelim_zip, reserve_space=False, expected_length=expected_length)
+
+  # Copy the zip entries, as we cannot update / delete entries with zipfile.
+  final_signing = tempfile.NamedTemporaryFile()
+  output_zip = zipfile.ZipFile(final_signing, "w",
+                               compression=zipfile.ZIP_DEFLATED)
+  for item in prelim_zip.infolist():
+    if item.filename == METADATA_NAME:
+      continue
+
+    data = prelim_zip.read(item.filename)
+    out_info = copy.copy(item)
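+    # Passing the copied ZipInfo through preserves each entry's metadata
+    # (notably compress_type), so the ZIP_STORED entries stay stored in the
+    # rewritten zip.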
+    common.ZipWriteStr(output_zip, out_info, data)
+
+  # Now write the final metadata entry.
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(prelim_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  SignOutput(final_signing.name, output_file)
+  final_signing.close()
+
+  # Reopen the final signed zip to double-check the streaming metadata.
+  output_zip = zipfile.ZipFile(output_file, "r")
+  actual = metadata['ota-streaming-property-files'].strip()
+  expected = ComputeStreamingMetadata(output_zip)
+  assert actual == expected, \
+      "Mismatching streaming metadata: %s vs %s." % (actual, expected)
+  common.ZipClose(output_zip)
 
 
 def main(argv):
@@ -1979,8 +1196,10 @@
     elif o == "--downgrade":
       OPTIONS.downgrade = True
       OPTIONS.wipe_user_data = True
+    elif o == "--override_timestamp":
+      OPTIONS.timestamp = True
     elif o in ("-o", "--oem_settings"):
-      OPTIONS.oem_source = a
+      OPTIONS.oem_source = a.split(',')
     elif o == "--oem_no_mount":
       OPTIONS.oem_no_mount = True
     elif o in ("-e", "--extra_script"):
@@ -2017,6 +1236,8 @@
       OPTIONS.payload_signer = a
     elif o == "--payload_signer_args":
       OPTIONS.payload_signer_args = shlex.split(a)
+    elif o == "--extracted_input_target_files":
+      OPTIONS.extracted_input = a
     else:
       return False
     return True
@@ -2031,6 +1252,7 @@
                                  "full_bootloader",
                                  "wipe_user_data",
                                  "downgrade",
+                                 "override_timestamp",
                                  "extra_script=",
                                  "worker_threads=",
                                  "two_step",
@@ -2046,6 +1268,7 @@
                                  "log_diff=",
                                  "payload_signer=",
                                  "payload_signer_args=",
+                                 "extracted_input_target_files=",
                              ], extra_option_handler=option_handler)
 
   if len(args) != 2:
@@ -2063,11 +1286,17 @@
     if OPTIONS.incremental_source is None:
       raise ValueError("Cannot generate downgradable full OTAs")
 
+  assert not (OPTIONS.downgrade and OPTIONS.timestamp), \
+      "Cannot specify --downgrade and --override_timestamp at the same time"
+
   # Load the dict file from the zip directly to have a peek at the OTA type.
   # For packages using A/B update, unzipping is not needed.
-  input_zip = zipfile.ZipFile(args[0], "r")
-  OPTIONS.info_dict = common.LoadInfoDict(input_zip)
-  common.ZipClose(input_zip)
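+  # With --extracted_input_target_files, read the info dict directly from the
+  # already-unzipped directory instead of peeking into the zip.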
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input,
+                                            OPTIONS.extracted_input)
+  else:
+    input_zip = zipfile.ZipFile(args[0], "r")
+    OPTIONS.info_dict = common.LoadInfoDict(input_zip)
+    common.ZipClose(input_zip)
 
   ab_update = OPTIONS.info_dict.get("ab_update") == "true"
 
@@ -2097,11 +1326,18 @@
   if OPTIONS.extra_script is not None:
     OPTIONS.extra_script = open(OPTIONS.extra_script).read()
 
-  print("unzipping target target-files...")
-  OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.input_tmp = OPTIONS.extracted_input
+    OPTIONS.target_tmp = OPTIONS.input_tmp
+    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp,
+                                            OPTIONS.input_tmp)
+    input_zip = zipfile.ZipFile(args[0], "r")
+  else:
+    print("unzipping target target-files...")
+    OPTIONS.input_tmp, input_zip = common.UnzipTemp(
+        args[0], UNZIP_PATTERN)
 
-  OPTIONS.target_tmp = OPTIONS.input_tmp
-  OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
+    OPTIONS.target_tmp = OPTIONS.input_tmp
+    OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
 
   if OPTIONS.verbose:
     print("--- target info ---")
@@ -2166,7 +1402,8 @@
   else:
     print("unzipping source target-files...")
     OPTIONS.source_tmp, source_zip = common.UnzipTemp(
-        OPTIONS.incremental_source)
+        OPTIONS.incremental_source,
+        UNZIP_PATTERN)
     OPTIONS.target_info_dict = OPTIONS.info_dict
     OPTIONS.source_info_dict = common.LoadInfoDict(source_zip,
                                                    OPTIONS.source_tmp)
@@ -2174,7 +1411,7 @@
       print("--- source info ---")
       common.DumpInfoDict(OPTIONS.source_info_dict)
     try:
-      WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
+      WriteBlockIncrementalOTAPackage(input_zip, source_zip, output_zip)
       if OPTIONS.log_diff:
         out_file = open(OPTIONS.log_diff, 'w')
         import target_files_diff
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 4ba7560..7eb60d9 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -144,6 +144,12 @@
     f.seek(16, os.SEEK_SET)
     f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
 
+  def RangeSha1(self, ranges):
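+    """Returns the hex SHA-1 digest of the data in the given ranges."""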
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
   def ReadRangeSet(self, ranges):
     return [d for d in self._GetRangeData(ranges)]
 
@@ -155,10 +161,11 @@
     ranges = self.care_map
     if not include_clobbered_blocks:
       ranges = ranges.subtract(self.clobbered_blocks)
-    h = sha1()
-    for d in self._GetRangeData(ranges):
-      h.update(d)
-    return h.hexdigest()
+    return self.RangeSha1(ranges)
+
+  def WriteRangeDataToFd(self, ranges, fd):
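+    """Writes the data in the given ranges to the given file object."""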
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
 
   def _GetRangeData(self, ranges):
     """Generator that produces all the image data in 'ranges'.  The
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index cc1fa23..e5a3694 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -41,14 +41,14 @@
     block_image_diff = BlockImageDiff(tgt, src)
 
     transfers = block_image_diff.transfers
-    t0 = Transfer(
-        "t1", "t1", RangeSet("10-15"), RangeSet("0-5"), "move", transfers)
-    t1 = Transfer(
-        "t2", "t2", RangeSet("20-25"), RangeSet("0-7"), "move", transfers)
-    t2 = Transfer(
-        "t3", "t3", RangeSet("30-35"), RangeSet("0-4"), "move", transfers)
-    t3 = Transfer(
-        "t4", "t4", RangeSet("0-10"), RangeSet("40-50"), "move", transfers)
+    t0 = Transfer("t1", "t1", RangeSet("10-15"), RangeSet("0-5"), "t1hash",
+                  "t1hash", "move", transfers)
+    t1 = Transfer("t2", "t2", RangeSet("20-25"), RangeSet("0-7"), "t2hash",
+                  "t2hash", "move", transfers)
+    t2 = Transfer("t3", "t3", RangeSet("30-35"), RangeSet("0-4"), "t3hash",
+                  "t3hash", "move", transfers)
+    t3 = Transfer("t4", "t4", RangeSet("0-10"), RangeSet("40-50"), "t4hash",
+                  "t4hash", "move", transfers)
 
     block_image_diff.GenerateDigraph()
     t3_goes_after_copy = t3.goes_after.copy()
@@ -87,10 +87,10 @@
     block_image_diff = BlockImageDiff(tgt, src, version=3)
 
     transfers = block_image_diff.transfers
-    Transfer("t1", "t1", RangeSet("11-15"), RangeSet("20-29"), "diff",
-             transfers)
-    Transfer("t2", "t2", RangeSet("20-29"), RangeSet("11-15"), "diff",
-             transfers)
+    Transfer("t1", "t1", RangeSet("11-15"), RangeSet("20-29"), "t1hash",
+             "t1hash", "diff", transfers)
+    Transfer("t2", "t2", RangeSet("20-29"), RangeSet("11-15"), "t2hash",
+             "t2hash", "diff", transfers)
 
     block_image_diff.GenerateDigraph()
     block_image_diff.FindVertexSequence()
@@ -121,12 +121,12 @@
     block_image_diff = BlockImageDiff(tgt, src, version=3)
 
     transfers = block_image_diff.transfers
-    t1 = Transfer("t1", "t1", RangeSet("11-15"), RangeSet("1-5"), "diff",
-                  transfers)
-    t2 = Transfer("t2", "t2", RangeSet("21-25"), RangeSet("11-15"), "diff",
-                  transfers)
+    t1 = Transfer("t1", "t1", RangeSet("11-15"), RangeSet("1-5"), "t1hash",
+                  "t1hash", "diff", transfers)
+    t2 = Transfer("t2", "t2", RangeSet("21-25"), RangeSet("11-15"), "t2hash",
+                  "t2hash", "diff", transfers)
     t3 = Transfer("t3", "t3", RangeSet("1-5 30-39"), RangeSet("11-15 30-39"),
-                  "diff", transfers)
+                  "t3hash", "t3hash", "diff", transfers)
 
     block_image_diff.GenerateDigraph()
 
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
new file mode 100755
index 0000000..1dd3159
--- /dev/null
+++ b/tools/releasetools/validate_target_files.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Validate a given (signed) target_files.zip.
+
+It performs checks to ensure the integrity of the input zip.
+ - It verifies the file consistency between the ones in IMAGES/system.img (read
+   via IMAGES/system.map) and the ones under unpacked folder of SYSTEM/. The
+   same check also applies to the vendor image if present.
+"""
+
+import common
+import logging
+import os.path
+import sparse_img
+import sys
+
+
+def _GetImage(which, tmpdir):
+  assert which in ('system', 'vendor')
+
+  path = os.path.join(tmpdir, 'IMAGES', which + '.img')
+  mappath = os.path.join(tmpdir, 'IMAGES', which + '.map')
+
+  # Map file must exist (allowed to be empty).
+  assert os.path.exists(path) and os.path.exists(mappath)
+
+  clobbered_blocks = '0'
+  return sparse_img.SparseImage(path, mappath, clobbered_blocks)
+
+
+def ValidateFileConsistency(input_zip, input_tmp):
+  """Compare the files from image files and unpacked folders."""
+
+  def RoundUpTo4K(value):
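+    # Rounds up to the next 4096-byte boundary, e.g. 1 -> 4096, 4096 -> 4096.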
+    rounded_up = value + 4095
+    return rounded_up - (rounded_up % 4096)
+
+  def CheckAllFiles(which):
+    logging.info('Checking %s image.', which)
+    image = _GetImage(which, input_tmp)
+    prefix = '/' + which
+    for entry in image.file_map:
+      if not entry.startswith(prefix):
+        continue
+
+      # Read the blocks where the file resides. Note that the data will
+      # include the bytes past the file length, which are expected to be
+      # padded with '\0's.
+      ranges = image.file_map[entry]
+      blocks_sha1 = image.RangeSha1(ranges)
+
+      # The filename under unpacked directory, such as SYSTEM/bin/sh.
+      unpacked_name = os.path.join(
+          input_tmp, which.upper(), entry[(len(prefix) + 1):])
+      with open(unpacked_name) as f:
+        file_data = f.read()
+      file_size = len(file_data)
+      file_size_rounded_up = RoundUpTo4K(file_size)
+      file_data += '\0' * (file_size_rounded_up - file_size)
+      file_sha1 = common.File(entry, file_data).sha1
+
+      assert blocks_sha1 == file_sha1, \
+          'file: %s, range: %s, blocks_sha1: %s, file_sha1: %s' % (
+              entry, ranges, blocks_sha1, file_sha1)
+
+  logging.info('Validating file consistency.')
+
+  # Verify IMAGES/system.img.
+  CheckAllFiles('system')
+
+  # Verify IMAGES/vendor.img if applicable.
+  if 'VENDOR/' in input_zip.namelist():
+    CheckAllFiles('vendor')
+
+  # Not checking IMAGES/system_other.img since it doesn't have the map file.
+
+
+def main(argv):
+  def option_handler():
+    return True
+
+  args = common.ParseOptions(
+      argv, __doc__, extra_opts="",
+      extra_long_opts=[],
+      extra_option_handler=option_handler)
+
+  if len(args) != 1:
+    common.Usage(__doc__)
+    sys.exit(1)
+
+  logging_format = '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s'
+  date_format = '%Y/%m/%d %H:%M:%S'
+  logging.basicConfig(level=logging.INFO, format=logging_format,
+                      datefmt=date_format)
+
+  logging.info("Unzipping the input target_files.zip: %s", args[0])
+  input_tmp, input_zip = common.UnzipTemp(args[0])
+
+  ValidateFileConsistency(input_zip, input_tmp)
+
+  # TODO: Check if the OTA keys have been properly updated (the ones on /system,
+  # in recovery image).
+
+  # TODO(b/35411009): Verify the contents in /system/bin/install-recovery.sh.
+
+  logging.info("Done.")
+
+
+if __name__ == '__main__':
+  try:
+    main(sys.argv[1:])
+  finally:
+    common.Cleanup()
diff --git a/tools/signapk/Android.mk b/tools/signapk/Android.mk
index 4506e2f..051a51d 100644
--- a/tools/signapk/Android.mk
+++ b/tools/signapk/Android.mk
@@ -30,7 +30,6 @@
 include $(BUILD_HOST_JAVA_LIBRARY)
 
 ifeq ($(TARGET_BUILD_APPS),)
-ifeq ($(BRILLO),)
 # The post-build signing tools need signapk.jar and its shared libraries,
 # but we don't need this if we're just doing unbundled apps.
 my_dist_files := $(LOCAL_INSTALLED_MODULE) \
@@ -39,4 +38,3 @@
 $(call dist-for-goals,droidcore,$(my_dist_files))
 my_dist_files :=
 endif
-endif
diff --git a/tools/soong_to_convert.py b/tools/soong_to_convert.py
index 379a1ad..3d62d43 100755
--- a/tools/soong_to_convert.py
+++ b/tools/soong_to_convert.py
@@ -42,7 +42,6 @@
   dotdot_incs: LOCAL_C_INCLUDES contains paths include '..'
   srcs_dotarm: LOCAL_SRC_FILES contains source files like <...>.c.arm
   aidl: LOCAL_SRC_FILES contains .aidl sources
-  dbus: LOCAL_SRC_FILES contains .dbus-xml sources
   objc: LOCAL_SRC_FILES contains Objective-C sources
   proto: LOCAL_SRC_FILES contains .proto sources
   rs: LOCAL_SRC_FILES contains renderscript sources
diff --git a/tools/warn.py b/tools/warn.py
index 5be6d9d..e786e3c 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -73,14 +73,9 @@
 # New dynamic HTML related function to emit data:
 #   escape_string, strip_escape_string, emit_warning_arrays
 #   emit_js_data():
-#
-# To emit csv files of warning message counts:
-#   flag --gencsv
-#   description_for_csv, string_for_csv:
-#   count_severity(sev, kind):
-#   dump_csv():
 
 import argparse
+import csv
 import multiprocessing
 import os
 import re
@@ -88,6 +83,9 @@
 import sys
 
 parser = argparse.ArgumentParser(description='Convert a build log into HTML')
+parser.add_argument('--csvpath',
+                    help='Write the CSV warning report to the given path',
+                    default=None)
 parser.add_argument('--gencsv',
                     help='Generate a CSV file with number of various warnings',
                     action='store_true',
@@ -2672,48 +2670,46 @@
   return category['description']
 
 
-def string_for_csv(s):
-  # Only some Java warning desciptions have used quotation marks.
-  # TODO(chh): if s has double quote character, s should be quoted.
-  if ',' in s:
-    # TODO(chh): replace a double quote with two double quotes in s.
-    return '"{}"'.format(s)
-  return s
-
-
-def count_severity(sev, kind):
+def count_severity(writer, sev, kind):
   """Count warnings of given severity."""
   total = 0
   for i in warn_patterns:
     if i['severity'] == sev and i['members']:
       n = len(i['members'])
       total += n
-      warning = string_for_csv(kind + ': ' + description_for_csv(i))
-      print '{},,{}'.format(n, warning)
+      warning = kind + ': ' + description_for_csv(i)
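+      # Each row is [count, project, description]; '' marks the
+      # all-projects total for this warning.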
+      writer.writerow([n, '', warning])
       # print number of warnings for each project, ordered by project name.
       projects = i['projects'].keys()
       projects.sort()
       for p in projects:
-        print '{},{},{}'.format(i['projects'][p], p, warning)
-  print '{},,{}'.format(total, kind + ' warnings')
+        writer.writerow([i['projects'][p], p, warning])
+  writer.writerow([total, '', kind + ' warnings'])
+
   return total
 
 
-# dump number of warnings in csv format to stdout
-def dump_csv():
-  """Dump number of warnings in csv format to stdout."""
+def dump_csv(writer):
+  """Dump number of warnings in csv format to the given writer."""
   sort_warnings()
   total = 0
   for s in Severity.range:
-    total += count_severity(s, Severity.column_headers[s])
-  print '{},,{}'.format(total, 'All warnings')
+    total += count_severity(writer, s, Severity.column_headers[s])
+  writer.writerow([total, '', 'All warnings'])
 
 
 def main():
   warning_lines = parse_input_file(open(args.buildlog, 'r'))
   parallel_classify_warnings(warning_lines)
+  # If the user passed a csv path, save the csv output to that path.
+  # If the user also passed --gencsv, write the csv output to stdout.
+  # If the user did not pass the --gencsv flag, dump the html report to stdout.
+  if args.csvpath:
+    with open(args.csvpath, 'w') as f:
+      dump_csv(csv.writer(f, lineterminator='\n'))
   if args.gencsv:
-    dump_csv()
+    dump_csv(csv.writer(sys.stdout, lineterminator='\n'))
   else:
     dump_html()
 
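With this change the CSV report can go to a file, to stdout, or both; for
example (hypothetical paths):

    $ ./warn.py --csvpath /tmp/warnings.csv build.log   # CSV to file, HTML to stdout
    $ ./warn.py --gencsv build.log > warnings.csv       # CSV to stdout

The two flags are independent: passing only --csvpath still emits the HTML
report on stdout, since the HTML/CSV choice is tied to --gencsv alone.
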
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 98d02e0..1b39902 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -359,8 +359,7 @@
  * safely written.  Not really a concern for us.
  */
 status_t ZipFile::addCommon(const char* fileName, const void* data, size_t size,
-    const char* storageName, int sourceType, int compressionMethod,
-    ZipEntry** ppEntry)
+    const char* storageName, int compressionMethod, ZipEntry** ppEntry)
 {
     ZipEntry* pEntry = NULL;
     status_t result = NO_ERROR;
@@ -414,81 +413,51 @@
     /*
      * Copy the data in, possibly compressing it as we go.
      */
-    if (sourceType == ZipEntry::kCompressStored) {
-        if (compressionMethod == ZipEntry::kCompressDeflated) {
-            bool failed = false;
-            result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
-            if (result != NO_ERROR) {
-                ALOGD("compression failed, storing\n");
-                failed = true;
-            } else {
-                /*
-                 * Make sure it has compressed "enough".  This probably ought
-                 * to be set through an API call, but I don't expect our
-                 * criteria to change over time.
-                 */
-                long src = inputFp ? ftell(inputFp) : size;
-                long dst = ftell(mZipFp) - startPosn;
-                if (dst + (dst / 10) > src) {
-                    ALOGD("insufficient compression (src=%ld dst=%ld), storing\n",
-                        src, dst);
-                    failed = true;
-                }
-            }
-
-            if (failed) {
-                compressionMethod = ZipEntry::kCompressStored;
-                if (inputFp) rewind(inputFp);
-                fseek(mZipFp, startPosn, SEEK_SET);
-                /* fall through to kCompressStored case */
-            }
-        }
-        /* handle "no compression" request, or failed compression from above */
-        if (compressionMethod == ZipEntry::kCompressStored) {
-            if (inputFp) {
-                result = copyFpToFp(mZipFp, inputFp, &crc);
-            } else {
-                result = copyDataToFp(mZipFp, data, size, &crc);
-            }
-            if (result != NO_ERROR) {
-                // don't need to truncate; happens in CDE rewrite
-                ALOGD("failed copying data in\n");
-                goto bail;
-            }
-        }
-
-        // currently seeked to end of file
-        uncompressedLen = inputFp ? ftell(inputFp) : size;
-    } else if (sourceType == ZipEntry::kCompressDeflated) {
-        /* we should support uncompressed-from-compressed, but it's not
-         * important right now */
-        assert(compressionMethod == ZipEntry::kCompressDeflated);
-
-        bool scanResult;
-        int method;
-        long compressedLen;
-        unsigned long longcrc;
-
-        scanResult = ZipUtils::examineGzip(inputFp, &method, &uncompressedLen,
-                        &compressedLen, &longcrc);
-        if (!scanResult || method != ZipEntry::kCompressDeflated) {
-            ALOGD("this isn't a deflated gzip file?");
-            result = UNKNOWN_ERROR;
-            goto bail;
-        }
-        crc = longcrc;
-
-        result = copyPartialFpToFp(mZipFp, inputFp, compressedLen, NULL);
+    if (compressionMethod == ZipEntry::kCompressDeflated) {
+        bool failed = false;
+        result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
         if (result != NO_ERROR) {
-            ALOGD("failed copying gzip data in\n");
+            ALOGD("compression failed, storing\n");
+            failed = true;
+        } else {
+            /*
+             * Make sure it has compressed "enough".  This probably ought
+             * to be set through an API call, but I don't expect our
+             * criteria to change over time.
+             */
+            long src = inputFp ? ftell(inputFp) : size;
+            long dst = ftell(mZipFp) - startPosn;
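+            // Keep the deflated copy only if it is at most ~10/11 of the
+            // source size, i.e. compression saved at least ~9%.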
+            if (dst + (dst / 10) > src) {
+                ALOGD("insufficient compression (src=%ld dst=%ld), storing\n",
+                    src, dst);
+                failed = true;
+            }
+        }
+
+        if (failed) {
+            compressionMethod = ZipEntry::kCompressStored;
+            if (inputFp) rewind(inputFp);
+            fseek(mZipFp, startPosn, SEEK_SET);
+            /* fall through to kCompressStored case */
+        }
+    }
+    /* handle "no compression" request, or failed compression from above */
+    if (compressionMethod == ZipEntry::kCompressStored) {
+        if (inputFp) {
+            result = copyFpToFp(mZipFp, inputFp, &crc);
+        } else {
+            result = copyDataToFp(mZipFp, data, size, &crc);
+        }
+        if (result != NO_ERROR) {
+            // don't need to truncate; happens in CDE rewrite
+            ALOGD("failed copying data in\n");
             goto bail;
         }
-    } else {
-        assert(false);
-        result = UNKNOWN_ERROR;
-        goto bail;
     }
 
+    // currently seeked to end of file
+    uncompressedLen = inputFp ? ftell(inputFp) : size;
+
     /*
      * We could write the "Data Descriptor", but there doesn't seem to
      * be any point since we're going to go back and write the LFH.
diff --git a/tools/zipalign/ZipFile.h b/tools/zipalign/ZipFile.h
index b0bafe9..d5ace7c 100644
--- a/tools/zipalign/ZipFile.h
+++ b/tools/zipalign/ZipFile.h
@@ -86,24 +86,10 @@
         int compressionMethod, ZipEntry** ppEntry)
     {
         return addCommon(fileName, NULL, 0, storageName,
-                         ZipEntry::kCompressStored,
                          compressionMethod, ppEntry);
     }
 
     /*
-     * Add a file that is already compressed with gzip.
-     *
-     * If "ppEntry" is non-NULL, a pointer to the new entry will be returned.
-     */
-    status_t addGzip(const char* fileName, const char* storageName,
-        ZipEntry** ppEntry)
-    {
-        return addCommon(fileName, NULL, 0, storageName,
-                         ZipEntry::kCompressDeflated,
-                         ZipEntry::kCompressDeflated, ppEntry);
-    }
-
-    /*
      * Add a file from an in-memory data buffer.
      *
      * If "ppEntry" is non-NULL, a pointer to the new entry will be returned.
@@ -112,7 +98,6 @@
         int compressionMethod, ZipEntry** ppEntry)
     {
         return addCommon(NULL, data, size, storageName,
-                         ZipEntry::kCompressStored,
                          compressionMethod, ppEntry);
     }
 
@@ -231,8 +216,7 @@
 
     /* common handler for all "add" functions */
     status_t addCommon(const char* fileName, const void* data, size_t size,
-        const char* storageName, int sourceType, int compressionMethod,
-        ZipEntry** ppEntry);
+        const char* storageName, int compressionMethod, ZipEntry** ppEntry);
 
     /* copy all of "srcFp" into "dstFp" */
     status_t copyFpToFp(FILE* dstFp, FILE* srcFp, uint32_t* pCRC32);
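
With addGzip() and the sourceType parameter removed, callers now pick between
exactly two behaviors per entry. A minimal usage sketch (assuming the open()
flags and add() overload declared elsewhere in ZipFile.h):

    ZipFile zip;
    zip.open("out.zip", ZipFile::kOpenReadWrite | ZipFile::kOpenCreate);
    // Try to deflate; the entry is silently stored if compression doesn't pay off.
    zip.add("payload.bin", "payload.bin", ZipEntry::kCompressDeflated, NULL);
    // Force an uncompressed (stored) entry.
    zip.add("already.zip", "already.zip", ZipEntry::kCompressStored, NULL);
    zip.flush();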