Merge "Store odex files in oat/<isa>/ directory."
diff --git a/CleanSpec.mk b/CleanSpec.mk
index f5f7991..edf0179 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -334,6 +334,21 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
 
+# Rename dalvik.vm.usejit to debug.dalvik.vm.usejit
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Revert rename dalvik.vm.usejit to debug.dalvik.vm.usejit
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Change from interpret-only to verify-at-runtime.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/Makefile b/core/Makefile
index 539bf71..478c966 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1248,6 +1248,7 @@
 	  $(HOST_OUT_EXECUTABLES)/imgdiff \
 	  $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar \
 	  $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar \
+	  $(HOST_OUT_JAVA_LIBRARIES)/BootSignature.jar \
 	  $(HOST_OUT_EXECUTABLES)/mkuserimg.sh \
 	  $(HOST_OUT_EXECUTABLES)/make_ext4fs \
 	  $(HOST_OUT_EXECUTABLES)/simg2img \
@@ -1263,6 +1264,32 @@
 .PHONY: otatools
 otatools: $(OTATOOLS)
 
+BUILT_OTATOOLS_PACKAGE := $(PRODUCT_OUT)/otatools.zip
+$(BUILT_OTATOOLS_PACKAGE): \
+	intermediate := $(call intermediates-dir-for,PACKAGING,otatools)
+$(BUILT_OTATOOLS_PACKAGE): \
+	zip_root := $(intermediate)/otatools
+
+otatools_lib_path := $(notdir $(HOST_OUT_SHARED_LIBRARIES))
+$(BUILT_OTATOOLS_PACKAGE): \
+		$(OTATOOLS) \
+		$(HOST_OUT_SHARED_LIBRARIES)/libc++.so
+	@echo "Package OTA tools: $@"
+	$(hide) rm -rf $@ $(zip_root)
+	$(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools $(zip_root)/$(otatools_lib_path)
+	$(hide) $(ACP) -p $(OTATOOLS) $(zip_root)/bin
+	$(hide) mv $(zip_root)/bin/*.jar $(zip_root)/framework/
+	$(hide) $(ACP) $(HOST_OUT_SHARED_LIBRARIES)/libc++.so $(zip_root)/$(otatools_lib_path)
+	$(hide) $(ACP) -r -d -p build/tools/releasetools/* $(zip_root)/releasetools
+	$(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
+	$(hide) (cd $(zip_root) && zip -qry $(abspath $@) bin framework releasetools $(otatools_lib_path))
+	$(hide) zip -qry $(abspath $@) build/target/product/security/
+	$(hide) find device vendor -name \*.pk8 -o -name \*.x509.pem -o -name oem.prop | xargs zip -qry $(abspath $@)
+
+.PHONY: otatools-package
+otatools-package: $(BUILT_OTATOOLS_PACKAGE)
+
+
 # -----------------------------------------------------------------
 # A zip of the directories that map to the target filesystem.
 # This zip can be used to create an OTA package or filesystem image
@@ -1423,7 +1450,8 @@
 	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
 endif
 	$(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt)
-	$(hide) ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+	    ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
 	@# Zip everything up, preserving symlinks
 	$(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .)
 	@# Run fs_config on all the system, vendor, boot ramdisk,
@@ -1433,7 +1461,8 @@
 	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
 	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
 	$(hide) (cd $(zip_root) && zip -q ../$(notdir $@) META/*filesystem_config.txt)
-	$(hide) ./build/tools/releasetools/add_img_to_target_files -p $(HOST_OUT) $@
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+	    ./build/tools/releasetools/add_img_to_target_files -p $(HOST_OUT) $@
 
 .PHONY: target-files-package
 target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
@@ -1462,7 +1491,7 @@
 
 $(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
 	@echo "Package OTA: $@"
-	$(hide) MKBOOTIMG=$(MKBOOTIMG) \
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
 	   ./build/tools/releasetools/ota_from_target_files -v \
 	   --block \
 	   -p $(HOST_OUT) \
@@ -1491,7 +1520,7 @@
 
 $(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
 	@echo "Package: $@"
-	$(hide) MKBOOTIMG=$(MKBOOTIMG) \
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
 	   ./build/tools/releasetools/img_from_target_files -v \
 	   -p $(HOST_OUT) \
 	   $(BUILT_TARGET_FILES_PACKAGE) $@
diff --git a/core/binary.mk b/core/binary.mk
index 9fa968d..92b08fa 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -213,10 +213,15 @@
 ## Please note that we will do option filtering during FDO build.
 ## i.e. Os->O2, remove -fno-early-inline and -finline-limit.
 ##################################################################
-ifeq ($(strip $(LOCAL_FDO_SUPPORT)), true)
-  ifeq ($(strip $(LOCAL_IS_HOST_MODULE)),)
-    my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_CFLAGS)
-    my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_LDFLAGS)
+my_fdo_build :=
+ifneq ($(filter true always, $(LOCAL_FDO_SUPPORT)),)
+  ifeq ($(BUILD_FDO_INSTRUMENT),true)
+    my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_CFLAGS)
+    my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_LDFLAGS)
+    my_fdo_build := true
+  else ifneq ($(filter true,$(BUILD_FDO_OPTIMIZE))$(filter always,$(LOCAL_FDO_SUPPORT)),)
+    my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_OPTIMIZE_CFLAGS)
+    my_fdo_build := true
   endif
 endif
 
@@ -1045,19 +1050,10 @@
 my_ldflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_ldflags))
 endif
 
-ifeq ($(LOCAL_FDO_SUPPORT), true)
-  build_with_fdo := false
-  ifeq ($(BUILD_FDO_INSTRUMENT), true)
-    build_with_fdo := true
-  endif
-  ifeq ($(BUILD_FDO_OPTIMIZE), true)
-    build_with_fdo := true
-  endif
-  ifeq ($(build_with_fdo), true)
-    my_cflags := $(patsubst -Os,-O2,$(my_cflags))
-    fdo_incompatible_flags=-fno-early-inlining -finline-limit=%
-    my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
-  endif
+ifeq ($(my_fdo_build), true)
+  my_cflags := $(patsubst -Os,-O2,$(my_cflags))
+  fdo_incompatible_flags := -fno-early-inlining -finline-limit=%
+  my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
 endif
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_YACCFLAGS := $(LOCAL_YACCFLAGS)
diff --git a/core/combo/TARGET_linux-x86_64.mk b/core/combo/TARGET_linux-x86_64.mk
index 89e42f9..2adb157 100644
--- a/core/combo/TARGET_linux-x86_64.mk
+++ b/core/combo/TARGET_linux-x86_64.mk
@@ -23,7 +23,7 @@
 endif
 
 # Decouple NDK library selection with platform compiler version
-TARGET_NDK_GCC_VERSION := 4.8
+TARGET_NDK_GCC_VERSION := 4.9
 
 ifeq ($(strip $(TARGET_GCC_VERSION_EXP)),)
 TARGET_GCC_VERSION := 4.8
diff --git a/core/combo/fdo.mk b/core/combo/fdo.mk
index 9e331b6..8fb8fd3 100644
--- a/core/combo/fdo.mk
+++ b/core/combo/fdo.mk
@@ -18,22 +18,16 @@
 
 $(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS:=
 
-ifeq ($(strip $(BUILD_FDO_INSTRUMENT)), true)
-  # Set BUILD_FDO_INSTRUMENT=true to turn on FDO instrumentation.
-  # The profile will be generated on /sdcard/fdo_profile on the device.
-  $(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS := -fprofile-generate=/sdcard/fdo_profile -DANDROID_FDO
-  $(combo_2nd_arch_prefix)TARGET_FDO_LDFLAGS := -lgcov -lgcc
-else
-  ifeq ($(strip $(BUILD_FDO_OPTIMIZE)), true)
-    # Set TARGET_FDO_PROFILE_PATH to set a custom profile directory for your build.
-    ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)),)
-      $(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH := vendor/google_data/fdo_profile
-    endif
+# Set BUILD_FDO_INSTRUMENT=true to turn on FDO instrumentation.
+# The profile will be generated on /sdcard/fdo_profile on the device.
+$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_CFLAGS := -fprofile-generate=/sdcard/fdo_profile -DANDROID_FDO
+$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_LDFLAGS := -lgcov -lgcc
 
-    ifneq ($(strip $(wildcard $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)/$(PRODUCT_OUT))),)
-      $(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS := -fprofile-use=$($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH) -DANDROID_FDO -fprofile-correction -Wcoverage-mismatch -Wno-error
-    else
-      $(warning Profile directory $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)/$(PRODUCT_OUT) does not exist. Turn off FDO.)
-    endif
-  endif
+# Set TARGET_FDO_PROFILE_PATH to set a custom profile directory for your build.
+ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)),)
+  $(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH := vendor/google_data/fdo_profile
 endif
+
+$(combo_2nd_arch_prefix)TARGET_FDO_OPTIMIZE_CFLAGS := \
+    -fprofile-use=$($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH) \
+    -DANDROID_FDO -fprofile-correction -Wcoverage-mismatch -Wno-error
diff --git a/core/config.mk b/core/config.mk
index d6449d5..0b76b53 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -96,8 +96,6 @@
 BUILD_HOST_DALVIK_STATIC_JAVA_LIBRARY := $(BUILD_SYSTEM)/host_dalvik_static_java_library.mk
 
 
--include cts/build/config.mk
-
 # ###############################################################
 # Parse out any modifier targets.
 # ###############################################################
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index 265d8cb..63e2069 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -53,6 +53,11 @@
         my_shared_libraries += libc++
     else
         my_static_libraries += libc++_static
+        ifndef LOCAL_IS_HOST_MODULE
+            ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
+                my_static_libraries += libm libc libdl
+            endif
+        endif
     endif
 
     ifdef LOCAL_IS_HOST_MODULE
diff --git a/core/definitions.mk b/core/definitions.mk
index f4fc47d..f59b78e 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1435,7 +1435,8 @@
 define transform-to-stripped
 @mkdir -p $(dir $@)
 @echo "target Strip: $(PRIVATE_MODULE) ($@)"
-$(hide) $(PRIVATE_STRIP) --strip-all $< -o $@ $(TARGET_STRIP_EXTRA)
+$(hide) $(PRIVATE_STRIP) --strip-all $< -o $@ \
+  $(if $(PRIVATE_NO_DEBUGLINK),,$(TARGET_STRIP_EXTRA))
 endef
 
 define transform-to-stripped-keep-symbols
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index 08fb176..cf06a3d 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -88,18 +88,21 @@
   my_strip_module := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP_MODULE)
 endif
 
-ifeq ($(my_strip_module),true)
-# Strip the binary
-$(strip_output): PRIVATE_STRIP := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP)
-$(strip_output): PRIVATE_OBJCOPY := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJCOPY)
-$(strip_output): $(strip_input) | $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP)
-	$(transform-to-stripped)
-else
-ifeq ($(my_strip_module),keep_symbols)
-# Strip only the debug frames, but leave the symbol table.
 $(strip_output): PRIVATE_STRIP := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP)
 $(strip_output): PRIVATE_OBJCOPY := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJCOPY)
 $(strip_output): PRIVATE_READELF := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_READELF)
+ifeq ($(my_strip_module),no_debuglink)
+$(strip_output): PRIVATE_NO_DEBUGLINK := true
+else
+$(strip_output): PRIVATE_NO_DEBUGLINK :=
+endif
+
+ifneq ($(filter true no_debuglink,$(my_strip_module)),)
+# Strip the binary
+$(strip_output): $(strip_input) | $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP)
+	$(transform-to-stripped)
+else ifeq ($(my_strip_module),keep_symbols)
+# Strip only the debug frames, but leave the symbol table.
 $(strip_output): $(strip_input) | $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP)
 	$(transform-to-stripped-keep-symbols)
 
@@ -126,7 +129,6 @@
 	@echo "target Unstripped: $(PRIVATE_MODULE) ($@)"
 	$(copy-file-to-target-with-cp)
 endif
-endif
 endif # my_strip_module
 
 
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 43774ea..75391e1 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -72,6 +72,10 @@
   HOST_ARCH := x86_64
   HOST_2ND_ARCH := x86
   HOST_IS_64_BIT := true
+else
+ifneq (,$(findstring x86,$(UNAME)))
+$(error Building on a 32-bit x86 host is not supported: $(UNAME)!)
+endif
 endif
 
 BUILD_ARCH := $(HOST_ARCH)
diff --git a/core/java.mk b/core/java.mk
index c47ea22..cd90f03 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -212,6 +212,11 @@
 
 # We don't need the .so files in bundled branches
 # Prevent these from showing up on the device
+# One exception is librsjni.so, which is needed for
+# both the native path and the compat path.
+rs_jni_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/librsjni.so
+LOCAL_JNI_SHARED_LIBRARIES += librsjni
+
 ifneq (,$(TARGET_BUILD_APPS)$(FORCE_BUILD_RS_COMPAT))
 
 rs_compatibility_jni_libs := $(addprefix \
@@ -221,8 +226,7 @@
 $(rs_generated_bc) : $(RenderScript_file_stamp)
 
 rs_support_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/libRSSupport.so
-rs_jni_lib := $(TARGET_OUT_INTERMEDIATE_LIBRARIES)/librsjni.so
-LOCAL_JNI_SHARED_LIBRARIES += libRSSupport librsjni
+LOCAL_JNI_SHARED_LIBRARIES += libRSSupport
 
 rs_support_io_lib :=
 # check if the target api level support USAGE_IO
diff --git a/core/main.mk b/core/main.mk
index 4a2a626..13465e3 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -93,6 +93,9 @@
 # and host information.
 include $(BUILD_SYSTEM)/config.mk
 
+# CTS-specific config.
+-include cts/build/config.mk
+
 # This allows us to force a clean build - included after the config.mk
 # environment setup is done, but before we generate any dependencies.  This
 # file does the rm -rf inline so the deps which are all done below will
@@ -385,10 +388,10 @@
           ro.setupwizard.mode=OPTIONAL
 endif
 ifndef is_sdk_build
-  # Don't even verify the image on eng builds to speed startup
-  ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.image-dex2oat-filter=verify-none
-  # Don't compile apps on eng builds to speed startup
-  ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.dex2oat-filter=interpret-only
+  # Don't verify or compile the image on eng builds to speed startup.
+  ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.image-dex2oat-filter=verify-at-runtime
+  # Don't verify or compile apps on eng builds to speed startup.
+  ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.dex2oat-filter=verify-at-runtime
 endif
   ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.usejit=true
 endif
diff --git a/core/multi_prebuilt.mk b/core/multi_prebuilt.mk
index bc85cea..ed2fed6 100644
--- a/core/multi_prebuilt.mk
+++ b/core/multi_prebuilt.mk
@@ -76,8 +76,7 @@
      ) \
    ) \
   $(eval LOCAL_MODULE_SUFFIX := $(suffix $(LOCAL_SRC_FILES))) \
-  $(if $(filter user,$(TARGET_BUILD_VARIANT)), \
-    $(eval LOCAL_STRIP_MODULE := $(8))) \
+  $(eval LOCAL_STRIP_MODULE := $(8)) \
   $(eval include $(BUILD_PREBUILT)) \
  )
 endef
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index fc0f56c..47e21ef 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -39,6 +39,10 @@
   # Put the built targets of all shared libraries in a common directory
   # to simplify the link line.
   OVERRIDE_BUILT_MODULE_PATH := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)
+  ifeq ($(LOCAL_IS_HOST_MODULE)$(LOCAL_STRIP_MODULE),)
+    # Strip but do not try to add a debuglink
+    LOCAL_STRIP_MODULE := no_debuglink
+  endif
 endif
 
 ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
@@ -59,7 +63,7 @@
 LOCAL_INSTALLED_MODULE_STEM := $(LOCAL_MODULE).apk
 endif
 
-ifeq ($(LOCAL_STRIP_MODULE),true)
+ifneq ($(filter true no_debuglink,$(LOCAL_STRIP_MODULE)),)
   ifdef LOCAL_IS_HOST_MODULE
     $(error Cannot strip host module LOCAL_PATH=$(LOCAL_PATH))
   endif
@@ -71,6 +75,7 @@
   endif
   include $(BUILD_SYSTEM)/dynamic_binary.mk
   built_module := $(linked_module)
+
 else  # LOCAL_STRIP_MODULE not true
   include $(BUILD_SYSTEM)/base_rules.mk
   built_module := $(LOCAL_BUILT_MODULE)
@@ -112,6 +117,8 @@
 endif
 endif
 
+# We need to enclose the above export_includes and built_shared_libraries in
+# "LOCAL_STRIP_MODULE not true" because otherwise the rules are defined in dynamic_binary.mk.
 endif  # LOCAL_STRIP_MODULE not true
 
 ifeq ($(LOCAL_MODULE_CLASS),APPS)
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 8afd5a8..c98624d 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -76,7 +76,7 @@
 USE_OPENGL_RENDERER := true
 
 TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 943718400
+BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1073741824
 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
 BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
 BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
diff --git a/target/board/generic_mips/BoardConfig.mk b/target/board/generic_mips/BoardConfig.mk
index ff5a4e4..b40f9ba 100644
--- a/target/board/generic_mips/BoardConfig.mk
+++ b/target/board/generic_mips/BoardConfig.mk
@@ -53,7 +53,7 @@
 USE_OPENGL_RENDERER := true
 
 TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 838860800
+BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1073741824
 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
 BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
 BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
diff --git a/target/product/base.mk b/target/product/base.mk
index 0d052b5..85c01a2 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -95,7 +95,6 @@
     monkey \
     mtpd \
     ndc \
-    netcfg \
     netd \
     ping \
     ping6 \
diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk
index 2fd2ce8..9a2c63a 100644
--- a/target/product/full_base_telephony.mk
+++ b/target/product/full_base_telephony.mk
@@ -19,9 +19,6 @@
 # build quite specifically for the emulator, and might not be
 # entirely appropriate to inherit from for on-device configurations.
 
-PRODUCT_PACKAGES := \
-    VoiceDialer
-
 PRODUCT_PROPERTY_OVERRIDES := \
     keyguard.no_require_sim=true \
     ro.com.android.dataroaming=true
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index e98e4b6..8bbe452 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -30,9 +30,6 @@
 
 import errno
 import os
-import re
-import shutil
-import subprocess
 import tempfile
 import zipfile
 
@@ -70,10 +67,8 @@
   block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
   imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
                         block_list=block_list)
-  with open(imgname, "rb") as f:
-    common.ZipWriteStr(output_zip, prefix + "system.img", f.read())
-  with open(block_list, "rb") as f:
-    common.ZipWriteStr(output_zip, prefix + "system.map", f.read())
+  common.ZipWrite(output_zip, imgname, prefix + "system.img")
+  common.ZipWrite(output_zip, block_list, prefix + "system.map")
 
 
 def BuildSystem(input_dir, info_dict, block_list=None):
@@ -94,10 +89,8 @@
   block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
   imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict,
                      block_list=block_list)
-  with open(imgname, "rb") as f:
-    common.ZipWriteStr(output_zip, prefix + "vendor.img", f.read())
-  with open(block_list, "rb") as f:
-    common.ZipWriteStr(output_zip, prefix + "vendor.map", f.read())
+  common.ZipWrite(output_zip, imgname, prefix + "vendor.img")
+  common.ZipWrite(output_zip, block_list, prefix + "vendor.map")
 
 
 def BuildVendor(input_dir, info_dict, block_list=None):
@@ -296,7 +289,6 @@
   output_zip.close()
 
 def main(argv):
-
   def option_handler(o, a):
     if o in ("-a", "--add_missing"):
       OPTIONS.add_missing = True
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 5cf0f2d..5b5c4cc 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -140,8 +140,11 @@
     self.style = style
     self.intact = (getattr(tgt_ranges, "monotonic", False) and
                    getattr(src_ranges, "monotonic", False))
-    self.goes_before = {}
-    self.goes_after = {}
+
+    # We use OrderedDict rather than dict so that the output is repeatable;
+    # otherwise it would depend on the hash values of the Transfer objects.
+    self.goes_before = OrderedDict()
+    self.goes_after = OrderedDict()
 
     self.stash_before = []
     self.use_stash = []
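
The switch to OrderedDict above is purely about determinism: a plain dict in Python 2.7 iterates in an order derived from the objects' hash values, so two runs over the same images could order the edge maps (and ultimately the transfer list) differently. A minimal sketch of the property being relied on, illustrative only and not part of the patch:

    from collections import OrderedDict

    edges = OrderedDict()
    edges["c"] = 1  # insertion order is preserved ...
    edges["a"] = 2
    edges["b"] = 3

    # ... so iteration is reproducible across runs, unlike a plain dict keyed
    # by objects whose default hashes differ from process to process.
    assert list(edges) == ["c", "a", "b"]
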
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 14975d5..6903dc66 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -781,6 +781,46 @@
     return result
 
 
+def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
+             compress_type=None):
+  import datetime
+
+  # http://b/18015246
+  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
+  # for files larger than 2GiB. We can work around this by adjusting its
+  # limit. Note that `zipfile.writestr()` will not work for strings larger than
+  # 2GiB. The Python interpreter sometimes rejects strings that large (though
+  # it isn't clear to me exactly what circumstances cause this).
+  # `zipfile.write()` must be used directly to work around this.
+  #
+  # This mess can be avoided if we port to python3.
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  if compress_type is None:
+    compress_type = zip_file.compression
+  if arcname is None:
+    arcname = filename
+
+  saved_stat = os.stat(filename)
+
+  try:
+    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
+    # file to be zipped and reset it when we're done.
+    os.chmod(filename, perms)
+
+    # Use a fixed timestamp so the output is repeatable.
+    epoch = datetime.datetime.fromtimestamp(0)
+    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
+    os.utime(filename, (timestamp, timestamp))
+
+    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
+  finally:
+    os.chmod(filename, saved_stat.st_mode)
+    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
+    zipfile.ZIP64_LIMIT = saved_zip64_limit
+
+
 def ZipWriteStr(zip, filename, data, perms=0644, compression=None):
   # use a fixed timestamp so the output is repeatable.
   zinfo = zipfile.ZipInfo(filename=filename,
@@ -1024,20 +1064,22 @@
 
 
 class BlockDifference:
-  def __init__(self, partition, tgt, src=None, check_first_block=False):
+  def __init__(self, partition, tgt, src=None, check_first_block=False, version=None):
     self.tgt = tgt
     self.src = src
     self.partition = partition
     self.check_first_block = check_first_block
 
-    version = 1
-    if OPTIONS.info_dict:
-      version = max(
-          int(i) for i in
-          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    if version is None:
+      version = 1
+      if OPTIONS.info_dict:
+        version = max(
+            int(i) for i in
+            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    self.version = version
 
     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
-                                    version=version)
+                                    version=self.version)
     tmpdir = tempfile.mkdtemp()
     OPTIONS.tempfiles.append(tmpdir)
     self.path = os.path.join(tmpdir, partition)
@@ -1060,36 +1102,51 @@
     if not self.src:
       script.Print("Image %s will be patched unconditionally." % (partition,))
     else:
-      script.AppendExtra(('if block_image_verify("%s", '
-                          'package_extract_file("%s.transfer.list"), '
-                          '"%s.new.dat", "%s.patch.dat") then') %
-                         (self.device, partition, partition, partition))
-      script.Print("Verified %s image..." % (partition,))
+      if self.version >= 3:
+        script.AppendExtra(('if block_image_verify("%s", '
+                            'package_extract_file("%s.transfer.list"), '
+                            '"%s.new.dat", "%s.patch.dat") then') %
+                           (self.device, partition, partition, partition))
+      else:
+        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' %
+                            (self.device, self.src.care_map.to_string_raw(),
+                            self.src.TotalSha1()))
+      script.Print('Verified %s image...' % (partition,))
       script.AppendExtra('else');
 
+      # When generating incrementals for the system and vendor partitions,
+      # explicitly check the first block (which contains the superblock) of
+      # the partition to see if it's what we expect. If this check fails,
+      # give an explicit log message about the partition having been
+      # remounted R/W (the most likely explanation) and the need to flash to
+      # get OTAs working again.
       if self.check_first_block:
         self._CheckFirstBlock(script)
 
-      script.AppendExtra(('(range_sha1("%s", "%s") == "%s") ||\n'
-                          '  abort("%s partition has unexpected contents");\n'
-                          'endif;') %
-                         (self.device, self.tgt.care_map.to_string_raw(),
-                          self.tgt.TotalSha1(), self.partition))
+      # Abort the OTA update. Note that the incremental OTA cannot be applied
+      # even if it may match the checksum of the target partition.
+      # a) If version < 3, operations like move and erase will make changes
+      #    unconditionally and damage the partition.
+      # b) If version >= 3, it won't even reach here.
+      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
+                          'endif;') % (partition,))
 
   def _WriteUpdate(self, script, output_zip):
-    partition = self.partition
-    with open(self.path + ".transfer.list", "rb") as f:
-      ZipWriteStr(output_zip, partition + ".transfer.list", f.read())
-    with open(self.path + ".new.dat", "rb") as f:
-      ZipWriteStr(output_zip, partition + ".new.dat", f.read())
-    with open(self.path + ".patch.dat", "rb") as f:
-      ZipWriteStr(output_zip, partition + ".patch.dat", f.read(),
-                         compression=zipfile.ZIP_STORED)
+    ZipWrite(output_zip,
+             '{}.transfer.list'.format(self.path),
+             '{}.transfer.list'.format(self.partition))
+    ZipWrite(output_zip,
+             '{}.new.dat'.format(self.path),
+             '{}.new.dat'.format(self.partition))
+    ZipWrite(output_zip,
+             '{}.patch.dat'.format(self.path),
+             '{}.patch.dat'.format(self.partition),
+             compress_type=zipfile.ZIP_STORED)
 
-    call = (('block_image_update("%s", '
-             'package_extract_file("%s.transfer.list"), '
-             '"%s.new.dat", "%s.patch.dat");\n') %
-            (self.device, partition, partition, partition))
+    call = ('block_image_update("{device}", '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
+                device=self.device, partition=self.partition))
     script.AppendExtra(script._WordWrap(call))
 
   def _HashBlocks(self, source, ranges):
@@ -1104,14 +1161,11 @@
   def _CheckFirstBlock(self, script):
     r = RangeSet((0, 1))
     srchash = self._HashBlocks(self.src, r);
-    tgthash = self._HashBlocks(self.tgt, r);
 
     script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
-                        '(range_sha1("%s", "%s") == "%s") || '
                         'abort("%s has been remounted R/W; '
                         'reflash device to reenable OTA updates");')
                        % (self.device, r.to_string_raw(), srchash,
-                          self.device, r.to_string_raw(), tgthash,
                           self.device))
 
 DataImage = blockimgdiff.DataImage
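
For context, a sketch of how the new common.ZipWrite is meant to be called from the scripts above; the paths are placeholders rather than values taken from the patch:

    import zipfile

    import common

    output_zip = zipfile.ZipFile("target-files.zip", "a", zipfile.ZIP_DEFLATED)
    try:
      # The entry gets 0o644 permissions and the fixed 2009-01-01 timestamp,
      # so rebuilding from the same inputs yields a byte-identical archive.
      common.ZipWrite(output_zip, "/tmp/system.img", "IMAGES/system.img")

      # Unlike ZipWriteStr(f.read()), the file is streamed from disk, which is
      # what makes entries larger than 2 GiB workable under Python 2.7.
      common.ZipWrite(output_zip, "/tmp/system.map", "IMAGES/system.map",
                      compress_type=zipfile.ZIP_STORED)
    finally:
      output_zip.close()
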
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index e52c264..934d751 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -74,11 +74,11 @@
       raise ValueError("must specify an OEM property")
     if not value:
       raise ValueError("must specify the OEM value")
-    cmd = ('file_getprop("/oem/oem.prop", "%s") == "%s" || '
-           'abort("This package expects the value \\"%s\\"  for '
-           '\\"%s\\" on the OEM partition; '
-           'this has value \\"" + file_getprop("/oem/oem.prop") + "\\".");'
-           ) % (name, value, name, value)
+    cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
+           'abort("This package expects the value \\"{value}\\" for '
+           '\\"{name}\\" on the OEM partition; this has value \\"" + '
+           'file_getprop("/oem/oem.prop", "{name}") + "\\".");'
+           ).format(name=name, value=value)
     self.script.append(cmd)
 
   def AssertSomeFingerprint(self, *fp):
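
Besides moving to str.format, the rewrite above fixes the abort message itself: the old %-style version passed its arguments in the wrong order (printing the property name where the value belonged) and called file_getprop("/oem/oem.prop") without the property name. Named fields can be repeated without that kind of mismatch; a small illustration with made-up values:

    cmd = ('file_getprop("/oem/oem.prop", "{name}") == "{value}" || '
           'abort("expected \\"{value}\\" for \\"{name}\\"");'
           ).format(name="ro.oem.device", value="acme")
    print(cmd)  # each named field may appear as many times as needed
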
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 4b88e73..4dda0b7 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -88,11 +88,13 @@
       # and all we have to do is copy them to the output zip.
       images = os.listdir(images_path)
       if images:
-        for i in images:
-          if bootable_only and i not in ("boot.img", "recovery.img"): continue
-          if not i.endswith(".img"): continue
-          with open(os.path.join(images_path, i), "r") as f:
-            common.ZipWriteStr(output_zip, i, f.read())
+        for image in images:
+          if bootable_only and image not in ("boot.img", "recovery.img"):
+            continue
+          if not image.endswith(".img"):
+            continue
+          common.ZipWrite(
+              output_zip, os.path.join(images_path, image), image)
         done = True
 
     if not done:
@@ -139,7 +141,13 @@
 
   finally:
     print "cleaning up..."
+    # http://b/18015246
+    # See common.py for context.  zipfile also refers to ZIP64_LIMIT during
+    # close() when it writes out the central directory.
+    saved_zip64_limit = zipfile.ZIP64_LIMIT
+    zipfile.ZIP64_LIMIT = (1 << 32) - 1
     output_zip.close()
+    zipfile.ZIP64_LIMIT = saved_zip64_limit
     shutil.rmtree(OPTIONS.input_tmp)
 
   print "done."
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index 945f11a..b71baf9 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -349,8 +349,9 @@
   partition = itemset.partition
 
   for info in input_zip.infolist():
-    if info.filename.startswith(partition.upper() + "/"):
-      basefilename = info.filename[7:]
+    prefix = partition.upper() + "/"
+    if info.filename.startswith(prefix):
+      basefilename = info.filename[len(prefix):]
       if IsSymlink(info):
         symlinks.append((input_zip.read(info.filename),
                          "/" + partition + "/" + basefilename))
@@ -646,10 +647,8 @@
   WriteMetadata(metadata, output_zip)
 
 
-def WritePolicyConfig(file_context, output_zip):
-  f = open(file_context, 'r');
-  basename = os.path.basename(file_context)
-  common.ZipWriteStr(output_zip, basename, f.read())
+def WritePolicyConfig(file_name, output_zip):
+  common.ZipWrite(output_zip, file_name, os.path.basename(file_name))
 
 
 def WriteMetadata(metadata, output_zip):
@@ -665,7 +664,7 @@
   prefix = partition.upper() + "/"
   for info in z.infolist():
     if info.filename.startswith(prefix) and not IsSymlink(info):
-      basefilename = info.filename[7:]
+      basefilename = info.filename[len(prefix):]
       fn = partition + "/" + basefilename
       data = z.read(info.filename)
       out[fn] = common.File(fn, data)
@@ -740,8 +739,16 @@
 
   system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
   system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)
+
+  blockimgdiff_version = 1
+  if OPTIONS.info_dict:
+    blockimgdiff_version = max(
+        int(i) for i in
+        OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+
   system_diff = common.BlockDifference("system", system_tgt, system_src,
-                                       check_first_block=True)
+                                       check_first_block=True,
+                                       version=blockimgdiff_version)
 
   if HasVendorPartition(target_zip):
     if not HasVendorPartition(source_zip):
@@ -749,7 +756,8 @@
     vendor_src = GetImage("vendor", OPTIONS.source_tmp, OPTIONS.source_info_dict)
     vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, OPTIONS.target_info_dict)
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
-                                         check_first_block=True)
+                                         check_first_block=True,
+                                         version=blockimgdiff_version)
   else:
     vendor_diff = None
 
@@ -810,11 +818,22 @@
   device_specific.IncrementalOTA_VerifyBegin()
 
   if oem_props is None:
-    script.AssertSomeFingerprint(source_fp, target_fp)
+    # When blockimgdiff version is less than 3 (non-resumable block-based OTA),
+    # patching on a device that's already on the target build will damage the
+    # system. Because operations like move don't check the block state, they
+    # always apply the changes unconditionally.
+    if blockimgdiff_version <= 2:
+      script.AssertSomeFingerprint(source_fp)
+    else:
+      script.AssertSomeFingerprint(source_fp, target_fp)
   else:
-    script.AssertSomeThumbprint(
-        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
-        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
+    if blockimgdiff_version <= 2:
+      script.AssertSomeThumbprint(
+          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
+    else:
+      script.AssertSomeThumbprint(
+          GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
+          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
 
   if updating_boot:
     boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict)
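
Slicing with len(prefix) above replaces a hard-coded 7 that silently assumed every partition prefix is exactly seven characters ("SYSTEM/", "VENDOR/"); computing the length keeps the helpers correct for any partition name. A tiny check of the difference, using a hypothetical partition:

    prefix = "ODM/"                        # 4 characters, not 7
    filename = prefix + "etc/build.prop"

    assert filename[len(prefix):] == "etc/build.prop"
    assert filename[7:] == "/build.prop"   # what the old hard-coded slice gives
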
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
new file mode 100644
index 0000000..5fdc132
--- /dev/null
+++ b/tools/releasetools/test_common.py
@@ -0,0 +1,110 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import tempfile
+import time
+import unittest
+import zipfile
+
+import common
+
+
+def random_string_with_holes(size, block_size, step_size):
+  data = ["\0"] * size
+  for begin in range(0, size, step_size):
+    end = begin + block_size
+    data[begin:end] = os.urandom(block_size)
+  return "".join(data)
+
+
+class CommonZipTest(unittest.TestCase):
+  def _test_ZipWrite(self, contents, extra_zipwrite_args=None):
+    extra_zipwrite_args = dict(extra_zipwrite_args or {})
+
+    test_file = tempfile.NamedTemporaryFile(delete=False)
+    zip_file = tempfile.NamedTemporaryFile(delete=False)
+
+    test_file_name = test_file.name
+    zip_file_name = zip_file.name
+
+    # File names within an archive strip the leading slash.
+    arcname = extra_zipwrite_args.get("arcname", test_file_name)
+    if arcname[0] == "/":
+      arcname = arcname[1:]
+
+    zip_file.close()
+    zip_file = zipfile.ZipFile(zip_file_name, "w")
+
+    try:
+      test_file.write(contents)
+      test_file.close()
+
+      old_stat = os.stat(test_file_name)
+      expected_mode = extra_zipwrite_args.get("perms", 0o644)
+
+      time.sleep(5)  # Make sure the atime/mtime will change measurably.
+
+      common.ZipWrite(zip_file, test_file_name, **extra_zipwrite_args)
+
+      new_stat = os.stat(test_file_name)
+      self.assertEqual(int(old_stat.st_mode), int(new_stat.st_mode))
+      self.assertEqual(int(old_stat.st_mtime), int(new_stat.st_mtime))
+      self.assertIsNone(zip_file.testzip())
+
+      zip_file.close()
+      zip_file = zipfile.ZipFile(zip_file_name, "r")
+      info = zip_file.getinfo(arcname)
+
+      self.assertEqual(info.date_time, (2009, 1, 1, 0, 0, 0))
+      mode = (info.external_attr >> 16) & 0o777
+      self.assertEqual(mode, expected_mode)
+      self.assertEqual(zip_file.read(arcname), contents)
+      self.assertIsNone(zip_file.testzip())
+    finally:
+      os.remove(test_file_name)
+      os.remove(zip_file_name)
+
+  def test_ZipWrite(self):
+    file_contents = os.urandom(1024)
+    self._test_ZipWrite(file_contents)
+
+  def test_ZipWrite_with_opts(self):
+    file_contents = os.urandom(1024)
+    self._test_ZipWrite(file_contents, {
+        "arcname": "foobar",
+        "perms": 0o777,
+        "compress_type": zipfile.ZIP_DEFLATED,
+    })
+
+  def test_ZipWrite_large_file(self):
+    kilobytes = 1024
+    megabytes = 1024 * kilobytes
+    gigabytes = 1024 * megabytes
+
+    size = int(2 * gigabytes + 1)
+    block_size = 4 * kilobytes
+    step_size = 4 * megabytes
+    file_contents = random_string_with_holes(
+        size, block_size, step_size)
+    self._test_ZipWrite(file_contents, {
+        "compress_type": zipfile.ZIP_DEFLATED,
+    })
+
+  def test_ZipWrite_resets_ZIP64_LIMIT(self):
+    default_limit = (1 << 31) - 1
+    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+    self._test_ZipWrite('')
+    self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
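
The new test module assumes it is run from tools/releasetools (or with that directory on PYTHONPATH) so that "import common" resolves, and it defines no main block of its own. One way to drive it programmatically under that assumption is sketched below; note that test_ZipWrite_large_file materializes a string slightly over 2 GiB and the shared helper sleeps five seconds per test to let mtimes change, so the suite is deliberately slow and memory-hungry:

    import unittest

    import test_common

    suite = unittest.defaultTestLoader.loadTestsFromModule(test_common)
    unittest.TextTestRunner(verbosity=2).run(suite)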