Merge "The easter egg is now its own APK." into nyc-dev
am: 18ecea3986
Change-Id: Ica7456ab7bf01dba0f294fce3720b0e19db02f38
diff --git a/core/Makefile b/core/Makefile
index d804c37..1e81c14 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -499,11 +499,6 @@
INTERNAL_BOOTIMAGE_FILES := $(filter-out --%,$(INTERNAL_BOOTIMAGE_ARGS))
-BOARD_KERNEL_CMDLINE := $(strip $(BOARD_KERNEL_CMDLINE))
-ifdef BOARD_KERNEL_CMDLINE
- INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
-endif
-
BOARD_KERNEL_BASE := $(strip $(BOARD_KERNEL_BASE))
ifdef BOARD_KERNEL_BASE
INTERNAL_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
@@ -514,6 +509,18 @@
INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
endif
+ifeq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),true)
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+VERITY_KEYID := veritykeyid=id:`openssl x509 -in $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem -text \
+ | grep keyid | sed 's/://g' | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]' | sed 's/keyid//g'`
+endif
+endif
+
+BOARD_KERNEL_CMDLINE := $(strip $(BOARD_KERNEL_CMDLINE) $(VERITY_KEYID))
+ifdef BOARD_KERNEL_CMDLINE
+INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
+endif
+
INTERNAL_MKBOOTIMG_VERSION_ARGS := \
--os_version $(PLATFORM_VERSION) \
--os_patch_level $(PLATFORM_SECURITY_PATCH)
@@ -785,6 +792,7 @@
$(if $(BOARD_HAS_EXT4_RESERVED_BLOCKS),$(hide) echo "has_ext4_reserved_blocks=$(BOARD_HAS_EXT4_RESERVED_BLOCKS)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "system_squashfs_compressor=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "system_squashfs_compressor_opt=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "system_squashfs_block_size=$(BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "system_squashfs_disable_4k_align=$(BOARD_SYSTEMIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),$(hide) echo "system_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH)" >> $(1))
$(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -796,6 +804,7 @@
$(if $(BOARD_VENDORIMAGE_JOURNAL_SIZE),$(hide) echo "vendor_journal_size=$(BOARD_VENDORIMAGE_JOURNAL_SIZE)" >> $(1))
$(if $(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "vendor_squashfs_compressor=$(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
$(if $(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "vendor_squashfs_compressor_opt=$(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "vendor_squashfs_block_size=$(BOARD_VENDORIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
$(if $(BOARD_VENDORIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "vendor_squashfs_disable_4k_align=$(BOARD_VENDORIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH),$(hide) echo "vendor_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH)" >> $(1))
$(if $(BOARD_OEMIMAGE_PARTITION_SIZE),$(hide) echo "oem_size=$(BOARD_OEMIMAGE_PARTITION_SIZE)" >> $(1))
@@ -876,6 +885,11 @@
else
recovery_fstab := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery.fstab))
endif
+ifdef TARGET_RECOVERY_WIPE
+recovery_wipe := $(TARGET_RECOVERY_WIPE)
+else
+recovery_wipe :=
+endif
# Prior to A/B update, we used to have:
# boot.img + recovery-from-boot.p + recovery-resource.dat = recovery.img.
@@ -946,7 +960,7 @@
$(hide) mkdir -p $(TARGET_RECOVERY_OUT)
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
@echo Copying baseline ramdisk...
- $(hide) rsync -a --exclude=etc --exclude=sdcard $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+ $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
@echo Modifying ramdisk contents...
$(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
$(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
@@ -961,6 +975,8 @@
cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/$(newline))
$(hide) $(foreach item,$(recovery_fstab), \
cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab)
+ $(if $(strip $(recovery_wipe)), \
+ $(hide) cp -f $(recovery_wipe) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.wipe)
$(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys
$(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
> $(TARGET_RECOVERY_ROOT_OUT)/default.prop
@@ -1393,6 +1409,9 @@
cacheimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
$(build-cacheimage-target)
+else # BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
+# we need to ignore the broken cache link when doing the rsync
+IGNORE_CACHE_LINK := --exclude=cache
endif # BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
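
The VERITY_KEYID hunk above appends a veritykeyid=id:<keyid> entry to BOARD_KERNEL_CMDLINE when verity is enabled on a system_root_image device, deriving the id from the keyid field of the product's verity signing certificate. A rough Python equivalent of that extraction, mirroring both the shell pipeline above and the ReplaceVerityKeyId helper added to sign_target_files_apks.py later in this change (the certificate path in the usage comment is a placeholder):

import re
import subprocess

def verity_keyid_cmdline(cert_pem_path):
    # Dump the certificate as text and pull out the "keyid:AA:BB:..." field,
    # then drop the colons and lowercase it, like the shell pipeline above.
    text = subprocess.check_output(
        ["openssl", "x509", "-in", cert_pem_path, "-text"]).decode()
    match = re.search(r"keyid:([0-9A-Fa-f:]+)", text)
    if match is None:
        raise ValueError("no keyid found in %s" % cert_pem_path)
    return "veritykeyid=id:%s" % match.group(1).replace(":", "").lower()

# Placeholder path, for illustration only:
# print(verity_keyid_cmdline("verity.x509.pem"))
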
diff --git a/core/binary.mk b/core/binary.mk
index 918a28d..7b22903 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -629,57 +629,55 @@
## Compile the .proto files to .cc (or .c) and then to .o
###########################################################
proto_sources := $(filter %.proto,$(my_src_files))
-proto_generated_objects :=
-proto_generated_headers :=
ifneq ($(proto_sources),)
-proto_generated_sources_dir := $(generated_sources_dir)/proto
-proto_generated_obj_dir := $(intermediates)/proto
+proto_gen_dir := $(generated_sources_dir)/proto
+my_rename_cpp_ext :=
ifneq (,$(filter nanopb-c nanopb-c-enable_malloc, $(LOCAL_PROTOC_OPTIMIZE_TYPE)))
my_proto_source_suffix := .c
my_proto_c_includes := external/nanopb-c
-my_protoc_flags := --nanopb_out=$(proto_generated_sources_dir) \
+my_protoc_flags := --nanopb_out=$(proto_gen_dir) \
--plugin=external/nanopb-c/generator/protoc-gen-nanopb
else
-my_proto_source_suffix := .cc
+my_proto_source_suffix := $(LOCAL_CPP_EXTENSION)
+ifneq ($(my_proto_source_suffix),.cc)
+# aprotoc is hardcoded to write out only .cc files.
+# We need to rename the extension to $(LOCAL_CPP_EXTENSION) if it's not .cc.
+my_rename_cpp_ext := true
+endif
my_proto_c_includes := external/protobuf/src
my_cflags += -DGOOGLE_PROTOBUF_NO_RTTI
-my_protoc_flags := --cpp_out=$(proto_generated_sources_dir)
+my_protoc_flags := --cpp_out=$(proto_gen_dir)
endif
-my_proto_c_includes += $(proto_generated_sources_dir)
+my_proto_c_includes += $(proto_gen_dir)
proto_sources_fullpath := $(addprefix $(LOCAL_PATH)/, $(proto_sources))
-proto_generated_sources := $(addprefix $(proto_generated_sources_dir)/, \
+proto_generated_cpps := $(addprefix $(proto_gen_dir)/, \
$(patsubst %.proto,%.pb$(my_proto_source_suffix),$(proto_sources_fullpath)))
-proto_generated_headers := $(patsubst %.pb$(my_proto_source_suffix),%.pb.h, $(proto_generated_sources))
-proto_generated_objects := $(addprefix $(proto_generated_obj_dir)/, \
- $(patsubst %.proto,%.pb.o,$(proto_sources_fullpath)))
-$(call track-src-file-obj,$(proto_sources),$(proto_generated_objects))
# Ensure the transform-proto-to-cc rule is only defined once in multilib build.
-ifndef $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined
-$(proto_generated_sources): PRIVATE_PROTO_INCLUDES := $(TOP)
-$(proto_generated_sources): PRIVATE_PROTOC_FLAGS := $(LOCAL_PROTOC_FLAGS) $(my_protoc_flags)
-$(proto_generated_sources): $(proto_generated_sources_dir)/%.pb$(my_proto_source_suffix): %.proto $(PROTOC)
+ifndef $(my_host)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined
+$(proto_generated_cpps): PRIVATE_PROTO_INCLUDES := $(TOP)
+$(proto_generated_cpps): PRIVATE_PROTOC_FLAGS := $(LOCAL_PROTOC_FLAGS) $(my_protoc_flags)
+$(proto_generated_cpps): PRIVATE_RENAME_CPP_EXT := $(my_rename_cpp_ext)
+$(proto_generated_cpps): $(proto_gen_dir)/%.pb$(my_proto_source_suffix): %.proto $(my_protoc_deps) $(PROTOC)
$(transform-proto-to-cc)
-# This is just a dummy rule to make sure gmake doesn't skip updating the dependents.
-$(proto_generated_headers): $(proto_generated_sources_dir)/%.pb.h: $(proto_generated_sources_dir)/%.pb$(my_proto_source_suffix)
- @echo "Updated header file $@."
- $(hide) touch $@
-
-$(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined := true
-endif # transform-proto-to-cc rule included only once
-
-$(proto_generated_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
-$(proto_generated_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
-$(proto_generated_objects): $(proto_generated_obj_dir)/%.o: $(proto_generated_sources_dir)/%$(my_proto_source_suffix) $(proto_generated_headers)
-ifeq ($(my_proto_source_suffix),.c)
- $(transform-$(PRIVATE_HOST)c-to-o)
-else
- $(transform-$(PRIVATE_HOST)cpp-to-o)
+$(my_host)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined := true
endif
-$(call include-depfiles-for-objs, $(proto_generated_objects))
+# Ideally we could generate the sources directly into $(intermediates).
+# But many Android.mk files assume the .pb.h files are in $(generated_sources_dir).
+# As a workaround, we make a copy in $(intermediates).
+proto_intermediate_dir := $(intermediates)/proto
+proto_intermediate_cpps := $(patsubst $(proto_gen_dir)/%,$(proto_intermediate_dir)/%,\
+ $(proto_generated_cpps))
+$(proto_intermediate_cpps) : $(proto_intermediate_dir)/% : $(proto_gen_dir)/% | $(ACP)
+ @echo "Copy: $@"
+ $(copy-file-to-target)
+ $(hide) cp $(basename $<).h $(basename $@).h
+$(call track-src-file-gen,$(proto_sources),$(proto_intermediate_cpps))
+
+my_generated_sources += $(proto_intermediate_cpps)
my_c_includes += $(my_proto_c_includes)
# Auto-export the generated proto source dir.
@@ -897,7 +895,7 @@
dotdot_arm_objects :=
$(foreach s,$(dotdot_arm_sources),\
$(eval $(call compile-dotdot-cpp-file,$(s),\
- $(yacc_cpps) $(proto_generated_headers) $(my_additional_dependencies),\
+ $(my_additional_dependencies),\
dotdot_arm_objects)))
$(call track-src-file-obj,$(patsubst %,%.arm,$(dotdot_arm_sources)),$(dotdot_arm_objects))
@@ -905,7 +903,7 @@
dotdot_objects :=
$(foreach s,$(dotdot_sources),\
$(eval $(call compile-dotdot-cpp-file,$(s),\
- $(yacc_cpps) $(proto_generated_headers) $(my_additional_dependencies),\
+ $(my_additional_dependencies),\
dotdot_objects)))
$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects))
@@ -923,7 +921,6 @@
ifneq ($(strip $(cpp_objects)),)
$(cpp_objects): $(intermediates)/%.o: \
$(TOPDIR)$(LOCAL_PATH)/%$(LOCAL_CPP_EXTENSION) \
- $(yacc_cpps) $(proto_generated_headers) \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)cpp-to-o)
$(call include-depfiles-for-objs, $(cpp_objects))
@@ -945,8 +942,7 @@
$(gen_cpp_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
$(gen_cpp_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
$(gen_cpp_objects): $(intermediates)/%.o: \
- $(intermediates)/%$(LOCAL_CPP_EXTENSION) $(yacc_cpps) \
- $(proto_generated_headers) \
+ $(intermediates)/%$(LOCAL_CPP_EXTENSION) \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)cpp-to-o)
$(call include-depfiles-for-objs, $(gen_cpp_objects))
@@ -1001,7 +997,7 @@
dotdot_arm_objects :=
$(foreach s,$(dotdot_arm_sources),\
$(eval $(call compile-dotdot-c-file,$(s),\
- $(yacc_cpps) $(proto_generated_headers) $(my_additional_dependencies),\
+ $(my_additional_dependencies),\
dotdot_arm_objects)))
$(call track-src-file-obj,$(patsubst %,%.arm,$(dotdot_arm_sources)),$(dotdot_arm_objects))
@@ -1009,7 +1005,7 @@
dotdot_objects :=
$(foreach s, $(dotdot_sources),\
$(eval $(call compile-dotdot-c-file,$(s),\
- $(yacc_cpps) $(proto_generated_headers) $(my_additional_dependencies),\
+ $(my_additional_dependencies),\
dotdot_objects)))
$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects))
@@ -1025,7 +1021,7 @@
c_objects := $(c_arm_objects) $(c_normal_objects)
ifneq ($(strip $(c_objects)),)
-$(c_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.c $(yacc_cpps) $(proto_generated_headers) \
+$(c_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.c \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)c-to-o)
$(call include-depfiles-for-objs, $(c_objects))
@@ -1046,7 +1042,7 @@
# TODO: support compiling certain generated files as arm.
$(gen_c_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
$(gen_c_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
-$(gen_c_objects): $(intermediates)/%.o: $(intermediates)/%.c $(yacc_cpps) $(proto_generated_headers) \
+$(gen_c_objects): $(intermediates)/%.o: $(intermediates)/%.c \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)c-to-o)
$(call include-depfiles-for-objs, $(gen_c_objects))
@@ -1061,7 +1057,7 @@
$(call track-src-file-obj,$(objc_sources),$(objc_objects))
ifneq ($(strip $(objc_objects)),)
-$(objc_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.m $(yacc_cpps) $(proto_generated_headers) \
+$(objc_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.m \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)m-to-o)
$(call include-depfiles-for-objs, $(objc_objects))
@@ -1076,7 +1072,7 @@
$(call track-src-file-obj,$(objcpp_sources),$(objcpp_objects))
ifneq ($(strip $(objcpp_objects)),)
-$(objcpp_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.mm $(yacc_cpps) $(proto_generated_headers) \
+$(objcpp_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.mm \
$(my_additional_dependencies)
$(transform-$(PRIVATE_HOST)mm-to-o)
$(call include-depfiles-for-objs, $(objcpp_objects))
@@ -1206,8 +1202,7 @@
$(c_objects) \
$(gen_c_objects) \
$(objc_objects) \
- $(objcpp_objects) \
- $(proto_generated_objects)
+ $(objcpp_objects)
new_order_normal_objects := $(foreach f,$(my_src_files),$(my_src_file_obj_$(f)))
new_order_normal_objects += $(foreach f,$(my_gen_src_files),$(my_src_file_obj_$(f)))
@@ -1420,11 +1415,9 @@
###########################################################
export_includes := $(intermediates)/export_includes
$(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(my_export_c_include_dirs)
-# Make sure .pb.h are already generated before any dependent source files get compiled.
-# Similarly, the generated DBus headers need to exist before we export their location.
-# People are not going to consume the aidl generated cpp file, but the cpp file is
-# generated after the headers, so this is a convenient way to ensure the headers exist.
-$(export_includes) : $(LOCAL_MODULE_MAKEFILE_DEP) $(proto_generated_headers) $(dbus_generated_headers) $(aidl_gen_cpp) $(vts_gen_cpp)
+# Adding $(my_generated_sources) makes sure the headers get generated
+# before any dependent source files get compiled.
+$(export_includes) : $(my_generated_sources) $(export_include_deps)
@echo Export includes file: $< -- $@
$(hide) mkdir -p $(dir $@) && rm -f $@.tmp
ifdef my_export_c_include_dirs
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 6e96880..f84a66f 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -24,6 +24,23 @@
my_sanitize := $(my_global_sanitize)
endif
+# Add a filter point for 32-bit vs 64-bit sanitization (to lighten the burden).
+SANITIZE_ARCH ?= 32 64
+ifeq ($(filter $(SANITIZE_ARCH),$(my_32_64_bit_suffix)),)
+ my_sanitize :=
+endif
+
+# Add a filter point based on module owner (to lighten the burden). The format is a space- or
+# colon-separated list of owner names.
+ifneq (,$(SANITIZE_NEVER_BY_OWNER))
+ ifneq (,$(LOCAL_MODULE_OWNER))
+ ifneq (,$(filter $(LOCAL_MODULE_OWNER),$(subst :, ,$(SANITIZE_NEVER_BY_OWNER))))
+ $(warning Not sanitizing $(LOCAL_MODULE) based on module owner.)
+ my_sanitize :=
+ endif
+ endif
+endif
+
# Don't apply sanitizers to NDK code.
ifdef LOCAL_SDK_VERSION
my_sanitize :=
diff --git a/core/definitions.mk b/core/definitions.mk
index b9ef4d5..84ea801 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1169,6 +1169,9 @@
$(addprefix --proto_path=, $(PRIVATE_PROTO_INCLUDES)) \
$(PRIVATE_PROTOC_FLAGS) \
$<
+@# aprotoc outputs only .cc files. Rename the output to the requested C++ extension if necessary.
+$(if $(PRIVATE_RENAME_CPP_EXT),\
+ $(hide) mv $(basename $@).cc $@)
endef
diff --git a/core/tasks/vendor_module_check.mk b/core/tasks/vendor_module_check.mk
index 910c8b7..e3761ae 100644
--- a/core/tasks/vendor_module_check.mk
+++ b/core/tasks/vendor_module_check.mk
@@ -43,6 +43,7 @@
synaptics \
ti \
trusted_logic \
+ verizon \
widevine
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 5dffcac..f6e2bee 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -43,7 +43,7 @@
# which is the version that we reveal to the end user.
# Update this value when the platform version changes (rather
# than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 7.0
+ PLATFORM_VERSION := NMR1
endif
ifeq "" "$(PLATFORM_SDK_VERSION)"
@@ -70,12 +70,12 @@
ifeq "" "$(PLATFORM_VERSION_CODENAME)"
# This is the current development code-name, if the build is not a final
# release build. If this is a final release build, it is simply "REL".
- PLATFORM_VERSION_CODENAME := REL
+ PLATFORM_VERSION_CODENAME := NMR1
# This is all of the development codenames that are active. Should be either
# the same as PLATFORM_VERSION_CODENAME or a comma-separated list of additional
# codenames after PLATFORM_VERSION_CODENAME.
- PLATFORM_VERSION_ALL_CODENAMES := $(PLATFORM_VERSION_CODENAME)
+ PLATFORM_VERSION_ALL_CODENAMES := $(PLATFORM_VERSION_CODENAME),N
endif
ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
@@ -91,7 +91,7 @@
# assuming the device can only support APIs as of the previous official
# public release.
# This value will always be 0 for release builds.
- PLATFORM_PREVIEW_SDK_VERSION := 0
+ PLATFORM_PREVIEW_SDK_VERSION := 1
endif
endif
diff --git a/target/product/core.mk b/target/product/core.mk
index 75cf649..0a4e0fd 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -53,6 +53,7 @@
QuickSearchBox \
Settings \
SharedStorageBackup \
+ StorageManager \
Telecom \
TeleService \
VpnDialogs \
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 66d5907..0d9aabd 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -342,19 +342,20 @@
return ctx.hexdigest()
def WriteTransfers(self, prefix):
- def WriteTransfersZero(out, to_zero):
- """Limit the number of blocks in command zero to 1024 blocks.
+ def WriteSplitTransfers(out, style, target_blocks):
+      """Limit the size of the operand in 'new' and 'zero' commands to 1024 blocks.
This prevents the target size of one command from being too large; and
might help to avoid fsync errors on some devices."""
- zero_blocks_limit = 1024
+ assert (style == "new" or style == "zero")
+ blocks_limit = 1024
total = 0
- while to_zero:
- zero_blocks = to_zero.first(zero_blocks_limit)
- out.append("zero %s\n" % (zero_blocks.to_string_raw(),))
- total += zero_blocks.size()
- to_zero = to_zero.subtract(zero_blocks)
+ while target_blocks:
+ blocks_to_write = target_blocks.first(blocks_limit)
+ out.append("%s %s\n" % (style, blocks_to_write.to_string_raw()))
+ total += blocks_to_write.size()
+ target_blocks = target_blocks.subtract(blocks_to_write)
return total
out = []
@@ -478,7 +479,7 @@
if xf.style == "new":
assert xf.tgt_ranges
- out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw()))
+ assert tgt_size == WriteSplitTransfers(out, xf.style, xf.tgt_ranges)
total += tgt_size
elif xf.style == "move":
assert xf.tgt_ranges
@@ -538,7 +539,7 @@
elif xf.style == "zero":
assert xf.tgt_ranges
to_zero = xf.tgt_ranges.subtract(xf.src_ranges)
- assert WriteTransfersZero(out, to_zero) == to_zero.size()
+ assert WriteSplitTransfers(out, xf.style, to_zero) == to_zero.size()
total += to_zero.size()
else:
raise ValueError("unknown transfer style '%s'\n" % xf.style)
@@ -568,7 +569,7 @@
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
- assert (WriteTransfersZero(out, self.tgt.extended) ==
+ assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
self.tgt.extended.size())
total += self.tgt.extended.size()
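
WriteSplitTransfers generalizes the old WriteTransfersZero so that both 'new' and 'zero' commands are split into chunks of at most 1024 blocks, keeping any single transfer small enough to avoid oversized writes and the fsync issues mentioned in the docstring. A self-contained sketch of the same loop, using a plain Python list as a stand-in for blockimgdiff's RangeSet (whose first/subtract/size/to_string_raw methods the real code uses):

def write_split_transfers(out, style, target_blocks, blocks_limit=1024):
    # Same control flow as WriteSplitTransfers above; only the block-set
    # type is simplified for illustration.
    assert style in ("new", "zero")
    total = 0
    while target_blocks:
        chunk = target_blocks[:blocks_limit]            # RangeSet.first(limit)
        out.append("%s %d\n" % (style, len(chunk)))     # .to_string_raw() in the real code
        total += len(chunk)                             # .size()
        target_blocks = target_blocks[blocks_limit:]    # .subtract(chunk)
    return total

# 2500 blocks become three commands: 1024 + 1024 + 452.
cmds = []
assert write_split_transfers(cmds, "zero", list(range(2500))) == 2500
assert len(cmds) == 3
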
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index d78896a..3d41e83 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -406,6 +406,8 @@
build_command.extend(["-z", prop_dict["squashfs_compressor"]])
if "squashfs_compressor_opt" in prop_dict:
build_command.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
+ if "squashfs_block_size" in prop_dict:
+ build_command.extend(["-b", prop_dict["squashfs_block_size"]])
if "squashfs_disable_4k_align" in prop_dict and prop_dict.get("squashfs_disable_4k_align") == "true":
build_command.extend(["-a"])
elif fs_type.startswith("f2fs"):
@@ -551,6 +553,7 @@
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
copy_prop("system_squashfs_compressor", "squashfs_compressor")
copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
+ copy_prop("system_squashfs_block_size", "squashfs_block_size")
copy_prop("system_squashfs_disable_4k_align", "squashfs_disable_4k_align")
copy_prop("system_base_fs_file", "base_fs_file")
elif mount_point == "data":
@@ -569,6 +572,7 @@
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
copy_prop("vendor_squashfs_compressor", "squashfs_compressor")
copy_prop("vendor_squashfs_compressor_opt", "squashfs_compressor_opt")
+ copy_prop("vendor_squashfs_block_size", "squashfs_block_size")
copy_prop("vendor_squashfs_disable_4k_align", "squashfs_disable_4k_align")
copy_prop("vendor_base_fs_file", "base_fs_file")
elif mount_point == "oem":
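
Together with the two copy_prop additions above, a BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE or BOARD_VENDORIMAGE_SQUASHFS_BLOCK_SIZE set in a BoardConfig flows through the image-info dictionary and ends up as a -b argument on the squashfs build command, next to the existing compressor options. A small sketch of that mapping in isolation (the function name is illustrative; the real logic lives in BuildImage above):

def squashfs_args(prop_dict):
    # Mirrors the build_command.extend() calls in BuildImage for squashfs.
    args = []
    if "squashfs_compressor" in prop_dict:
        args.extend(["-z", prop_dict["squashfs_compressor"]])
    if "squashfs_compressor_opt" in prop_dict:
        args.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
    if "squashfs_block_size" in prop_dict:
        args.extend(["-b", prop_dict["squashfs_block_size"]])
    if prop_dict.get("squashfs_disable_4k_align") == "true":
        args.append("-a")
    return args

# e.g. BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE := 131072 yields ["-b", "131072"].
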
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index b69ddac..915a29e 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -113,6 +113,17 @@
Generate a log file that shows the differences in the source and target
builds for an incremental package. This option is only meaningful when
-i is specified.
+
+ --payload_signer <signer>
+ Specify the signer when signing the payload and metadata for A/B OTAs.
+ By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
+ with the package private key. If the private key cannot be accessed
+ directly, a payload signer that knows how to do that should be specified.
+ The signer will be supplied with "-inkey <path_to_key>",
+ "-in <input_file>" and "-out <output_file>" parameters.
+
+ --payload_signer_args <args>
+      Specify the arguments needed for the payload signer.
"""
import sys
@@ -124,6 +135,7 @@
import multiprocessing
import os
import subprocess
+import shlex
import tempfile
import zipfile
@@ -160,6 +172,8 @@
OPTIONS.stash_threshold = 0.8
OPTIONS.gen_verify = False
OPTIONS.log_diff = None
+OPTIONS.payload_signer = None
+OPTIONS.payload_signer_args = []
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
@@ -1163,17 +1177,19 @@
"default_system_dev_certificate",
"build/target/product/security/testkey")
- # A/B updater expects key in RSA format.
- cmd = ["openssl", "pkcs8",
- "-in", OPTIONS.package_key + OPTIONS.private_key_suffix,
- "-inform", "DER", "-nocrypt"]
- rsa_key = common.MakeTempFile(prefix="key-", suffix=".key")
- cmd.extend(["-out", rsa_key])
- p1 = common.Run(cmd, stdout=subprocess.PIPE)
- p1.wait()
- assert p1.returncode == 0, "openssl pkcs8 failed"
+ # A/B updater expects a signing key in RSA format. Gets the key ready for
+ # later use in step 3, unless a payload_signer has been specified.
+ if OPTIONS.payload_signer is None:
+ cmd = ["openssl", "pkcs8",
+ "-in", OPTIONS.package_key + OPTIONS.private_key_suffix,
+ "-inform", "DER", "-nocrypt"]
+ rsa_key = common.MakeTempFile(prefix="key-", suffix=".key")
+ cmd.extend(["-out", rsa_key])
+ p1 = common.Run(cmd, stdout=subprocess.PIPE)
+ p1.wait()
+ assert p1.returncode == 0, "openssl pkcs8 failed"
- # Stage the output zip package for signing.
+ # Stage the output zip package for package signing.
temp_zip_file = tempfile.NamedTemporaryFile()
output_zip = zipfile.ZipFile(temp_zip_file, "w",
compression=zipfile.ZIP_DEFLATED)
@@ -1234,21 +1250,30 @@
signed_metadata_sig_file = common.MakeTempFile(prefix="signed-sig-",
suffix=".bin")
# 3a. Sign the payload hash.
- cmd = ["openssl", "pkeyutl", "-sign",
- "-inkey", rsa_key,
- "-pkeyopt", "digest:sha256",
- "-in", payload_sig_file,
- "-out", signed_payload_sig_file]
+ if OPTIONS.payload_signer is not None:
+ cmd = [OPTIONS.payload_signer]
+ cmd.extend(OPTIONS.payload_signer_args)
+ else:
+ cmd = ["openssl", "pkeyutl", "-sign",
+ "-inkey", rsa_key,
+ "-pkeyopt", "digest:sha256"]
+ cmd.extend(["-in", payload_sig_file,
+ "-out", signed_payload_sig_file])
+
p1 = common.Run(cmd, stdout=subprocess.PIPE)
p1.wait()
assert p1.returncode == 0, "openssl sign payload failed"
# 3b. Sign the metadata hash.
- cmd = ["openssl", "pkeyutl", "-sign",
- "-inkey", rsa_key,
- "-pkeyopt", "digest:sha256",
- "-in", metadata_sig_file,
- "-out", signed_metadata_sig_file]
+ if OPTIONS.payload_signer is not None:
+ cmd = [OPTIONS.payload_signer]
+ cmd.extend(OPTIONS.payload_signer_args)
+ else:
+ cmd = ["openssl", "pkeyutl", "-sign",
+ "-inkey", rsa_key,
+ "-pkeyopt", "digest:sha256"]
+ cmd.extend(["-in", metadata_sig_file,
+ "-out", signed_metadata_sig_file])
p1 = common.Run(cmd, stdout=subprocess.PIPE)
p1.wait()
assert p1.returncode == 0, "openssl sign metadata failed"
@@ -1276,6 +1301,11 @@
p1.wait()
assert p1.returncode == 0, "brillo_update_payload properties failed"
+ if OPTIONS.wipe_user_data:
+ with open(properties_file, "a") as f:
+ f.write("POWERWASH=1\n")
+ metadata["ota-wipe"] = "yes"
+
# Add the signed payload file and properties into the zip.
common.ZipWrite(output_zip, properties_file, arcname="payload_properties.txt")
common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin",
@@ -1900,6 +1930,10 @@
OPTIONS.gen_verify = True
elif o == "--log_diff":
OPTIONS.log_diff = a
+ elif o == "--payload_signer":
+ OPTIONS.payload_signer = a
+ elif o == "--payload_signer_args":
+ OPTIONS.payload_signer_args = shlex.split(a)
else:
return False
return True
@@ -1929,6 +1963,8 @@
"stash_threshold=",
"gen_verify",
"log_diff=",
+ "payload_signer=",
+ "payload_signer_args=",
], extra_option_handler=option_handler)
if len(args) != 2:
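
The new --payload_signer hook lets an external program produce the payload and metadata signatures when the package private key cannot be used directly; as described in the added help text, the signer receives -inkey/-in/-out style arguments, and in this change the wrapper appends -in and -out after any --payload_signer_args. A minimal sketch of such a signer that treats -inkey as optional (a key path could also arrive via --payload_signer_args; the default here is a placeholder) and simply defers to 'openssl pkeyutl', i.e. the same command the default path runs:

#!/usr/bin/env python
import subprocess
import sys

def flag_value(flag, default=None):
    # Invoked as: signer [payload_signer_args...] -in <file> -out <file>
    # and, per the help text, possibly -inkey <key> as well.
    if flag in sys.argv:
        return sys.argv[sys.argv.index(flag) + 1]
    return default

def main():
    key = flag_value("-inkey", "payload-signing-key.pem")  # placeholder default
    cmd = ["openssl", "pkeyutl", "-sign",
           "-inkey", key,
           "-pkeyopt", "digest:sha256",
           "-in", flag_value("-in"),
           "-out", flag_value("-out")]
    sys.exit(subprocess.call(cmd))

if __name__ == "__main__":
    main()

Such a script would be wired in with --payload_signer /path/to/script on the ota_from_target_files.py command line.
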
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index f758ae0..3341f9f 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -51,10 +51,12 @@
in which they appear on the command line.
-o (--replace_ota_keys)
- Replace the certificate (public key) used by OTA package
- verification with the one specified in the input target_files
- zip (in the META/otakeys.txt file). Key remapping (-k and -d)
- is performed on this key.
+ Replace the certificate (public key) used by OTA package verification
+ with the ones specified in the input target_files zip (in the
+ META/otakeys.txt file). Key remapping (-k and -d) is performed on the
+ keys. For A/B devices, the payload verification key will be replaced
+      as well. If there are multiple OTA keys, only the first one will be used
+ for payload verification.
-t (--tag_changes) <+tag>,<-tag>,...
Comma-separated list of changes to make to the set of tags (in
@@ -63,6 +65,19 @@
removed. Changes are processed in the order they appear.
Default value is "-test-keys,-dev-keys,+release-keys".
+ --replace_verity_private_key <key>
+ Replace the private key used for verity signing. It expects a filename
+ WITHOUT the extension (e.g. verity_key).
+
+ --replace_verity_public_key <key>
+ Replace the certificate (public key) used for verity verification. The
+ key file replaces the one at BOOT/RAMDISK/verity_key (or ROOT/verity_key
+ for devices using system_root_image). It expects the key filename WITH
+ the extension (e.g. verity_key.pub).
+
+ --replace_verity_keyid <path_to_X509_PEM_cert_file>
+ Replace the veritykeyid in BOOT/cmdline of input_target_file_zip
+      with the keyid of the cert pointed to by <path_to_X509_PEM_cert_file>.
"""
import sys
@@ -92,6 +107,7 @@
OPTIONS.replace_ota_keys = False
OPTIONS.replace_verity_public_key = False
OPTIONS.replace_verity_private_key = False
+OPTIONS.replace_verity_keyid = False
OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys")
def GetApkCerts(tf_zip):
@@ -171,7 +187,9 @@
for i in input_tf_zip.infolist()
if i.filename.endswith('.apk')])
rebuild_recovery = False
+ system_root_image = misc_info.get("system_root_image") == "true"
+ # tmpdir will only be used to regenerate the recovery-from-boot patch.
tmpdir = tempfile.mkdtemp()
def write_to_temp(fn, attr, data):
fn = os.path.join(tmpdir, fn)
@@ -196,25 +214,6 @@
data = input_tf_zip.read(info.filename)
out_info = copy.copy(info)
- # Replace keys if requested.
- if (info.filename == "META/misc_info.txt" and
- OPTIONS.replace_verity_private_key):
- ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info,
- OPTIONS.replace_verity_private_key[1])
- elif (info.filename in ("BOOT/RAMDISK/verity_key",
- "BOOT/verity_key") and
- OPTIONS.replace_verity_public_key):
- new_data = ReplaceVerityPublicKey(output_tf_zip, info.filename,
- OPTIONS.replace_verity_public_key[1])
- write_to_temp(info.filename, info.external_attr, new_data)
- # Copy BOOT/, RECOVERY/, META/, ROOT/ to rebuild recovery patch.
- elif (info.filename.startswith("BOOT/") or
- info.filename.startswith("RECOVERY/") or
- info.filename.startswith("META/") or
- info.filename.startswith("ROOT/") or
- info.filename == "SYSTEM/etc/recovery-resource.dat"):
- write_to_temp(info.filename, info.external_attr, data)
-
# Sign APKs.
if info.filename.endswith(".apk"):
name = os.path.basename(info.filename)
@@ -228,6 +227,8 @@
# an APK we're not supposed to sign.
print "NOT signing: %s" % (name,)
common.ZipWriteStr(output_tf_zip, out_info, data)
+
+ # System properties.
elif info.filename in ("SYSTEM/build.prop",
"VENDOR/build.prop",
"BOOT/RAMDISK/default.prop",
@@ -238,34 +239,94 @@
if info.filename in ("BOOT/RAMDISK/default.prop",
"RECOVERY/RAMDISK/default.prop"):
write_to_temp(info.filename, info.external_attr, new_data)
+
elif info.filename.endswith("mac_permissions.xml"):
print "rewriting %s with new keys." % (info.filename,)
new_data = ReplaceCerts(data)
common.ZipWriteStr(output_tf_zip, out_info, new_data)
+
+ # Trigger a rebuild of the recovery patch if needed.
elif info.filename in ("SYSTEM/recovery-from-boot.p",
"SYSTEM/etc/recovery.img",
"SYSTEM/bin/install-recovery.sh"):
rebuild_recovery = True
+
+ # Don't copy OTA keys if we're replacing them.
elif (OPTIONS.replace_ota_keys and
- info.filename in ("RECOVERY/RAMDISK/res/keys",
- "SYSTEM/etc/security/otacerts.zip")):
- # don't copy these files if we're regenerating them below
+ info.filename in (
+ "BOOT/RAMDISK/res/keys",
+ "RECOVERY/RAMDISK/res/keys",
+ "SYSTEM/etc/security/otacerts.zip",
+ "SYSTEM/etc/update_engine/update-payload-key.pub.pem")):
pass
+
+ # Skip META/misc_info.txt if we will replace the verity private key later.
elif (OPTIONS.replace_verity_private_key and
info.filename == "META/misc_info.txt"):
pass
+
+ # Skip verity public key if we will replace it.
elif (OPTIONS.replace_verity_public_key and
info.filename in ("BOOT/RAMDISK/verity_key",
- "BOOT/verity_key")):
+ "ROOT/verity_key")):
pass
+
+ # Skip verity keyid (for system_root_image use) if we will replace it.
+ elif (OPTIONS.replace_verity_keyid and
+ info.filename == "BOOT/cmdline"):
+ pass
+
+ # Copy BOOT/, RECOVERY/, META/, ROOT/ to rebuild recovery patch. This case
+ # must come AFTER other matching rules.
+ elif (info.filename.startswith("BOOT/") or
+ info.filename.startswith("RECOVERY/") or
+ info.filename.startswith("META/") or
+ info.filename.startswith("ROOT/") or
+ info.filename == "SYSTEM/etc/recovery-resource.dat"):
+ write_to_temp(info.filename, info.external_attr, data)
+ common.ZipWriteStr(output_tf_zip, out_info, data)
+
+ # A non-APK file; copy it verbatim.
else:
- # a non-APK file; copy it verbatim
common.ZipWriteStr(output_tf_zip, out_info, data)
if OPTIONS.replace_ota_keys:
new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
if new_recovery_keys:
- write_to_temp("RECOVERY/RAMDISK/res/keys", 0o755 << 16, new_recovery_keys)
+ if system_root_image:
+ recovery_keys_location = "BOOT/RAMDISK/res/keys"
+ else:
+ recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
+      # The "new_recovery_keys" has already been written into the output_tf_zip
+      # while calling ReplaceOtaKeys(). We're just putting the same copy into
+      # tmpdir in case we need to regenerate the recovery-from-boot patch.
+ write_to_temp(recovery_keys_location, 0o755 << 16, new_recovery_keys)
+
+ # Replace the keyid string in META/misc_info.txt.
+ if OPTIONS.replace_verity_private_key:
+ ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info,
+ OPTIONS.replace_verity_private_key[1])
+
+ if OPTIONS.replace_verity_public_key:
+ if system_root_image:
+ dest = "ROOT/verity_key"
+ else:
+ dest = "BOOT/RAMDISK/verity_key"
+    # We are replacing the one in the boot image only, since the one under
+    # recovery won't ever be needed.
+ new_data = ReplaceVerityPublicKey(
+ output_tf_zip, dest, OPTIONS.replace_verity_public_key[1])
+ write_to_temp(dest, 0o755 << 16, new_data)
+
+ # Replace the keyid string in BOOT/cmdline.
+ if OPTIONS.replace_verity_keyid:
+ new_cmdline = ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
+ OPTIONS.replace_verity_keyid[1])
+    # Writing the new cmdline to tmpdir is redundant, as the boot image
+    # gets built in add_image_to_target_files and rebuild_recovery is not
+    # exercised while building the boot image for the A/B path.
+ write_to_temp("BOOT/cmdline", 0o755 << 16, new_cmdline)
if rebuild_recovery:
recovery_img = common.GetBootableImage(
@@ -398,7 +459,8 @@
"build/target/product/security/testkey")
mapped_keys.append(
OPTIONS.key_map.get(devkey, devkey) + ".x509.pem")
- print "META/otakeys.txt has no keys; using", mapped_keys[0]
+ print("META/otakeys.txt has no keys; using %s for OTA package"
+ " verification." % (mapped_keys[0],))
# recovery uses a version of the key that has been slightly
# predigested (by DumpPublicKey.java) and put in res/keys.
@@ -411,8 +473,13 @@
new_recovery_keys, _ = p.communicate()
if p.returncode != 0:
raise common.ExternalError("failed to run dumpkeys")
- common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys",
- new_recovery_keys)
+
+ # system_root_image puts the recovery keys at BOOT/RAMDISK.
+ if misc_info.get("system_root_image") == "true":
+ recovery_keys_location = "BOOT/RAMDISK/res/keys"
+ else:
+ recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
+ common.ZipWriteStr(output_tf_zip, recovery_keys_location, new_recovery_keys)
# SystemUpdateActivity uses the x509.pem version of the keys, but
# put into a zipfile system/etc/security/otacerts.zip.
@@ -426,8 +493,23 @@
common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
temp_file.getvalue())
+ # For A/B devices, update the payload verification key.
+ if misc_info.get("ab_update") == "true":
+ # Unlike otacerts.zip that may contain multiple keys, we can only specify
+ # ONE payload verification key.
+ if len(mapped_keys) > 1:
+      print("\n  WARNING: Found more than one OTA key; using the first one"
+            " as the payload verification key.\n\n")
+
+ print "Using %s for payload verification." % (mapped_keys[0],)
+ common.ZipWrite(
+ output_tf_zip,
+ mapped_keys[0],
+ arcname="SYSTEM/etc/update_engine/update-payload-key.pub.pem")
+
return new_recovery_keys
+
def ReplaceVerityPublicKey(targetfile_zip, filename, key_path):
print "Replacing verity public key with %s" % key_path
with open(key_path) as f:
@@ -435,6 +517,7 @@
common.ZipWriteStr(targetfile_zip, filename, data)
return data
+
def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip,
misc_info, key_path):
print "Replacing verity private key with %s" % key_path
@@ -444,6 +527,32 @@
common.ZipWriteStr(targetfile_output_zip, "META/misc_info.txt", new_misc_info)
misc_info["verity_key"] = key_path
+
+def ReplaceVerityKeyId(targetfile_input_zip, targetfile_output_zip, keypath):
+ in_cmdline = targetfile_input_zip.read("BOOT/cmdline")
+ # copy in_cmdline to output_zip if veritykeyid is not present in in_cmdline
+ if "veritykeyid" not in in_cmdline:
+ common.ZipWriteStr(targetfile_output_zip, "BOOT/cmdline", in_cmdline)
+ return in_cmdline
+ out_cmdline = []
+ for param in in_cmdline.split():
+ if "veritykeyid" in param:
+ # extract keyid using openssl command
+ p = common.Run(["openssl", "x509", "-in", keypath, "-text"], stdout=subprocess.PIPE)
+ keyid, stderr = p.communicate()
+ keyid = re.search(r'keyid:([0-9a-fA-F:]*)', keyid).group(1).replace(':', '').lower()
+ print "Replacing verity keyid with %s error=%s" % (keyid, stderr)
+ out_cmdline.append("veritykeyid=id:%s" % (keyid,))
+ else:
+ out_cmdline.append(param)
+
+ out_cmdline = ' '.join(out_cmdline)
+ out_cmdline = out_cmdline.strip()
+ print "out_cmdline %s" % (out_cmdline)
+ common.ZipWriteStr(targetfile_output_zip, "BOOT/cmdline", out_cmdline)
+ return out_cmdline
+
+
def BuildKeyMap(misc_info, key_mapping_options):
for s, d in key_mapping_options:
if s is None: # -d option
@@ -541,6 +650,8 @@
OPTIONS.replace_verity_public_key = (True, a)
elif o == "--replace_verity_private_key":
OPTIONS.replace_verity_private_key = (True, a)
+ elif o == "--replace_verity_keyid":
+ OPTIONS.replace_verity_keyid = (True, a)
else:
return False
return True
@@ -553,7 +664,8 @@
"replace_ota_keys",
"tag_changes=",
"replace_verity_public_key=",
- "replace_verity_private_key="],
+ "replace_verity_private_key=",
+ "replace_verity_keyid="],
extra_option_handler=option_handler)
if len(args) != 2: