67 files changed, 995 insertions, 388 deletions
diff --git a/banchanHelp.sh b/banchanHelp.sh new file mode 100755 index 0000000000..eab22e4f95 --- /dev/null +++ b/banchanHelp.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# locate some directories +cd "$(dirname $0)" +SCRIPT_DIR="${PWD}" +cd ../.. +TOP="${PWD}" + +message='usage: banchan <module> ... [<product>|arm|x86|arm64|x86_64] [eng|userdebug|user] + +banchan selects individual APEX modules to be built by the Android build system. +Like "tapas", "banchan" does not request the building of images for a device but +instead configures it for an unbundled build of the given modules, suitable for +installing on any api-compatible device. + +The difference from "tapas" is that "banchan" sets the appropriate products etc +for building APEX modules rather than apps (APKs). + +The module names should match apex{} modules in Android.bp files, typically +starting with "com.android.". + +The product argument should be a product name ending in "_<arch>", where <arch> +is one of arm, x86, arm64, x86_64. It can also be just an arch, in which case +the standard product for building modules with that architecture is used, i.e. +module_<arch>. + +The usage of the other arguments matches that of the rest of the platform +build system and can be found by running `m help`' + +echo "$message" diff --git a/core/Makefile b/core/Makefile index ac59b08631..cf24ba05e5 100644 --- a/core/Makefile +++ b/core/Makefile @@ -211,6 +211,62 @@ $(INSTALLED_SDK_BUILD_PROP_TARGET): $(INSTALLED_BUILD_PROP_TARGET) $(hide) mv $@.tmp $@ # ----------------------------------------------------------------- +# declare recovery ramdisk files +ifeq ($(BUILDING_RECOVERY_IMAGE),true) +INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp +endif + +# ----------------------------------------------------------------- +# Declare vendor ramdisk fragments +INTERNAL_VENDOR_RAMDISK_FRAGMENTS := + +ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) + ifneq (,$(filter recovery,$(BOARD_VENDOR_RAMDISK_FRAGMENTS))) + $(error BOARD_VENDOR_RAMDISK_FRAGMENTS must not contain "recovery" if \ + BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT is set) + endif + INTERNAL_VENDOR_RAMDISK_FRAGMENTS += recovery + VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR := $(TARGET_RECOVERY_ROOT_OUT) + VENDOR_RAMDISK_FRAGMENT.recovery.FILES := $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP) + BOARD_VENDOR_RAMDISK_FRAGMENT.recovery.MKBOOTIMG_ARGS += --ramdisk_type RECOVERY + .KATI_READONLY := VENDOR_RAMDISK_FRAGMENT.recovery.STAGING_DIR +endif + +# Validation check and assign default --ramdisk_type. 
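For context, the per-fragment variables validated below are declared by the board config; a minimal BoardConfig.mk sketch might look like this (the fragment name "dlkm" and the prebuilt path are hypothetical):

BOARD_VENDOR_RAMDISK_FRAGMENTS := dlkm
# A fragment built from kernel module directories; it gets --ramdisk_type DLKM by default.
BOARD_VENDOR_RAMDISK_FRAGMENT.dlkm.KERNEL_MODULE_DIRS := top
# A prebuilt fragment would instead set .PREBUILT and must not set KERNEL_MODULE_DIRS:
#   BOARD_VENDOR_RAMDISK_FRAGMENT.dlkm.PREBUILT := device/acme/example/prebuilt_ramdisk.cpio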
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \ + $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \ + $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \ + $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \ + $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-stage-$(vendor_ramdisk_fragment))) \ + $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \ + $(if $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \ + $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \ + $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \ +) + +# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping. +$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \ + $(foreach kmd,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \ + $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd)) \ + $(if $($(kmd_vrf)),$(error Kernel module directory "$(kmd)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory should belong to exactly one or none vendor ramdisk fragment)) \ + $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \ + ) \ +) +INTERNAL_VENDOR_RAMDISK_FRAGMENTS += $(BOARD_VENDOR_RAMDISK_FRAGMENTS) + +# Strip the list in case of any whitespace. +INTERNAL_VENDOR_RAMDISK_FRAGMENTS := \ + $(strip $(INTERNAL_VENDOR_RAMDISK_FRAGMENTS)) + +# Assign --ramdisk_name for each vendor ramdisk fragment. +$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \ + $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \ + $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \ + $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \ + $(eval .KATI_READONLY := BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) \ +) + +# ----------------------------------------------------------------- # kernel modules # Depmod requires a well-formed kernel version so 0.0 is used as a placeholder. @@ -315,14 +371,34 @@ $(1)/$(DEPMOD_STAGING_SUBDIR)/$(4): $(2) @echo '$$(strip $$(notdir $$(PRIVATE_LOAD_MODULES)))' | tr ' ' '\n' > $$(@) endef +# $(1): source options file +# $(2): destination pathname +# Returns a build rule that checks the syntax of and installs a kernel modules +# options file. Strip and squeeze any extra space and blank lines. +# For use via $(eval). +define build-image-kernel-modules-options-file +$(2): $(1) + @echo "libmodprobe options $$(@)" + $(hide) mkdir -p "$$(dir $$@)" + $(hide) rm -f "$$@" + $(hide) awk <"$$<" >"$$@" \ + '/^#/ { print; next } \ + NF == 0 { next } \ + NF < 2 || $$$$1 != "options" \ + { print "Invalid options line " FNR ": " $$$$0 >"/dev/stderr"; \ + exit_status = 1; next } \ + { $$$$1 = $$$$1; print } \ + END { exit exit_status }' +endef + # $(1): source blocklist file # $(2): destination pathname # Returns a build rule that checks the syntax of and installs a kernel modules -# blocklist file. 
Strip and squeeze any extra space in the blocklist. +# blocklist file. Strip and squeeze any extra space and blank lines. # For use via $(eval). define build-image-kernel-modules-blocklist-file $(2): $(1) - @echo "modprobe blocklist $$(@)" + @echo "libmodprobe blocklist $$(@)" $(hide) mkdir -p "$$(dir $$@)" $(hide) rm -f "$$@" $(hide) awk <"$$<" >"$$@" \ @@ -352,11 +428,19 @@ $(if $(strip $(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver))$(BOARD_$(1)_KERNEL_MODU $(if $(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),,\ $(eval BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver) := $(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)))) \ $(call copy-many-files,$(call build-image-kernel-modules,$(BOARD_$(1)_KERNEL_MODULES$(_sep)$(_kver)),$(2),$(3),$(call intermediates-dir-for,PACKAGING,depmod_$(1)$(_sep)$(_kver)),$(BOARD_$(1)_KERNEL_MODULES_LOAD$(_sep)$(_kver)),$(4),$(BOARD_$(1)_KERNEL_MODULES_ARCHIVE$(_sep)$(_kver)),$(_stripped_staging_dir),$(_kver)))) \ +$(if $(_kver), \ + $(eval _dir := $(_kver)/), \ + $(eval _dir :=)) \ +$(if $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \ + $(eval $(call build-image-kernel-modules-options-file, \ + $(BOARD_$(1)_KERNEL_MODULES_OPTIONS_FILE$(_sep)$(_kver)), \ + $(2)/lib/modules/$(_dir)modules.options)) \ + $(2)/lib/modules/$(_dir)modules.options) \ $(if $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \ $(eval $(call build-image-kernel-modules-blocklist-file, \ $(BOARD_$(1)_KERNEL_MODULES_BLOCKLIST_FILE$(_sep)$(_kver)), \ - $(2)/lib/modules/modules.blocklist)) \ - $(2)/lib/modules/modules.blocklist) + $(2)/lib/modules/$(_dir)modules.blocklist)) \ + $(2)/lib/modules/$(_dir)modules.blocklist) endef # $(1): kernel module directory name (top is an out of band value for no directory) @@ -415,38 +499,24 @@ else VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR := endif -# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping. 
-$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \ - $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \ - $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \ - $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \ - $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-dlkm-$(vendor_ramdisk_fragment))) \ - $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \ - $(foreach dir,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \ - $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir)) \ - $(if $($(kmd_vrf)),$(error Kernel module directory "$(dir)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)", each kernel module directory should belong to exactly one or none vendor ramdisk fragment)) \ - $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \ - ) \ -) - BOARD_KERNEL_MODULE_DIRS += top -$(foreach dir,$(BOARD_KERNEL_MODULE_DIRS), \ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(dir))) \ - $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir))) \ +$(foreach kmd,$(BOARD_KERNEL_MODULE_DIRS), \ + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(kmd))) \ + $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(kmd))) \ $(if $(vendor_ramdisk_fragment), \ $(eval output_dir := $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR)) \ $(eval result_var := VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES) \ $(eval ### else ###), \ $(eval output_dir := $(TARGET_VENDOR_RAMDISK_OUT)) \ $(eval result_var := ALL_DEFAULT_INSTALLED_MODULES)) \ - $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(dir))) \ + $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \ + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(kmd))) \ + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(kmd))) \ + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(kmd))) \ + $(eval 
ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(if $(filter true,$(BOARD_USES_ODM_DLKMIMAGE)),$(TARGET_OUT_ODM_DLKM),$(TARGET_OUT_ODM)),odm,modules.load,,$(kmd))) \ $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(dir))),\ - $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(dir))))) + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(kmd))),\ + $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,GENERIC_RAMDISK,$(TARGET_RAMDISK_OUT),,modules.load,,$(kmd))))) # ----------------------------------------------------------------- # Cert-to-package mapping. Used by the post-build signing tools. @@ -981,12 +1051,6 @@ my_apex_extracted_boot_image := my_installed_prebuilt_gki_apex := # ----------------------------------------------------------------- -# declare recovery ramdisk files -ifeq ($(BUILDING_RECOVERY_IMAGE),true) -INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp -endif - -# ----------------------------------------------------------------- # vendor boot image ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true) @@ -1000,10 +1064,14 @@ INTERNAL_VENDOR_RAMDISK_FILES := $(filter $(TARGET_VENDOR_RAMDISK_OUT)/%, \ INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot)/vendor_ramdisk.cpio$(RAMDISK_EXT) +# Exclude recovery files in the default vendor ramdisk if including a standalone +# recovery ramdisk in vendor_boot. ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)) +ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) $(INTERNAL_VENDOR_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP) $(INTERNAL_VENDOR_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT) endif +endif $(INTERNAL_VENDOR_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@ @@ -1040,13 +1108,13 @@ ifdef INTERNAL_KERNEL_CMDLINE endif ifdef INTERNAL_BOOTCONFIG -ifneq (,$(findstring androidboot.hardware=, $(INTERNAL_BOOTCONFIG))) -$(error "androidboot.hardware" BOOTCONFIG parameter is not supported due to \ - bootconfig limitations. Use "hardware" instead. INTERNAL_BOOTCONFIG: \ - $(INTERNAL_BOOTCONFIG)) -endif -INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img -$(INTERNAL_VENDOR_BOOTCONFIG_TARGET): + ifneq (,$(findstring androidboot.hardware=, $(INTERNAL_BOOTCONFIG))) + $(error "androidboot.hardware" BOOTCONFIG parameter is not supported due \ + to bootconfig limitations. Use "hardware" instead. 
INTERNAL_BOOTCONFIG: \ + $(INTERNAL_BOOTCONFIG)) + endif + INTERNAL_VENDOR_BOOTCONFIG_TARGET := $(PRODUCT_OUT)/vendor-bootconfig.img + $(INTERNAL_VENDOR_BOOTCONFIG_TARGET): rm -f $@ $(foreach param,$(INTERNAL_BOOTCONFIG), \ printf "%s\n" $(param) >> $@;) @@ -1083,17 +1151,12 @@ endef INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS := INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS := -$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \ +$(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \ $(eval prebuilt_vendor_ramdisk_fragment_file := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \ $(if $(prebuilt_vendor_ramdisk_fragment_file), \ $(eval vendor_ramdisk_fragment_target := $(call build-prebuilt-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment),$(prebuilt_vendor_ramdisk_fragment_file))) \ $(eval ### else ###), \ - $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment))) \ - $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \ - $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \ - $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \ - $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \ - $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \ + $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment)))) \ $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS += $(vendor_ramdisk_fragment_target)) \ $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS += $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) --vendor_ramdisk_fragment $(vendor_ramdisk_fragment_target)) \ ) @@ -2401,9 +2464,14 @@ INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,v $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_FILES := $(INTERNAL_DEBUG_RAMDISK_FILES) $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_RAMDISK_DIR := $(TARGET_VENDOR_RAMDISK_OUT) +# Exclude recovery files in the default vendor ramdisk if including a standalone +# recovery ramdisk in vendor_boot. 
ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)) +ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) +$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP) $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT) endif +endif INTERNAL_VENDOR_DEBUG_RAMDISK_FILES := $(filter $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, \ $(ALL_GENERATED_SOURCES) \ @@ -2509,7 +2577,7 @@ endef $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(INSTALLED_DEBUG_RAMDISK_TARGET) $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_TEST_HARNESS_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS) $(call pretty,"Target test harness ramdisk: $@") - rsync -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR) + rsync --chmod=u+w -a $(TEST_HARNESS_RAMDISK_SYNC_DIR)/ $(TEST_HARNESS_RAMDISK_ROOT_DIR) $(call append-test-harness-props,$(ADDITIONAL_TEST_HARNESS_PROPERTIES),$(TEST_HARNESS_PROP_TARGET)) $(MKBOOTFS) -d $(TARGET_OUT) $(TEST_HARNESS_RAMDISK_ROOT_DIR) | $(COMPRESSION_COMMAND) > $@ @@ -2527,6 +2595,7 @@ endif # BUILDING_RAMDISK_IMAGE # # Note: it's intentional to skip signing for boot-test-harness.img, because it # can only be used if the device is unlocked with verification error. +ifneq ($(INSTALLED_BOOTIMAGE_TARGET),) ifneq ($(strip $(TARGET_NO_KERNEL)),true) ifneq ($(strip $(BOARD_KERNEL_BINARIES)),) @@ -2567,6 +2636,7 @@ bootimage_test_harness-nodeps: $(MKBOOTIMG) $(BOARD_GKI_SIGNING_KEY_PATH) $(AVBT $(foreach b,$(INSTALLED_TEST_HARNESS_BOOTIMAGE_TARGET),$(call build-boot-test-harness-target,$b)) endif # TARGET_NO_KERNEL +endif # INSTALLED_BOOTIMAGE_TARGET endif # BOARD_BUILD_SYSTEM_ROOT_IMAGE is not true # Creates a compatibility symlink between two partitions, e.g. /system/vendor to /vendor @@ -3937,6 +4007,9 @@ $(warning No INSTALLED_KERNEL_TARGET is defined when PRODUCT_OTA_ENFORCE_VINTF_K or (2) extracting kernel configuration and defining BOARD_KERNEL_CONFIG_FILE and \ BOARD_KERNEL_VERSION manually; or (3) unsetting PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS \ manually.) +# Clear their values to indicate that these two files does not exist. +BUILT_KERNEL_CONFIGS_FILE := +BUILT_KERNEL_VERSION_FILE := else # Tools for decompression that is not in PATH. @@ -3981,8 +4054,10 @@ check_vintf_compatible_args := check_vintf_compatible_deps := $(check_vintf_common_srcs) ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),true) +ifneq (,$(BUILT_KERNEL_VERSION_FILE)$(BUILT_KERNEL_CONFIGS_FILE)) check_vintf_compatible_args += --kernel $(BUILT_KERNEL_VERSION_FILE):$(BUILT_KERNEL_CONFIGS_FILE) check_vintf_compatible_deps += $(BUILT_KERNEL_CONFIGS_FILE) $(BUILT_KERNEL_VERSION_FILE) +endif # BUILT_KERNEL_VERSION_FILE != "" || BUILT_KERNEL_CONFIGS_FILE != "" endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS check_vintf_compatible_args += \ @@ -4796,8 +4871,12 @@ $(BUILT_TARGET_FILES_PACKAGE): \ ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))$(filter true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))) @# Components of the recovery image $(hide) mkdir -p $(zip_root)/$(PRIVATE_RECOVERY_OUT) +# Exclude recovery files in the default vendor ramdisk if including a standalone +# recovery ramdisk in vendor_boot. 
+ifneq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) $(hide) $(call package_files-copy-root, \ $(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK) +endif ifdef INSTALLED_KERNEL_TARGET ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/ @@ -4887,9 +4966,9 @@ ifdef BOARD_KERNEL_PAGESIZE echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/VENDOR_BOOT/pagesize endif echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/VENDOR_BOOT/vendor_cmdline -ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS - echo "$(BOARD_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments" - $(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \ +ifdef INTERNAL_VENDOR_RAMDISK_FRAGMENTS + echo "$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments" + $(foreach vendor_ramdisk_fragment,$(INTERNAL_VENDOR_RAMDISK_FRAGMENTS), \ mkdir -p $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment); \ echo "$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)" > "$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/mkbootimg_args"; \ $(eval prebuilt_ramdisk := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \ @@ -4899,7 +4978,7 @@ ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR), \ $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/RAMDISK); \ )) -endif # BOARD_VENDOR_RAMDISK_FRAGMENTS != "" +endif # INTERNAL_VENDOR_RAMDISK_FRAGMENTS != "" endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET ifdef BUILDING_SYSTEM_IMAGE @# Contents of the system image @@ -5347,10 +5426,18 @@ ifeq (true,$(EMMA_INSTRUMENT)) # Any dependencies are set up later in build/make/core/main.mk. 
JACOCO_REPORT_CLASSES_ALL := $(PRODUCT_OUT)/jacoco-report-classes-all.jar +$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco) +$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_JACOCO_DIR := $(call intermediates-dir-for,PACKAGING,jacoco,HOST) +$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_TARGET_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage) +$(JACOCO_REPORT_CLASSES_ALL): PRIVATE_HOST_PROGUARD_USAGE_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage,HOST) $(JACOCO_REPORT_CLASSES_ALL) : @echo "Collecting uninstrumented classes" - find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "jacoco-report-classes.jar" -o -name "proguard_usage.zip" 2>/dev/null | sort > $@.list - $(SOONG_ZIP) -o $@ -L 0 -C $(OUT_DIR) -P out -l $@.list + mkdir -p $(PRIVATE_TARGET_JACOCO_DIR) $(PRIVATE_HOST_JACOCO_DIR) $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) $(PRIVATE_HOST_PROGUARD_USAGE_DIR) + $(SOONG_ZIP) -o $@ -L 0 \ + -C $(PRIVATE_TARGET_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_JACOCO_DIR) \ + -C $(PRIVATE_HOST_JACOCO_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_JACOCO_DIR) \ + -C $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_TARGET_PROGUARD_USAGE_DIR) \ + -C $(PRIVATE_HOST_PROGUARD_USAGE_DIR) -P out/target/common/obj -D $(PRIVATE_HOST_PROGUARD_USAGE_DIR) ifeq (,$(TARGET_BUILD_UNBUNDLED)) $(JACOCO_REPORT_CLASSES_ALL): $(INTERNAL_ALLIMAGES_FILES) @@ -5366,13 +5453,11 @@ PROGUARD_DICT_ZIP := $(PRODUCT_OUT)/$(TARGET_PRODUCT)-proguard-dict-$(FILE_NAME_ ifeq (,$(TARGET_BUILD_UNBUNDLED)) $(PROGUARD_DICT_ZIP): $(INTERNAL_ALLIMAGES_FILES) $(updater_dep) endif -$(PROGUARD_DICT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard)/filelist +$(PROGUARD_DICT_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_dictionary) $(PROGUARD_DICT_ZIP): $(SOONG_ZIP) @echo "Packaging Proguard obfuscation dictionary files." - mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE)) - find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary | \ - sed -e 's/\(.*\)\/proguard_dictionary/\0\n\1\/classes.jar/' > $(PRIVATE_LIST_FILE) - $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE) + mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR) + $(SOONG_ZIP) --ignore_missing_files -d -o $@ -C $(PRIVATE_PACKAGING_DIR) -P out/target/common/obj -D $(PRIVATE_PACKAGING_DIR) #------------------------------------------------------------------ # A zip of Proguard usage files. @@ -5393,11 +5478,12 @@ $(PROGUARD_USAGE_ZIP): \ $(INSTALLED_ODM_DLKMIMAGE_TARGET) \ $(updater_dep) endif -$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage)/filelist +$(PROGUARD_USAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard_usage.zip)/filelist +$(PROGUARD_USAGE_ZIP): PRIVATE_PACKAGING_DIR := $(call intermediates-dir-for,PACKAGING,proguard_usage) $(PROGUARD_USAGE_ZIP): $(MERGE_ZIPS) @echo "Packaging Proguard usage files." 
- mkdir -p $(dir $@) $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS $(dir $(PRIVATE_LIST_FILE)) - find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_usage.zip > $(PRIVATE_LIST_FILE) + mkdir -p $(dir $@) $(PRIVATE_PACKAGING_DIR) $(dir $(PRIVATE_LIST_FILE)) + find $(PRIVATE_PACKAGING_DIR) -name proguard_usage.zip > $(PRIVATE_LIST_FILE) $(MERGE_ZIPS) $@ @$(PRIVATE_LIST_FILE) ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS)) diff --git a/core/app_prebuilt_internal.mk b/core/app_prebuilt_internal.mk index fe04b84d4e..86a4adf324 100644 --- a/core/app_prebuilt_internal.mk +++ b/core/app_prebuilt_internal.mk @@ -169,12 +169,13 @@ LOCAL_DEX_PREOPT := false endif my_dex_jar := $(my_prebuilt_src_file) -my_manifest_or_apk := $(my_prebuilt_src_file) dex_preopt_profile_src_file := $(my_prebuilt_src_file) ####################################### # defines built_odex along with rule to install odex +my_manifest_or_apk := $(my_prebuilt_src_file) include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk +my_manifest_or_apk := ####################################### ifneq ($(LOCAL_REPLACE_PREBUILT_APK_INSTALLED),) # There is a replacement for the prebuilt .apk we can install without any processing. @@ -208,7 +209,7 @@ endif ifeq ($(module_run_appcompat),true) $(built_module) : $(AAPT2) endif -$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR) +$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(ZIP2ZIP) $(SIGNAPK_JAR) $(SIGNAPK_JNI_LIBRARY_PATH) $(transform-prebuilt-to-target) $(uncompress-prebuilt-embedded-jni-libs) $(remove-unwanted-prebuilt-embedded-jni-libs) diff --git a/core/base_rules.mk b/core/base_rules.mk index 68f880f26b..5f654a677d 100644 --- a/core/base_rules.mk +++ b/core/base_rules.mk @@ -533,13 +533,17 @@ endif ifndef LOCAL_IS_HOST_MODULE # Rule to install the module's companion init.rc. -my_init_rc := $(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC) +ifneq ($(strip $(LOCAL_FULL_INIT_RC)),) +my_init_rc := $(LOCAL_FULL_INIT_RC) +else +my_init_rc := $(foreach rc,$(LOCAL_INIT_RC_$(my_32_64_bit_suffix)) $(LOCAL_INIT_RC),$(LOCAL_PATH)/$(rc)) +endif ifneq ($(strip $(my_init_rc)),) # Make doesn't support recovery as an output partition, but some Soong modules installed in recovery # have init.rc files that need to be installed alongside them. Manually handle the case where the # output file is in the recovery partition. 
my_init_rc_path := $(if $(filter $(TARGET_RECOVERY_ROOT_OUT)/%,$(my_module_path)),$(TARGET_RECOVERY_ROOT_OUT)/system/etc,$(TARGET_OUT$(partition_tag)_ETC)) -my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(LOCAL_PATH)/$(rc):$(my_init_rc_path)/init/$(notdir $(rc))) +my_init_rc_pairs := $(foreach rc,$(my_init_rc),$(rc):$(my_init_rc_path)/init/$(notdir $(rc))) my_init_rc_installed := $(foreach rc,$(my_init_rc_pairs),$(call word-colon,2,$(rc))) # Make sure we only set up the copy rules once, even if another arch variant @@ -569,9 +573,14 @@ my_vintf_installed:= my_vintf_pairs:= ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE)) ifndef LOCAL_IS_HOST_MODULE -ifneq ($(strip $(LOCAL_VINTF_FRAGMENTS)),) +ifneq ($(strip $(LOCAL_FULL_VINTF_FRAGMENTS)),) +my_vintf_fragments := $(LOCAL_FULL_VINTF_FRAGMENTS) +else +my_vintf_fragments := $(foreach xml,$(LOCAL_VINTF_FRAGMENTS),$(LOCAL_PATH)/$(xml)) +endif +ifneq ($(strip $(my_vintf_fragments)),) -my_vintf_pairs := $(foreach xml,$(LOCAL_VINTF_FRAGMENTS),$(LOCAL_PATH)/$(xml):$(TARGET_OUT$(partition_tag)_ETC)/vintf/manifest/$(notdir $(xml))) +my_vintf_pairs := $(foreach xml,$(my_vintf_fragments),$(xml):$(TARGET_OUT$(partition_tag)_ETC)/vintf/manifest/$(notdir $(xml))) my_vintf_installed := $(foreach xml,$(my_vintf_pairs),$(call word-colon,2,$(xml))) # Only set up copy rules once, even if another arch variant shares it @@ -1001,7 +1010,9 @@ ALL_MODULES.$(my_register_name).TEST_MAINLINE_MODULES := $(LOCAL_TEST_MAINLINE_M ifndef LOCAL_IS_HOST_MODULE ALL_MODULES.$(my_register_name).FILE_CONTEXTS := $(LOCAL_FILE_CONTEXTS) endif +ifdef LOCAL_IS_UNIT_TEST ALL_MODULES.$(my_register_name).IS_UNIT_TEST := $(LOCAL_IS_UNIT_TEST) +endif test_config := INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name) diff --git a/core/binary.mk b/core/binary.mk index 0d7206f1a4..cf47374b5e 100644 --- a/core/binary.mk +++ b/core/binary.mk @@ -471,27 +471,6 @@ ifneq ($(foreach i,$(my_c_includes),$(filter %/..,$(i))$(findstring /../,$(i))), my_soong_problems += dotdot_incs endif -#################################################### -## Add FDO flags if FDO is turned on and supported -## Please note that we will do option filtering during FDO build. -## i.e. Os->O2, remove -fno-early-inline and -finline-limit. -################################################################## -my_fdo_build := -ifneq ($(filter true always, $(LOCAL_FDO_SUPPORT)),) - ifeq ($(BUILD_FDO_INSTRUMENT),true) - my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_CFLAGS) - my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_LDFLAGS) - my_fdo_build := true - else ifneq ($(filter true,$(BUILD_FDO_OPTIMIZE))$(filter always,$(LOCAL_FDO_SUPPORT)),) - my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_OPTIMIZE_CFLAGS) - my_fdo_build := true - endif - # Disable ccache (or other compiler wrapper) except gomacc, which - # can handle -fprofile-use properly. 
- my_cc_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cc_wrapper)) - my_cxx_wrapper := $(filter $(GOMA_CC) $(RBE_WRAPPER),$(my_cxx_wrapper)) -endif - ########################################################### ## Explicitly declare assembly-only __ASSEMBLY__ macro for ## assembly source @@ -1479,12 +1458,6 @@ my_cppflags := $(call convert-to-clang-flags,$(my_cppflags)) my_asflags := $(call convert-to-clang-flags,$(my_asflags)) my_ldflags := $(call convert-to-clang-flags,$(my_ldflags)) -ifeq ($(my_fdo_build), true) - my_cflags := $(patsubst -Os,-O2,$(my_cflags)) - fdo_incompatible_flags := -fno-early-inlining -finline-limit=% - my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags)) -endif - # No one should ever use this flag. On GCC it's mere presence will disable all # warnings, even those that are specified after it (contrary to typical warning # flag behavior). This circumvents CFLAGS_NO_OVERRIDE from forcibly enabling the diff --git a/core/board_config.mk b/core/board_config.mk index a739784fe2..be37292c7e 100644 --- a/core/board_config.mk +++ b/core/board_config.mk @@ -25,6 +25,7 @@ _board_strip_readonly_list += BOARD_EGL_CFG _board_strip_readonly_list += BOARD_HAVE_BLUETOOTH _board_strip_readonly_list += BOARD_INSTALLER_CMDLINE _board_strip_readonly_list += BOARD_KERNEL_CMDLINE +_board_strip_readonly_list += BOARD_BOOT_HEADER_VERSION _board_strip_readonly_list += BOARD_BOOTCONFIG _board_strip_readonly_list += BOARD_KERNEL_BASE _board_strip_readonly_list += BOARD_USES_GENERIC_AUDIO @@ -107,6 +108,8 @@ _board_strip_readonly_list += BOARD_KERNEL_MODULE_INTERFACE_VERSIONS # contains a kernel or not. # - BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT controls whether ramdisk # recovery resources are built to vendor_boot. +# - BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT controls whether recovery +# resources are built as a standalone recovery ramdisk in vendor_boot. # - BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT controls whether GSI AVB keys are # built to vendor_boot. 
# - BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES controls whether boot images in $OUT are added @@ -114,6 +117,7 @@ _board_strip_readonly_list += BOARD_KERNEL_MODULE_INTERFACE_VERSIONS _board_strip_readonly_list += BOARD_USES_GENERIC_KERNEL_IMAGE _board_strip_readonly_list += BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE _board_strip_readonly_list += BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT +_board_strip_readonly_list += BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT _board_strip_readonly_list += BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT _board_strip_readonly_list += BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES @@ -828,12 +832,30 @@ ifndef BUILDING_VENDOR_BOOT_IMAGE ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if not building vendor_boot image) endif -endif +else # BUILDING_VENDOR_BOOT_IMAGE + ifneq (,$(call math_lt,$(BOARD_BOOT_HEADER_VERSION),4)) + ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS + $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if \ + BOARD_BOOT_HEADER_VERSION is less than 4) + endif + ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) + $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \ + BOARD_BOOT_HEADER_VERSION is less than 4) + endif + endif +endif # BUILDING_VENDOR_BOOT_IMAGE ifneq ($(words $(BOARD_VENDOR_RAMDISK_FRAGMENTS)),$(words $(sort $(BOARD_VENDOR_RAMDISK_FRAGMENTS)))) $(error BOARD_VENDOR_RAMDISK_FRAGMENTS has duplicate entries: $(BOARD_VENDOR_RAMDISK_FRAGMENTS)) endif +ifeq (true,$(BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT)) + ifneq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)) + $(error Should not set BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT if \ + BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT is not set) + endif +endif + # If BOARD_USES_GENERIC_KERNEL_IMAGE is set, BOARD_USES_RECOVERY_AS_BOOT must not be set. # Devices without a dedicated recovery partition uses BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT to # build recovery into vendor_boot. 
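As a rough illustration of the constraints enforced above, a board that opts into a standalone recovery ramdisk inside vendor_boot would need settings along these lines in its BoardConfig.mk (a hedged sketch; the extra fragment name is hypothetical):

BOARD_BOOT_HEADER_VERSION := 4
BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT := true
BOARD_INCLUDE_RECOVERY_RAMDISK_IN_VENDOR_BOOT := true
# "recovery" must not also be listed here; the build adds that fragment itself
# when the flag above is true.
BOARD_VENDOR_RAMDISK_FRAGMENTS := dlkm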
diff --git a/core/clear_vars.mk b/core/clear_vars.mk index 019892e5e5..e2acb678d9 100644 --- a/core/clear_vars.mk +++ b/core/clear_vars.mk @@ -100,15 +100,16 @@ LOCAL_EXPORT_STATIC_LIBRARY_HEADERS:= LOCAL_EXTRA_FULL_TEST_CONFIGS:= LOCAL_EXTRACT_APK:= LOCAL_EXTRACT_DPI_APK:= -LOCAL_FDO_SUPPORT:= LOCAL_FILE_CONTEXTS:= LOCAL_FINDBUGS_FLAGS:= LOCAL_FORCE_STATIC_EXECUTABLE:= LOCAL_FULL_CLASSES_JACOCO_JAR:= LOCAL_FULL_CLASSES_PRE_JACOCO_JAR:= +LOCAL_FULL_INIT_RC:= LOCAL_FULL_LIBS_MANIFEST_FILES:= LOCAL_FULL_MANIFEST_FILE:= LOCAL_FULL_TEST_CONFIG:= +LOCAL_FULL_VINTF_FRAGMENTS:= LOCAL_FUZZ_ENGINE:= LOCAL_FUZZ_INSTALLED_SHARED_DEPS:= LOCAL_GCNO_FILES:= diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk index e45c1a6bab..11c19444be 100644 --- a/core/combo/TARGET_linux-arm.mk +++ b/core/combo/TARGET_linux-arm.mk @@ -64,7 +64,6 @@ ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),) endif include $(TARGET_ARCH_SPECIFIC_MAKEFILE) -include $(BUILD_SYSTEM)/combo/fdo.mk define $(combo_var_prefix)transform-shared-lib-to-toc $(call _gen_toc_command_for_elf,$(1),$(2)) diff --git a/core/combo/TARGET_linux-arm64.mk b/core/combo/TARGET_linux-arm64.mk index a3f59a774c..5d481cb8fe 100644 --- a/core/combo/TARGET_linux-arm64.mk +++ b/core/combo/TARGET_linux-arm64.mk @@ -39,7 +39,6 @@ $(error Unknown ARM architecture version: $(TARGET_ARCH_VARIANT)) endif include $(TARGET_ARCH_SPECIFIC_MAKEFILE) -include $(BUILD_SYSTEM)/combo/fdo.mk define $(combo_var_prefix)transform-shared-lib-to-toc $(call _gen_toc_command_for_elf,$(1),$(2)) diff --git a/core/combo/TARGET_linux-x86.mk b/core/combo/TARGET_linux-x86.mk index 2c4614b6cd..acbae519fe 100644 --- a/core/combo/TARGET_linux-x86.mk +++ b/core/combo/TARGET_linux-x86.mk @@ -32,7 +32,6 @@ $(error Unknown $(TARGET_$(combo_2nd_arch_prefix)ARCH) architecture version: $(T endif include $(TARGET_ARCH_SPECIFIC_MAKEFILE) -include $(BUILD_SYSTEM)/combo/fdo.mk define $(combo_var_prefix)transform-shared-lib-to-toc $(call _gen_toc_command_for_elf,$(1),$(2)) diff --git a/core/combo/TARGET_linux-x86_64.mk b/core/combo/TARGET_linux-x86_64.mk index d2172d601e..9e7e3630d1 100644 --- a/core/combo/TARGET_linux-x86_64.mk +++ b/core/combo/TARGET_linux-x86_64.mk @@ -32,7 +32,6 @@ $(error Unknown $(TARGET_ARCH) architecture version: $(TARGET_ARCH_VARIANT)) endif include $(TARGET_ARCH_SPECIFIC_MAKEFILE) -include $(BUILD_SYSTEM)/combo/fdo.mk define $(combo_var_prefix)transform-shared-lib-to-toc $(call _gen_toc_command_for_elf,$(1),$(2)) diff --git a/core/combo/fdo.mk b/core/combo/fdo.mk deleted file mode 100644 index 8fb8fd3a57..0000000000 --- a/core/combo/fdo.mk +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright (C) 2006 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Setup FDO related flags. - -$(combo_2nd_arch_prefix)TARGET_FDO_CFLAGS:= - -# Set BUILD_FDO_INSTRUMENT=true to turn on FDO instrumentation. -# The profile will be generated on /sdcard/fdo_profile on the device. 
-$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_CFLAGS := -fprofile-generate=/sdcard/fdo_profile -DANDROID_FDO -$(combo_2nd_arch_prefix)TARGET_FDO_INSTRUMENT_LDFLAGS := -lgcov -lgcc - -# Set TARGET_FDO_PROFILE_PATH to set a custom profile directory for your build. -ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH)),) - $(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH := vendor/google_data/fdo_profile -endif - -$(combo_2nd_arch_prefix)TARGET_FDO_OPTIMIZE_CFLAGS := \ - -fprofile-use=$($(combo_2nd_arch_prefix)TARGET_FDO_PROFILE_PATH) \ - -DANDROID_FDO -fprofile-correction -Wcoverage-mismatch -Wno-error diff --git a/core/config.mk b/core/config.mk index 3c493df934..d1746ef60b 100644 --- a/core/config.mk +++ b/core/config.mk @@ -1083,12 +1083,13 @@ endef # This produces a list like "current/core current/public current/system 4/public" TARGET_AVAILABLE_SDK_VERSIONS := $(wildcard $(HISTORICAL_SDK_VERSIONS_ROOT)/*/*/android.jar) TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst $(HISTORICAL_SDK_VERSIONS_ROOT)/%/android.jar,%,$(TARGET_AVAILABLE_SDK_VERSIONS)) -# Strips and reorganizes the "public", "core" and "system" subdirs. +# Strips and reorganizes the "public", "core", "system" and "test" subdirs. TARGET_AVAILABLE_SDK_VERSIONS := $(subst /public,,$(TARGET_AVAILABLE_SDK_VERSIONS)) TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/core,core_%,$(TARGET_AVAILABLE_SDK_VERSIONS)) TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/system,system_%,$(TARGET_AVAILABLE_SDK_VERSIONS)) -# No prebuilt for test_current. -TARGET_AVAILABLE_SDK_VERSIONS += test_current +TARGET_AVAILABLE_SDK_VERSIONS := $(patsubst %/test,test_%,$(TARGET_AVAILABLE_SDK_VERSIONS)) +# module-lib and system-server are not supported in Make. +TARGET_AVAILABLE_SDK_VERSIONS := $(filter-out %/module-lib %/system-server,$(TARGET_AVAILABLE_SDK_VERSIONS)) TARGET_AVAIALBLE_SDK_VERSIONS := $(call numerically_sort,$(TARGET_AVAILABLE_SDK_VERSIONS)) TARGET_SDK_VERSIONS_WITHOUT_JAVA_18_SUPPORT := $(call numbers_less_than,24,$(TARGET_AVAILABLE_SDK_VERSIONS)) diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk index 228bad682b..90f00c0e08 100644 --- a/core/config_sanitizers.mk +++ b/core/config_sanitizers.mk @@ -115,14 +115,17 @@ ifeq ($(LOCAL_SANITIZE),never) my_sanitize_diag := endif -# Enable CFI in included paths (for Arm64 only). +# Enable CFI in included paths. 
ifeq ($(filter cfi, $(my_sanitize)),) - ifneq ($(filter arm64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),) - combined_include_paths := $(CFI_INCLUDE_PATHS) \ - $(PRODUCT_CFI_INCLUDE_PATHS) + combined_include_paths := $(CFI_INCLUDE_PATHS) \ + $(PRODUCT_CFI_INCLUDE_PATHS) + combined_exclude_paths := $(CFI_EXCLUDE_PATHS) \ + $(PRODUCT_CFI_EXCLUDE_PATHS) - ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_include_paths)),\ - $(filter $(dir)%,$(LOCAL_PATH)))),) + ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_include_paths)),\ + $(filter $(dir)%,$(LOCAL_PATH)))),) + ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\ + $(filter $(dir)%,$(LOCAL_PATH)))),) my_sanitize := cfi $(my_sanitize) endif endif @@ -135,14 +138,19 @@ ifeq ($(filter memtag_heap, $(my_sanitize)),) $(PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS) combined_async_include_paths := $(MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) \ $(PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) - - ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\ - $(filter $(dir)%,$(LOCAL_PATH)))),) - my_sanitize := memtag_heap $(my_sanitize) - my_sanitize_diag := memtag_heap $(my_sanitize) - else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\ - $(filter $(dir)%,$(LOCAL_PATH)))),) - my_sanitize := memtag_heap $(my_sanitize) + combined_exclude_paths := $(MEMTAG_HEAP_EXCLUDE_PATHS) \ + $(PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS) + + ifeq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\ + $(filter $(dir)%,$(LOCAL_PATH)))),) + ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\ + $(filter $(dir)%,$(LOCAL_PATH)))),) + my_sanitize := memtag_heap $(my_sanitize) + my_sanitize_diag := memtag_heap $(my_sanitize_diag) + else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\ + $(filter $(dir)%,$(LOCAL_PATH)))),) + my_sanitize := memtag_heap $(my_sanitize) + endif endif endif endif @@ -211,10 +219,12 @@ endif ifneq ($(filter memtag_heap,$(my_sanitize)),) # Add memtag ELF note. - ifneq ($(filter memtag_heap,$(my_sanitize_diag)),) - my_whole_static_libraries += note_memtag_heap_sync - else - my_whole_static_libraries += note_memtag_heap_async + ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),) + ifneq ($(filter memtag_heap,$(my_sanitize_diag)),) + my_whole_static_libraries += note_memtag_heap_sync + else + my_whole_static_libraries += note_memtag_heap_async + endif endif # This is all that memtag_heap does - it is not an actual -fsanitize argument. # Remove it from the list. diff --git a/core/definitions.mk b/core/definitions.mk index 2883f0d30c..2951c05626 100644 --- a/core/definitions.mk +++ b/core/definitions.mk @@ -745,6 +745,42 @@ $(strip \ endef ########################################################### +## The packaging directory for a module. Similar to intermedates, but +## in a location that will be wiped by an m installclean. 
+########################################################### + +# $(1): subdir in PACKAGING +# $(2): target class, like "APPS" +# $(3): target name, like "NotePad" +# $(4): { HOST, HOST_CROSS, <empty (TARGET)>, <other non-empty (HOST)> } +define packaging-dir-for +$(strip \ + $(eval _pdfClass := $(strip $(2))) \ + $(if $(_pdfClass),, \ + $(error $(LOCAL_PATH): Class not defined in call to generated-sources-dir-for)) \ + $(eval _pdfName := $(strip $(3))) \ + $(if $(_pdfName),, \ + $(error $(LOCAL_PATH): Name not defined in call to generated-sources-dir-for)) \ + $(call intermediates-dir-for,PACKAGING,$(1),$(4))/$(_pdfClass)/$(_pdfName)_intermediates \ +) +endef + +# Uses LOCAL_MODULE_CLASS, LOCAL_MODULE, and LOCAL_IS_HOST_MODULE +# to determine the packaging directory. +# +# $(1): subdir in PACKAGING +define local-packaging-dir +$(strip \ + $(if $(strip $(LOCAL_MODULE_CLASS)),, \ + $(error $(LOCAL_PATH): LOCAL_MODULE_CLASS not defined before call to local-generated-sources-dir)) \ + $(if $(strip $(LOCAL_MODULE)),, \ + $(error $(LOCAL_PATH): LOCAL_MODULE not defined before call to local-generated-sources-dir)) \ + $(call packaging-dir-for,$(1),$(LOCAL_MODULE_CLASS),$(LOCAL_MODULE),$(if $(strip $(LOCAL_IS_HOST_MODULE)),HOST)) \ +) +endef + + +########################################################### ## Convert a list of short module names (e.g., "framework", "Browser") ## into the list of files that are built for those modules. ## NOTE: this won't return reliable results until after all @@ -1712,7 +1748,6 @@ $(hide) $(PRIVATE_CXX_LINK) \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \ $(PRIVATE_ALL_STATIC_LIBRARIES) \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \ - $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \ $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \ $(PRIVATE_ALL_SHARED_LIBRARIES) \ -o $@ \ @@ -1752,7 +1787,6 @@ $(hide) $(PRIVATE_CXX_LINK) \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \ $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \ $(PRIVATE_TARGET_LIBCRT_BUILTINS) \ - $(PRIVATE_TARGET_LIBATOMIC) \ $(PRIVATE_TARGET_GLOBAL_LDFLAGS) \ $(PRIVATE_LDFLAGS) \ $(PRIVATE_ALL_SHARED_LIBRARIES) \ @@ -1787,7 +1821,6 @@ $(hide) $(PRIVATE_CXX_LINK) -pie \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \ $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \ $(PRIVATE_TARGET_LIBCRT_BUILTINS) \ - $(PRIVATE_TARGET_LIBATOMIC) \ $(PRIVATE_TARGET_GLOBAL_LDFLAGS) \ $(PRIVATE_LDFLAGS) \ $(PRIVATE_ALL_SHARED_LIBRARIES) \ @@ -1831,7 +1864,6 @@ $(hide) $(PRIVATE_CXX_LINK) \ $(filter %libc.a %libc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \ $(filter %libc_nomalloc.a %libc_nomalloc.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \ $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \ - $(PRIVATE_TARGET_LIBATOMIC) \ $(filter %libcompiler_rt.a %libcompiler_rt.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \ $(PRIVATE_TARGET_LIBCRT_BUILTINS) \ -Wl,--end-group \ @@ -1859,7 +1891,6 @@ $(hide) $(PRIVATE_CXX_LINK) \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--start-group) \ $(PRIVATE_ALL_STATIC_LIBRARIES) \ $(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \ - $(if $(filter true,$(NATIVE_COVERAGE)),-lgcov) \ $(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_HOST_LIBPROFILE_RT)) \ $(PRIVATE_ALL_SHARED_LIBRARIES) \ $(foreach path,$(PRIVATE_RPATHS), \ @@ -2734,7 +2765,7 @@ endef define _symlink-file $(3): $(1) @echo "Symlink: $$@ -> $(2)" - @mkdir 
-p $(dir $$@) + @mkdir -p $$(dir $$@) @rm -rf $$@ $(hide) ln -sf $(2) $$@ $(3): .KATI_SYMLINK_OUTPUTS := $(3) diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk index 2762b44296..51238a36ca 100644 --- a/core/dex_preopt_config.mk +++ b/core/dex_preopt_config.mk @@ -105,7 +105,7 @@ ifeq ($(WRITE_SOONG_VARIABLES),true) $(call add_json_str, ProfileDir, $(PRODUCT_DEX_PREOPT_PROFILE_DIR)) $(call add_json_list, BootJars, $(PRODUCT_BOOT_JARS)) $(call add_json_list, UpdatableBootJars, $(PRODUCT_UPDATABLE_BOOT_JARS)) - $(call add_json_list, ArtApexJars, $(ART_APEX_JARS)) + $(call add_json_list, ArtApexJars, $(filter $(PRODUCT_BOOT_JARS),$(ART_APEX_JARS))) $(call add_json_list, SystemServerJars, $(PRODUCT_SYSTEM_SERVER_JARS)) $(call add_json_list, SystemServerApps, $(PRODUCT_SYSTEM_SERVER_APPS)) $(call add_json_list, UpdatableSystemServerJars, $(PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS)) diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk index 4eeca2ba4a..a23bae2f70 100644 --- a/core/dex_preopt_odex_install.mk +++ b/core/dex_preopt_odex_install.mk @@ -31,9 +31,8 @@ ifeq (false,$(LOCAL_DEX_PREOPT)) LOCAL_DEX_PREOPT := endif -# Disable <uses-library> checks and preopt for tests. +# Disable preopt for tests. ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests)) - LOCAL_ENFORCE_USES_LIBRARIES := false LOCAL_DEX_PREOPT := endif @@ -52,25 +51,12 @@ ifneq (true,$(WITH_DEXPREOPT)) LOCAL_DEX_PREOPT := endif -# Disable <uses-library> checks if dexpreopt is globally disabled. -# Without dexpreopt the check is not necessary, and although it is good to have, -# it is difficult to maintain on non-linux build platforms where dexpreopt is -# generally disabled (the check may fail due to various unrelated reasons, such -# as a failure to get manifest from an APK). -ifneq (true,$(WITH_DEXPREOPT)) - LOCAL_ENFORCE_USES_LIBRARIES := false -endif -ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)) - LOCAL_ENFORCE_USES_LIBRARIES := false -endif - ifdef LOCAL_UNINSTALLABLE_MODULE LOCAL_DEX_PREOPT := endif -# Disable <uses-library> checks and preopt if the app contains no java code. +# Disable preopt if the app contains no java code. ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR))) - LOCAL_ENFORCE_USES_LIBRARIES := false LOCAL_DEX_PREOPT := endif @@ -208,6 +194,38 @@ add_json_class_loader_context = \ # Verify <uses-library> coherence between the build system and the manifest. ################################################################################ +# Some libraries do not have a manifest, so there is nothing to check against. +# Handle it as if the manifest had zero <uses-library> tags: it is ok unless the +# module has non-empty LOCAL_USES_LIBRARIES or LOCAL_OPTIONAL_USES_LIBRARIES. +ifndef my_manifest_or_apk + ifneq (,$(strip $(LOCAL_USES_LIBRARIES)$(LOCAL_OPTIONAL_USES_LIBRARIES))) + $(error $(LOCAL_MODULE) has non-empty <uses-library> list but no manifest) + else + LOCAL_ENFORCE_USES_LIBRARIES := false + endif +endif + +# Disable the check for tests. +ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests)) + LOCAL_ENFORCE_USES_LIBRARIES := false +endif + +# Disable the check if the app contains no java code. +ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR))) + LOCAL_ENFORCE_USES_LIBRARIES := false +endif + +# Disable <uses-library> checks if dexpreopt is globally disabled. 
+# Without dexpreopt the check is not necessary, and although it is good to have, +# it is difficult to maintain on non-linux build platforms where dexpreopt is +# generally disabled (the check may fail due to various unrelated reasons, such +# as a failure to get manifest from an APK). +ifneq (true,$(WITH_DEXPREOPT)) + LOCAL_ENFORCE_USES_LIBRARIES := false +else ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)) + LOCAL_ENFORCE_USES_LIBRARIES := false +endif + # Verify LOCAL_USES_LIBRARIES/LOCAL_OPTIONAL_USES_LIBRARIES # If LOCAL_ENFORCE_USES_LIBRARIES is not set, default to true if either of LOCAL_USES_LIBRARIES or # LOCAL_OPTIONAL_USES_LIBRARIES are specified. @@ -360,7 +378,7 @@ ifdef LOCAL_DEX_PREOPT $(call add_json_str, ProfileClassListing, $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE))) $(call add_json_bool, ProfileIsTextListing, $(my_profile_is_text_listing)) $(call add_json_str, EnforceUsesLibrariesStatusFile, $(my_enforced_uses_libraries)) - $(call add_json_bool, EnforceUsesLibraries, $(LOCAL_ENFORCE_USES_LIBRARIES)) + $(call add_json_bool, EnforceUsesLibraries, $(filter true,$(LOCAL_ENFORCE_USES_LIBRARIES))) $(call add_json_str, ProvidesUsesLibrary, $(firstword $(LOCAL_PROVIDES_USES_LIBRARY) $(LOCAL_MODULE))) $(call add_json_map, ClassLoaderContexts) $(call add_json_class_loader_context, any, $(my_dexpreopt_libs)) diff --git a/core/executable_internal.mk b/core/executable_internal.mk index c6a8fafc25..fb14ccea41 100644 --- a/core/executable_internal.mk +++ b/core/executable_internal.mk @@ -41,7 +41,6 @@ my_target_libcrt_builtins := else my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS) endif -my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a ifeq ($(LOCAL_NO_CRT),true) my_target_crtbegin_dynamic_o := my_target_crtbegin_static_o := @@ -61,18 +60,17 @@ my_target_crtbegin_static_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android.sdk.$(my_ndk_crt_version)) endif $(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins) -$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic) $(linked_module): PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O := $(my_target_crtbegin_dynamic_o) $(linked_module): PRIVATE_TARGET_CRTBEGIN_STATIC_O := $(my_target_crtbegin_static_o) $(linked_module): PRIVATE_TARGET_CRTEND_O := $(my_target_crtend_o) $(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD) ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true) -$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX) +$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX) $(transform-o-to-static-executable) $(PRIVATE_POST_LINK_CMD) else -$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libatomic) $(CLANG_CXX) +$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(CLANG_CXX) $(transform-o-to-executable) $(PRIVATE_POST_LINK_CMD) endif diff --git a/core/jacoco.mk b/core/jacoco.mk index e8fb89bf6c..e8c74eea30 100644 --- a/core/jacoco.mk +++ b/core/jacoco.mk @@ -71,7 +71,11 @@ 
$(my_classes_to_report_on_path): $(my_unzipped_timestamp_path) zip -q $@ \ -r $(PRIVATE_UNZIPPED_PATH) - +# Make a rule to copy the jacoco-report-classes.jar to a packaging directory. +$(eval $(call copy-one-file,$(my_classes_to_report_on_path),\ + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)) +$(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar) # make a task that invokes instrumentation my_instrumented_path := $(my_files)/work/instrumented/classes diff --git a/core/java.mk b/core/java.mk index 3f147bae99..123cbe8e22 100644 --- a/core/java.mk +++ b/core/java.mk @@ -470,6 +470,17 @@ endif ifneq ($(filter obfuscation,$(LOCAL_PROGUARD_ENABLED)),) $(built_dex_intermediate): .KATI_IMPLICIT_OUTPUTS := $(proguard_dictionary) $(proguard_configuration) + + # Make a rule to copy the proguard_dictionary to a packaging directory. + $(eval $(call copy-one-file,$(proguard_dictionary),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)) + $(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary) + + $(eval $(call copy-one-file,$(full_classes_pre_proguard_jar),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar)) + $(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar) endif endif # LOCAL_PROGUARD_ENABLED defined diff --git a/core/java_prebuilt_internal.mk b/core/java_prebuilt_internal.mk index 990b7d4c1a..be733ff6c3 100644 --- a/core/java_prebuilt_internal.mk +++ b/core/java_prebuilt_internal.mk @@ -33,7 +33,6 @@ endif ifeq ($(prebuilt_module_is_dex_javalib),true) my_dex_jar := $(my_prebuilt_src_file) -my_manifest_or_apk := $(my_prebuilt_src_file) # This is a target shared library, i.e. a jar with classes.dex. $(foreach pair,$(PRODUCT_BOOT_JARS), \ diff --git a/core/main.mk b/core/main.mk index 1c3a20e134..c9fa14874a 100644 --- a/core/main.mk +++ b/core/main.mk @@ -290,11 +290,25 @@ ADDITIONAL_VENDOR_PROPERTIES += \ ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL) endif +ifneq ($(TARGET_BUILD_VARIANT),user) + ifdef PRODUCT_SET_DEBUGFS_RESTRICTIONS + ADDITIONAL_VENDOR_PROPERTIES += \ + ro.product.debugfs_restrictions.enabled=$(PRODUCT_SET_DEBUGFS_RESTRICTIONS) + endif +endif + # Vendors with GRF must define BOARD_SHIPPING_API_LEVEL for the vendor API level. # This must not be defined for the non-GRF devices. ifdef BOARD_SHIPPING_API_LEVEL ADDITIONAL_VENDOR_PROPERTIES += \ ro.board.first_api_level=$(BOARD_SHIPPING_API_LEVEL) + +# To manually set the vendor API level of the vendor modules, BOARD_API_LEVEL can be used. +# The values of the GRF properties will be verified by post_process_props.py +ifdef BOARD_API_LEVEL +ADDITIONAL_VENDOR_PROPERTIES += \ + ro.board.api_level=$(BOARD_API_LEVEL) +endif endif ADDITIONAL_VENDOR_PROPERTIES += \ @@ -900,7 +914,7 @@ endef # Scan all modules in general-tests, device-tests and other selected suites and # flatten the shared library dependencies. 
define update-host-shared-libs-deps-for-suites -$(foreach suite,general-tests device-tests vts art-host-tests host-unit-tests,\ +$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests,\ $(foreach m,$(COMPATIBILITY.$(suite).MODULES),\ $(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\ $(foreach dep,$(my_deps),\ diff --git a/core/product.mk b/core/product.mk index 7c27614eca..015fe44ba9 100644 --- a/core/product.mk +++ b/core/product.mk @@ -341,6 +341,9 @@ _product_list_vars += PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION # This flag implies PRODUCT_USE_DYNAMIC_PARTITIONS. _product_single_value_vars += PRODUCT_RETROFIT_DYNAMIC_PARTITIONS +# When this is true, various build time as well as runtime debugfs restrictions are enabled. +_product_single_value_vars += PRODUCT_SET_DEBUGFS_RESTRICTIONS + # Other dynamic partition feature flags.PRODUCT_USE_DYNAMIC_PARTITION_SIZE and # PRODUCT_BUILD_SUPER_PARTITION default to the value of PRODUCT_USE_DYNAMIC_PARTITIONS. _product_single_value_vars += \ diff --git a/core/product_config.mk b/core/product_config.mk index d703ee31bb..eb6f69fec6 100644 --- a/core/product_config.mk +++ b/core/product_config.mk @@ -359,6 +359,14 @@ ifeq ($(PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS),) endif endif +ifeq ($(PRODUCT_SET_DEBUGFS_RESTRICTIONS),) + ifdef PRODUCT_SHIPPING_API_LEVEL + ifeq (true,$(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),31)) + PRODUCT_SET_DEBUGFS_RESTRICTIONS := true + endif + endif +endif + ifdef PRODUCT_SHIPPING_API_LEVEL ifneq (,$(call math_gt_or_eq,29,$(PRODUCT_SHIPPING_API_LEVEL))) PRODUCT_PACKAGES += $(PRODUCT_PACKAGES_SHIPPING_API_LEVEL_29) diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml new file mode 100644 index 0000000000..2055df2096 --- /dev/null +++ b/core/rust_device_benchmark_config_template.xml @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Copyright (C) 2021 The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<!-- This test config file is auto-generated. 
--> +<configuration description="Config to run {MODULE} rust benchmark tests."> + <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer"> + <option name="cleanup" value="false" /> + <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" /> + </target_preparer> + + <test class="com.android.tradefed.testtype.rust.RustBinaryTest" > + <option name="test-device-path" value="/data/local/tmp" /> + <option name="module-name" value="{MODULE}" /> + <option name="is-benchmark" value="true" /> + </test> +</configuration> diff --git a/core/rust_host_benchmark_config_template.xml b/core/rust_host_benchmark_config_template.xml new file mode 100644 index 0000000000..bb7c1b5a4d --- /dev/null +++ b/core/rust_host_benchmark_config_template.xml @@ -0,0 +1,22 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Copyright (C) 2021 The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<configuration description="Config to run {MODULE} rust benchmark host tests"> + <test class="com.android.tradefed.testtype.rust.RustBinaryHostTest" > + <option name="test-file" value="{MODULE}" /> + <option name="test-timeout" value="5m" /> + <option name="is-benchmark" value="true" /> + </test> +</configuration> diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk index 12b7f44f44..139de1077d 100644 --- a/core/shared_library_internal.mk +++ b/core/shared_library_internal.mk @@ -39,7 +39,6 @@ my_target_libcrt_builtins := else my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS) endif -my_target_libatomic := $(call intermediates-dir-for,STATIC_LIBRARIES,libatomic,,,$(LOCAL_2ND_ARCH_VAR_PREFIX))/libatomic.a ifeq ($(LOCAL_NO_CRT),true) my_target_crtbegin_so_o := my_target_crtend_so_o := @@ -55,7 +54,6 @@ my_target_crtbegin_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crt my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so.sdk.$(my_ndk_crt_version)) endif $(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins) -$(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic) $(linked_module): PRIVATE_TARGET_CRTBEGIN_SO_O := $(my_target_crtbegin_so_o) $(linked_module): PRIVATE_TARGET_CRTEND_SO_O := $(my_target_crtend_so_o) @@ -65,7 +63,6 @@ $(linked_module): \ $(my_target_crtbegin_so_o) \ $(my_target_crtend_so_o) \ $(my_target_libcrt_builtins) \ - $(my_target_libatomic) \ $(LOCAL_ADDITIONAL_DEPENDENCIES) $(CLANG_CXX) $(transform-o-to-shared-lib) diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk index 295c290bf2..b7c21b862b 100644 --- a/core/soong_app_prebuilt.mk +++ b/core/soong_app_prebuilt.mk @@ -7,7 +7,7 @@ # LOCAL_SOONG_HEADER_JAR # LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR # LOCAL_SOONG_PROGUARD_DICT -# LOCAL_SOONG_PROGUARD_USAGE +# LOCAL_SOONG_PROGUARD_USAGE_ZIP # LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE # LOCAL_SOONG_RRO_DIRS # LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH) @@ -83,23 +83,31 @@ endif ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR $(eval $(call 
copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\ - $(intermediates.COMMON)/jacoco-report-classes.jar)) + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)) $(call add-dependency,$(LOCAL_BUILT_MODULE),\ - $(intermediates.COMMON)/jacoco-report-classes.jar) + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar) endif ifdef LOCAL_SOONG_PROGUARD_DICT $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\ $(intermediates.COMMON)/proguard_dictionary)) + $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)) + $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar)) $(call add-dependency,$(LOCAL_BUILT_MODULE),\ $(intermediates.COMMON)/proguard_dictionary) + $(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary) + $(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar) endif ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\ - $(intermediates.COMMON)/proguard_usage.zip)) + $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)) $(call add-dependency,$(LOCAL_BUILT_MODULE),\ - $(intermediates.COMMON)/proguard_usage.zip) + $(call local-packaging-dir,proguard_usage)/proguard_usage.zip) endif ifdef LOCAL_SOONG_RESOURCE_EXPORT_PACKAGE diff --git a/core/soong_config.mk b/core/soong_config.mk index b87eba1080..ec67560aa0 100644 --- a/core/soong_config.mk +++ b/core/soong_config.mk @@ -240,7 +240,7 @@ $(call add_json_list, InterPartitionJavaLibraryAllowList, $(PRODUCT_INTER_PARTIT $(call add_json_bool, InstallExtraFlattenedApexes, $(PRODUCT_INSTALL_EXTRA_FLATTENED_APEXES)) -$(call add_json_bool, CompressedApex, $(PRODUCT_COMPRESSED_APEX)) +$(call add_json_bool, CompressedApex, $(filter true,$(PRODUCT_COMPRESSED_APEX))) $(call add_json_bool, BoardUsesRecoveryAsBoot, $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))) @@ -256,6 +256,8 @@ $(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_ $(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW))) $(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE))) +$(call add_json_bool, BuildDebugfsRestrictionsEnabled, $(filter true,$(PRODUCT_SET_DEBUGFS_RESTRICTIONS))) + $(call add_json_bool, RequiresInsecureExecmemForSwiftshader, $(filter true,$(PRODUCT_REQUIRES_INSECURE_EXECMEM_FOR_SWIFTSHADER))) $(call add_json_bool, SelinuxIgnoreNeverallows, $(filter true,$(SELINUX_IGNORE_NEVERALLOWS))) diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk index a94e12b045..1ebbf14ecf 100644 --- a/core/soong_java_prebuilt.mk +++ b/core/soong_java_prebuilt.mk @@ -47,23 +47,31 @@ $(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(LOCAL_BUILT_MODULE)) ifdef LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR $(eval $(call copy-one-file,$(LOCAL_SOONG_JACOCO_REPORT_CLASSES_JAR),\ - $(intermediates.COMMON)/jacoco-report-classes.jar)) + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar)) $(call add-dependency,$(common_javalib.jar),\ - $(intermediates.COMMON)/jacoco-report-classes.jar) + $(call local-packaging-dir,jacoco)/jacoco-report-classes.jar) endif ifdef LOCAL_SOONG_PROGUARD_DICT $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\ $(intermediates.COMMON)/proguard_dictionary)) - 
$(call add-dependency,$(LOCAL_BUILT_MODULE),\ + $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_DICT),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary)) + $(eval $(call copy-one-file,$(LOCAL_SOONG_CLASSES_JAR),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar)) + $(call add-dependency,$(common_javalib.jar),\ $(intermediates.COMMON)/proguard_dictionary) + $(call add-dependency,$(common_javalib.jar),\ + $(call local-packaging-dir,proguard_dictionary)/proguard_dictionary) + $(call add-dependency,$(common_javalib.jar),\ + $(call local-packaging-dir,proguard_dictionary)/classes.jar) endif -ifdef LOCAL_SOONG_PROGUARD_USAGE +ifdef LOCAL_SOONG_PROGUARD_USAGE_ZIP $(eval $(call copy-one-file,$(LOCAL_SOONG_PROGUARD_USAGE_ZIP),\ - $(intermediates.COMMON)/proguard_usage.zip)) - $(call add-dependency,$(LOCAL_BUILT_MODULE),\ - $(intermediates.COMMON)/proguard_usage.zip) + $(call local-packaging-dir,proguard_usage)/proguard_usage.zip)) + $(call add-dependency,$(common_javalib.jar),\ + $(call local-packaging-dir,proguard_usage)/proguard_usage.zip) endif @@ -120,9 +128,11 @@ ifdef LOCAL_SOONG_DEX_JAR $(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar))) $(eval $(call add-dependency,$(LOCAL_BUILT_MODULE),$(common_javalib.jar))) - $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar))) - ifneq ($(TURBINE_ENABLED),false) - $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar))) + ifdef LOCAL_SOONG_CLASSES_JAR + $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_jar))) + ifneq ($(TURBINE_ENABLED),false) + $(eval $(call add-dependency,$(common_javalib.jar),$(full_classes_header_jar))) + endif endif endif @@ -156,8 +166,10 @@ ifdef LOCAL_SOONG_DEXPREOPT_CONFIG $(LOCAL_BUILT_MODULE): $(my_dexpreopt_config) endif +ifdef LOCAL_SOONG_CLASSES_JAR javac-check : $(full_classes_jar) javac-check-$(LOCAL_MODULE) : $(full_classes_jar) +endif .PHONY: javac-check-$(LOCAL_MODULE) ifndef LOCAL_IS_HOST_MODULE diff --git a/core/soong_rust_prebuilt.mk b/core/soong_rust_prebuilt.mk index 4cfb01f545..c382f6a01e 100644 --- a/core/soong_rust_prebuilt.mk +++ b/core/soong_rust_prebuilt.mk @@ -40,17 +40,58 @@ endif include $(BUILD_SYSTEM)/base_rules.mk ####################################### +ifneq ($(filter STATIC_LIBRARIES SHARED_LIBRARIES RLIB_LIBRARIES DYLIB_LIBRARIES,$(LOCAL_MODULE_CLASS)),) + # Soong module is a static or shared library + EXPORTS_LIST += $(intermediates) + EXPORTS.$(intermediates).FLAGS := $(LOCAL_EXPORT_CFLAGS) + EXPORTS.$(intermediates).DEPS := $(LOCAL_EXPORT_C_INCLUDE_DEPS) + + SOONG_ALREADY_CONV += $(LOCAL_MODULE) + + my_link_type := $(LOCAL_SOONG_LINK_TYPE) + my_warn_types := + my_allowed_types := + my_link_deps := + my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX) + my_common := + include $(BUILD_SYSTEM)/link_type.mk +endif + + +ifdef LOCAL_USE_VNDK + ifneq ($(LOCAL_VNDK_DEPEND_ON_CORE_VARIANT),true) + name_without_suffix := $(patsubst %.vendor,%,$(LOCAL_MODULE)) + ifneq ($(name_without_suffix),$(LOCAL_MODULE)) + SPLIT_VENDOR.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1 + else + name_without_suffix := $(patsubst %.product,%,$(LOCAL_MODULE)) + ifneq ($(name_without_suffix),$(LOCAL_MODULE)) + SPLIT_PRODUCT.$(LOCAL_MODULE_CLASS).$(name_without_suffix) := 1 + endif + endif + name_without_suffix := + endif +endif + # The real dependency will be added after all Android.mks are loaded and the install paths # of the shared libraries are determined. 
ifdef LOCAL_INSTALLED_MODULE ifdef LOCAL_SHARED_LIBRARIES my_shared_libraries := $(LOCAL_SHARED_LIBRARIES) + ifdef LOCAL_USE_VNDK + my_shared_libraries := $(foreach l,$(my_shared_libraries),\ + $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l))) + endif $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \ $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries)) endif ifdef LOCAL_DYLIB_LIBRARIES my_dylibs := $(LOCAL_DYLIB_LIBRARIES) # Treat these as shared library dependencies for installation purposes. + ifdef LOCAL_USE_VNDK + my_dylibs := $(foreach l,$(my_dylibs),\ + $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l))) + endif $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \ $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs)) endif diff --git a/core/sysprop.mk b/core/sysprop.mk index 359d3d242c..daebdd341f 100644 --- a/core/sysprop.mk +++ b/core/sysprop.mk @@ -122,7 +122,7 @@ endif echo "$$(line)" >> $$@;\ )\ ) - $(hide) $(POST_PROCESS_PROPS) $$(_option) $$@ $(5) + $(hide) $(POST_PROCESS_PROPS) $$(_option) --sdk-version $(PLATFORM_SDK_VERSION) $$@ $(5) $(hide) $(foreach file,$(strip $(6)),\ if [ -f "$(file)" ]; then\ cat $(file) >> $$@;\ diff --git a/core/version_defaults.mk b/core/version_defaults.mk index c9e3e80590..413827705a 100644 --- a/core/version_defaults.mk +++ b/core/version_defaults.mk @@ -240,7 +240,7 @@ ifndef PLATFORM_SECURITY_PATCH # It must be of the form "YYYY-MM-DD" on production devices. # It must match one of the Android Security Patch Level strings of the Public Security Bulletins. # If there is no $PLATFORM_SECURITY_PATCH set, keep it empty. - PLATFORM_SECURITY_PATCH := 2021-03-05 + PLATFORM_SECURITY_PATCH := 2021-04-05 endif .KATI_READONLY := PLATFORM_SECURITY_PATCH diff --git a/envsetup.sh b/envsetup.sh index 344a01af01..b5c729d297 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -9,6 +9,9 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y build, and stores those selections in the environment to be read by subsequent invocations of 'm' etc. - tapas: tapas [<App1> <App2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user] + Sets up the build environment for building unbundled apps (APKs). +- banchan: banchan <module1> [<module2> ...] [arm|x86|arm64|x86_64] [eng|userdebug|user] + Sets up the build environment for building unbundled modules (APEXes). - croot: Changes directory to the top of the tree, or a subdirectory thereof. - m: Makes from the top of the tree. - mm: Builds and installs all of the modules in the current directory, and their @@ -108,7 +111,7 @@ function get_abs_build_var() if [ "$BUILD_VAR_CACHE_READY" = "true" ] then eval "echo \"\${abs_var_cache_$1}\"" - return + return fi local T=$(gettop) @@ -605,7 +608,7 @@ function print_lunch_menu() { local uname=$(uname) local choices - choices=$(TARGET_BUILD_APPS= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null) + choices=$(TARGET_BUILD_APPS= TARGET_PRODUCT= TARGET_BUILD_VARIANT= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null) local ret=$? echo @@ -791,6 +794,60 @@ function tapas() destroy_build_var_cache } +# Configures the build to build unbundled Android modules (APEXes). +# Run banchan with one or more module names (from apex{} modules). 
+function banchan() +{ + local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)" + local product="$(echo $* | xargs -n 1 echo | \grep -E '^(.*_)?(arm|x86|arm64|x86_64)$' | xargs)" + local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)" + local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|(.*_)?(arm|x86|arm64|x86_64))$' | xargs)" + + if [ "$showHelp" != "" ]; then + $(gettop)/build/make/banchanHelp.sh + return + fi + + if [ -z "$product" ]; then + product=arm + elif [ $(echo $product | wc -w) -gt 1 ]; then + echo "banchan: Error: Multiple build archs or products supplied: $product" + return + fi + if [ $(echo $variant | wc -w) -gt 1 ]; then + echo "banchan: Error: Multiple build variants supplied: $variant" + return + fi + if [ -z "$apps" ]; then + echo "banchan: Error: No modules supplied" + return + fi + + case $product in + arm) product=module_arm;; + x86) product=module_x86;; + arm64) product=module_arm64;; + x86_64) product=module_x86_64;; + esac + if [ -z "$variant" ]; then + variant=eng + fi + + export TARGET_PRODUCT=$product + export TARGET_BUILD_VARIANT=$variant + export TARGET_BUILD_DENSITY=alldpi + export TARGET_BUILD_TYPE=release + + # This setup currently uses TARGET_BUILD_APPS just like tapas, but the use + # case is different and it may diverge in the future. + export TARGET_BUILD_APPS=$apps + + build_build_var_cache + set_stuff_for_environment + printconfig + destroy_build_var_cache +} + function gettop { local TOPFILE=build/make/core/envsetup.mk diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk index 342abd71ec..845225da33 100644 --- a/target/board/BoardConfigEmuCommon.mk +++ b/target/board/BoardConfigEmuCommon.mk @@ -74,7 +74,7 @@ endif #vendor boot BOARD_INCLUDE_DTB_IN_BOOTIMG := false -BOARD_BOOT_HEADER_VERSION := 3 +BOARD_BOOT_HEADER_VERSION := 4 BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION) BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE := 0x06000000 BOARD_RAMDISK_USE_LZ4 := true diff --git a/target/board/BoardConfigModuleCommon.mk b/target/board/BoardConfigModuleCommon.mk index 24c01a58ef..983247434e 100644 --- a/target/board/BoardConfigModuleCommon.mk +++ b/target/board/BoardConfigModuleCommon.mk @@ -4,3 +4,7 @@ # Required for all module devices. TARGET_USES_64_BIT_BINDER := true + +# Necessary to make modules able to use the VNDK via 'use_vendor: true' +# TODO(b/185769808): look into whether this is still used. +BOARD_VNDK_VERSION := current diff --git a/target/board/emulator_arm64/BoardConfig.mk b/target/board/emulator_arm64/BoardConfig.mk index 9293625bf6..963e558b6b 100644 --- a/target/board/emulator_arm64/BoardConfig.mk +++ b/target/board/emulator_arm64/BoardConfig.mk @@ -57,9 +57,6 @@ BOARD_USES_RECOVERY_AS_BOOT := true BOARD_BOOTIMAGE_PARTITION_SIZE := 0x02000000 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800 -BOARD_BOOT_HEADER_VERSION := 3 -BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION) - # Wifi.
BOARD_WLAN_DEVICE := emulator BOARD_HOSTAPD_DRIVER := NL80211 diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk index 30c033d7f0..15c311c952 100644 --- a/target/board/generic_arm64/BoardConfig.mk +++ b/target/board/generic_arm64/BoardConfig.mk @@ -74,9 +74,13 @@ BOARD_KERNEL-MAINLINE-LZ4_BOOTIMAGE_PARTITION_SIZE := 53477376 BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800 BOARD_RAMDISK_USE_LZ4 := true -BOARD_BOOT_HEADER_VERSION := 3 +BOARD_BOOT_HEADER_VERSION := 4 BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION) +# Enable GKI 2.0 signing. +BOARD_GKI_SIGNING_KEY_PATH := build/make/target/product/gsi/testkey_rsa2048.pem +BOARD_GKI_SIGNING_ALGORITHM := SHA256_RSA2048 + BOARD_KERNEL_BINARIES := \ kernel-4.19-gz \ kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \ diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk index 37c0f2541a..27dc158fbd 100644 --- a/target/board/generic_arm64/device.mk +++ b/target/board/generic_arm64/device.mk @@ -24,7 +24,12 @@ PRODUCT_COPY_FILES += \ kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4 \ kernel/prebuilts/mainline/arm64/kernel-mainline-allsyms:kernel-mainline \ kernel/prebuilts/mainline/arm64/kernel-mainline-gz-allsyms:kernel-mainline-gz \ - kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4 + kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4 \ + +$(call dist-for-goals, dist_files, kernel/prebuilts/4.19/arm64/prebuilt-info.txt:kernel/4.19/prebuilt-info.txt) +$(call dist-for-goals, dist_files, kernel/prebuilts/5.4/arm64/prebuilt-info.txt:kernel/5.4/prebuilt-info.txt) +$(call dist-for-goals, dist_files, kernel/prebuilts/5.10/arm64/prebuilt-info.txt:kernel/5.10/prebuilt-info.txt) +$(call dist-for-goals, dist_files, kernel/prebuilts/mainline/arm64/prebuilt-info.txt:kernel/mainline/prebuilt-info.txt) ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT))) PRODUCT_COPY_FILES += \ diff --git a/target/product/base_system.mk b/target/product/base_system.mk index 4fdfc889e3..21beda9ebf 100644 --- a/target/product/base_system.mk +++ b/target/product/base_system.mk @@ -116,7 +116,6 @@ PRODUCT_PACKAGES += \ iptables \ ip-up-vpn \ javax.obex \ - keystore \ keystore2 \ credstore \ ld.mc \ @@ -292,10 +291,16 @@ endif ifeq ($(EMMA_INSTRUMENT),true) ifneq ($(EMMA_INSTRUMENT_STATIC),true) # For instrumented build, if Jacoco is not being included statically - # in instrumented packages then include Jacoco classes into the - # bootclasspath. + # in instrumented packages then include Jacoco classes in the product + # packages. PRODUCT_PACKAGES += jacocoagent - PRODUCT_BOOT_JARS += jacocoagent + ifneq ($(EMMA_INSTRUMENT_FRAMEWORK),true) + # For instrumented build, if Jacoco is not being included statically + # in instrumented packages and has not already been included in the + # bootclasspath via ART_APEX_JARS then include Jacoco classes into the + # bootclasspath. + PRODUCT_BOOT_JARS += jacocoagent + endif # EMMA_INSTRUMENT_FRAMEWORK endif # EMMA_INSTRUMENT_STATIC endif # EMMA_INSTRUMENT @@ -396,8 +401,4 @@ PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\ PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\ frameworks/base/config/dirty-image-objects:system/etc/dirty-image-objects) -# This property allows enabling Keystore 2.0 selectively for testing. -# TODO Remove when Keystore 2.0 migration is complete. 
b/171563717 -PRODUCT_SYSTEM_PROPERTIES += persist.android.security.keystore2.enable=true - $(call inherit-product, $(SRC_TARGET_DIR)/product/runtime_libart.mk) diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk index 15457803f3..bb17dda3c6 100644 --- a/target/product/default_art_config.mk +++ b/target/product/default_art_config.mk @@ -39,16 +39,6 @@ PRODUCT_UPDATABLE_BOOT_JARS := \ com.android.tethering:framework-tethering \ com.android.ipsec:android.net.ipsec.ike -# Add the compatibility library that is needed when android.test.base -# is removed from the bootclasspath. -# Default to excluding android.test.base from the bootclasspath. -ifneq ($(REMOVE_ATB_FROM_BCP),false) - PRODUCT_PACKAGES += framework-atb-backward-compatibility - PRODUCT_BOOT_JARS += framework-atb-backward-compatibility -else - PRODUCT_BOOT_JARS += android.test.base -endif - # Minimal configuration for running dex2oat (default argument values). # PRODUCT_USES_DEFAULT_ART_CONFIG must be true to enable boot image compilation. PRODUCT_USES_DEFAULT_ART_CONFIG := true diff --git a/target/product/gsi/gsi_skip_mount.cfg b/target/product/gsi/gsi_skip_mount.cfg index ad3c7d9382..28f4349666 100644 --- a/target/product/gsi/gsi_skip_mount.cfg +++ b/target/product/gsi/gsi_skip_mount.cfg @@ -1,3 +1,9 @@ +# Skip "system" mountpoints. /oem /product /system_ext +# Skip sub-mountpoints of system mountpoints. +/oem/* +/product/* +/system_ext/* +/system/* diff --git a/target/product/gsi/testkey_rsa2048.pem b/target/product/gsi/testkey_rsa2048.pem new file mode 100644 index 0000000000..64de31cf42 --- /dev/null +++ b/target/product/gsi/testkey_rsa2048.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA3fDgwU4JKVRHhAfofi/g8daTNplB2mTJCX9fIMy9FnZDXNij +1zijRQ8HKbt3bAGImQvb3GxSV4M5eIdiLDUF7RsUpE7K+s939i/AaTtcuyqimQbJ +QjP9emTsgngHzuKWMg1mwlRZYDfdv62zIQmZcbM9a0CZE36hAYvEBiDB8qT4ob++ +godGAx3rpF2Wi7mhIYDINvkCw8/16Qi9CZgvOUrEolt3mz8Sps41z9j7YAsPbAa8 +fg7dUu61s6NkZEykl4G67loOaf7h+SyP//LpFZ0gV+STZ+EMGofL0SXb8A+hdIYE +QxsnKUYo8e+GaQg92FLxVZqcfyG3AZuMB04R1QIDAQABAoIBAQDGj3/1UaSepjlJ +ZW3an2lH1Cpm2ZxyEGNQLPVluead1vaTdXq3zYM9AKHu8zp3lbOpAVQVk4/jnZJo +Q+9QD6waonTIP3oYBE+WIMirHSHsjctkzw52PV9VBkAWxd5ueIfZheXejGpdy/2H +RJcTQqxWbf7QGr4ZE9xmLq4UsW/zbXwy8qGEp9eMQIIaWBua43FkqmWYLSnVFVJI +Gl8mfVJctLNSZHhS3tKiV8up6NxZlDjO8o7kYVFCkv0xJ9yzQNBc3P2MEmvfZ06D +QnimHBqSxr0M9X6hqP43CnqtCbpsHS8A12Dm4l6fkXfkrAY0UNrEaCSDb8aN7TEc +7bc1MB4NAoGBAPK7xSuvQE9CH05Iy+G6mEQTtNmpfcQosqhi6dF60h4bqlkeGzUu +gF/PKHwwffHAxQSv4V831P3A/IoJFa9IFkg218mYPNfzpj4vJA4aNCDp+SYZAIYm +h6hMOmuByI97wds2yCBGt4mP0eow5B3A1b3UQeqW6LVSuobZ22QVlSk/AoGBAOoS +L82yda9hUa7vuXtqTraf9EGjSXhyjoPqWxa+a1ooI9l24f7mokS5Iof+a/SLfPUj +pwj8eOeOZksjAaWJIdrRb3TaYLaqhDkWQeV5N5XxYbn3+TvVJQyR+OSBfGoEpVP/ +IS6fusvpT3eULJDax10By+gDcoLT5M1FNs4rBIvrAoGBAM8yJP5DHDwLjzl9vjsy +0iLaR3e8zBQTQV2nATvFAXKd3u0vW74rsX0XEdHgesFP8V0s3M4wlGj+wRL66j2y +5QJDfjMg9l7IJlHSX46CI5ks33X7xYy9evLYDs4R/Kct1q5OtsmGU8jisSadETus +jUb61kFvC7krovjVIgbuvWJ1AoGAVikzp4gVgeVU6AwePqu3JcpjYvX0SX4Br9VI +imq1oY49BAOa1PWYratoZp7kpjPiX2osRkaJStNEHExagtCjwaRuXpk0GIlT+p+S +yiGAsJUV4BrDh57B8IqbD6IKZgwnv2+ei0cIv562PdIxRXEDCd1rbZA3SqktA9KC +hgmXttkCgYBPU1lqRpwoHP9lpOBTDa6/Xi6WaDEWrG/tUF/wMlvrZ4hEVMDJRs1d +9JCXBxL/O4TMvpmyVKBZW15iZOcLM3EpiZ00UD+ChcAaFstup+oYKrs8gL9hgyTd +cvWMxGQm13KwSj2CLzEQpPAN5xG14njXaee5ksshxkzBz9z3MVWiiw== +-----END RSA PRIVATE KEY----- diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk index e8f1c2e35c..25fa68b644 100644 --- a/target/product/gsi_release.mk +++ 
b/target/product/gsi_release.mk @@ -31,8 +31,10 @@ PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST += \ system/product/% \ system/system_ext/% -# Split selinux policy -PRODUCT_FULL_TREBLE_OVERRIDE := true +# GSI should always support up-to-date platform features. +# Keep this value at the latest API level to ensure latest build system +# default configs are applied. +PRODUCT_SHIPPING_API_LEVEL := 30 # Enable dynamic partitions to facilitate mixing onto Cuttlefish PRODUCT_USE_DYNAMIC_PARTITIONS := true diff --git a/target/product/media_system.mk b/target/product/media_system.mk index 143131e3bc..c7ac9071b4 100644 --- a/target/product/media_system.mk +++ b/target/product/media_system.mk @@ -57,6 +57,7 @@ PRODUCT_SYSTEM_SERVER_JARS := \ # system server jars which are updated via apex modules. # The values should be of the format <apex name>:<jar name> PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \ + com.android.art:service-art \ com.android.permission:service-permission \ PRODUCT_COPY_FILES += \ diff --git a/target/product/module_common.mk b/target/product/module_common.mk index eedd47999c..03340db542 100644 --- a/target/product/module_common.mk +++ b/target/product/module_common.mk @@ -16,3 +16,8 @@ $(call inherit-product, $(SRC_TARGET_DIR)/product/default_art_config.mk) $(call inherit-product, $(SRC_TARGET_DIR)/product/languages_default.mk) +$(call inherit-product, $(SRC_TARGET_DIR)/product/cfi-common.mk) + +# Enables treble, which enables certain -D compilation flags. In particular, libhidlbase +# uses -DENFORCE_VINTF_MANIFEST. See b/185759877 +PRODUCT_SHIPPING_API_LEVEL := 29 diff --git a/target/product/security/Android.bp b/target/product/security/Android.bp index 98698c579e..99f774252a 100644 --- a/target/product/security/Android.bp +++ b/target/product/security/Android.bp @@ -13,7 +13,16 @@ android_app_certificate { certificate: "testkey", } -// Google-owned certificate for CTS testing, since we can't trust arbitrary keys on release devices. +// Certificate for CTS tests that rely on UICC hardware conforming to the +// updated CTS UICC card specification introduced in 2021. See +// //cts/tests/tests/carrierapi/Android.bp for more details. +android_app_certificate { + name: "cts-uicc-2021-testkey", + certificate: "cts_uicc_2021", +} + +// Google-owned certificate for CTS testing, since we can't trust arbitrary keys +// on release devices.
prebuilt_etc { name: "fsverity-release-cert-der", src: "fsverity-release.x509.der", diff --git a/target/product/security/README b/target/product/security/README index 6e75e4de01..2b161bb0ee 100644 --- a/target/product/security/README +++ b/target/product/security/README @@ -11,10 +11,11 @@ key generation The following commands were used to generate the test key pairs: - development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' - development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' - development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' - development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' + development/tools/make_key testkey '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' + development/tools/make_key platform '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' + development/tools/make_key shared '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' + development/tools/make_key media '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' + development/tools/make_key cts_uicc_2021 '/C=US/ST=California/L=Mountain View/O=Android/OU=Android/CN=Android/emailAddress=android@android.com' signing using the openssl commandline (for boot/system images) -------------------------------------------------------------- diff --git a/target/product/security/cts_uicc_2021.pk8 b/target/product/security/cts_uicc_2021.pk8 Binary files differnew file mode 100644 index 0000000000..3b2a7fa94a --- /dev/null +++ b/target/product/security/cts_uicc_2021.pk8 diff --git a/target/product/security/cts_uicc_2021.x509.pem b/target/product/security/cts_uicc_2021.x509.pem new file mode 100644 index 0000000000..744afea80e --- /dev/null +++ b/target/product/security/cts_uicc_2021.x509.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIECzCCAvOgAwIBAgIUHYLIIL60vWPD6aOBwZUcdbsae+cwDQYJKoZIhvcNAQEL +BQAwgZQxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH +DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy +b2lkMRAwDgYDVQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFu +ZHJvaWQuY29tMB4XDTIxMDEyNjAwMjAyMVoXDTQ4MDYxMzAwMjAyMVowgZQxCzAJ +BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFp +biBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRAwDgYD +VQQDDAdBbmRyb2lkMSIwIAYJKoZIhvcNAQkBFhNhbmRyb2lkQGFuZHJvaWQuY29t +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlOMSHqBu0ihUDfFgwMfO +pJtpyxHe0KKfHRndUQcYU/1v6/auy2YqkgKv+AraoukuU3gJeOiWoaqaWFNcm6md +WfGRNT4oABhhNS43n5PI4NlLjI4yeUJJppZn5LPpc/8vZ0P8ZFE9CJmtckCh+hES +BzqnxkCnq1PoxlcF3S/f8lOtd6ymaMDf3sYcePaoU8yTWFksl7EWRVwhBUIf7/r8 +epbNiV14/aH2cQfHVfpf54TIdk7s0/ehVA70A5gQp7Utn6mY2zEJlMrTKWRqA/a5 +oYiob3y+v2JWNcljHY6twwDOGwW7G0NWJVtaWj76Z3o9RpIhAglivhOrHTflIU3+ +2QIDAQABo1MwUTAdBgNVHQ4EFgQUZJ1oGb33n/OY+Mm8ykci4I6c9OcwHwYDVR0j +BBgwFoAUZJ1oGb33n/OY+Mm8ykci4I6c9OcwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAQEASajvU0KCN2kfATPV95LQVE3N/URPi/lX9MfQptE54E+R +6dHwHQIwU/fBFapAHfGgrpwUZftJO+Bad2iu5s1IhTJ0Q5v0yHdvWfo4EzVeMzPV ++/DWU786pPEomFkb9ZKhgVkFNPcbXlkUm/9HxRHPRTm8x+BE/75PKI+kh+pDmM+P 
+5v4W0qDKPgFzIY/D4F++gVyPZ3O+/GhunjsJozO+dvN+50FH6o/kBHm2+QqQNYPW +f232F3CYtH4uWI0TkbwmSvVGW8iOqh330Cef5zqwSdOkzybUirXFsHUu1Zad1aLT +t0mu6RgNEmX8efOQCcz2Z/on8lkIAxCBwLX7wkH5JA== +-----END CERTIFICATE----- diff --git a/target/product/updatable_apex.mk b/target/product/updatable_apex.mk index c8dc8b0027..d606e0037d 100644 --- a/target/product/updatable_apex.mk +++ b/target/product/updatable_apex.mk @@ -22,4 +22,9 @@ ifneq ($(OVERRIDE_TARGET_FLATTEN_APEX),true) PRODUCT_PACKAGES += com.android.apex.cts.shim.v1_prebuilt PRODUCT_VENDOR_PROPERTIES := ro.apex.updatable=true TARGET_FLATTEN_APEX := false + # Use compressed apexes in pre-installed partitions. + # Note: this doesn't mean that all pre-installed apexes will be compressed. + # Whether an apex is compressed or not is controlled at apex Soong module + # via compresible property. + PRODUCT_COMPRESSED_APEX := true endif diff --git a/tools/build-license-metadata.sh b/tools/build-license-metadata.sh index 3bad358dba..a138dbe7c4 100755 --- a/tools/build-license-metadata.sh +++ b/tools/build-license-metadata.sh @@ -201,6 +201,7 @@ calculate_effective_conditions() { for d in ${depfiles}; do if cat "${d}" | egrep -q 'effective_condition\s*:.*restricted' ; then lconditions="${lconditions}${lconditions:+ }restricted" + break fi done ;; diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py index 18f2166fbd..bf958fbae4 100755 --- a/tools/generate-notice-files.py +++ b/tools/generate-notice-files.py @@ -231,8 +231,8 @@ def main(argv): input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir] # Find all the notice files and md5 them + files_with_same_hash = defaultdict(list) for input_dir in input_dirs: - files_with_same_hash = defaultdict(list) for root, dir, files in os.walk(input_dir): for file in files: matched = True @@ -254,8 +254,7 @@ def main(argv): file_md5sum = md5sum(filename) files_with_same_hash[file_md5sum].append(filename) - filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())] - + filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())] combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title) if html_output_file is not None: diff --git a/tools/post_process_props.py b/tools/post_process_props.py index d8c9cb157f..efbf614fd5 100755 --- a/tools/post_process_props.py +++ b/tools/post_process_props.py @@ -42,7 +42,46 @@ def mangle_build_prop(prop_list): # default to "adb". That might not the right policy there, but it's better # to be explicit. if not prop_list.get_value("persist.sys.usb.config"): - prop_list.put("persist.sys.usb.config", "none"); + prop_list.put("persist.sys.usb.config", "none") + +def validate_grf_props(prop_list, sdk_version): + """Validate GRF properties if exist. + + If ro.board.first_api_level is defined, check if its value is valid for the + sdk version. + Also, validate the value of ro.board.api_level if defined. + + Returns: + True if the GRF properties are valid. 
+ """ + grf_api_level = prop_list.get_value("ro.board.first_api_level") + board_api_level = prop_list.get_value("ro.board.api_level") + + if not grf_api_level: + if board_api_level: + sys.stderr.write("error: non-GRF device must not define " + "ro.board.api_level\n") + return False + # non-GRF device skips the GRF validation test + return True + + grf_api_level = int(grf_api_level) + if grf_api_level > sdk_version: + sys.stderr.write("error: ro.board.first_api_level(%d) must be less than " + "or equal to ro.build.version.sdk(%d)\n" + % (grf_api_level, sdk_version)) + return False + + if board_api_level: + board_api_level = int(board_api_level) + if board_api_level < grf_api_level or board_api_level > sdk_version: + sys.stderr.write("error: ro.board.api_level(%d) must be neither less " + "than ro.board.first_api_level(%d) nor greater than " + "ro.build.version.sdk(%d)\n" + % (board_api_level, grf_api_level, sdk_version)) + return False + + return True def validate(prop_list): """Validate the properties. @@ -215,6 +254,7 @@ def main(argv): default=False) parser.add_argument("filename") parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*") + parser.add_argument("--sdk-version", type=int, required=True) args = parser.parse_args() if not args.filename.endswith("/build.prop"): @@ -225,6 +265,8 @@ def main(argv): mangle_build_prop(props) if not override_optional_props(props, args.allow_dup): sys.exit(1) + if not validate_grf_props(props, args.sdk_version): + sys.exit(1) if not validate(props): sys.exit(1) diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py index d7de85b903..00bbb212df 100644 --- a/tools/releasetools/add_img_to_target_files.py +++ b/tools/releasetools/add_img_to_target_files.py @@ -62,7 +62,7 @@ import common import verity_utils import ota_metadata_pb2 -from apex_utils import GetSystemApexInfoFromTargetFiles +from apex_utils import GetApexInfoFromTargetFiles from common import AddCareMapForAbOta if sys.hexversion < 0x02070000: @@ -134,13 +134,12 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None): "board_uses_vendorimage") == "true" if (OPTIONS.rebuild_recovery and not board_uses_vendorimage and - recovery_img is not None and boot_img is not None): + recovery_img is not None and boot_img is not None): logger.info("Building new recovery patch on system at system/vendor") common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, boot_img, info_dict=OPTIONS.info_dict) - block_list = OutputFile(output_zip, OPTIONS.input_tmp, - "IMAGES", "system.map") + block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map") CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img, block_list=block_list) return img.name @@ -183,13 +182,12 @@ def AddVendor(output_zip, recovery_img=None, boot_img=None): "board_uses_vendorimage") == "true" if (OPTIONS.rebuild_recovery and board_uses_vendorimage and - recovery_img is not None and boot_img is not None): + recovery_img is not None and boot_img is not None): logger.info("Building new recovery patch on vendor") common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, boot_img, info_dict=OPTIONS.info_dict) - block_list = OutputFile(output_zip, OPTIONS.input_tmp, - "IMAGES", "vendor.map") + block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map") CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img, block_list=block_list) return img.name @@ -261,7 +259,6 @@ def AddVendorDlkm(output_zip): 
block_list=block_list) return img.name - def AddOdmDlkm(output_zip): """Turn the contents of OdmDlkm into an odm_dlkm image and store it in output_zip.""" @@ -313,7 +310,6 @@ def AddDtbo(output_zip): img.Write() return img.name - def AddPvmfw(output_zip): """Adds the pvmfw image. @@ -349,7 +345,6 @@ def AddPvmfw(output_zip): img.Write() return img.name - def AddCustomImages(output_zip, partition_name): """Adds and signs custom images in IMAGES/. @@ -378,16 +373,15 @@ def AddCustomImages(output_zip, partition_name): key_path, algorithm, extra_args) for img_name in OPTIONS.info_dict.get( - "avb_{}_image_list".format(partition_name)).split(): - custom_image = OutputFile( - output_zip, OPTIONS.input_tmp, "IMAGES", img_name) + "avb_{}_image_list".format(partition_name)).split(): + custom_image = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img_name) if os.path.exists(custom_image.name): continue custom_image_prebuilt_path = os.path.join( OPTIONS.input_tmp, "PREBUILT_IMAGES", img_name) assert os.path.exists(custom_image_prebuilt_path), \ - "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path) + "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path) shutil.copy(custom_image_prebuilt_path, custom_image.name) @@ -685,15 +679,14 @@ def HasPartition(partition_name): return ((os.path.isdir( os.path.join(OPTIONS.input_tmp, partition_name.upper())) and - OPTIONS.info_dict.get( - "building_{}_image".format(partition_name)) == "true") or - os.path.exists( - os.path.join(OPTIONS.input_tmp, "IMAGES", - "{}.img".format(partition_name)))) - + OPTIONS.info_dict.get( + "building_{}_image".format(partition_name)) == "true") or + os.path.exists( + os.path.join(OPTIONS.input_tmp, "IMAGES", + "{}.img".format(partition_name)))) def AddApexInfo(output_zip): - apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp) + apex_infos = GetApexInfoFromTargetFiles(OPTIONS.input_tmp, 'system') apex_metadata_proto = ota_metadata_pb2.ApexMetadata() apex_metadata_proto.apex_info.extend(apex_infos) apex_info_bytes = apex_metadata_proto.SerializeToString() @@ -779,7 +772,7 @@ def AddImagesToTargetFiles(filename): boot_images = OPTIONS.info_dict.get("boot_images") if boot_images is None: boot_images = "boot.img" - for index, b in enumerate(boot_images.split()): + for index,b in enumerate(boot_images.split()): # common.GetBootableImage() returns the image directly if present. boot_image = common.GetBootableImage( "IMAGES/" + b, b, OPTIONS.input_tmp, "BOOT") @@ -934,7 +927,7 @@ def AddImagesToTargetFiles(filename): if OPTIONS.info_dict.get("build_super_partition") == "true": if OPTIONS.info_dict.get( - "build_retrofit_dynamic_partitions_ota_package") == "true": + "build_retrofit_dynamic_partitions_ota_package") == "true": banner("super split images") AddSuperSplit(output_zip) @@ -951,7 +944,9 @@ def AddImagesToTargetFiles(filename): # Generate care_map.pb for ab_partitions, then write this file to # target_files package. - AddCareMapForAbOta(output_zip, ab_partitions, partitions) + output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb") + AddCareMapForAbOta(output_zip if output_zip else output_care_map, + ab_partitions, partitions) # Radio images that need to be packed into IMAGES/, and product-img.zip. 
pack_radioimages_txt = os.path.join( @@ -1000,7 +995,6 @@ def main(argv): AddImagesToTargetFiles(args[0]) logger.info("done.") - if __name__ == '__main__': try: common.CloseInheritedPipes() diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py index 1c88053454..893266f95a 100644 --- a/tools/releasetools/apex_utils.py +++ b/tools/releasetools/apex_utils.py @@ -516,7 +516,7 @@ def SignApex(avbtool, apex_data, payload_key, container_key, container_pw, raise ApexInfoError( 'Failed to get type for {}:\n{}'.format(apex_file, e)) -def GetSystemApexInfoFromTargetFiles(input_file): +def GetApexInfoFromTargetFiles(input_file, partition, compressed_only=True): """ Get information about system APEX stored in the input_file zip @@ -532,15 +532,17 @@ def GetSystemApexInfoFromTargetFiles(input_file): if not isinstance(input_file, str): raise RuntimeError("must pass filepath to target-files zip or directory") + apex_subdir = os.path.join(partition.upper(), 'apex') if os.path.isdir(input_file): tmp_dir = input_file else: - tmp_dir = UnzipTemp(input_file, ["SYSTEM/apex/*"]) - target_dir = os.path.join(tmp_dir, "SYSTEM/apex/") + tmp_dir = UnzipTemp(input_file, [os.path.join(apex_subdir, '*')]) + target_dir = os.path.join(tmp_dir, apex_subdir) # Partial target-files packages for vendor-only builds may not contain # a system apex directory. if not os.path.exists(target_dir): + logger.info('No APEX directory at path: %s', target_dir) return [] apex_infos = [] @@ -585,6 +587,7 @@ def GetSystemApexInfoFromTargetFiles(input_file): '--output', decompressed_file_path]) apex_info.decompressed_size = os.path.getsize(decompressed_file_path) + if not compressed_only or apex_info.is_compressed: apex_infos.append(apex_info) return apex_infos diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py index 3726df6b62..301d0dafef 100755 --- a/tools/releasetools/build_image.py +++ b/tools/releasetools/build_image.py @@ -73,9 +73,9 @@ def GetInodeUsage(path): """ cmd = ["find", path, "-print"] output = common.RunAndCheckOutput(cmd, verbose=False) - # increase by > 4% as number of files and directories is not whole picture. + # increase by > 6% as number of files and directories is not whole picture. inodes = output.count('\n') - spare_inodes = inodes * 4 // 100 + spare_inodes = inodes * 6 // 100 min_spare_inodes = 12 if spare_inodes < min_spare_inodes: spare_inodes = min_spare_inodes diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index f025bb6298..83425cc8d6 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -138,7 +138,6 @@ PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot'] # existing search paths. RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop'] - class ErrorCode(object): """Define error_codes for failures that happen during the actual update package installation. @@ -227,7 +226,6 @@ def InitLogging(): def SetHostToolLocation(tool_name, location): OPTIONS.host_tools[tool_name] = location - def FindHostToolPath(tool_name): """Finds the path to the host tool. @@ -248,7 +246,6 @@ def FindHostToolPath(tool_name): return tool_name - def Run(args, verbose=None, **kwargs): """Creates and returns a subprocess.Popen object. @@ -464,7 +461,7 @@ class BuildInfo(object): """Returns the inquired build property for the provided partition.""" # Boot image uses ro.[product.]bootimage instead of boot. 
- prop_partition = "bootimage" if partition == "boot" else partition + prop_partition = "bootimage" if partition == "boot" else partition # If provided a partition for this property, only look within that # partition's build.prop. @@ -659,6 +656,13 @@ class RamdiskFormat(object): LZ4 = 1 GZ = 2 +def _GetRamdiskFormat(info_dict): + if info_dict.get('lz4_ramdisks') == 'true': + ramdisk_format = RamdiskFormat.LZ4 + else: + ramdisk_format = RamdiskFormat.GZ + return ramdisk_format + def LoadInfoDict(input_file, repacking=False): """Loads the key/value pairs from the given input target_files. @@ -760,10 +764,7 @@ def LoadInfoDict(input_file, repacking=False): # Load recovery fstab if applicable. d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper) - if d.get('lz4_ramdisks') == 'true': - ramdisk_format = RamdiskFormat.LZ4 - else: - ramdisk_format = RamdiskFormat.GZ + ramdisk_format = _GetRamdiskFormat(d) # Tries to load the build props for all partitions with care_map, including # system and vendor. @@ -780,8 +781,7 @@ def LoadInfoDict(input_file, repacking=False): for partition in PARTITIONS_WITH_BUILD_PROP: fingerprint = build_info.GetPartitionFingerprint(partition) if fingerprint: - d["avb_{}_salt".format(partition)] = sha256( - fingerprint.encode()).hexdigest() + d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest() try: d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n") except KeyError: @@ -789,6 +789,7 @@ def LoadInfoDict(input_file, repacking=False): return d + def LoadListFromFile(file_path): with open(file_path) as f: return f.read().splitlines() @@ -1105,7 +1106,7 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict): return " ".join(sorted(combined)) if (framework_dict.get("use_dynamic_partitions") != - "true") or (vendor_dict.get("use_dynamic_partitions") != "true"): + "true") or (vendor_dict.get("use_dynamic_partitions") != "true"): raise ValueError("Both dictionaries must have use_dynamic_partitions=true") merged_dict = {"use_dynamic_partitions": "true"} @@ -1450,7 +1451,8 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions): AddAftlInclusionProof(image_path) -def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False): +def _MakeRamdisk(sourcedir, fs_config_file=None, + ramdisk_format=RamdiskFormat.GZ): ramdisk_img = tempfile.NamedTemporaryFile() if fs_config_file is not None and os.access(fs_config_file, os.F_OK): @@ -1459,11 +1461,13 @@ def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False): else: cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")] p1 = Run(cmd, stdout=subprocess.PIPE) - if lz4_ramdisks: + if ramdisk_format == RamdiskFormat.LZ4: p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) - else: + elif ramdisk_format == RamdiskFormat.GZ: p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) + else: + raise ValueError("Only support lz4 or minigzip ramdisk format.") p2.wait() p1.wait() @@ -1510,8 +1514,9 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None, img = tempfile.NamedTemporaryFile() if has_ramdisk: - use_lz4 = info_dict.get("lz4_ramdisks") == 'true' - ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4) + ramdisk_format = _GetRamdiskFormat(info_dict) + ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, + ramdisk_format=ramdisk_format) # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set mkbootimg = 
os.getenv('MKBOOTIMG') or "mkbootimg" @@ -1583,7 +1588,7 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None, RunAndCheckOutput(cmd) if (info_dict.get("boot_signer") == "true" and - info_dict.get("verity_key")): + info_dict.get("verity_key")): # Hard-code the path as "/boot" for two-step special recovery image (which # will be loaded into /boot during the two-step OTA). if two_step_image: @@ -1699,8 +1704,8 @@ def _BuildVendorBootImage(sourcedir, info_dict=None): img = tempfile.NamedTemporaryFile() - use_lz4 = info_dict.get("lz4_ramdisks") == 'true' - ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4) + ramdisk_format = _GetRamdiskFormat(info_dict) + ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format) # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" @@ -1748,19 +1753,16 @@ def _BuildVendorBootImage(sourcedir, info_dict=None): if os.access(fn, os.F_OK): ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n")) for ramdisk_fragment in ramdisk_fragments: - fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", - ramdisk_fragment, "mkbootimg_args") + fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args") cmd.extend(shlex.split(open(fn).read().rstrip("\n"))) - fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", - ramdisk_fragment, "prebuilt_ramdisk") + fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk") # Use prebuilt image if found, else create ramdisk from supplied files. if os.access(fn, os.F_OK): ramdisk_fragment_pathname = fn else: - ramdisk_fragment_root = os.path.join( - sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment) - ramdisk_fragment_img = _MakeRamdisk( - ramdisk_fragment_root, lz4_ramdisks=use_lz4) + ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment) + ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, + ramdisk_format=ramdisk_format) ramdisk_fragment_imgs.append(ramdisk_fragment_img) ramdisk_fragment_pathname = ramdisk_fragment_img.name cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname]) @@ -3531,7 +3533,7 @@ class DynamicPartitionsDifference(object): for g in tgt_groups: for p in shlex.split(info_dict.get( - "super_%s_partition_list" % g, "").strip()): + "super_%s_partition_list" % g, "").strip()): assert p in self._partition_updates, \ "{} is in target super_{}_partition_list but no BlockDifference " \ "object is provided.".format(p, g) @@ -3539,7 +3541,7 @@ class DynamicPartitionsDifference(object): for g in src_groups: for p in shlex.split(source_info_dict.get( - "super_%s_partition_list" % g, "").strip()): + "super_%s_partition_list" % g, "").strip()): assert p in self._partition_updates, \ "{} is in source super_{}_partition_list but no BlockDifference " \ "object is provided.".format(p, g) @@ -3648,7 +3650,7 @@ class DynamicPartitionsDifference(object): if u.src_size is not None and u.tgt_size is None: append('remove_group %s' % g) if (u.src_size is not None and u.tgt_size is not None and - u.src_size > u.tgt_size): + u.src_size > u.tgt_size): comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size)) append('resize_group %s %d' % (g, u.tgt_size)) @@ -3657,7 +3659,7 @@ class DynamicPartitionsDifference(object): comment('Add group %s with maximum size %d' % (g, u.tgt_size)) append('add_group %s %d' % (g, u.tgt_size)) if (u.src_size is not None and u.tgt_size is not None and - u.src_size < u.tgt_size): + u.src_size < 
u.tgt_size): comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size)) append('resize_group %s %d' % (g, u.tgt_size)) @@ -3691,8 +3693,7 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4): """ tmp_dir = MakeTempDir('boot_', suffix='.img') try: - RunAndCheckOutput(['unpack_bootimg', '--boot_img', - boot_img, '--out', tmp_dir]) + RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir]) ramdisk = os.path.join(tmp_dir, 'ramdisk') if not os.path.isfile(ramdisk): logger.warning('Unable to get boot image timestamp: no ramdisk in boot') @@ -3714,14 +3715,13 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4): # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from # the host environment. RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'], - cwd=extracted_ramdisk) + cwd=extracted_ramdisk) for search_path in RAMDISK_BUILD_PROP_REL_PATHS: prop_file = os.path.join(extracted_ramdisk, search_path) if os.path.isfile(prop_file): return prop_file - logger.warning( - 'Unable to get boot image timestamp: no %s in ramdisk', search_path) + logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path) return None @@ -3754,8 +3754,7 @@ def GetBootImageTimestamp(boot_img): timestamp = props.GetProp('ro.bootimage.build.date.utc') if timestamp: return int(timestamp) - logger.warning( - 'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined') + logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined') return None except ExternalError as e: @@ -3802,15 +3801,18 @@ def GetCareMap(which, imgname): return [which, care_map_ranges.to_string_raw()] -def AddCareMapForAbOta(output_zip, ab_partitions, image_paths): +def AddCareMapForAbOta(output_file, ab_partitions, image_paths): """Generates and adds care_map.pb for a/b partition that has care_map. Args: - output_zip: The output zip file (needs to be already open), or None to - write care_map.pb to OPTIONS.input_tmp/. + output_file: The output zip file (needs to be already open), + or file path to write care_map.pb. ab_partitions: The list of A/B partitions. image_paths: A map from the partition name to the image path. 
""" + if not output_file: + raise ExternalError('Expected output_file for AddCareMapForAbOta') + care_map_list = [] for partition in ab_partitions: partition = partition.strip() @@ -3821,8 +3823,13 @@ def AddCareMapForAbOta(output_zip, ab_partitions, image_paths): avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition) if (verity_block_device in OPTIONS.info_dict or OPTIONS.info_dict.get(avb_hashtree_enable) == "true"): + if partition not in image_paths: + logger.warning('Potential partition with care_map missing from images: %s', + partition) + continue image_path = image_paths[partition] - assert os.path.exists(image_path) + if not os.path.exists(image_path): + raise ExternalError('Expected image at path {}'.format(image_path)) care_map = GetCareMap(partition, image_path) if not care_map: @@ -3860,10 +3867,17 @@ def AddCareMapForAbOta(output_zip, ab_partitions, image_paths): care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map] RunAndCheckOutput(care_map_gen_cmd) + if not isinstance(output_file, zipfile.ZipFile): + shutil.copy(temp_care_map, output_file) + return + # output_file is a zip file care_map_path = "META/care_map.pb" - if output_zip and care_map_path not in output_zip.namelist(): - ZipWrite(output_zip, temp_care_map, arcname=care_map_path) - else: + if care_map_path in output_file.namelist(): + # Copy the temp file into the OPTIONS.input_tmp dir and update the + # replace_updated_files_list used by add_img_to_target_files + if not OPTIONS.replace_updated_files_list: + OPTIONS.replace_updated_files_list = [] shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path)) - if output_zip: - OPTIONS.replace_updated_files_list.append(care_map_path) + OPTIONS.replace_updated_files_list.append(care_map_path) + else: + ZipWrite(output_file, temp_care_map, arcname=care_map_path) diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py index 54091941ce..cbb51e1788 100755 --- a/tools/releasetools/img_from_target_files.py +++ b/tools/releasetools/img_from_target_files.py @@ -187,6 +187,9 @@ def ImgFromTargetFiles(input_file, output_file): Raises: ValueError: On invalid input. 
""" + if not os.path.exists(input_file): + raise ValueError('%s is not exist' % input_file) + if not zipfile.is_zipfile(input_file): raise ValueError('%s is not a valid zipfile' % input_file) diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py index c2fd45073b..5e6c42d84e 100755 --- a/tools/releasetools/merge_target_files.py +++ b/tools/releasetools/merge_target_files.py @@ -96,12 +96,16 @@ import zipfile from xml.etree import ElementTree import add_img_to_target_files +import apex_utils +import build_image import build_super_image import check_target_files_vintf import common import img_from_target_files import find_shareduid_violation import ota_from_target_files +import sparse_img +import verity_utils from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP @@ -357,8 +361,9 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys, ' includes %s.', partition, partition) has_error = True - if ('dynamic_partition_list' in framework_misc_info_keys) or ( - 'super_partition_groups' in framework_misc_info_keys): + if ('dynamic_partition_list' + in framework_misc_info_keys) or ('super_partition_groups' + in framework_misc_info_keys): logger.error('Dynamic partition misc info keys should come from ' 'the vendor instance of META/misc_info.txt.') has_error = True @@ -449,8 +454,8 @@ def process_misc_info_txt(framework_target_files_temp_dir, merged_dict[key] = framework_dict[key] # Merge misc info keys used for Dynamic Partitions. - if (merged_dict.get('use_dynamic_partitions') == 'true') and ( - framework_dict.get('use_dynamic_partitions') == 'true'): + if (merged_dict.get('use_dynamic_partitions') + == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'): merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts( framework_dict=framework_dict, vendor_dict=merged_dict) merged_dict.update(merged_dynamic_partitions_dict) @@ -695,7 +700,7 @@ def compile_split_sepolicy(product_out, partition_map, output_policy): vendor_plat_version_file = get_file('vendor', 'etc/selinux/plat_sepolicy_vers.txt') if not vendor_plat_version_file or not os.path.exists( - vendor_plat_version_file): + vendor_plat_version_file): raise ExternalError('Missing required sepolicy file %s', vendor_plat_version_file) with open(vendor_plat_version_file) as f: @@ -735,6 +740,71 @@ def compile_split_sepolicy(product_out, partition_map, output_policy): return cmd +def validate_merged_apex_info(output_target_files_dir, partitions): + """Validates the APEX files in the merged target files directory. + + Checks the APEX files in all possible preinstalled APEX directories. + Depends on the <partition>/apex/* APEX files within partitions. + + Args: + output_target_files_dir: Output directory containing merged partition directories. + partitions: A list of all the partitions in the output directory. + + Raises: + RuntimeError: if apex_utils fails to parse any APEX file. + ExternalError: if the same APEX package is provided by multiple partitions. 
+ """ + apex_packages = set() + + apex_partitions = ('system', 'system_ext', 'product', 'vendor') + for partition in filter(lambda p: p in apex_partitions, partitions): + apex_info = apex_utils.GetApexInfoFromTargetFiles( + output_target_files_dir, partition, compressed_only=False) + partition_apex_packages = set([info.package_name for info in apex_info]) + duplicates = apex_packages.intersection(partition_apex_packages) + if duplicates: + raise ExternalError( + 'Duplicate APEX packages found in multiple partitions: %s' % + ' '.join(duplicates)) + apex_packages.update(partition_apex_packages) + + +def generate_care_map(partitions, output_target_files_dir): + """Generates a merged META/care_map.pb file in the output target files dir. + + Depends on the info dict from META/misc_info.txt, as well as built images + within IMAGES/. + + Args: + partitions: A list of partitions to potentially include in the care map. + output_target_files_dir: The name of a directory that will be used to create + the output target files package after all the special cases are processed. + """ + OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir) + partition_image_map = {} + for partition in partitions: + image_path = os.path.join(output_target_files_dir, 'IMAGES', + '{}.img'.format(partition)) + if os.path.exists(image_path): + partition_image_map[partition] = image_path + # Regenerated images should have their image_size property already set. + image_size_prop = '{}_image_size'.format(partition) + if image_size_prop not in OPTIONS.info_dict: + # Images copied directly from input target files packages will need + # their image sizes calculated. + partition_size = sparse_img.GetImagePartitionSize(image_path) + image_props = build_image.ImagePropFromGlobalDict( + OPTIONS.info_dict, partition) + verity_image_builder = verity_utils.CreateVerityImageBuilder( + image_props) + image_size = verity_image_builder.CalculateMaxImageSize(partition_size) + OPTIONS.info_dict[image_size_prop] = image_size + + AddCareMapForAbOta( + os.path.join(output_target_files_dir, 'META', 'care_map.pb'), + PARTITIONS_WITH_CARE_MAP, partition_image_map) + + def process_special_cases(framework_target_files_temp_dir, vendor_target_files_temp_dir, output_target_files_temp_dir, @@ -1076,6 +1146,9 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list, common.RunAndCheckOutput(split_sepolicy_cmd) # TODO(b/178864050): Run tests on the combined.policy file. + # Run validation checks on the pre-installed APEX files. + validate_merged_apex_info(output_target_files_temp_dir, partition_map.keys()) + generate_images(output_target_files_temp_dir, rebuild_recovery) generate_super_empty_image(output_target_files_temp_dir, output_super_empty) @@ -1089,14 +1162,14 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list, if not output_target_files: return + # Create the merged META/care_map.bp + generate_care_map(partition_map.keys(), output_target_files_temp_dir) + output_zip = create_target_files_archive(output_target_files, output_target_files_temp_dir, temp_dir) # Create the IMG package from the merged target files package. 
- with zipfile.ZipFile(output_zip, allowZip64=True) as zfp: - AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map) - if output_img: img_from_target_files.main([output_zip, output_img]) @@ -1168,8 +1241,7 @@ def main(): elif o == '--vendor-target-files': OPTIONS.vendor_target_files = a elif o == '--other-item-list': - logger.warning( - '--other-item-list has been renamed to --vendor-item-list') + logger.warning('--other-item-list has been renamed to --vendor-item-list') OPTIONS.vendor_item_list = a elif o == '--vendor-item-list': OPTIONS.vendor_item_list = a @@ -1225,7 +1297,7 @@ def main(): if (args or OPTIONS.framework_target_files is None or OPTIONS.vendor_target_files is None or (OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or - (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)): + (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)): common.Usage(__doc__) sys.exit(1) @@ -1251,9 +1323,9 @@ def main(): output_item_list = None if not validate_config_lists( - framework_item_list=framework_item_list, - framework_misc_info_keys=framework_misc_info_keys, - vendor_item_list=vendor_item_list): + framework_item_list=framework_item_list, + framework_misc_info_keys=framework_misc_info_keys, + vendor_item_list=vendor_item_list): sys.exit(1) call_func_with_temp_dir( diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py index 02b2b4de0d..94dbd4ef8b 100755 --- a/tools/releasetools/ota_from_target_files.py +++ b/tools/releasetools/ota_from_target_files.py @@ -1051,15 +1051,6 @@ def GenerateAbOtaPackage(target_file, output_file, source_file=None): "META/ab_partitions.txt is required for ab_update." target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts) source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts) - vendor_prop = source_info.info_dict.get("vendor.build.prop") - vabc_used = vendor_prop and \ - vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true" and \ - not OPTIONS.disable_vabc - if vabc_used: - # TODO(zhangkelvin) Remove this once FEC on VABC is supported - logger.info("Virtual AB Compression enabled, disabling FEC") - OPTIONS.disable_fec_computation = True - OPTIONS.disable_verity_computation = True else: assert "ab_partitions" in OPTIONS.info_dict, \ "META/ab_partitions.txt is required for ab_update." 
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py index 3db5559741..22c6ac46c1 100755 --- a/tools/releasetools/sign_target_files_apks.py +++ b/tools/releasetools/sign_target_files_apks.py @@ -1383,6 +1383,6 @@ if __name__ == '__main__': main(sys.argv[1:]) except common.ExternalError as e: print("\n ERROR: %s\n" % (e,)) - sys.exit(1) + raise finally: common.Cleanup() diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py index 3d5300e571..a5850d3092 100644 --- a/tools/releasetools/test_add_img_to_target_files.py +++ b/tools/releasetools/test_add_img_to_target_files.py @@ -22,9 +22,9 @@ import common import test_utils from add_img_to_target_files import ( AddPackRadioImages, - CheckAbOtaImages, GetCareMap) + CheckAbOtaImages) from rangelib import RangeSet -from common import AddCareMapForAbOta +from common import AddCareMapForAbOta, GetCareMap OPTIONS = common.OPTIONS @@ -124,9 +124,9 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): def _test_AddCareMapForAbOta(): """Helper function to set up the test for test_AddCareMapForAbOta().""" OPTIONS.info_dict = { - 'extfs_sparse_flag': '-s', - 'system_image_size': 65536, - 'vendor_image_size': 40960, + 'extfs_sparse_flag' : '-s', + 'system_image_size' : 65536, + 'vendor_image_size' : 40960, 'system_verity_block_device': '/dev/block/system', 'vendor_verity_block_device': '/dev/block/vendor', 'system.build.prop': common.PartitionBuildProps.FromDictionary( @@ -154,8 +154,8 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): (0xCAC2, 12)]) image_paths = { - 'system': system_image, - 'vendor': vendor_image, + 'system' : system_image, + 'vendor' : vendor_image, } return image_paths @@ -175,9 +175,9 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): def test_AddCareMapForAbOta(self): image_paths = self._test_AddCareMapForAbOta() - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "ro.system.build.fingerprint", "google/sailfish/12345:user/dev-keys", @@ -192,10 +192,10 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): """Partitions without care_map should be ignored.""" image_paths = self._test_AddCareMapForAbOta() + care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') AddCareMapForAbOta( - None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths) + care_map_file, ['boot', 'system', 'vendor', 'vbmeta'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "ro.system.build.fingerprint", "google/sailfish/12345:user/dev-keys", @@ -227,9 +227,9 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): ), } - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "ro.system.build.fingerprint", "google/sailfish/12345:user/dev-keys", @@ -244,20 +244,19 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): """Tests the case for partitions without fingerprint.""" image_paths = self._test_AddCareMapForAbOta() 
OPTIONS.info_dict = { - 'extfs_sparse_flag': '-s', - 'system_image_size': 65536, - 'vendor_image_size': 40960, + 'extfs_sparse_flag' : '-s', + 'system_image_size' : 65536, + 'vendor_image_size' : 40960, 'system_verity_block_device': '/dev/block/system', 'vendor_verity_block_device': '/dev/block/vendor', } - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown", - "unknown", 'vendor', RangeSet( - "0-9").to_string_raw(), "unknown", - "unknown"] + "unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown", + "unknown"] self._verifyCareMap(expected, care_map_file) @@ -283,9 +282,9 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): ), } - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "ro.system.build.thumbprint", "google/sailfish/123:user/dev-keys", @@ -302,9 +301,9 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): # Remove vendor_image_size to invalidate the care_map for vendor.img. del OPTIONS.info_dict['vendor_image_size'] - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "ro.system.build.fingerprint", "google/sailfish/12345:user/dev-keys"] @@ -319,25 +318,26 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): del OPTIONS.info_dict['system_image_size'] del OPTIONS.info_dict['vendor_image_size'] - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) + care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) - self.assertFalse( - os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb'))) + self.assertFalse(os.path.exists(care_map_file)) def test_AddCareMapForAbOta_verityNotEnabled(self): """No care_map.pb should be generated if verity not enabled.""" image_paths = self._test_AddCareMapForAbOta() OPTIONS.info_dict = {} - AddCareMapForAbOta(None, ['system', 'vendor'], image_paths) - care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + AddCareMapForAbOta(care_map_file, ['system', 'vendor'], image_paths) + self.assertFalse(os.path.exists(care_map_file)) def test_AddCareMapForAbOta_missingImageFile(self): """Missing image file should be considered fatal.""" image_paths = self._test_AddCareMapForAbOta() image_paths['vendor'] = '' - self.assertRaises(AssertionError, AddCareMapForAbOta, None, + care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb') + self.assertRaises(common.ExternalError, AddCareMapForAbOta, care_map_file, ['system', 'vendor'], image_paths) @test_utils.SkipIfExternalToolsUnavailable() @@ -397,8 +397,8 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): (0xCAC3, 4), (0xCAC1, 6)]) OPTIONS.info_dict = { - 'extfs_sparse_flag': '-s', - 'system_image_size': 53248, + 'extfs_sparse_flag' : '-s', + 'system_image_size' : 53248, } name, care_map = GetCareMap('system', sparse_image) self.assertEqual('system', name) @@ -413,14 
+413,14 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): (0xCAC3, 4), (0xCAC1, 6)]) OPTIONS.info_dict = { - 'extfs_sparse_flag': '-s', - 'system_image_size': -45056, + 'extfs_sparse_flag' : '-s', + 'system_image_size' : -45056, } self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image) def test_GetCareMap_nonSparseImage(self): OPTIONS.info_dict = { - 'system_image_size': 53248, + 'system_image_size' : 53248, } # 'foo' is the image filename, which is expected to be not used by # GetCareMap(). diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py index 072bb01353..4f61472571 100644 --- a/tools/releasetools/test_merge_target_files.py +++ b/tools/releasetools/test_merge_target_files.py @@ -15,6 +15,7 @@ # import os.path +import shutil import common import test_utils @@ -22,7 +23,7 @@ from merge_target_files import ( validate_config_lists, DEFAULT_FRAMEWORK_ITEM_LIST, DEFAULT_VENDOR_ITEM_LIST, DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items, item_list_to_partition_set, process_apex_keys_apk_certs_common, - compile_split_sepolicy) + compile_split_sepolicy, validate_merged_apex_info) class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase): @@ -274,3 +275,36 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase): '{OTP}/vendor/etc/selinux/plat_pub_versioned.cil ' '{OTP}/product/etc/selinux/mapping/30.0.cil').format( OTP=product_out_dir)) + + def _copy_apex(self, source, output_dir, partition): + shutil.copy( + source, + os.path.join(output_dir, partition, 'apex', os.path.basename(source))) + + @test_utils.SkipIfExternalToolsUnavailable() + def test_validate_merged_apex_info(self): + output_dir = common.MakeTempDir() + os.makedirs(os.path.join(output_dir, 'SYSTEM/apex')) + os.makedirs(os.path.join(output_dir, 'VENDOR/apex')) + + self._copy_apex( + os.path.join(self.testdata_dir, 'has_apk.apex'), output_dir, 'SYSTEM') + self._copy_apex( + os.path.join(test_utils.get_current_dir(), + 'com.android.apex.compressed.v1.capex'), output_dir, + 'VENDOR') + validate_merged_apex_info(output_dir, ('system', 'vendor')) + + @test_utils.SkipIfExternalToolsUnavailable() + def test_validate_merged_apex_info_RaisesOnPackageInMultiplePartitions(self): + output_dir = common.MakeTempDir() + os.makedirs(os.path.join(output_dir, 'SYSTEM/apex')) + os.makedirs(os.path.join(output_dir, 'VENDOR/apex')) + + same_apex_package = os.path.join(self.testdata_dir, 'has_apk.apex') + self._copy_apex(same_apex_package, output_dir, 'SYSTEM') + self._copy_apex(same_apex_package, output_dir, 'VENDOR') + self.assertRaisesRegexp( + common.ExternalError, + 'Duplicate APEX packages found in multiple partitions: com.android.wifi', + validate_merged_apex_info, output_dir, ('system', 'vendor')) diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py index 9f6484924f..661712a80c 100644 --- a/tools/releasetools/test_ota_from_target_files.py +++ b/tools/releasetools/test_ota_from_target_files.py @@ -33,7 +33,7 @@ from ota_from_target_files import ( GetTargetFilesZipWithoutPostinstallConfig, Payload, PayloadSigner, POSTINSTALL_CONFIG, StreamingPropertyFiles, AB_PARTITIONS) -from apex_utils import GetSystemApexInfoFromTargetFiles +from apex_utils import GetApexInfoFromTargetFiles from test_utils import PropertyFilesTestCase @@ -281,9 +281,9 @@ class OtaFromTargetFilesTest(test_utils.ReleaseToolsTestCase): metadata) @test_utils.SkipIfExternalToolsUnavailable() - def 
test_GetSystemApexInfoFromTargetFiles(self): + def test_GetApexInfoFromTargetFiles(self): target_files = construct_target_files(compressedApex=True) - apex_infos = GetSystemApexInfoFromTargetFiles(target_files) + apex_infos = GetApexInfoFromTargetFiles(target_files, 'system') self.assertEqual(len(apex_infos), 1) self.assertEqual(apex_infos[0].package_name, "com.android.apex.compressed") self.assertEqual(apex_infos[0].version, 1) diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py index 12d52e566d..236f9edfdf 100644 --- a/tools/test_post_process_props.py +++ b/tools/test_post_process_props.py @@ -53,7 +53,7 @@ class PropTestCase(unittest.TestCase): p.make_as_comment() self.assertTrue(p.is_comment()) - self.assertTrue("# a comment\n#a=b", str(p)) + self.assertEqual("# a comment\n#a=b", str(p)) class PropListTestcase(unittest.TestCase): def setUp(self): @@ -251,5 +251,27 @@ class PropListTestcase(unittest.TestCase): # because it's explicitly allowed self.assertTrue(override_optional_props(props, allow_dup=True)) + def test_validateGrfProps(self): + stderr_redirect = io.StringIO() + with contextlib.redirect_stderr(stderr_redirect): + props = PropList("hello") + props.put("ro.board.first_api_level","25") + + # ro.board.first_api_level must be less than or equal to the sdk version + self.assertFalse(validate_grf_props(props, 20)) + self.assertTrue(validate_grf_props(props, 26)) + self.assertTrue(validate_grf_props(props, 35)) + + # manually set ro.board.api_level to an invalid value + props.put("ro.board.api_level","20") + self.assertFalse(validate_grf_props(props, 26)) + + props.get_all_props()[-1].make_as_comment() + # manually set ro.board.api_level to a valid value + props.put("ro.board.api_level","26") + self.assertTrue(validate_grf_props(props, 26)) + # ro.board.api_level must be less than or equal to the sdk version + self.assertFalse(validate_grf_props(props, 25)) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/tools/zipalign/ZipAlignMain.cpp b/tools/zipalign/ZipAlignMain.cpp index 49be916b38..47ebd126af 100644 --- a/tools/zipalign/ZipAlignMain.cpp +++ b/tools/zipalign/ZipAlignMain.cpp @@ -39,7 +39,7 @@ void usage(void) " <align>: alignment in bytes, e.g. '4' provides 32-bit alignment\n"); fprintf(stderr, " -c: check alignment only (does not modify file)\n"); fprintf(stderr, " -f: overwrite existing outfile.zip\n"); - fprintf(stderr, " -p: memory page alignment for stored shared object files\n"); + fprintf(stderr, " -p: page-align uncompressed .so files\n"); fprintf(stderr, " -v: verbose output\n"); fprintf(stderr, " -z: recompress using Zopfli\n"); } |