95 files changed, 3244 insertions, 1239 deletions
diff --git a/ci/build_metadata b/ci/build_metadata index a8eb65dd36..cd011c8679 100755 --- a/ci/build_metadata +++ b/ci/build_metadata @@ -20,6 +20,9 @@ export TARGET_PRODUCT=aosp_arm64 export TARGET_RELEASE=trunk_staging export TARGET_BUILD_VARIANT=eng -build/soong/bin/m dist \ +TARGETS=( all_teams + release_config_metadata +) +build/soong/bin/m dist ${TARGETS[@]} diff --git a/ci/build_test_suites.py b/ci/build_test_suites.py index 933e43e387..b8c4a385e0 100644 --- a/ci/build_test_suites.py +++ b/ci/build_test_suites.py @@ -22,6 +22,7 @@ import os import pathlib import subprocess import sys +from typing import Callable from build_context import BuildContext import optimized_targets @@ -68,7 +69,7 @@ class BuildPlanner: return BuildPlan(set(self.args.extra_targets), set()) build_targets = set() - packaging_commands = [] + packaging_commands_getters = [] for target in self.args.extra_targets: if self._unused_target_exclusion_enabled( target @@ -84,9 +85,11 @@ class BuildPlanner: target, self.build_context, self.args ) build_targets.update(target_optimizer.get_build_targets()) - packaging_commands.extend(target_optimizer.get_package_outputs_commands()) + packaging_commands_getters.append( + target_optimizer.get_package_outputs_commands + ) - return BuildPlan(build_targets, packaging_commands) + return BuildPlan(build_targets, packaging_commands_getters) def _unused_target_exclusion_enabled(self, target: str) -> bool: return ( @@ -98,7 +101,7 @@ class BuildPlanner: @dataclass(frozen=True) class BuildPlan: build_targets: set[str] - packaging_commands: list[list[str]] + packaging_commands_getters: list[Callable[[], list[list[str]]]] def build_test_suites(argv: list[str]) -> int: @@ -180,9 +183,10 @@ def execute_build_plan(build_plan: BuildPlan): except subprocess.CalledProcessError as e: raise BuildFailureError(e.returncode) from e - for packaging_command in build_plan.packaging_commands: + for packaging_commands_getter in build_plan.packaging_commands_getters: try: - run_command(packaging_command) + for packaging_command in packaging_commands_getter(): + run_command(packaging_command) except subprocess.CalledProcessError as e: raise BuildFailureError(e.returncode) from e diff --git a/ci/build_test_suites_test.py b/ci/build_test_suites_test.py index fd06a3ae1e..2afaab7711 100644 --- a/ci/build_test_suites_test.py +++ b/ci/build_test_suites_test.py @@ -276,7 +276,8 @@ class BuildPlannerTest(unittest.TestCase): build_plan = build_planner.create_build_plan() - self.assertEqual(len(build_plan.packaging_commands), 0) + for packaging_command in self.run_packaging_commands(build_plan): + self.assertEqual(len(packaging_command), 0) def test_build_optimization_on_optimizes_target(self): build_targets = {'target_1', 'target_2'} @@ -306,7 +307,7 @@ class BuildPlannerTest(unittest.TestCase): build_plan = build_planner.create_build_plan() - self.assertIn([f'packaging {optimized_target_name}'], build_plan.packaging_commands) + self.assertIn(packaging_commands, self.run_packaging_commands(build_plan)) def test_individual_build_optimization_off_doesnt_optimize(self): build_targets = {'target_1', 'target_2'} @@ -328,7 +329,8 @@ class BuildPlannerTest(unittest.TestCase): build_plan = build_planner.create_build_plan() - self.assertFalse(build_plan.packaging_commands) + for packaging_command in self.run_packaging_commands(build_plan): + self.assertEqual(len(packaging_command), 0) def test_target_output_used_target_built(self): build_target = 'test_target' @@ -485,6 +487,12 @@ class 
BuildPlannerTest(unittest.TestCase): ], } + def run_packaging_commands(self, build_plan: build_test_suites.BuildPlan): + return [ + packaging_command_getter() + for packaging_command_getter in build_plan.packaging_commands_getters + ] + def wait_until( condition_function: Callable[[], bool], diff --git a/ci/optimized_targets.py b/ci/optimized_targets.py index 4bee401569..688bdd8370 100644 --- a/ci/optimized_targets.py +++ b/ci/optimized_targets.py @@ -121,13 +121,13 @@ class OptimizedBuildTarget(ABC): process_result = subprocess.run( args=[ f'{src_top / self._SOONG_UI_BASH_PATH}', - '--dumpvar-mode', - '--abs', - soong_vars, + '--dumpvars-mode', + f'--abs-vars={" ".join(soong_vars)}', ], env=os.environ, check=False, capture_output=True, + text=True, ) if not process_result.returncode == 0: logging.error('soong dumpvars command failed! stderr:') @@ -142,7 +142,7 @@ class OptimizedBuildTarget(ABC): try: return { line.split('=')[0]: line.split('=')[1].strip("'") - for line in process_result.stdout.split('\n') + for line in process_result.stdout.strip().split('\n') } except IndexError as e: raise RuntimeError( @@ -214,10 +214,13 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): normally built. """ - # List of modules that are always required to be in general-tests.zip. - _REQUIRED_MODULES = frozenset( - ['cts-tradefed', 'vts-tradefed', 'compatibility-host-util'] - ) + # List of modules that are built alongside general-tests as dependencies. + _REQUIRED_MODULES = frozenset([ + 'cts-tradefed', + 'vts-tradefed', + 'compatibility-host-util', + 'general-tests-shared-libs', + ]) def get_build_targets_impl(self) -> set[str]: change_info_file_path = os.environ.get('CHANGE_INFO') @@ -286,6 +289,10 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): host_config_files = [] target_config_files = [] for module in self.modules_to_build: + # The required modules are handled separately, no need to package. + if module in self._REQUIRED_MODULES: + continue + host_path = host_out_testcases / module if os.path.exists(host_path): host_paths.append(host_path) @@ -303,6 +310,7 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): zip_commands.extend( self._get_zip_test_configs_zips_commands( + src_top, dist_dir, host_out, product_out, @@ -311,27 +319,27 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): ) ) - zip_command = self._base_zip_command( - host_out, dist_dir, 'general-tests.zip' - ) + zip_command = self._base_zip_command(src_top, dist_dir, 'general-tests.zip') # Add host testcases. - zip_command.extend( - self._generate_zip_options_for_items( - prefix='host', - relative_root=f'{src_top / soong_host_out}', - directories=host_paths, - ) - ) + if host_paths: + zip_command.extend( + self._generate_zip_options_for_items( + prefix='host', + relative_root=f'{src_top / soong_host_out}', + directories=host_paths, + ) + ) # Add target testcases. - zip_command.extend( - self._generate_zip_options_for_items( - prefix='target', - relative_root=f'{src_top / product_out}', - directories=target_paths, - ) - ) + if target_paths: + zip_command.extend( + self._generate_zip_options_for_items( + prefix='target', + relative_root=f'{src_top / product_out}', + directories=target_paths, + ) + ) # TODO(lucafarsi): Push this logic into a general-tests-minimal build command # Add necessary tools. These are also hardcoded in general-tests.mk. 
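The build_test_suites.py and optimized_targets.py changes above defer packaging: BuildPlan now carries zero-argument getters instead of precomputed command lists, and execute_build_plan only invokes them after the m step succeeds, so the soong_zip command lines can be assembled from post-build state (dumpvars output, which testcases actually exist on disk). A condensed sketch of that pattern, with a placeholder getter standing in for the real optimizer classes:

# Condensed sketch of the deferred-packaging pattern; the target name and
# zip command below are placeholders, not the real optimizer output.
import subprocess
from dataclasses import dataclass
from typing import Callable

@dataclass(frozen=True)
class BuildPlan:
  build_targets: set[str]
  # Called only after the build succeeds, so each getter can inspect build
  # outputs when composing its packaging commands.
  packaging_commands_getters: list[Callable[[], list[list[str]]]]

def execute_build_plan(plan: BuildPlan) -> None:
  # Error handling is simplified; the real script wraps failures in
  # BuildFailureError.
  subprocess.run(['build/soong/bin/m'] + sorted(plan.build_targets), check=True)
  for getter in plan.packaging_commands_getters:
    for command in getter():  # packaging commands are computed lazily here
      subprocess.run(command, check=True)

def make_zip_getter(dist_dir: str) -> Callable[[], list[list[str]]]:
  # The closure runs post-build, when the dist_dir contents are known.
  def getter() -> list[list[str]]:
    return [['soong_zip', '-d', '-o', f'{dist_dir}/general-tests.zip']]
  return getter

plan = BuildPlan({'general-tests'}, [make_zip_getter('out/dist')])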
@@ -365,6 +373,7 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): def _get_zip_test_configs_zips_commands( self, + src_top: pathlib.Path, dist_dir: pathlib.Path, host_out: pathlib.Path, product_out: pathlib.Path, @@ -428,7 +437,7 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): zip_commands = [] tests_config_zip_command = self._base_zip_command( - host_out, dist_dir, 'general-tests_configs.zip' + src_top, dist_dir, 'general-tests_configs.zip' ) tests_config_zip_command.extend( self._generate_zip_options_for_items( @@ -442,16 +451,14 @@ class GeneralTestsOptimizer(OptimizedBuildTarget): self._generate_zip_options_for_items( prefix='target', relative_root=str(product_out), - list_files=[ - f"{product_out / 'target_general-tests_list'}" - ], + list_files=[f"{product_out / 'target_general-tests_list'}"], ), ) zip_commands.append(tests_config_zip_command) tests_list_zip_command = self._base_zip_command( - host_out, dist_dir, 'general-tests_list.zip' + src_top, dist_dir, 'general-tests_list.zip' ) tests_list_zip_command.extend( self._generate_zip_options_for_items( diff --git a/ci/optimized_targets_test.py b/ci/optimized_targets_test.py index 762b62e664..0b0c0ec087 100644 --- a/ci/optimized_targets_test.py +++ b/ci/optimized_targets_test.py @@ -220,18 +220,6 @@ class GeneralTestsOptimizerTest(fake_filesystem_unittest.TestCase): ): package_commands = optimizer.get_package_outputs_commands() - @mock.patch('subprocess.run') - def test_no_build_outputs_packaging_fails(self, subprocess_run): - subprocess_run.return_value = self._get_soong_vars_output() - optimizer = self._create_general_tests_optimizer() - - targets = optimizer.get_build_targets() - - with self.assertRaisesRegex( - RuntimeError, 'No items specified to be added to zip' - ): - package_commands = optimizer.get_package_outputs_commands() - def _create_general_tests_optimizer(self, build_context: BuildContext = None): if not build_context: build_context = self._create_build_context() @@ -321,7 +309,7 @@ class GeneralTestsOptimizerTest(fake_filesystem_unittest.TestCase): """ for command in commands: self.assertEqual( - '/tmp/top/host_out/prebuilts/build-tools/linux-x86/bin/soong_zip', + '/tmp/top/prebuilts/build-tools/linux-x86/bin/soong_zip', command[0], ) self.assertEqual('-d', command[1]) diff --git a/cogsetup.sh b/cogsetup.sh deleted file mode 100644 index 5c64a068e0..0000000000 --- a/cogsetup.sh +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (C) 2023 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This file is executed by build/envsetup.sh, and can use anything -# defined in envsetup.sh. -function _create_out_symlink_for_cog() { - if [[ "${OUT_DIR}" == "" ]]; then - OUT_DIR="out" - fi - - # getoutdir ensures paths are absolute. 
envsetup could be called from a - # directory other than the root of the source tree - local outdir=$(getoutdir) - if [[ -L "${outdir}" ]]; then - return - fi - if [ -d "${outdir}" ]; then - echo -e "\tOutput directory ${outdir} cannot be present in a Cog workspace." - echo -e "\tDelete \"${outdir}\" or create a symlink from \"${outdir}\" to a directory outside your workspace." - return 1 - fi - - DEFAULT_OUTPUT_DIR="${HOME}/.cog/android-build-out" - mkdir -p ${DEFAULT_OUTPUT_DIR} - ln -s ${DEFAULT_OUTPUT_DIR} ${outdir} -} - -# This function sets up the build environment to be appropriate for Cog. -function _setup_cog_env() { - _create_out_symlink_for_cog - if [ "$?" -eq "1" ]; then - echo -e "\e[0;33mWARNING:\e[00m Cog environment setup failed!" - return 1 - fi - - export ANDROID_BUILD_ENVIRONMENT_CONFIG="googler-cog" - - # Running repo command within Cog workspaces is not supported, so override - # it with this function. If the user is running repo within a Cog workspace, - # we'll fail with an error, otherwise, we run the original repo command with - # the given args. - if ! ORIG_REPO_PATH=`which repo`; then - return 0 - fi - function repo { - if [[ "${PWD}" == /google/cog/* ]]; then - echo -e "\e[01;31mERROR:\e[0mrepo command is disallowed within Cog workspaces." - return 1 - fi - ${ORIG_REPO_PATH} "$@" - } -} - -if [[ "${PWD}" != /google/cog/* ]]; then - echo -e "\e[01;31mERROR:\e[0m This script must be run from a Cog workspace." -fi - -_setup_cog_env diff --git a/core/Makefile b/core/Makefile index b0392cdc8b..90668a1dee 100644 --- a/core/Makefile +++ b/core/Makefile @@ -1964,7 +1964,7 @@ target_system_dlkm_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_SYST installed_system_dlkm_notice_xml_gz := $(TARGET_OUT_SYSTEM_DLKM)/etc/NOTICE.xml.gz ALL_INSTALLED_NOTICE_FILES := \ - $(installed_notice_html_or_xml_gz) \ + $(if $(USE_SOONG_DEFINED_SYSTEM_IMAGE),,$(installed_notice_html_or_xml_gz)) \ $(installed_vendor_notice_xml_gz) \ $(installed_product_notice_xml_gz) \ $(installed_system_ext_notice_xml_gz) \ @@ -2051,7 +2051,9 @@ endif endif # PRODUCT_NOTICE_SPLIT +ifneq ($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true) ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz) +endif need_vendor_notice:=false ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true) @@ -3511,6 +3513,8 @@ $(SYSTEM_LINKER_CONFIG): $(INTERNAL_SYSTEMIMAGE_FILES) $(SYSTEM_LINKER_CONFIG_SO --output $@ --value "$(STUB_LIBRARIES)" --system "$(TARGET_OUT)" $(HOST_OUT_EXECUTABLES)/conv_linker_config append --source $@ --output $@ --key requireLibs \ --value "$(foreach lib,$(LLNDK_MOVED_TO_APEX_LIBRARIES), $(lib).so)" + $(HOST_OUT_EXECUTABLES)/conv_linker_config append --source $@ --output $@ --key provideLibs \ + --value "$(foreach lib,$(PRODUCT_EXTRA_STUB_LIBRARIES), $(lib).so)" $(call declare-1p-target,$(SYSTEM_LINKER_CONFIG),) $(call declare-license-deps,$(SYSTEM_LINKER_CONFIG),$(INTERNAL_SYSTEMIMAGE_FILES) $(SYSTEM_LINKER_CONFIG_SOURCE)) @@ -3563,14 +3567,24 @@ ifneq ($(PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE),) file_list_diff := $(HOST_OUT_EXECUTABLES)/file_list_diff$(HOST_EXECUTABLE_SUFFIX) system_file_diff_timestamp := $(systemimage_intermediates)/file_diff.timestamp +# The build configuration to build the REL version may have more files to allow. +# Use allowlist_next in addition to the allowlist in this case. 
+system_file_diff_allowlist_next := +ifeq (REL,$(PLATFORM_VERSION_CODENAME)) +system_file_diff_allowlist_next := $(ALL_MODULES.system_image_diff_allowlist_next.INSTALLED) +$(system_file_diff_timestamp): PRIVATE_ALLOWLIST_NEXT := $(system_file_diff_allowlist_next) +endif $(system_file_diff_timestamp): \ $(systemimage_intermediates)/file_list.txt \ $(ALL_MODULES.$(PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE).FILESYSTEM_FILELIST) \ $(ALL_MODULES.system_image_diff_allowlist.INSTALLED) \ + $(system_file_diff_allowlist_next) \ $(file_list_diff) $(file_list_diff) $(systemimage_intermediates)/file_list.txt \ $(ALL_MODULES.$(PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE).FILESYSTEM_FILELIST) \ - $(ALL_MODULES.system_image_diff_allowlist.INSTALLED) $(PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE) + $(PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE) \ + --allowlists $(ALL_MODULES.system_image_diff_allowlist.INSTALLED) \ + $(PRIVATE_ALLOWLIST_NEXT) touch $@ $(BUILT_SYSTEMIMAGE): $(system_file_diff_timestamp) @@ -7946,9 +7960,14 @@ endif # PACK_DESKTOP_FILESYSTEM_IMAGES # Desktop pack recovery image hook. ifneq (,$(strip $(PACK_DESKTOP_RECOVERY_IMAGE))) PACK_RECOVERY_IMAGE_TARGET := $(PRODUCT_OUT)/android-desktop_recovery_image.bin +PACK_RECOVERY_IMAGE_ARGS := --noarchive --recovery + +ifneq (,$(strip $(PACK_RECOVERY_IMAGE_EXPERIMENTAL))) +PACK_RECOVERY_IMAGE_ARGS += --experimental +endif # PACK_RECOVERY_IMAGE_EXPERIMENTAL $(PACK_RECOVERY_IMAGE_TARGET): $(IMAGES) $(PACK_IMAGE_SCRIPT) - $(PACK_IMAGE_SCRIPT) --out_dir $(PRODUCT_OUT) --noarchive --recovery + $(PACK_IMAGE_SCRIPT) --out_dir $(PRODUCT_OUT) $(PACK_RECOVERY_IMAGE_ARGS) PACKED_RECOVERY_IMAGE_ARCHIVE_TARGET := $(PACK_RECOVERY_IMAGE_TARGET).gz @@ -7982,6 +8001,28 @@ pack-update-image: $(PACK_UPDATE_IMAGE_TARGET) endif # PACK_DESKTOP_UPDATE_IMAGE +PACK_MIGRATION_IMAGE_SCRIPT := $(HOST_OUT_EXECUTABLES)/pack_migration_image + +# ----------------------------------------------------------------- +# Desktop pack migration image hook. 
+ifeq ($(ANDROID_DESKTOP_MIGRATION_IMAGE),true) +PACK_MIGRATION_IMAGE_TARGET := $(PRODUCT_OUT)/android-desktop_migration_image.bin + +$(PACK_MIGRATION_IMAGE_TARGET): $(IMAGES) $(PACK_MIGRATION_IMAGE_SCRIPT) + $(PACK_MIGRATION_IMAGE_SCRIPT) --out_dir $(PRODUCT_OUT) --noarchive + +PACKED_MIGRATION_IMAGE_ARCHIVE_TARGET := $(PACK_MIGRATION_IMAGE_TARGET).gz + +$(PACKED_MIGRATION_IMAGE_ARCHIVE_TARGET): $(PACK_MIGRATION_IMAGE_TARGET) | $(GZIP) + $(GZIP) -fk $(PACK_MIGRATION_IMAGE_TARGET) + +$(call dist-for-goals,dist_files,$(PACKED_MIGRATION_IMAGE_ARCHIVE_TARGET)) + +.PHONY: pack-migration-image +pack-migration-image: $(PACK_MIGRATION_IMAGE_TARGET) + +endif # ANDROID_DESKTOP_MIGRATION_IMAGE + # ----------------------------------------------------------------- # OS Licensing diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk index 5c122bc37b..5944e4a856 100644 --- a/core/android_soong_config_vars.mk +++ b/core/android_soong_config_vars.mk @@ -195,3 +195,23 @@ endif # Add target_use_pan_display flag for hardware/libhardware:gralloc.default $(call soong_config_set_bool,gralloc,target_use_pan_display,$(if $(filter true,$(TARGET_USE_PAN_DISPLAY)),true,false)) + +# Add use_camera_v4l2_hal flag for hardware/libhardware/modules/camera/3_4:camera.v4l2 +$(call soong_config_set_bool,camera,use_camera_v4l2_hal,$(if $(filter true,$(USE_CAMERA_V4L2_HAL)),true,false)) + +# Add audioserver_multilib flag for hardware/interfaces/soundtrigger/2.0/default:android.hardware.soundtrigger@2.0-impl +ifneq ($(strip $(AUDIOSERVER_MULTILIB)),) + $(call soong_config_set,soundtrigger,audioserver_multilib,$(AUDIOSERVER_MULTILIB)) +endif + +# Add sim_count, disable_rild_oem_hook, and use_aosp_rild flag for ril related modules +$(call soong_config_set,ril,sim_count,$(SIM_COUNT)) +ifneq ($(DISABLE_RILD_OEM_HOOK), false) + $(call soong_config_set_bool,ril,disable_rild_oem_hook,true) +endif +ifneq ($(ENABLE_VENDOR_RIL_SERVICE), true) + $(call soong_config_set_bool,ril,use_aosp_rild,true) +endif + +# Export target_board_platform to soong for hardware/google/graphics/common/libmemtrack:memtrack.$(TARGET_BOARD_PLATFORM) +$(call soong_config_set,ANDROID,target_board_platform,$(TARGET_BOARD_PLATFORM)) diff --git a/core/binary.mk b/core/binary.mk index 1e98bc08fb..34811449e9 100644 --- a/core/binary.mk +++ b/core/binary.mk @@ -330,18 +330,20 @@ ifneq ($(call module-in-vendor-or-product),) ifneq ($(LOCAL_IN_VENDOR),) # Vendor modules have LOCAL_IN_VENDOR my_cflags += -D__ANDROID_VENDOR__ - - ifeq ($(BOARD_API_LEVEL),) - # TODO(b/314036847): This is a fallback for UDC targets. - # This must be a build failure when UDC is no longer built from this source tree. - my_cflags += -D__ANDROID_VENDOR_API__=$(PLATFORM_SDK_VERSION) - else - my_cflags += -D__ANDROID_VENDOR_API__=$(BOARD_API_LEVEL) - endif else ifneq ($(LOCAL_IN_PRODUCT),) # Product modules have LOCAL_IN_PRODUCT my_cflags += -D__ANDROID_PRODUCT__ endif + + # Define __ANDROID_VENDOR_API__ for both product and vendor variants because + # they both use the same LLNDK libraries. + ifeq ($(BOARD_API_LEVEL),) + # TODO(b/314036847): This is a fallback for UDC targets. + # This must be a build failure when UDC is no longer built from this source tree. 
+ my_cflags += -D__ANDROID_VENDOR_API__=$(PLATFORM_SDK_VERSION) + else + my_cflags += -D__ANDROID_VENDOR_API__=$(BOARD_API_LEVEL) + endif endif ifndef LOCAL_IS_HOST_MODULE diff --git a/core/combo/arch/arm64/armv9-2a.mk b/core/combo/arch/arm64/armv9-2a.mk new file mode 100644 index 0000000000..69ffde014b --- /dev/null +++ b/core/combo/arch/arm64/armv9-2a.mk @@ -0,0 +1,18 @@ +# +# Copyright (C) 2023 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# .mk file required to support build for the ARMv9.2-A arch variant. +# The file just needs to be present, it does not need to contain anything. diff --git a/core/combo/arch/x86/alderlake.mk b/core/combo/arch/x86/alderlake.mk new file mode 100644 index 0000000000..a7ae6ed679 --- /dev/null +++ b/core/combo/arch/x86/alderlake.mk @@ -0,0 +1,6 @@ +# Configuration for Linux on x86. +# Generating binaries for processors +# that have AVX2 feature flag +# + +ARCH_X86_HAVE_SSE4_1 := true diff --git a/core/combo/arch/x86_64/alderlake.mk b/core/combo/arch/x86_64/alderlake.mk new file mode 100644 index 0000000000..a7ae6ed679 --- /dev/null +++ b/core/combo/arch/x86_64/alderlake.mk @@ -0,0 +1,6 @@ +# Configuration for Linux on x86. +# Generating binaries for processors +# that have AVX2 feature flag +# + +ARCH_X86_HAVE_SSE4_1 := true diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk index 906d7f0163..88e0cc7452 100644 --- a/core/dex_preopt.mk +++ b/core/dex_preopt.mk @@ -13,28 +13,6 @@ else install-on-system-other = $(filter-out $(PRODUCT_DEXPREOPT_SPEED_APPS) $(PRODUCT_SYSTEM_SERVER_APPS),$(basename $(notdir $(filter $(foreach f,$(SYSTEM_OTHER_ODEX_FILTER),$(TARGET_OUT)/$(f)),$(1))))) endif -# Install boot images for testing on host. We exclude framework image as it is not part of art manifest. -my_boot_image_arch := HOST_ARCH -my_boot_image_out := $(HOST_OUT) -my_boot_image_syms := $(HOST_OUT)/symbols -HOST_BOOT_IMAGE_MODULE := \ - $(foreach my_boot_image_name,art_host,$(strip \ - $(eval include $(BUILD_SYSTEM)/dex_preopt_libart.mk) \ - $(my_boot_image_module))) -HOST_BOOT_IMAGE := $(call module-installed-files,$(HOST_BOOT_IMAGE_MODULE)) -ifdef HOST_2ND_ARCH - my_boot_image_arch := HOST_2ND_ARCH - 2ND_HOST_BOOT_IMAGE_MODULE := \ - $(foreach my_boot_image_name,art_host,$(strip \ - $(eval include $(BUILD_SYSTEM)/dex_preopt_libart.mk) \ - $(my_boot_image_module))) - 2ND_HOST_BOOT_IMAGE := $(call module-installed-files,$(2ND_HOST_BOOT_IMAGE_MODULE)) -endif -my_boot_image_arch := -my_boot_image_out := -my_boot_image_syms := -my_boot_image_module := - # Build the boot.zip which contains the boot jars and their compilation output # We can do this only if preopt is enabled and if the product uses libart config (which sets the # default properties for preopting). 
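The file_list_diff hook in core/Makefile above now passes its allowlists behind an explicit --allowlists flag and, when PLATFORM_VERSION_CODENAME is REL, also feeds in the extra allowlist_next modules. Conceptually the step compares the file list of the Make-built system image against the filelist of the Soong-defined system image and tolerates only allowlisted differences. A toy sketch of that kind of check, not the actual file_list_diff tool; the one-path-per-line input format is an assumption:

# Toy file-list comparison with allowlists (illustrative only; not the real
# file_list_diff tool). Assumes each input file lists one path per line.
import sys

def read_list(path: str) -> set[str]:
  with open(path) as f:
    return {line.strip() for line in f if line.strip()}

def diff_file_lists(make_list: str, soong_list: str,
                    allowlists: list[str]) -> int:
  allowed = set()
  for allowlist in allowlists:
    allowed |= read_list(allowlist)
  # Entries present in only one of the two lists, minus allowlisted ones.
  unexpected = (read_list(make_list) ^ read_list(soong_list)) - allowed
  for path in sorted(unexpected):
    print(f'unexpected difference: {path}', file=sys.stderr)
  return 1 if unexpected else 0

if __name__ == '__main__':
  sys.exit(diff_file_lists(sys.argv[1], sys.argv[2], sys.argv[3:]))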
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk deleted file mode 100644 index a2c9942a41..0000000000 --- a/core/dex_preopt_libart.mk +++ /dev/null @@ -1,109 +0,0 @@ -#################################### -# ART boot image installation -# Input variables: -# my_boot_image_name: the boot image to install -# my_boot_image_arch: the architecture to install (e.g. TARGET_ARCH, not expanded) -# my_boot_image_out: the install directory (e.g. $(PRODUCT_OUT)) -# my_boot_image_syms: the symbols director (e.g. $(TARGET_OUT_UNSTRIPPED)) -# -# Output variables: -# my_boot_image_module: the created module name. Empty if no module is created. -# -# Install the boot images compiled by Soong. -# Create a module named dexpreopt_bootjar.$(my_boot_image_name)_$($(my_boot_image_arch)) -# that installs all of boot image files. -# If there is no file to install for $(my_boot_image_name), for example when -# building an unbundled build, then no module is created. -# -#################################### - -# Takes a list of src:dest install pairs and returns a new list with a path -# prefixed to each dest value. -# $(1): list of src:dest install pairs -# $(2): path to prefix to each dest value -define prefix-copy-many-files-dest -$(foreach v,$(1),$(call word-colon,1,$(v)):$(2)$(call word-colon,2,$(v))) -endef - -# Converts an architecture-specific vdex path into a location that can be shared -# between architectures. -define vdex-shared-install-path -$(dir $(patsubst %/,%,$(dir $(1))))$(notdir $(1)) -endef - -# Takes a list of src:dest install pairs of vdex files and returns a new list -# where each dest has been rewritten to the shared location for vdex files. -define vdex-copy-many-files-shared-dest -$(foreach v,$(1),$(call word-colon,1,$(v)):$(call vdex-shared-install-path,$(call word-colon,2,$(v)))) -endef - -# Creates a rule to symlink an architecture specific vdex file to the shared -# location for that vdex file. -define symlink-vdex-file -$(strip \ - $(call symlink-file,\ - $(call vdex-shared-install-path,$(1)),\ - ../$(notdir $(1)),\ - $(1))\ - $(1)) -endef - -# Takes a list of src:dest install pairs of vdex files and creates rules to -# symlink each dest to the shared location for that vdex file. -define symlink-vdex-files -$(foreach v,$(1),$(call symlink-vdex-file,$(call word-colon,2,$(v)))) -endef - -my_boot_image_module := - -my_suffix := $(my_boot_image_name)_$($(my_boot_image_arch)) -my_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_out)) -my_vdex_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_VDEX_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_out)) -my_vdex_copy_shared_pairs := $(call vdex-copy-many-files-shared-dest,$(my_vdex_copy_pairs)) -ifeq (,$(filter %_2ND_ARCH,$(my_boot_image_arch))) - # Only install the vdex to the shared location for the primary architecture. - my_copy_pairs += $(my_vdex_copy_shared_pairs) -endif - -my_unstripped_copy_pairs := $(call prefix-copy-many-files-dest,$(DEXPREOPT_IMAGE_UNSTRIPPED_BUILT_INSTALLED_$(my_suffix)),$(my_boot_image_syms)) - -# Generate the boot image module only if there is any file to install. 
-ifneq (,$(strip $(my_copy_pairs))) - my_first_pair := $(firstword $(my_copy_pairs)) - my_rest_pairs := $(wordlist 2,$(words $(my_copy_pairs)),$(my_copy_pairs)) - - my_first_src := $(call word-colon,1,$(my_first_pair)) - my_first_dest := $(call word-colon,2,$(my_first_pair)) - - my_installed := $(call copy-many-files,$(my_copy_pairs)) - my_unstripped_installed := $(call copy-many-files,$(my_unstripped_copy_pairs)) - - my_symlinks := $(call symlink-vdex-files,$(my_vdex_copy_pairs)) - - # We don't have a LOCAL_PATH for the auto-generated modules, so let it be the $(BUILD_SYSTEM). - LOCAL_PATH := $(BUILD_SYSTEM) - # Hack to let these pseudo-modules wrapped around Soong modules use LOCAL_SOONG_INSTALLED_MODULE. - LOCAL_MODULE_MAKEFILE := $(SOONG_ANDROID_MK) - - include $(CLEAR_VARS) - LOCAL_MODULE := dexpreopt_bootjar.$(my_suffix) - LOCAL_PREBUILT_MODULE_FILE := $(my_first_src) - LOCAL_MODULE_PATH := $(dir $(my_first_dest)) - LOCAL_MODULE_STEM := $(notdir $(my_first_dest)) - LOCAL_SOONG_INSTALL_PAIRS := $(my_copy_pairs) - LOCAL_SOONG_INSTALL_SYMLINKS := $(my_symlinks) - LOCAL_SOONG_INSTALLED_MODULE := $(my_first_dest) - LOCAL_SOONG_LICENSE_METADATA := $(DEXPREOPT_IMAGE_LICENSE_METADATA_$(my_suffix)) - ifneq (,$(strip $(filter HOST_%,$(my_boot_image_arch)))) - LOCAL_IS_HOST_MODULE := true - endif - LOCAL_MODULE_CLASS := ETC - include $(BUILD_PREBUILT) - $(LOCAL_BUILT_MODULE): | $(my_unstripped_installed) - # Installing boot.art causes all boot image bits to be installed. - # Keep this old behavior in case anyone still needs it. - $(LOCAL_INSTALLED_MODULE): $(wordlist 2,$(words $(my_installed)),$(my_installed)) $(my_symlinks) - $(my_all_targets): $(my_installed) $(my_symlinks) - - my_boot_image_module := $(LOCAL_MODULE) -endif # my_copy_pairs != empty diff --git a/core/envsetup.mk b/core/envsetup.mk index c063f60a15..f82e861abf 100644 --- a/core/envsetup.mk +++ b/core/envsetup.mk @@ -417,6 +417,7 @@ HOST_OUT_SDK_ADDON := $(HOST_OUT)/sdk_addon HOST_OUT_NATIVE_TESTS := $(HOST_OUT)/nativetest64 HOST_OUT_COVERAGE := $(HOST_OUT)/coverage HOST_OUT_TESTCASES := $(HOST_OUT)/testcases +HOST_OUT_ETC := $(HOST_OUT)/etc .KATI_READONLY := \ HOST_OUT_EXECUTABLES \ HOST_OUT_SHARED_LIBRARIES \ @@ -425,7 +426,8 @@ HOST_OUT_TESTCASES := $(HOST_OUT)/testcases HOST_OUT_SDK_ADDON \ HOST_OUT_NATIVE_TESTS \ HOST_OUT_COVERAGE \ - HOST_OUT_TESTCASES + HOST_OUT_TESTCASES \ + HOST_OUT_ETC HOST_CROSS_OUT_EXECUTABLES := $(HOST_CROSS_OUT)/bin HOST_CROSS_OUT_SHARED_LIBRARIES := $(HOST_CROSS_OUT)/lib diff --git a/core/main.mk b/core/main.mk index 80ffec43b7..5bbe1b12bd 100644 --- a/core/main.mk +++ b/core/main.mk @@ -687,12 +687,12 @@ endef # Scan all modules in general-tests, device-tests and other selected suites and # flatten the shared library dependencies. 
define update-host-shared-libs-deps-for-suites -$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests,\ +$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests camera-hal-tests,\ $(foreach m,$(COMPATIBILITY.$(suite).MODULES),\ $(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\ $(foreach dep,$(my_deps),\ $(foreach f,$(ALL_MODULES.$(dep).HOST_SHARED_LIBRARY_FILES),\ - $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests),\ + $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests camera-hal-tests),\ $(eval my_testcases := $(HOST_OUT_TESTCASES)),\ $(eval my_testcases := $$(COMPATIBILITY_TESTCASES_OUT_$(suite))))\ $(eval target := $(my_testcases)/$(lastword $(subst /, ,$(dir $(f))))/$(notdir $(f)))\ @@ -1899,7 +1899,6 @@ $(SOONG_OUT_DIR)/compliance-metadata/$(TARGET_PRODUCT)/make-metadata.csv: $(eval _kernel_module_copy_files := $(sort $(filter %$(_path_on_device),$(KERNEL_MODULE_COPY_FILES)))) \ $(eval _is_build_prop := $(call is-build-prop,$f)) \ $(eval _is_notice_file := $(call is-notice-file,$f)) \ - $(eval _is_dexpreopt_image_profile := $(if $(filter %:/$(_path_on_device),$(DEXPREOPT_IMAGE_PROFILE_BUILT_INSTALLED)),Y)) \ $(eval _is_product_system_other_avbkey := $(if $(findstring $f,$(INSTALLED_PRODUCT_SYSTEM_OTHER_AVBKEY_TARGET)),Y)) \ $(eval _is_event_log_tags_file := $(if $(findstring $f,$(event_log_tags_file)),Y)) \ $(eval _is_system_other_odex_marker := $(if $(findstring $f,$(INSTALLED_SYSTEM_OTHER_ODEX_MARKER)),Y)) \ @@ -1909,7 +1908,7 @@ $(SOONG_OUT_DIR)/compliance-metadata/$(TARGET_PRODUCT)/make-metadata.csv: $(eval _is_partition_compat_symlink := $(if $(findstring $f,$(PARTITION_COMPAT_SYMLINKS)),Y)) \ $(eval _is_flags_file := $(if $(findstring $f, $(ALL_FLAGS_FILES)),Y)) \ $(eval _is_rootdir_symlink := $(if $(findstring $f, $(ALL_ROOTDIR_SYMLINKS)),Y)) \ - $(eval _is_platform_generated := $(_is_build_prop)$(_is_notice_file)$(_is_dexpreopt_image_profile)$(_is_product_system_other_avbkey)$(_is_event_log_tags_file)$(_is_system_other_odex_marker)$(_is_kernel_modules_blocklist)$(_is_fsverity_build_manifest_apk)$(_is_linker_config)$(_is_partition_compat_symlink)$(_is_flags_file)$(_is_rootdir_symlink)) \ + $(eval _is_platform_generated := $(_is_build_prop)$(_is_notice_file)$(_is_product_system_other_avbkey)$(_is_event_log_tags_file)$(_is_system_other_odex_marker)$(_is_kernel_modules_blocklist)$(_is_fsverity_build_manifest_apk)$(_is_linker_config)$(_is_partition_compat_symlink)$(_is_flags_file)$(_is_rootdir_symlink)) \ $(eval _static_libs := $(if $(_is_soong_module),,$(ALL_INSTALLED_FILES.$f.STATIC_LIBRARIES))) \ $(eval _whole_static_libs := $(if $(_is_soong_module),,$(ALL_INSTALLED_FILES.$f.WHOLE_STATIC_LIBRARIES))) \ $(eval _license_text := $(if $(filter $(_build_output_path),$(ALL_NON_MODULES)),$(ALL_NON_MODULES.$(_build_output_path).NOTICES))) \ diff --git a/core/os_licensing.mk b/core/os_licensing.mk index 1e1b7df7a9..d15a3d0715 100644 --- a/core/os_licensing.mk +++ b/core/os_licensing.mk @@ -17,13 +17,17 @@ $(eval $(call xml-notice-rule,$(target_notice_file_xml_gz),"System image",$(syst $(eval $(call text-notice-rule,$(target_notice_file_txt),"System image",$(system_notice_file_message),$(SYSTEM_NOTICE_DEPS),$(SYSTEM_NOTICE_DEPS))) +ifneq ($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true) $(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz) $(copy-file-to-target) endif +endif $(call declare-1p-target,$(target_notice_file_xml_gz)) +ifneq 
($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true) $(call declare-1p-target,$(installed_notice_html_or_xml_gz)) endif +endif .PHONY: vendorlicense vendorlicense: $(call corresponding-license-metadata, $(VENDOR_NOTICE_DEPS)) reportmissinglicenses diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk index 4693bcd6d8..ccb502ca11 100644 --- a/core/packaging/flags.mk +++ b/core/packaging/flags.mk @@ -97,42 +97,54 @@ $(eval $(call generate-global-aconfig-flag-file, \ # $(1): built aconfig flags storage package map file (out) # $(2): built aconfig flags storage flag map file (out) # $(3): built aconfig flags storage flag val file (out) -# $(4): installed aconfig flags storage package map file (out) -# $(5): installed aconfig flags storage flag map file (out) -# $(6): installed aconfig flags storage flag value file (out) -# $(7): input aconfig files for the partition (in) -# $(8): partition name +# $(4): built aconfig flags storage flag info file (out) +# $(5): installed aconfig flags storage package map file (out) +# $(6): installed aconfig flags storage flag map file (out) +# $(7): installed aconfig flags storage flag value file (out) +# $(8): installed aconfig flags storage flag info file (out) +# $(9): input aconfig files for the partition (in) +# $(10): partition name define generate-partition-aconfig-storage-file $(eval $(strip $(1)): PRIVATE_OUT := $(strip $(1))) -$(eval $(strip $(1)): PRIVATE_IN := $(strip $(7))) -$(strip $(1)): $(ACONFIG) $(strip $(7)) +$(eval $(strip $(1)): PRIVATE_IN := $(strip $(9))) +$(strip $(1)): $(ACONFIG) $(strip $(9)) mkdir -p $$(dir $$(PRIVATE_OUT)) $$(if $$(PRIVATE_IN), \ - $$(ACONFIG) create-storage --container $(8) --file package_map --out $$(PRIVATE_OUT) \ + $$(ACONFIG) create-storage --container $(10) --file package_map --out $$(PRIVATE_OUT) \ $$(addprefix --cache ,$$(PRIVATE_IN)), \ ) touch $$(PRIVATE_OUT) $(eval $(strip $(2)): PRIVATE_OUT := $(strip $(2))) -$(eval $(strip $(2)): PRIVATE_IN := $(strip $(7))) -$(strip $(2)): $(ACONFIG) $(strip $(7)) +$(eval $(strip $(2)): PRIVATE_IN := $(strip $(9))) +$(strip $(2)): $(ACONFIG) $(strip $(9)) mkdir -p $$(dir $$(PRIVATE_OUT)) $$(if $$(PRIVATE_IN), \ - $$(ACONFIG) create-storage --container $(8) --file flag_map --out $$(PRIVATE_OUT) \ + $$(ACONFIG) create-storage --container $(10) --file flag_map --out $$(PRIVATE_OUT) \ $$(addprefix --cache ,$$(PRIVATE_IN)), \ ) touch $$(PRIVATE_OUT) $(eval $(strip $(3)): PRIVATE_OUT := $(strip $(3))) -$(eval $(strip $(3)): PRIVATE_IN := $(strip $(7))) -$(strip $(3)): $(ACONFIG) $(strip $(7)) +$(eval $(strip $(3)): PRIVATE_IN := $(strip $(9))) +$(strip $(3)): $(ACONFIG) $(strip $(9)) mkdir -p $$(dir $$(PRIVATE_OUT)) $$(if $$(PRIVATE_IN), \ - $$(ACONFIG) create-storage --container $(8) --file flag_val --out $$(PRIVATE_OUT) \ + $$(ACONFIG) create-storage --container $(10) --file flag_val --out $$(PRIVATE_OUT) \ $$(addprefix --cache ,$$(PRIVATE_IN)), \ ) touch $$(PRIVATE_OUT) -$(call copy-one-file, $(strip $(1)), $(4)) -$(call copy-one-file, $(strip $(2)), $(5)) -$(call copy-one-file, $(strip $(3)), $(6)) +$(eval $(strip $(4)): PRIVATE_OUT := $(strip $(4))) +$(eval $(strip $(4)): PRIVATE_IN := $(strip $(9))) +$(strip $(4)): $(ACONFIG) $(strip $(9)) + mkdir -p $$(dir $$(PRIVATE_OUT)) + $$(if $$(PRIVATE_IN), \ + $$(ACONFIG) create-storage --container $(10) --file flag_info --out $$(PRIVATE_OUT) \ + $$(addprefix --cache ,$$(PRIVATE_IN)), \ + ) + touch $$(PRIVATE_OUT) +$(call copy-one-file, $(strip $(1)), $(5)) +$(call copy-one-file, $(strip $(2)), $(6)) +$(call copy-one-file, $(strip 
$(3)), $(7)) +$(call copy-one-file, $(strip $(4)), $(8)) endef ifeq ($(RELEASE_CREATE_ACONFIG_STORAGE_FILE),true) @@ -140,13 +152,16 @@ $(foreach partition, $(_FLAG_PARTITIONS), \ $(eval aconfig_storage_package_map.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/package.map) \ $(eval aconfig_storage_flag_map.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.map) \ $(eval aconfig_storage_flag_val.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.val) \ + $(eval aconfig_storage_flag_info.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.info) \ $(eval $(call generate-partition-aconfig-storage-file, \ $(TARGET_OUT_FLAGS)/$(partition)/package.map, \ $(TARGET_OUT_FLAGS)/$(partition)/flag.map, \ $(TARGET_OUT_FLAGS)/$(partition)/flag.val, \ + $(TARGET_OUT_FLAGS)/$(partition)/flag.info, \ $(aconfig_storage_package_map.$(partition)), \ $(aconfig_storage_flag_map.$(partition)), \ $(aconfig_storage_flag_val.$(partition)), \ + $(aconfig_storage_flag_info.$(partition)), \ $(aconfig_flag_summaries_protobuf.$(partition)), \ $(partition), \ )) \ @@ -162,6 +177,7 @@ required_flags_files := \ $(aconfig_storage_package_map.$(partition)) \ $(aconfig_storage_flag_map.$(partition)) \ $(aconfig_storage_flag_val.$(partition)) \ + $(aconfig_storage_flag_info.$(partition)) \ )) ALL_DEFAULT_INSTALLED_MODULES += $(required_flags_files) @@ -181,4 +197,5 @@ $(foreach partition, $(_FLAG_PARTITIONS), \ $(eval aconfig_storage_package_map.$(partition):=) \ $(eval aconfig_storage_flag_map.$(partition):=) \ $(eval aconfig_storage_flag_val.$(partition):=) \ + $(eval aconfig_storage_flag_info.$(partition):=) \ ) diff --git a/core/product.mk b/core/product.mk index 4c23e5dfdd..93a656d85d 100644 --- a/core/product.mk +++ b/core/product.mk @@ -390,20 +390,6 @@ _product_single_value_vars += PRODUCT_OTA_FORCE_NON_AB_PACKAGE # If set, Java module in product partition cannot use hidden APIs. _product_single_value_vars += PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE -# If set, only java_sdk_library can be used at inter-partition dependency. -# Note: Build error if BOARD_VNDK_VERSION is not set while -# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY is true, because -# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY has no meaning if -# BOARD_VNDK_VERSION is not set. -# Note: When PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE is not set, there are -# no restrictions at dependency between system and product partition. -_product_single_value_vars += PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY - -# Allowlist for PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY option. -# Listed modules are allowed at inter-partition dependency even if it isn't -# a java_sdk_library module. -_product_list_vars += PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST - # Install a copy of the debug policy to the system_ext partition, and allow # init-second-stage to load debug policy from system_ext. # This option is only meant to be set by compliance GSI targets. @@ -499,6 +485,10 @@ _product_single_value_vars += PRODUCT_BUILD_APPS_WITH_BUILD_NUMBER # If set, build would generate system image from Soong-defined module. _product_single_value_vars += PRODUCT_SOONG_DEFINED_SYSTEM_IMAGE +# List of stub libraries specific to the product that are already present in the system image and +# should be included in the system_linker_config. 
+_product_list_vars += PRODUCT_EXTRA_STUB_LIBRARIES + .KATI_READONLY := _product_single_value_vars _product_list_vars _product_var_list :=$= $(_product_single_value_vars) $(_product_list_vars) diff --git a/core/proguard.flags b/core/proguard.flags index aa406b983e..5148e56407 100644 --- a/core/proguard.flags +++ b/core/proguard.flags @@ -38,6 +38,17 @@ @com.android.internal.annotations.KeepForWeakReference <fields>; } +# Needed to ensure callback field references are kept in their respective +# owning classes when the downstream callback registrars only store weak refs. +-if @com.android.internal.annotations.WeaklyReferencedCallback class * +-keepclassmembers,allowaccessmodification class * { + <1> *; +} +-if class * extends @com.android.internal.annotations.WeaklyReferencedCallback ** +-keepclassmembers,allowaccessmodification class * { + <1> *; +} + # Understand the common @Keep annotation from various Android packages: # * android.support.annotation # * androidx.annotation diff --git a/core/soong_config.mk b/core/soong_config.mk index 1e6388a5ba..0421e19ae1 100644 --- a/core/soong_config.mk +++ b/core/soong_config.mk @@ -265,9 +265,6 @@ $(call end_json_map) $(call add_json_bool, EnforceProductPartitionInterface, $(filter true,$(PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE))) $(call add_json_str, DeviceCurrentApiLevelForVendorModules, $(BOARD_CURRENT_API_LEVEL_FOR_VENDOR_MODULES)) -$(call add_json_bool, EnforceInterPartitionJavaSdkLibrary, $(filter true,$(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY))) -$(call add_json_list, InterPartitionJavaLibraryAllowList, $(PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST)) - $(call add_json_bool, CompressedApex, $(filter true,$(PRODUCT_COMPRESSED_APEX))) ifndef APEX_BUILD_FOR_PRE_S_DEVICES @@ -367,6 +364,62 @@ $(call add_json_list, DeviceProductCompatibilityMatrixFile, $(DEVICE_PRODUCT_COM $(call add_json_list, BoardAvbSystemAddHashtreeFooterArgs, $(BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS)) $(call add_json_bool, BoardAvbEnable, $(filter true,$(BOARD_AVB_ENABLE))) +$(call add_json_map, PartitionVarsForSoongMigrationOnlyDoNotUse) + $(call add_json_str, ProductDirectory, $(dir $(INTERNAL_PRODUCT))) + + $(call add_json_map,PartitionQualifiedVariables) + $(foreach image_type,SYSTEM VENDOR CACHE USERDATA PRODUCT SYSTEM_EXT OEM ODM VENDOR_DLKM ODM_DLKM SYSTEM_DLKM, \ + $(call add_json_map,$(call to-lower,$(image_type))) \ + $(call add_json_bool, BuildingImage, $(filter true,$(BUILDING_$(image_type)_IMAGE))) \ + $(call add_json_str, BoardErofsCompressor, $(BOARD_$(image_type)IMAGE_EROFS_COMPRESSOR)) \ + $(call add_json_str, BoardErofsCompressHints, $(BOARD_$(image_type)IMAGE_EROFS_COMPRESS_HINTS)) \ + $(call add_json_str, BoardErofsPclusterSize, $(BOARD_$(image_type)IMAGE_EROFS_PCLUSTER_SIZE)) \ + $(call add_json_str, BoardExtfsInodeCount, $(BOARD_$(image_type)IMAGE_EXTFS_INODE_COUNT)) \ + $(call add_json_str, BoardExtfsRsvPct, $(BOARD_$(image_type)IMAGE_EXTFS_RSV_PCT)) \ + $(call add_json_str, BoardF2fsSloadCompressFlags, $(BOARD_$(image_type)IMAGE_F2FS_SLOAD_COMPRESS_FLAGS)) \ + $(call add_json_str, BoardFileSystemCompress, $(BOARD_$(image_type)IMAGE_FILE_SYSTEM_COMPRESS)) \ + $(call add_json_str, BoardFileSystemType, $(BOARD_$(image_type)IMAGE_FILE_SYSTEM_TYPE)) \ + $(call add_json_str, BoardJournalSize, $(BOARD_$(image_type)IMAGE_JOURNAL_SIZE)) \ + $(call add_json_str, BoardPartitionReservedSize, $(BOARD_$(image_type)IMAGE_PARTITION_RESERVED_SIZE)) \ + $(call add_json_str, BoardPartitionSize, $(BOARD_$(image_type)IMAGE_PARTITION_SIZE)) \ + $(call 
add_json_str, BoardSquashfsBlockSize, $(BOARD_$(image_type)IMAGE_SQUASHFS_BLOCK_SIZE)) \ + $(call add_json_str, BoardSquashfsCompressor, $(BOARD_$(image_type)IMAGE_SQUASHFS_COMPRESSOR)) \ + $(call add_json_str, BoardSquashfsCompressorOpt, $(BOARD_$(image_type)IMAGE_SQUASHFS_COMPRESSOR_OPT)) \ + $(call add_json_str, BoardSquashfsDisable4kAlign, $(BOARD_$(image_type)IMAGE_SQUASHFS_DISABLE_4K_ALIGN)) \ + $(call add_json_str, ProductBaseFsPath, $(PRODUCT_$(image_type)_BASE_FS_PATH)) \ + $(call add_json_str, ProductHeadroom, $(PRODUCT_$(image_type)_HEADROOM)) \ + $(call add_json_str, ProductVerityPartition, $(PRODUCT_$(image_type)_VERITY_PARTITION)) \ + $(call end_json_map) \ + ) + $(call end_json_map) + + $(call add_json_bool, TargetUserimagesUseExt2, $(filter true,$(TARGET_USERIMAGES_USE_EXT2))) + $(call add_json_bool, TargetUserimagesUseExt3, $(filter true,$(TARGET_USERIMAGES_USE_EXT3))) + $(call add_json_bool, TargetUserimagesUseExt4, $(filter true,$(TARGET_USERIMAGES_USE_EXT4))) + + $(call add_json_bool, TargetUserimagesSparseExtDisabled, $(filter true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED))) + $(call add_json_bool, TargetUserimagesSparseErofsDisabled, $(filter true,$(TARGET_USERIMAGES_SPARSE_EROFS_DISABLED))) + $(call add_json_bool, TargetUserimagesSparseSquashfsDisabled, $(filter true,$(TARGET_USERIMAGES_SPARSE_SQUASHFS_DISABLED))) + $(call add_json_bool, TargetUserimagesSparseF2fsDisabled, $(filter true,$(TARGET_USERIMAGES_SPARSE_F2FS_DISABLED))) + + $(call add_json_str, BoardErofsCompressor, $(BOARD_EROFS_COMPRESSOR)) + $(call add_json_str, BoardErofsCompressorHints, $(BOARD_EROFS_COMPRESS_HINTS)) + $(call add_json_str, BoardErofsPclusterSize, $(BOARD_EROFS_PCLUSTER_SIZE)) + $(call add_json_str, BoardErofsShareDupBlocks, $(BOARD_EROFS_SHARE_DUP_BLOCKS)) + $(call add_json_str, BoardErofsUseLegacyCompression, $(BOARD_EROFS_USE_LEGACY_COMPRESSION)) + $(call add_json_str, BoardExt4ShareDupBlocks, $(BOARD_EXT4_SHARE_DUP_BLOCKS)) + $(call add_json_str, BoardFlashLogicalBlockSize, $(BOARD_FLASH_LOGICAL_BLOCK_SIZE)) + $(call add_json_str, BoardFlashEraseBlockSize, $(BOARD_FLASH_ERASE_BLOCK_SIZE)) + + $(call add_json_bool, BoardUsesRecoveryAsBoot, $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))) + $(call add_json_bool, ProductUseDynamicPartitionSize, $(filter true,$(PRODUCT_USE_DYNAMIC_PARTITION_SIZE))) + $(call add_json_bool, CopyImagesForTargetFilesZip, $(filter true,$(COPY_IMAGES_FOR_TARGET_FILES_ZIP))) + + $(call add_json_list, ProductPackages, $(PRODUCT_PACKAGES)) + $(call add_json_list, ProductPackagesDebug, $(PRODUCT_PACKAGES_DEBUG)) + +$(call end_json_map) + $(call json_end) $(file >$(SOONG_VARIABLES).tmp,$(json_contents)) diff --git a/core/tasks/device-tests.mk b/core/tasks/device-tests.mk index 5850c4ed73..6164c2e94b 100644 --- a/core/tasks/device-tests.mk +++ b/core/tasks/device-tests.mk @@ -14,6 +14,7 @@ .PHONY: device-tests +.PHONY: device-tests-host-shared-libs device-tests-zip := $(PRODUCT_OUT)/device-tests.zip # Create an artifact to include a list of test config files in device-tests. 
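The generate-partition-aconfig-storage-file macro in core/packaging/flags.mk above now produces four storage files per partition (package.map, flag.map, flag.val, and the newly added flag.info), each from its own aconfig create-storage invocation over the partition's cached flag files. A small sketch of how those command lines line up; the aconfig path and output layout are placeholders:

# Sketch of the per-partition aconfig create-storage invocations; the binary
# path and output layout are placeholders, the flags mirror the macro above.
def aconfig_storage_commands(partition: str, cache_files: list[str],
                             out_dir: str,
                             aconfig: str = 'aconfig') -> list[list[str]]:
  kinds = (('package_map', 'package.map'),
           ('flag_map', 'flag.map'),
           ('flag_val', 'flag.val'),
           ('flag_info', 'flag.info'))  # flag.info is the newly added file
  commands = []
  for file_kind, out_name in kinds:
    cmd = [aconfig, 'create-storage',
           '--container', partition,
           '--file', file_kind,
           '--out', f'{out_dir}/{partition}/{out_name}']
    for cache in cache_files:  # one --cache per input flag file
      cmd += ['--cache', cache]
    commands.append(cmd)
  return commands

# Example: aconfig_storage_commands('system', ['system_flags.pb'], 'out/flags')
# (the macro simply touches the outputs when a partition has no cache files)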
@@ -23,37 +24,45 @@ device-tests-configs-zip := $(PRODUCT_OUT)/device-tests_configs.zip my_host_shared_lib_for_device_tests := $(call copy-many-files,$(COMPATIBILITY.device-tests.HOST_SHARED_LIBRARY.FILES)) device_tests_host_shared_libs_zip := $(PRODUCT_OUT)/device-tests_host-shared-libs.zip -$(device-tests-zip) : .KATI_IMPLICIT_OUTPUTS := $(device-tests-list-zip) $(device-tests-configs-zip) $(device_tests_host_shared_libs_zip) +$(device-tests-zip) : .KATI_IMPLICIT_OUTPUTS := $(device-tests-list-zip) $(device-tests-configs-zip) $(device-tests-zip) : PRIVATE_device_tests_list := $(PRODUCT_OUT)/device-tests_list $(device-tests-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_device_tests) -$(device-tests-zip) : PRIVATE_device_host_shared_libs_zip := $(device_tests_host_shared_libs_zip) $(device-tests-zip) : $(COMPATIBILITY.device-tests.FILES) $(COMPATIBILITY.device-tests.SOONG_INSTALLED_COMPATIBILITY_SUPPORT_FILES) $(my_host_shared_lib_for_device_tests) $(SOONG_ZIP) - rm -f $@-shared-libs.list echo $(sort $(COMPATIBILITY.device-tests.FILES) $(COMPATIBILITY.device-tests.SOONG_INSTALLED_COMPATIBILITY_SUPPORT_FILES)) | tr " " "\n" > $@.list grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true grep -e .*\\.config$$ $@-host.list > $@-host-test-configs.list || true $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \ echo $$shared_lib >> $@-host.list; \ - echo $$shared_lib >> $@-shared-libs.list; \ done - grep $(HOST_OUT_TESTCASES) $@-shared-libs.list > $@-host-shared-libs.list || true grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true grep -e .*\\.config$$ $@-target.list > $@-target-test-configs.list || true $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list -sha256 $(hide) $(SOONG_ZIP) -d -o $(device-tests-configs-zip) \ -P host -C $(HOST_OUT) -l $@-host-test-configs.list \ -P target -C $(PRODUCT_OUT) -l $@-target-test-configs.list - $(SOONG_ZIP) -d -o $(PRIVATE_device_host_shared_libs_zip) \ - -P host -C $(HOST_OUT) -l $@-host-shared-libs.list rm -f $(PRIVATE_device_tests_list) $(hide) grep -e .*\\.config$$ $@-host.list | sed s%$(HOST_OUT)%host%g > $(PRIVATE_device_tests_list) $(hide) grep -e .*\\.config$$ $@-target.list | sed s%$(PRODUCT_OUT)%target%g >> $(PRIVATE_device_tests_list) $(hide) $(SOONG_ZIP) -d -o $(device-tests-list-zip) -C $(dir $@) -f $(PRIVATE_device_tests_list) rm -f $@.list $@-host.list $@-target.list $@-host-test-configs.list $@-target-test-configs.list \ - $@-shared-libs.list $@-host-shared-libs.list $(PRIVATE_device_tests_list) + $(PRIVATE_device_tests_list) + +$(device_tests_host_shared_libs_zip) : PRIVATE_device_host_shared_libs_zip := $(device_tests_host_shared_libs_zip) +$(device_tests_host_shared_libs_zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_device_tests) +$(device_tests_host_shared_libs_zip) : $(my_host_shared_lib_for_device_tests) $(SOONG_ZIP) + rm -f $@-shared-libs.list + $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \ + echo $$shared_lib >> $@-shared-libs.list; \ + done + grep $(HOST_OUT_TESTCASES) $@-shared-libs.list > $@-host-shared-libs.list || true + $(SOONG_ZIP) -d -o $(PRIVATE_device_host_shared_libs_zip) \ + -P host -C $(HOST_OUT) -l $@-host-shared-libs.list device-tests: $(device-tests-zip) +device-tests-host-shared-libs: $(device_tests_host_shared_libs_zip) + $(call dist-for-goals, device-tests, $(device-tests-zip) $(device-tests-list-zip) $(device-tests-configs-zip) $(device_tests_host_shared_libs_zip)) +$(call dist-for-goals, 
device-tests-host-shared-libs, $(device_tests_host_shared_libs_zip)) $(call declare-1p-container,$(device-tests-zip),) $(call declare-container-license-deps,$(device-tests-zip),$(COMPATIBILITY.device-tests.FILES) $(my_host_shared_lib_for_device_tests),$(PRODUCT_OUT)/:/) diff --git a/core/tasks/mke2fs-dist.mk b/core/tasks/mke2fs-dist.mk new file mode 100644 index 0000000000..3540c1f985 --- /dev/null +++ b/core/tasks/mke2fs-dist.mk @@ -0,0 +1,22 @@ +# Copyright (C) 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: After Soong's recovery partition variation can be set to selectable +# and the meta_lic file duplication issue is resolved, move it to the +# dist section of the corresponding module's Android.bp. +my_dist_files := $(HOST_OUT_EXECUTABLES)/mke2fs +my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs +my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs_casefold +$(call dist-for-goals,dist_files sdk,$(my_dist_files)) +my_dist_files := diff --git a/envsetup.sh b/envsetup.sh index 06dadd3f38..3fed5aed6d 100644 --- a/envsetup.sh +++ b/envsetup.sh @@ -442,6 +442,7 @@ function print_lunch_menu() function lunch() { local answer + setup_cog_env_if_needed if [[ $# -gt 1 ]]; then echo "usage: lunch [target]" >&2 @@ -1079,10 +1080,7 @@ function source_vendorsetup() { done done - if [[ "${PWD}" == /google/cog/* ]]; then - f="build/make/cogsetup.sh" - echo "including $f"; . "$T/$f" - fi + setup_cog_env_if_needed } function showcommands() { diff --git a/shell_utils.sh b/shell_utils.sh index 86f3f49f50..c4a67564c2 100644 --- a/shell_utils.sh +++ b/shell_utils.sh @@ -63,6 +63,70 @@ function require_lunch } fi +# This function sets up the build environment to be appropriate for Cog. +function setup_cog_env_if_needed() { + local top=$(gettop) + + # return early if not in a cog workspace + if [[ ! "$top" =~ ^/google/cog ]]; then + return 0 + fi + + setup_cog_symlink + + export ANDROID_BUILD_ENVIRONMENT_CONFIG="googler-cog" + + # Running repo command within Cog workspaces is not supported, so override + # it with this function. If the user is running repo within a Cog workspace, + # we'll fail with an error, otherwise, we run the original repo command with + # the given args. + if ! ORIG_REPO_PATH=`which repo`; then + return 0 + fi + function repo { + if [[ "${PWD}" == /google/cog/* ]]; then + echo -e "\e[01;31mERROR:\e[0mrepo command is disallowed within Cog workspaces." + kill -INT $$ # exits the script without exiting the user's shell + fi + ${ORIG_REPO_PATH} "$@" + } +} + +# creates a symlink for the out/ dir when inside a cog workspace. +function setup_cog_symlink() { + local out_dir=$(getoutdir) + local top=$(gettop) + + # return early if out dir is already a symlink + if [[ -L "$out_dir" ]]; then + return 0 + fi + + # return early if out dir is not in the workspace + if [[ ! 
"$out_dir" =~ ^$top/ ]]; then + return 0 + fi + + local link_destination="${HOME}/.cog/android-build-out" + + # remove existing out/ dir if it exists + if [[ -d "$out_dir" ]]; then + echo "Detected existing out/ directory in the Cog workspace which is not supported. Repairing workspace by removing it and creating the symlink to ~/.cog/android-build-out" + if ! rm -rf "$out_dir"; then + echo "Failed to remove existing out/ directory: $out_dir" >&2 + kill -INT $$ # exits the script without exiting the user's shell + fi + fi + + # create symlink + echo "Creating symlink: $out_dir -> $link_destination" + mkdir -p ${link_destination} + if ! ln -s "$link_destination" "$out_dir"; then + echo "Failed to create cog symlink: $out_dir -> $link_destination" >&2 + kill -INT $$ # exits the script without exiting the user's shell + fi +} + function getoutdir { local top=$(gettop) diff --git a/target/product/base_system.mk b/target/product/base_system.mk index d806c061b6..563511fd12 100644 --- a/target/product/base_system.mk +++ b/target/product/base_system.mk @@ -345,6 +345,11 @@ ifeq ($(RELEASE_USE_WEBVIEW_BOOTSTRAP_MODULE),true) com.android.webview.bootstrap endif +ifneq (,$(RELEASE_RANGING_STACK)) + PRODUCT_PACKAGES += \ + com.android.ranging +endif + # VINTF data for system image PRODUCT_PACKAGES += \ system_manifest.xml \ diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk index 61d7235aac..668f054773 100644 --- a/target/product/default_art_config.mk +++ b/target/product/default_art_config.mk @@ -114,6 +114,12 @@ ifeq ($(RELEASE_PACKAGE_PROFILING_MODULE),true) endif +ifneq (,$(RELEASE_RANGING_STACK)) + PRODUCT_APEX_BOOT_JARS += \ + com.android.uwb:framework-ranging \ + $(call soong_config_set,bootclasspath,release_ranging_stack,true) +endif + # List of system_server classpath jars delivered via apex. # Keep the list sorted by module names and then library names. # Note: For modules available in Q, DO NOT add new entries here. @@ -169,6 +175,11 @@ ifeq ($(RELEASE_PACKAGE_PROFILING_MODULE),true) endif +ifneq (,$(RELEASE_RANGING_STACK)) + PRODUCT_APEX_STANDALONE_SYSTEM_SERVER_JARS += \ + com.android.uwb:service-ranging +endif + # Overrides the (apex, jar) pairs above when determining the on-device location. 
The format is: # <old_apex>:<old_jar>:<new_apex>:<new_jar> PRODUCT_CONFIGURED_JAR_LOCATION_OVERRIDES := \ diff --git a/target/product/generic/Android.bp b/target/product/generic/Android.bp new file mode 100644 index 0000000000..bf82ff1059 --- /dev/null +++ b/target/product/generic/Android.bp @@ -0,0 +1,822 @@ +android_rootdirs = [ + "acct", + "apex", + "bootstrap-apex", + "config", + "data", + "data_mirror", + "debug_ramdisk", + "dev", + "linkerconfig", + "metadata", + "mnt", + "odm", + "odm_dlkm", + "oem", + "postinstall", + "proc", + "product", + "second_stage_resources", + "storage", + "sys", + "system", + "system_dlkm", + "system_ext", + "tmp", + "vendor", + "vendor_dlkm", +] + +android_symlinks = [ + { + target: "/system/bin/init", + name: "init", + }, + { + target: "/system/etc", + name: "etc", + }, + { + target: "/system/bin", + name: "bin", + }, + { + target: "/product", + name: "system/product", + }, + { + target: "/vendor", + name: "system/vendor", + }, + { + target: "/system_ext", + name: "system/system_ext", + }, + { + target: "/system_dlkm/lib/modules", + name: "system/lib/modules", + }, + { + target: "/data/user_de/0/com.android.shell/files/bugreports", + name: "bugreports", + }, + { + target: "/data/cache", + name: "cache", + }, + { + target: "/sys/kernel/debug", + name: "d", + }, + { + target: "/storage/self/primary", + name: "sdcard", + }, +] + +filegroup { + name: "generic_system_sign_key", + srcs: [":avb_testkey_rsa4096"], +} + +phony { + name: "generic_system_fonts", + required: [ + "AndroidClock.ttf", + "CarroisGothicSC-Regular.ttf", + "ComingSoon.ttf", + "CutiveMono.ttf", + "DancingScript-Regular.ttf", + "DroidSansMono.ttf", + "NotoColorEmoji.ttf", + "NotoColorEmojiFlags.ttf", + "NotoNaskhArabic-Bold.ttf", + "NotoNaskhArabic-Regular.ttf", + "NotoNaskhArabicUI-Bold.ttf", + "NotoNaskhArabicUI-Regular.ttf", + "NotoSansAdlam-VF.ttf", + "NotoSansAhom-Regular.otf", + "NotoSansAnatolianHieroglyphs-Regular.otf", + "NotoSansArmenian-VF.ttf", + "NotoSansAvestan-Regular.ttf", + "NotoSansBalinese-Regular.ttf", + "NotoSansBamum-Regular.ttf", + "NotoSansBassaVah-Regular.otf", + "NotoSansBatak-Regular.ttf", + "NotoSansBengali-VF.ttf", + "NotoSansBengaliUI-VF.ttf", + "NotoSansBhaiksuki-Regular.otf", + "NotoSansBrahmi-Regular.ttf", + "NotoSansBuginese-Regular.ttf", + "NotoSansBuhid-Regular.ttf", + "NotoSansCJK-Regular.ttc", + "NotoSansCanadianAboriginal-Regular.ttf", + "NotoSansCarian-Regular.ttf", + "NotoSansChakma-Regular.otf", + "NotoSansCham-Bold.ttf", + "NotoSansCham-Regular.ttf", + "NotoSansCherokee-Regular.ttf", + "NotoSansCoptic-Regular.ttf", + "NotoSansCuneiform-Regular.ttf", + "NotoSansCypriot-Regular.ttf", + "NotoSansDeseret-Regular.ttf", + "NotoSansDevanagari-VF.ttf", + "NotoSansDevanagariUI-VF.ttf", + "NotoSansEgyptianHieroglyphs-Regular.ttf", + "NotoSansElbasan-Regular.otf", + "NotoSansEthiopic-VF.ttf", + "NotoSansGeorgian-VF.ttf", + "NotoSansGlagolitic-Regular.ttf", + "NotoSansGothic-Regular.ttf", + "NotoSansGrantha-Regular.ttf", + "NotoSansGujarati-Bold.ttf", + "NotoSansGujarati-Regular.ttf", + "NotoSansGujaratiUI-Bold.ttf", + "NotoSansGujaratiUI-Regular.ttf", + "NotoSansGunjalaGondi-Regular.otf", + "NotoSansGurmukhi-VF.ttf", + "NotoSansGurmukhiUI-VF.ttf", + "NotoSansHanifiRohingya-Regular.otf", + "NotoSansHanunoo-Regular.ttf", + "NotoSansHatran-Regular.otf", + "NotoSansHebrew-Bold.ttf", + "NotoSansHebrew-Regular.ttf", + "NotoSansImperialAramaic-Regular.ttf", + "NotoSansInscriptionalPahlavi-Regular.ttf", + "NotoSansInscriptionalParthian-Regular.ttf", + 
"NotoSansJavanese-Regular.otf", + "NotoSansKaithi-Regular.ttf", + "NotoSansKannada-VF.ttf", + "NotoSansKannadaUI-VF.ttf", + "NotoSansKayahLi-Regular.ttf", + "NotoSansKharoshthi-Regular.ttf", + "NotoSansKhmer-VF.ttf", + "NotoSansKhmerUI-Bold.ttf", + "NotoSansKhmerUI-Regular.ttf", + "NotoSansKhojki-Regular.otf", + "NotoSansLao-Bold.ttf", + "NotoSansLao-Regular.ttf", + "NotoSansLaoUI-Bold.ttf", + "NotoSansLaoUI-Regular.ttf", + "NotoSansLepcha-Regular.ttf", + "NotoSansLimbu-Regular.ttf", + "NotoSansLinearA-Regular.otf", + "NotoSansLinearB-Regular.ttf", + "NotoSansLisu-Regular.ttf", + "NotoSansLycian-Regular.ttf", + "NotoSansLydian-Regular.ttf", + "NotoSansMalayalam-VF.ttf", + "NotoSansMalayalamUI-VF.ttf", + "NotoSansMandaic-Regular.ttf", + "NotoSansManichaean-Regular.otf", + "NotoSansMarchen-Regular.otf", + "NotoSansMasaramGondi-Regular.otf", + "NotoSansMedefaidrin-VF.ttf", + "NotoSansMeeteiMayek-Regular.ttf", + "NotoSansMeroitic-Regular.otf", + "NotoSansMiao-Regular.otf", + "NotoSansModi-Regular.ttf", + "NotoSansMongolian-Regular.ttf", + "NotoSansMro-Regular.otf", + "NotoSansMultani-Regular.otf", + "NotoSansMyanmar-Bold.otf", + "NotoSansMyanmar-Medium.otf", + "NotoSansMyanmar-Regular.otf", + "NotoSansMyanmarUI-Bold.otf", + "NotoSansMyanmarUI-Medium.otf", + "NotoSansMyanmarUI-Regular.otf", + "NotoSansNKo-Regular.ttf", + "NotoSansNabataean-Regular.otf", + "NotoSansNewTaiLue-Regular.ttf", + "NotoSansNewa-Regular.otf", + "NotoSansOgham-Regular.ttf", + "NotoSansOlChiki-Regular.ttf", + "NotoSansOldItalic-Regular.ttf", + "NotoSansOldNorthArabian-Regular.otf", + "NotoSansOldPermic-Regular.otf", + "NotoSansOldPersian-Regular.ttf", + "NotoSansOldSouthArabian-Regular.ttf", + "NotoSansOldTurkic-Regular.ttf", + "NotoSansOriya-Bold.ttf", + "NotoSansOriya-Regular.ttf", + "NotoSansOriyaUI-Bold.ttf", + "NotoSansOriyaUI-Regular.ttf", + "NotoSansOsage-Regular.ttf", + "NotoSansOsmanya-Regular.ttf", + "NotoSansPahawhHmong-Regular.otf", + "NotoSansPalmyrene-Regular.otf", + "NotoSansPauCinHau-Regular.otf", + "NotoSansPhagsPa-Regular.ttf", + "NotoSansPhoenician-Regular.ttf", + "NotoSansRejang-Regular.ttf", + "NotoSansRunic-Regular.ttf", + "NotoSansSamaritan-Regular.ttf", + "NotoSansSaurashtra-Regular.ttf", + "NotoSansSharada-Regular.otf", + "NotoSansShavian-Regular.ttf", + "NotoSansSinhala-VF.ttf", + "NotoSansSinhalaUI-VF.ttf", + "NotoSansSoraSompeng-Regular.otf", + "NotoSansSoyombo-VF.ttf", + "NotoSansSundanese-Regular.ttf", + "NotoSansSylotiNagri-Regular.ttf", + "NotoSansSymbols-Regular-Subsetted.ttf", + "NotoSansSymbols-Regular-Subsetted2.ttf", + "NotoSansSyriacEastern-Regular.ttf", + "NotoSansSyriacEstrangela-Regular.ttf", + "NotoSansSyriacWestern-Regular.ttf", + "NotoSansTagalog-Regular.ttf", + "NotoSansTagbanwa-Regular.ttf", + "NotoSansTaiLe-Regular.ttf", + "NotoSansTaiTham-Regular.ttf", + "NotoSansTaiViet-Regular.ttf", + "NotoSansTakri-VF.ttf", + "NotoSansTamil-VF.ttf", + "NotoSansTamilUI-VF.ttf", + "NotoSansTelugu-VF.ttf", + "NotoSansTeluguUI-VF.ttf", + "NotoSansThaana-Bold.ttf", + "NotoSansThaana-Regular.ttf", + "NotoSansThai-Bold.ttf", + "NotoSansThai-Regular.ttf", + "NotoSansThaiUI-Bold.ttf", + "NotoSansThaiUI-Regular.ttf", + "NotoSansTifinagh-Regular.otf", + "NotoSansUgaritic-Regular.ttf", + "NotoSansVai-Regular.ttf", + "NotoSansWancho-Regular.otf", + "NotoSansWarangCiti-Regular.otf", + "NotoSansYi-Regular.ttf", + "NotoSerif-Bold.ttf", + "NotoSerif-BoldItalic.ttf", + "NotoSerif-Italic.ttf", + "NotoSerif-Regular.ttf", + "NotoSerifArmenian-VF.ttf", + "NotoSerifBengali-VF.ttf", + "NotoSerifCJK-Regular.ttc", 
+ "NotoSerifDevanagari-VF.ttf", + "NotoSerifDogra-Regular.ttf", + "NotoSerifEthiopic-VF.ttf", + "NotoSerifGeorgian-VF.ttf", + "NotoSerifGujarati-VF.ttf", + "NotoSerifGurmukhi-VF.ttf", + "NotoSerifHebrew-Bold.ttf", + "NotoSerifHebrew-Regular.ttf", + "NotoSerifHentaigana.ttf", + "NotoSerifKannada-VF.ttf", + "NotoSerifKhmer-Bold.otf", + "NotoSerifKhmer-Regular.otf", + "NotoSerifLao-Bold.ttf", + "NotoSerifLao-Regular.ttf", + "NotoSerifMalayalam-VF.ttf", + "NotoSerifMyanmar-Bold.otf", + "NotoSerifMyanmar-Regular.otf", + "NotoSerifNyiakengPuachueHmong-VF.ttf", + "NotoSerifSinhala-VF.ttf", + "NotoSerifTamil-VF.ttf", + "NotoSerifTelugu-VF.ttf", + "NotoSerifThai-Bold.ttf", + "NotoSerifThai-Regular.ttf", + "NotoSerifTibetan-VF.ttf", + "NotoSerifYezidi-VF.ttf", + "Roboto-Regular.ttf", + "RobotoFlex-Regular.ttf", + "RobotoStatic-Regular.ttf", + "SourceSansPro-Bold.ttf", + "SourceSansPro-BoldItalic.ttf", + "SourceSansPro-Italic.ttf", + "SourceSansPro-Regular.ttf", + "SourceSansPro-SemiBold.ttf", + "SourceSansPro-SemiBoldItalic.ttf", + "font_fallback.xml", + "fonts.xml", + ], +} + +android_system_image { + name: "generic_system_image", + + partition_name: "system", + base_dir: "system", + dirs: android_rootdirs, + symlinks: android_symlinks, + file_contexts: ":plat_file_contexts", + linker_config_src: ":system_linker_config_json_file", + fsverity: { + inputs: [ + "etc/boot-image.prof", + "etc/classpaths/*.pb", + "etc/dirty-image-objects", + "etc/preloaded-classes", + "framework/*", + "framework/*/*", // framework/{arch} + "framework/oat/*/*", // framework/oat/{arch} + ], + libs: [":framework-res{.export-package.apk}"], + }, + build_logtags: true, + gen_aconfig_flags_pb: true, + + compile_multilib: "both", + + use_avb: true, + avb_private_key: ":generic_system_sign_key", + avb_algorithm: "SHA256_RSA4096", + avb_hash_algorithm: "sha256", + + deps: [ + "abx", + "aconfigd", + "aflags", + "am", + "android.software.credentials.prebuilt.xml", // generic_system + "android.software.webview.prebuilt.xml", // media_system + "android.software.window_magnification.prebuilt.xml", // handheld_system + "android.system.suspend-service", + "prebuilt_vintf_manifest", + "apexd", + "appops", + "approved-ogki-builds.xml", // base_system + "appwidget", + "atrace", + "audioserver", + "bcc", + "blank_screen", + "blkid", + "bmgr", + "bootanimation", + "bootstat", + "bpfloader", + "bu", + "bugreport", + "bugreportz", + "cameraserver", + "cgroups.json", + "cmd", + "content", + "cppreopts.sh", // generic_system + "credstore", + "debuggerd", + "device_config", + "dirty-image-objects", + "dmctl", + "dmesgd", + "dnsmasq", + "dpm", + "dump.erofs", + "dumpstate", + "dumpsys", + "e2fsck", + "enhanced-confirmation.xml", // base_system + "etc_hosts", + "flags_health_check", + "framework-audio_effects.xml", // for handheld // handheld_system + "framework-sysconfig.xml", + "fs_config_dirs_system", + "fs_config_files_system", + "fsck.erofs", + "fsck.f2fs", // for media_system + "fsck_msdos", + "fsverity-release-cert-der", + "gatekeeperd", + "gpu_counter_producer", + "gpuservice", + "group_system", + "gsi_tool", + "gsid", + "heapprofd", + "hid", + "hiddenapi-package-whitelist.xml", // from runtime_libart + "idc_data", + "idmap2", + "idmap2d", + "ime", + "incident", + "incident-helper-cmd", + "incident_helper", + "incidentd", + "init.environ.rc-soong", + "init.usb.configfs.rc", + "init.usb.rc", + "init.zygote32.rc", + "init.zygote64.rc", + "init.zygote64_32.rc", + "init_first_stage", // for boot partition + "initial-package-stopped-states.xml", 
+ "input", + "installd", + "ip", // base_system + "iptables", + "kcmdlinectrl", + "kernel-lifetimes.xml", // base_system + "keychars_data", + "keylayout_data", + "keystore2", + "ld.mc", + "llkd", // base_system + "lmkd", // base_system + "local_time.default", // handheld_vendo + "locksettings", // base_system + "logcat", // base_system + "logd", // base_system + "logpersist.start", + "lpdump", // base_system + "lshal", // base_system + "make_f2fs", // media_system + "mdnsd", // base_system + "media_profiles_V1_0.dtd", // base_system + "mediacodec.policy", // base_system + "mediaextractor", // base_system + "mediametrics", // base_system + "misctrl", // from base_system + "mke2fs", // base_system + "mkfs.erofs", // base_system + "monkey", // base_system + "mtectrl", // base_system + "ndc", // base_system + "netd", // base_system + "netutils-wrapper-1.0", // full_base + "notice_xml_system", + "odsign", // base_system + "otapreopt_script", // generic_system + "package-shareduid-allowlist.xml", // base_system + "passwd_system", // base_system + "perfetto", // base_system + "ping", // base_system + "ping6", // base_system + "pintool", // base_system + "platform.xml", // base_system + "pm", // base_system + "preinstalled-packages-asl-files.xml", // base_system + "preinstalled-packages-platform-generic-system.xml", // generic_system + "preinstalled-packages-platform-handheld-system.xml", // handheld_system + "preinstalled-packages-platform.xml", // base_system + "preinstalled-packages-strict-signature.xml", // base_system + "preloaded-classes", // ok + "printflags", // base_system + "privapp-permissions-platform.xml", // base_system + "prng_seeder", // base_system + "public.libraries.android.txt", + "recovery-persist", // base_system + "recovery-refresh", // generic_system + "requestsync", // media_system + "resize2fs", // base_system + "rss_hwm_reset", // base_system + "run-as", // base_system + "schedtest", // base_system + "screencap", // base_system + "screenrecord", // handheld_system + "sdcard", // base_system + "secdiscard", // base_system + "sensorservice", // base_system + "service", // base_system + "servicemanager", // base_system + "settings", // base_system + "sfdo", // base_system + "sgdisk", // base_system + "sm", // base_system + "snapshotctl", // base_system + "snapuserd", // base_system + "snapuserd_ramdisk", // ramdisk + "storaged", // base_system + "surfaceflinger", // base_system + "svc", // base_system + "task_profiles.json", // base_system + "tc", // base_system + "telecom", // base_system + "tombstoned", // base_system + "traced", // base_system + "traced_probes", // base_system + "tune2fs", // base_system + "uiautomator", // base_system + "uinput", // base_system + "uncrypt", // base_system + "update_engine", // generic_system + "update_engine_sideload", // recovery + "update_verifier", // generic_system + "usbd", // base_system + "vdc", // base_system + "virtual_camera", // handheld_system // release_package_virtual_camera + "vold", // base_system + "vr", // handheld_system + "watchdogd", // base_system + "wifi.rc", // base_system + "wificond", // base_system + "wm", // base_system + ] + select(release_flag("RELEASE_PLATFORM_VERSION_CODENAME"), { + "REL": [], + default: [ + "android.software.preview_sdk.prebuilt.xml", // media_system + ], + }) + select(soong_config_variable("ANDROID", "release_package_profiling_module"), { + "true": [ + "trace_redactor", // base_system (RELEASE_PACKAGE_PROFILING_MODULE) + ], + default: [], + }) + select(product_variable("debuggable"), { 
+ true: [ + "adevice_fingerprint", + "arping", + "avbctl", + "bootctl", + "dmuserd", + "evemu-record", + "idlcli", + "init-debug.rc", + "iotop", + "iperf3", + "iw", + "layertracegenerator", + "logtagd.rc", + "ot-cli-ftd", + "ot-ctl", + "procrank", + "profcollectctl", + "profcollectd", + "record_binder", + "sanitizer-status", + "servicedispatcher", + "showmap", + "sqlite3", + "ss", + "start_with_lockagent", + "strace", + "su", + "tinycap", + "tinyhostless", + "tinymix", + "tinypcminfo", + "tinyplay", // host + "tracepath", + "tracepath6", + "traceroute6", + "unwind_info", + "unwind_reg_info", + "unwind_symbols", + "update_engine_client", + ], + default: [], + }), + multilib: { + common: { + deps: [ + "BackupRestoreConfirmation", // base_system + "BasicDreams", // handheld_system + "BlockedNumberProvider", // handheld_system + "BluetoothMidiService", // handheld_system + "BookmarkProvider", // handheld_system + "BuiltInPrintService", // handheld_system + "CalendarProvider", // handheld_system + "CallLogBackup", // telephony_system + "CameraExtensionsProxy", // handheld_system + "CaptivePortalLogin", // handheld_system + "CarrierDefaultApp", // telephony_system + "CellBroadcastLegacyApp", // telephony_system + "CertInstaller", // handheld_system + "CompanionDeviceManager", // media_system + "ContactsProvider", // base_system + "CredentialManager", // handheld_system + "DeviceAsWebcam", // handheld_system + "DocumentsUI", // handheld_system + "DownloadProvider", // base_system + "DownloadProviderUi", // handheld_system + "DynamicSystemInstallationService", // base_system + "E2eeContactKeysProvider", // base_system + "EasterEgg", // handheld_system + "ExtShared", // base_system + "ExternalStorageProvider", // handheld_system + "FusedLocation", // handheld_system + "HTMLViewer", // media_system + "InputDevices", // handheld_system + "IntentResolver", // base_system + "KeyChain", // handheld_system + "LiveWallpapersPicker", // generic_system, full_base + "LocalTransport", // base_system + "ManagedProvisioning", // handheld_system + "MediaProviderLegacy", // base_system + "MmsService", // handheld_system + "MtpService", // handheld_system + "MusicFX", // handheld_system + "NetworkStack", // base_system + "ONS", // telephony_system + "PacProcessor", // handheld_system + "PackageInstaller", // base_system + "PartnerBookmarksProvider", // generic_system + "PhotoTable", // full_base + "PrintRecommendationService", // handheld_system + "PrintSpooler", // handheld_system + "ProxyHandler", // handheld_system + "SecureElement", // handheld_system + "SettingsProvider", // base_system + "SharedStorageBackup", // handheld_system + "Shell", // base_system + "SimAppDialog", // handheld_system + "SoundPicker", // not installed by anyone + "StatementService", // media_system + "Stk", // generic_system + "Tag", // generic_system + "TeleService", // handheld_system + "Telecom", // handheld_system + "TelephonyProvider", // handheld_system + "Traceur", // handheld_system + "UserDictionaryProvider", // handheld_system + "VpnDialogs", // handheld_system + "WallpaperBackup", // base_system + "adbd_system_api", // base_system + "android.hidl.base-V1.0-java", // base_system + "android.hidl.manager-V1.0-java", // base_system + "android.test.base", // from runtime_libart + "android.test.mock", // base_system + "android.test.runner", // base_system + "aosp_mainline_modules", // ok + "build_flag_system", // base_system + "charger_res_images", // generic_system + "com.android.apex.cts.shim.v1_prebuilt", // ok + 
"com.android.cellbroadcast", // telephony_system + "com.android.future.usb.accessory", // media_system + "com.android.location.provider", // base_system + "com.android.media.remotedisplay", // media_system + "com.android.media.remotedisplay.xml", // media_system + "com.android.mediadrm.signer", // media_system + "com.android.nfc_extras", // ok + "com.android.nfcservices", // base_system (RELEASE_PACKAGE_NFC_STACK != NfcNci) + "com.android.runtime", // ok + "dex_bootjars", + "ext", // from runtime_libart + "framework-graphics", // base_system + "framework-location", // base_system + "framework-minus-apex-install-dependencies", // base_system + "framework_compatibility_matrix.device.xml", + "generic_system_fonts", // ok + "hwservicemanager_compat_symlink_module", // base_system + "hyph-data", + "ims-common", // base_system + "init_system", // base_system + "javax.obex", // base_system + "llndk.libraries.txt", //ok + "org.apache.http.legacy", // base_system + "perfetto-extras", // system + "sanitizer.libraries.txt", // base_system + "selinux_policy_system_soong", // ok + "services", // base_system + "shell_and_utilities_system", // ok + "system-build.prop", + "system_compatibility_matrix.xml", //base_system + "telephony-common", // libs from TeleService + "voip-common", // base_system + ] + select(soong_config_variable("ANDROID", "release_crashrecovery_module"), { + "true": [ + "com.android.crashrecovery", // base_system (RELEASE_CRASHRECOVERY_MODULE) + ], + default: [], + }) + select(soong_config_variable("ANDROID", "release_package_profiling_module"), { + "true": [ + "com.android.profiling", // base_system (RELEASE_PACKAGE_PROFILING_MODULE) + ], + default: [], + }) + select(release_flag("RELEASE_AVATAR_PICKER_APP"), { + true: [ + "AvatarPicker", // generic_system (RELEASE_AVATAR_PICKER_APP) + ], + default: [], + }), + }, + prefer32: { + deps: [ + "drmserver", // media_system + "mediaserver", // base_system + ], + }, + lib64: { + deps: [ + "android.system.virtualizationcommon-ndk", + "android.system.virtualizationservice-ndk", + "libgsi", + "servicemanager", + ], + }, + both: { + deps: [ + "android.hardware.biometrics.fingerprint@2.1", // generic_system + "android.hardware.radio.config@1.0", // generic_system + "android.hardware.radio.deprecated@1.0", // generic_system + "android.hardware.radio@1.0", // generic_system + "android.hardware.radio@1.1", // generic_system + "android.hardware.radio@1.2", // generic_system + "android.hardware.radio@1.3", // generic_system + "android.hardware.radio@1.4", // generic_system + "android.hardware.secure_element@1.0", // generic_system + "app_process", // base_system + "boringssl_self_test", // base_system + "heapprofd_client", // base_system + "libEGL", // base_system + "libEGL_angle", // base_system + "libETC1", // base_system + "libFFTEm", // base_system + "libGLESv1_CM", // base_system + "libGLESv1_CM_angle", // base_system + "libGLESv2", // base_system + "libGLESv2_angle", // base_system + "libGLESv3", // base_system + "libOpenMAXAL", // base_system + "libOpenSLES", // base_system + "libaaudio", // base_system + "libalarm_jni", // base_system + "libamidi", // base_system + "libandroid", + "libandroid_runtime", + "libandroid_servers", + "libandroidfw", + "libartpalette-system", + "libaudio-resampler", // generic-system + "libaudioeffect_jni", + "libaudiohal", // generic-system + "libaudiopolicyengineconfigurable", // generic-system + "libbinder", + "libbinder_ndk", + "libbinder_rpc_unstable", + "libcamera2ndk", + "libclang_rt.asan", + 
"libcompiler_rt", + "libcutils", // used by many libs + "libdmabufheap", // used by many libs + "libdrm", // used by many libs // generic_system + "libdrmframework", // base_system + "libdrmframework_jni", // base_system + "libfdtrack", // base_system + "libfilterfw", // base_system + "libfilterpack_imageproc", // media_system + "libfwdlockengine", // generic_system + "libgatekeeper", // base_system + "libgui", // base_system + "libhardware", // base_system + "libhardware_legacy", // base_system + "libhidltransport", // generic_system + "libhwbinder", // generic_system + "libinput", // base_system + "libinputflinger", // base_system + "libiprouteutil", // base_system + "libjnigraphics", // base_system + "libjpeg", // base_system + "liblog", // base_system + "liblogwrap", // generic_system + "liblz4", // generic_system + "libmedia", // base_system + "libmedia_jni", // base_system + "libmediandk", // base_system + "libminui", // generic_system + "libmtp", // base_system + "libnetd_client", // base_system + "libnetlink", // base_system + "libnetutils", // base_system + "libneuralnetworks_packageinfo", // base_system + "libnl", // generic_system + "libpdfium", // base_system + "libpolicy-subsystem", // generic_system + "libpower", // base_system + "libpowermanager", // base_system + "libprotobuf-cpp-full", // generic_system + "libradio_metadata", // base_system + "librs_jni", // handheld_system + "librtp_jni", // base_system + "libsensorservice", // base_system + "libsfplugin_ccodec", // base_system + "libskia", // base_system + "libsonic", // base_system + "libsonivox", // base_system + "libsoundpool", // base_system + "libspeexresampler", // base_system + "libsqlite", // base_system + "libstagefright", // base_system + "libstagefright_foundation", // base_system + "libstagefright_omx", // base_system + "libstdc++", // base_system + "libsysutils", // base_system + "libui", // base_system + "libusbhost", // base_system + "libutils", // base_system + "libvendorsupport", // llndk library + "libvintf_jni", // base_system + "libvulkan", // base_system + "libwebviewchromium_loader", // media_system + "libwebviewchromium_plat_support", // media_system + "libwilhelm", // base_system + "linker", // base_system + ] + select(soong_config_variable("ANDROID", "TARGET_DYNAMIC_64_32_DRMSERVER"), { + "true": ["drmserver"], + default: [], + }) + select(soong_config_variable("ANDROID", "TARGET_DYNAMIC_64_32_MEDIASERVER"), { + "true": ["mediaserver"], + default: [], + }), + }, + }, +} + +prebuilt_etc { + name: "prebuilt_vintf_manifest", + src: "manifest.xml", + filename: "manifest.xml", + relative_install_path: "vintf", + no_full_install: true, +} diff --git a/target/product/generic/OWNERS b/target/product/generic/OWNERS new file mode 100644 index 0000000000..6d1446f099 --- /dev/null +++ b/target/product/generic/OWNERS @@ -0,0 +1,6 @@ +# Bug component: 1322713 +inseob@google.com +jeongik@google.com +jiyong@google.com +justinyun@google.com +kiyoungkim@google.com diff --git a/target/product/generic/manifest.xml b/target/product/generic/manifest.xml new file mode 100644 index 0000000000..1df2c0d0cf --- /dev/null +++ b/target/product/generic/manifest.xml @@ -0,0 +1,54 @@ +<!-- + Input: + system/libhidl/vintfdata/manifest.xml +--> +<manifest version="8.0" type="framework"> + <hal format="hidl" max-level="6"> + <name>android.frameworks.displayservice</name> + <transport>hwbinder</transport> + <fqname>@1.0::IDisplayService/default</fqname> + </hal> + <hal format="hidl" max-level="5"> + 
<name>android.frameworks.schedulerservice</name> + <transport>hwbinder</transport> + <fqname>@1.0::ISchedulingPolicyService/default</fqname> + </hal> + <hal format="aidl"> + <name>android.frameworks.sensorservice</name> + <fqname>ISensorManager/default</fqname> + </hal> + <hal format="hidl" max-level="8"> + <name>android.frameworks.sensorservice</name> + <transport>hwbinder</transport> + <fqname>@1.0::ISensorManager/default</fqname> + </hal> + <hal format="hidl" max-level="8"> + <name>android.hidl.memory</name> + <transport arch="32+64">passthrough</transport> + <fqname>@1.0::IMapper/ashmem</fqname> + </hal> + <hal format="hidl" max-level="7"> + <name>android.system.net.netd</name> + <transport>hwbinder</transport> + <fqname>@1.1::INetd/default</fqname> + </hal> + <hal format="hidl" max-level="7"> + <name>android.system.wifi.keystore</name> + <transport>hwbinder</transport> + <fqname>@1.0::IKeystore/default</fqname> + </hal> + <hal format="native"> + <name>netutils-wrapper</name> + <version>1.0</version> + </hal> + <system-sdk> + <version>29</version> + <version>30</version> + <version>31</version> + <version>32</version> + <version>33</version> + <version>34</version> + <version>35</version> + <version>VanillaIceCream</version> + </system-sdk> +</manifest> diff --git a/target/product/generic_system.mk b/target/product/generic_system.mk index 0a09eb11d4..b9a623dcd3 100644 --- a/target/product/generic_system.mk +++ b/target/product/generic_system.mk @@ -152,4 +152,5 @@ _my_paths := \ $(call require-artifacts-in-path, $(_my_paths), $(_my_allowed_list)) # Product config map to toggle between sources and prebuilts of required mainline modules +PRODUCT_RELEASE_CONFIG_MAPS += $(wildcard build/release/gms_mainline/required/release_config_map.textproto) PRODUCT_RELEASE_CONFIG_MAPS += $(wildcard vendor/google_shared/build/release/gms_mainline/required/release_config_map.textproto) diff --git a/target/product/go_defaults.mk b/target/product/go_defaults.mk index c9285307ab..ccc4f365e7 100644 --- a/target/product/go_defaults.mk +++ b/target/product/go_defaults.mk @@ -18,6 +18,7 @@ $(call inherit-product, build/make/target/product/go_defaults_common.mk) # Product config map to toggle between sources and prebuilts of required mainline modules +PRODUCT_RELEASE_CONFIG_MAPS += $(wildcard build/release/gms_mainline_go/required/release_config_map.textproto) PRODUCT_RELEASE_CONFIG_MAPS += $(wildcard vendor/google_shared/build/release/gms_mainline_go/required/release_config_map.textproto) # Add the system properties. diff --git a/target/product/go_defaults_common.mk b/target/product/go_defaults_common.mk index fd4047a65b..0fcf16b753 100644 --- a/target/product/go_defaults_common.mk +++ b/target/product/go_defaults_common.mk @@ -24,11 +24,6 @@ PRODUCT_VENDOR_PROPERTIES += \ # Speed profile services and wifi-service to reduce RAM and storage. PRODUCT_SYSTEM_SERVER_COMPILER_FILTER := speed-profile -# Use a profile based boot image for this device. Note that this is currently a -# generic profile and not Android Go optimized. -PRODUCT_USE_PROFILE_FOR_BOOT_IMAGE := true -PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION := frameworks/base/config/boot-image-profile.txt - # Do not generate libartd. 
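# Setting PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD to false keeps the debug variant of the
# ART runtime (libartd and the other *d debug binaries) out of the image, which saves
# space on storage-constrained Go devices.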
PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD := false diff --git a/target/product/gsi/Android.bp b/target/product/gsi/Android.bp index 45ba14331b..f18f35a328 100644 --- a/target/product/gsi/Android.bp +++ b/target/product/gsi/Android.bp @@ -46,3 +46,18 @@ install_symlink { installed_location: "etc/init/config", symlink_target: "/system/system_ext/etc/init/config", } + +// init.gsi.rc, GSI-specific init script. +prebuilt_etc { + name: "init.gsi.rc", + src: "init.gsi.rc", + system_ext_specific: true, + relative_install_path: "init", +} + +prebuilt_etc { + name: "init.vndk-nodef.rc", + src: "init.vndk-nodef.rc", + system_ext_specific: true, + relative_install_path: "gsi", +} diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk index 36897fef8e..729105961d 100644 --- a/target/product/gsi/Android.mk +++ b/target/product/gsi/Android.mk @@ -138,31 +138,3 @@ LOCAL_REQUIRED_MODULES := \ include $(BUILD_PHONY_PACKAGE) - -##################################################################### -# init.gsi.rc, GSI-specific init script. - -include $(CLEAR_VARS) -LOCAL_MODULE := init.gsi.rc -LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0 -LOCAL_LICENSE_CONDITIONS := notice -LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE -LOCAL_SRC_FILES := $(LOCAL_MODULE) -LOCAL_MODULE_CLASS := ETC -LOCAL_SYSTEM_EXT_MODULE := true -LOCAL_MODULE_RELATIVE_PATH := init - -include $(BUILD_PREBUILT) - - -include $(CLEAR_VARS) -LOCAL_MODULE := init.vndk-nodef.rc -LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0 -LOCAL_LICENSE_CONDITIONS := notice -LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE -LOCAL_SRC_FILES := $(LOCAL_MODULE) -LOCAL_MODULE_CLASS := ETC -LOCAL_SYSTEM_EXT_MODULE := true -LOCAL_MODULE_RELATIVE_PATH := gsi - -include $(BUILD_PREBUILT) diff --git a/teams/Android.bp b/teams/Android.bp index 94585fcb62..96d241bbf7 100644 --- a/teams/Android.bp +++ b/teams/Android.bp @@ -4440,3 +4440,17 @@ team { // go/trendy/manage/engineers/5097003746426880 trendy_team_id: "5097003746426880", } + +team { + name: "trendy_team_desktop_firmware", + + // go/trendy/manage/engineers/5787938454863872 + trendy_team_id: "5787938454863872", +} + +team { + name: "trendy_team_art_cloud", + + // go/trendy/manage/engineers/5121440647577600 + trendy_team_id: "5121440647577600", +} diff --git a/tools/aconfig/aconfig/Android.bp b/tools/aconfig/aconfig/Android.bp index 68521af91f..f4dd10399b 100644 --- a/tools/aconfig/aconfig/Android.bp +++ b/tools/aconfig/aconfig/Android.bp @@ -234,6 +234,7 @@ rust_aconfig_library { name: "libaconfig_test_rust_library", crate_name: "aconfig_test_rust_library", aconfig_declarations: "aconfig.test.flags", + host_supported: true, } rust_test { diff --git a/tools/aconfig/aconfig/src/codegen/cpp.rs b/tools/aconfig/aconfig/src/codegen/cpp.rs index 2c569da8f6..7a9c382bc7 100644 --- a/tools/aconfig/aconfig/src/codegen/cpp.rs +++ b/tools/aconfig/aconfig/src/codegen/cpp.rs @@ -283,39 +283,23 @@ public: virtual ~flag_provider_interface() = default; virtual bool disabled_ro() = 0; - - virtual void disabled_ro(bool val) = 0; - virtual bool disabled_rw() = 0; - - virtual void disabled_rw(bool val) = 0; - virtual bool disabled_rw_exported() = 0; - - virtual void disabled_rw_exported(bool val) = 0; - virtual bool disabled_rw_in_other_namespace() = 0; - - virtual void disabled_rw_in_other_namespace(bool val) = 0; - virtual bool enabled_fixed_ro() = 0; - - virtual void enabled_fixed_ro(bool val) = 0; - virtual bool enabled_fixed_ro_exported() = 0; - - virtual void 
enabled_fixed_ro_exported(bool val) = 0; - virtual bool enabled_ro() = 0; - - virtual void enabled_ro(bool val) = 0; - virtual bool enabled_ro_exported() = 0; - - virtual void enabled_ro_exported(bool val) = 0; - virtual bool enabled_rw() = 0; + virtual void disabled_ro(bool val) = 0; + virtual void disabled_rw(bool val) = 0; + virtual void disabled_rw_exported(bool val) = 0; + virtual void disabled_rw_in_other_namespace(bool val) = 0; + virtual void enabled_fixed_ro(bool val) = 0; + virtual void enabled_fixed_ro_exported(bool val) = 0; + virtual void enabled_ro(bool val) = 0; + virtual void enabled_ro_exported(bool val) = 0; virtual void enabled_rw(bool val) = 0; virtual void reset_flags() {} diff --git a/tools/aconfig/aconfig/src/codegen/java.rs b/tools/aconfig/aconfig/src/codegen/java.rs index 1ac58c1b84..a34166d51d 100644 --- a/tools/aconfig/aconfig/src/codegen/java.rs +++ b/tools/aconfig/aconfig/src/codegen/java.rs @@ -137,6 +137,7 @@ struct FlagElement { pub default_value: bool, pub device_config_namespace: String, pub device_config_flag: String, + pub flag_name: String, pub flag_name_constant_suffix: String, pub flag_offset: u16, pub is_read_write: bool, @@ -156,6 +157,7 @@ fn create_flag_element( default_value: pf.state() == ProtoFlagState::ENABLED, device_config_namespace: pf.namespace().to_string(), device_config_flag, + flag_name: pf.name().to_string(), flag_name_constant_suffix: pf.name().to_ascii_uppercase(), flag_offset: *flag_offsets.get(pf.name()).expect("didnt find package offset :("), is_read_write: pf.permission() == ProtoFlagPermission::READ_WRITE, @@ -507,25 +509,79 @@ mod tests { private static FeatureFlags FEATURE_FLAGS = new FeatureFlagsImpl(); }"#; - let expected_featureflagsmpl_content_0 = r#" + let expected_featureflagsmpl_content = r#" package com.android.aconfig.test; // TODO(b/303773055): Remove the annotation after access issue is resolved. 
import android.compat.annotation.UnsupportedAppUsage; import android.provider.DeviceConfig; import android.provider.DeviceConfig.Properties; - "#; + import android.aconfig.storage.StorageInternalReader; + import java.nio.file.Files; + import java.nio.file.Paths; - let expected_featureflagsmpl_content_1 = r#" /** @hide */ public final class FeatureFlagsImpl implements FeatureFlags { + private static final boolean isReadFromNew = Files.exists(Paths.get("/metadata/aconfig/boot/enable_only_new_storage")); + private static volatile boolean isCached = false; private static volatile boolean aconfig_test_is_cached = false; private static volatile boolean other_namespace_is_cached = false; private static boolean disabledRw = false; private static boolean disabledRwExported = false; private static boolean disabledRwInOtherNamespace = false; private static boolean enabledRw = true; - "#; - let expected_featureflagsmpl_content_2 = r#" + private void init() { + StorageInternalReader reader = null; + try { + reader = new StorageInternalReader("system", "com.android.aconfig.test"); + disabledRw = reader.getBooleanFlagValue(1); + disabledRwExported = reader.getBooleanFlagValue(2); + enabledRw = reader.getBooleanFlagValue(8); + disabledRwInOtherNamespace = reader.getBooleanFlagValue(3); + } catch (Exception e) { + throw new RuntimeException("Cannot read flag in codegen", e); + } + isCached = true; + } + private void load_overrides_aconfig_test() { + try { + Properties properties = DeviceConfig.getProperties("aconfig_test"); + disabledRw = + properties.getBoolean(Flags.FLAG_DISABLED_RW, false); + disabledRwExported = + properties.getBoolean(Flags.FLAG_DISABLED_RW_EXPORTED, false); + enabledRw = + properties.getBoolean(Flags.FLAG_ENABLED_RW, true); + } catch (NullPointerException e) { + throw new RuntimeException( + "Cannot read value from namespace aconfig_test " + + "from DeviceConfig. It could be that the code using flag " + + "executed before SettingsProvider initialization. Please use " + + "fixed read-only flag by adding is_fixed_read_only: true in " + + "flag declaration.", + e + ); + } + aconfig_test_is_cached = true; + } + + private void load_overrides_other_namespace() { + try { + Properties properties = DeviceConfig.getProperties("other_namespace"); + disabledRwInOtherNamespace = + properties.getBoolean(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE, false); + } catch (NullPointerException e) { + throw new RuntimeException( + "Cannot read value from namespace other_namespace " + + "from DeviceConfig. It could be that the code using flag " + + "executed before SettingsProvider initialization. 
Please use " + + "fixed read-only flag by adding is_fixed_read_only: true in " + + "flag declaration.", + e + ); + } + other_namespace_is_cached = true; + } + @Override @com.android.aconfig.annotations.AconfigFlagAccessor @UnsupportedAppUsage @@ -536,8 +592,14 @@ mod tests { @com.android.aconfig.annotations.AconfigFlagAccessor @UnsupportedAppUsage public boolean disabledRw() { - if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); + if (isReadFromNew) { + if (!isCached) { + init(); + } + } else { + if (!aconfig_test_is_cached) { + load_overrides_aconfig_test(); + } } return disabledRw; } @@ -545,8 +607,14 @@ mod tests { @com.android.aconfig.annotations.AconfigFlagAccessor @UnsupportedAppUsage public boolean disabledRwExported() { - if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); + if (isReadFromNew) { + if (!isCached) { + init(); + } + } else { + if (!aconfig_test_is_cached) { + load_overrides_aconfig_test(); + } } return disabledRwExported; } @@ -554,8 +622,14 @@ mod tests { @com.android.aconfig.annotations.AconfigFlagAccessor @UnsupportedAppUsage public boolean disabledRwInOtherNamespace() { - if (!other_namespace_is_cached) { - load_overrides_other_namespace(); + if (isReadFromNew) { + if (!isCached) { + init(); + } + } else { + if (!other_namespace_is_cached) { + load_overrides_other_namespace(); + } } return disabledRwInOtherNamespace; } @@ -587,237 +661,23 @@ mod tests { @com.android.aconfig.annotations.AconfigFlagAccessor @UnsupportedAppUsage public boolean enabledRw() { - if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); - } - return enabledRw; - } - } - "#; - - let expect_featureflagsimpl_content_old = expected_featureflagsmpl_content_0.to_owned() - + expected_featureflagsmpl_content_1 - + r#" - private void load_overrides_aconfig_test() { - try { - Properties properties = DeviceConfig.getProperties("aconfig_test"); - disabledRw = - properties.getBoolean(Flags.FLAG_DISABLED_RW, false); - disabledRwExported = - properties.getBoolean(Flags.FLAG_DISABLED_RW_EXPORTED, false); - enabledRw = - properties.getBoolean(Flags.FLAG_ENABLED_RW, true); - } catch (NullPointerException e) { - throw new RuntimeException( - "Cannot read value from namespace aconfig_test " - + "from DeviceConfig. It could be that the code using flag " - + "executed before SettingsProvider initialization. Please use " - + "fixed read-only flag by adding is_fixed_read_only: true in " - + "flag declaration.", - e - ); - } - aconfig_test_is_cached = true; - } - - private void load_overrides_other_namespace() { - try { - Properties properties = DeviceConfig.getProperties("other_namespace"); - disabledRwInOtherNamespace = - properties.getBoolean(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE, false); - } catch (NullPointerException e) { - throw new RuntimeException( - "Cannot read value from namespace other_namespace " - + "from DeviceConfig. It could be that the code using flag " - + "executed before SettingsProvider initialization. 
Please use " - + "fixed read-only flag by adding is_fixed_read_only: true in " - + "flag declaration.", - e - ); - } - other_namespace_is_cached = true; - }"# - + expected_featureflagsmpl_content_2; - - let mut file_set = HashMap::from([ - ("com/android/aconfig/test/Flags.java", expect_flags_content.as_str()), - ( - "com/android/aconfig/test/FeatureFlagsImpl.java", - &expect_featureflagsimpl_content_old, - ), - ("com/android/aconfig/test/FeatureFlags.java", EXPECTED_FEATUREFLAGS_COMMON_CONTENT), - ( - "com/android/aconfig/test/CustomFeatureFlags.java", - EXPECTED_CUSTOMFEATUREFLAGS_CONTENT, - ), - ( - "com/android/aconfig/test/FakeFeatureFlagsImpl.java", - EXPECTED_FAKEFEATUREFLAGSIMPL_CONTENT, - ), - ]); - - for file in generated_files { - let file_path = file.path.to_str().unwrap(); - assert!(file_set.contains_key(file_path), "Cannot find {}", file_path); - assert_eq!( - None, - crate::test::first_significant_code_diff( - file_set.get(file_path).unwrap(), - &String::from_utf8(file.contents).unwrap() - ), - "File {} content is not correct", - file_path - ); - file_set.remove(file_path); - } - - assert!(file_set.is_empty()); - - let parsed_flags = crate::test::parse_test_flags(); - let mode = CodegenMode::Production; - let modified_parsed_flags = - crate::commands::modify_parsed_flags_based_on_mode(parsed_flags, mode).unwrap(); - let flag_ids = - assign_flag_ids(crate::test::TEST_PACKAGE, modified_parsed_flags.iter()).unwrap(); - let generated_files = generate_java_code( - crate::test::TEST_PACKAGE, - modified_parsed_flags.into_iter(), - mode, - flag_ids, - true, - ) - .unwrap(); - - let expect_featureflagsimpl_content_new = expected_featureflagsmpl_content_0.to_owned() - + r#" - import android.aconfig.storage.StorageInternalReader; - import android.util.Log; - "# - + expected_featureflagsmpl_content_1 - + r#" - StorageInternalReader reader; - boolean readFromNewStorage; - - boolean useNewStorageValueAndDiscardOld = false; - - private final static String TAG = "AconfigJavaCodegen"; - private final static String SUCCESS_LOG = "success: %s value matches"; - private final static String MISMATCH_LOG = "error: %s value mismatch, new storage value is %s, old storage value is %s"; - private final static String ERROR_LOG = "error: failed to read flag value"; - - private void init() { - if (reader != null) return; - if (DeviceConfig.getBoolean("core_experiments_team_internal", "com.android.providers.settings.storage_test_mission_1", false)) { - readFromNewStorage = true; - try { - reader = new StorageInternalReader("system", "com.android.aconfig.test"); - } catch (Exception e) { - reader = null; - } - } - - useNewStorageValueAndDiscardOld = - DeviceConfig.getBoolean("core_experiments_team_internal", "com.android.providers.settings.use_new_storage_value", false); - } - - private void load_overrides_aconfig_test() { - try { - Properties properties = DeviceConfig.getProperties("aconfig_test"); - disabledRw = - properties.getBoolean(Flags.FLAG_DISABLED_RW, false); - disabledRwExported = - properties.getBoolean(Flags.FLAG_DISABLED_RW_EXPORTED, false); - enabledRw = - properties.getBoolean(Flags.FLAG_ENABLED_RW, true); - } catch (NullPointerException e) { - throw new RuntimeException( - "Cannot read value from namespace aconfig_test " - + "from DeviceConfig. It could be that the code using flag " - + "executed before SettingsProvider initialization. 
Please use " - + "fixed read-only flag by adding is_fixed_read_only: true in " - + "flag declaration.", - e - ); - } - aconfig_test_is_cached = true; - init(); - if (readFromNewStorage && reader != null) { - boolean val; - try { - val = reader.getBooleanFlagValue(1); - if (val != disabledRw) { - Log.w(TAG, String.format(MISMATCH_LOG, "disabledRw", val, disabledRw)); - } - - if (useNewStorageValueAndDiscardOld) { - disabledRw = val; - } - - val = reader.getBooleanFlagValue(2); - if (val != disabledRwExported) { - Log.w(TAG, String.format(MISMATCH_LOG, "disabledRwExported", val, disabledRwExported)); + if (isReadFromNew) { + if (!isCached) { + init(); } - - if (useNewStorageValueAndDiscardOld) { - disabledRwExported = val; - } - - val = reader.getBooleanFlagValue(8); - if (val != enabledRw) { - Log.w(TAG, String.format(MISMATCH_LOG, "enabledRw", val, enabledRw)); - } - - if (useNewStorageValueAndDiscardOld) { - enabledRw = val; + } else { + if (!aconfig_test_is_cached) { + load_overrides_aconfig_test(); } - - } catch (Exception e) { - Log.e(TAG, ERROR_LOG, e); } + return enabledRw; } } - - private void load_overrides_other_namespace() { - try { - Properties properties = DeviceConfig.getProperties("other_namespace"); - disabledRwInOtherNamespace = - properties.getBoolean(Flags.FLAG_DISABLED_RW_IN_OTHER_NAMESPACE, false); - } catch (NullPointerException e) { - throw new RuntimeException( - "Cannot read value from namespace other_namespace " - + "from DeviceConfig. It could be that the code using flag " - + "executed before SettingsProvider initialization. Please use " - + "fixed read-only flag by adding is_fixed_read_only: true in " - + "flag declaration.", - e - ); - } - other_namespace_is_cached = true; - init(); - if (readFromNewStorage && reader != null) { - boolean val; - try { - val = reader.getBooleanFlagValue(3); - if (val != disabledRwInOtherNamespace) { - Log.w(TAG, String.format(MISMATCH_LOG, "disabledRwInOtherNamespace", val, disabledRwInOtherNamespace)); - } - - if (useNewStorageValueAndDiscardOld) { - disabledRwInOtherNamespace = val; - } - - } catch (Exception e) { - Log.e(TAG, ERROR_LOG, e); - } - } - }"# + expected_featureflagsmpl_content_2; + "#; let mut file_set = HashMap::from([ ("com/android/aconfig/test/Flags.java", expect_flags_content.as_str()), - ( - "com/android/aconfig/test/FeatureFlagsImpl.java", - &expect_featureflagsimpl_content_new, - ), + ("com/android/aconfig/test/FeatureFlagsImpl.java", expected_featureflagsmpl_content), ("com/android/aconfig/test/FeatureFlags.java", EXPECTED_FEATUREFLAGS_COMMON_CONTENT), ( "com/android/aconfig/test/CustomFeatureFlags.java", @@ -908,7 +768,6 @@ mod tests { private static boolean enabledFixedRoExported = false; private static boolean enabledRoExported = false; - private void load_overrides_aconfig_test() { try { Properties properties = DeviceConfig.getProperties("aconfig_test"); @@ -933,21 +792,21 @@ mod tests { @Override public boolean disabledRwExported() { if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); + load_overrides_aconfig_test(); } return disabledRwExported; } @Override public boolean enabledFixedRoExported() { if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); + load_overrides_aconfig_test(); } return enabledFixedRoExported; } @Override public boolean enabledRoExported() { if (!aconfig_test_is_cached) { - load_overrides_aconfig_test(); + load_overrides_aconfig_test(); } return enabledRoExported; } diff --git a/tools/aconfig/aconfig/src/commands.rs b/tools/aconfig/aconfig/src/commands.rs 
index 797a893ff1..496876e08f 100644 --- a/tools/aconfig/aconfig/src/commands.rs +++ b/tools/aconfig/aconfig/src/commands.rs @@ -280,10 +280,11 @@ pub fn create_storage( caches: Vec<Input>, container: &str, file: &StorageFileType, + version: u32, ) -> Result<Vec<u8>> { let parsed_flags_vec: Vec<ProtoParsedFlags> = caches.into_iter().map(|mut input| input.try_parse_flags()).collect::<Result<Vec<_>>>()?; - generate_storage_file(container, parsed_flags_vec.iter(), file) + generate_storage_file(container, parsed_flags_vec.iter(), file, version) } pub fn create_device_config_defaults(mut input: Input) -> Result<Vec<u8>> { diff --git a/tools/aconfig/aconfig/src/main.rs b/tools/aconfig/aconfig/src/main.rs index 1fb64f9c56..edb4fd373b 100644 --- a/tools/aconfig/aconfig/src/main.rs +++ b/tools/aconfig/aconfig/src/main.rs @@ -16,6 +16,8 @@ //! `aconfig` is a build time tool to manage build time configurations, such as feature flags. +use aconfig_storage_file::DEFAULT_FILE_VERSION; +use aconfig_storage_file::MAX_SUPPORTED_FILE_VERSION; use anyhow::{anyhow, bail, Context, Result}; use clap::{builder::ArgAction, builder::EnumValueParser, Arg, ArgMatches, Command}; use core::any::Any; @@ -159,7 +161,13 @@ fn cli() -> Command { .value_parser(|s: &str| StorageFileType::try_from(s)), ) .arg(Arg::new("cache").long("cache").action(ArgAction::Append).required(true)) - .arg(Arg::new("out").long("out").required(true)), + .arg(Arg::new("out").long("out").required(true)) + .arg( + Arg::new("version") + .long("version") + .required(false) + .value_parser(|s: &str| s.parse::<u32>()), + ), ) } @@ -309,12 +317,18 @@ fn main() -> Result<()> { write_output_to_file_or_stdout(path, &output)?; } Some(("create-storage", sub_matches)) => { + let version = + get_optional_arg::<u32>(sub_matches, "version").unwrap_or(&DEFAULT_FILE_VERSION); + if *version > MAX_SUPPORTED_FILE_VERSION { + bail!("Invalid version selected ({})", version); + } let file = get_required_arg::<StorageFileType>(sub_matches, "file") .context("Invalid storage file selection")?; let cache = open_zero_or_more_files(sub_matches, "cache")?; let container = get_required_arg::<String>(sub_matches, "container")?; let path = get_required_arg::<String>(sub_matches, "out")?; - let output = commands::create_storage(cache, container, file) + + let output = commands::create_storage(cache, container, file, *version) .context("failed to create storage files")?; write_output_to_file_or_stdout(path, &output)?; } diff --git a/tools/aconfig/aconfig/src/storage/flag_info.rs b/tools/aconfig/aconfig/src/storage/flag_info.rs new file mode 100644 index 0000000000..25326094da --- /dev/null +++ b/tools/aconfig/aconfig/src/storage/flag_info.rs @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2024 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use crate::commands::assign_flag_ids; +use crate::storage::FlagPackage; +use aconfig_protos::ProtoFlagPermission; +use aconfig_storage_file::{FlagInfoHeader, FlagInfoList, FlagInfoNode, StorageFileType}; +use anyhow::{anyhow, Result}; + +fn new_header(container: &str, num_flags: u32, version: u32) -> FlagInfoHeader { + FlagInfoHeader { + version, + container: String::from(container), + file_type: StorageFileType::FlagInfo as u8, + file_size: 0, + num_flags, + boolean_flag_offset: 0, + } +} + +pub fn create_flag_info( + container: &str, + packages: &[FlagPackage], + version: u32, +) -> Result<FlagInfoList> { + // create list + let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum(); + + let mut is_flag_rw = vec![false; num_flags as usize]; + for pkg in packages.iter() { + let start_index = pkg.boolean_start_index as usize; + let flag_ids = assign_flag_ids(pkg.package_name, pkg.boolean_flags.iter().copied())?; + for pf in pkg.boolean_flags.iter() { + let fid = flag_ids + .get(pf.name()) + .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?; + is_flag_rw[start_index + (*fid as usize)] = + pf.permission() == ProtoFlagPermission::READ_WRITE; + } + } + + let mut list = FlagInfoList { + header: new_header(container, num_flags, version), + nodes: is_flag_rw.iter().map(|&rw| FlagInfoNode::create(rw)).collect(), + }; + + // initialize all header fields + list.header.boolean_flag_offset = list.header.into_bytes().len() as u32; + let bytes_per_node = FlagInfoNode::create(false).into_bytes().len() as u32; + list.header.file_size = list.header.boolean_flag_offset + num_flags * bytes_per_node; + + Ok(list) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::storage::{group_flags_by_package, tests::parse_all_test_flags}; + use aconfig_storage_file::DEFAULT_FILE_VERSION; + + pub fn create_test_flag_info_list_from_source() -> Result<FlagInfoList> { + let caches = parse_all_test_flags(); + let packages = group_flags_by_package(caches.iter()); + create_flag_info("mockup", &packages, DEFAULT_FILE_VERSION) + } + + #[test] + // this test point locks down the flag info creation and each field + fn test_list_contents() { + let flag_info_list = create_test_flag_info_list_from_source(); + assert!(flag_info_list.is_ok()); + let expected_flag_info_list = + aconfig_storage_file::test_utils::create_test_flag_info_list(); + assert_eq!(flag_info_list.unwrap(), expected_flag_info_list); + } +} diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs index a9712119bf..6046d7ef18 100644 --- a/tools/aconfig/aconfig/src/storage/flag_table.rs +++ b/tools/aconfig/aconfig/src/storage/flag_table.rs @@ -19,13 +19,12 @@ use crate::storage::FlagPackage; use aconfig_protos::ProtoFlagPermission; use aconfig_storage_file::{ get_table_size, FlagTable, FlagTableHeader, FlagTableNode, StorageFileType, StoredFlagType, - FILE_VERSION, }; use anyhow::{anyhow, Result}; -fn new_header(container: &str, num_flags: u32) -> FlagTableHeader { +fn new_header(container: &str, num_flags: u32, version: u32) -> FlagTableHeader { FlagTableHeader { - version: FILE_VERSION, + version, container: String::from(container), file_type: StorageFileType::FlagMap as u8, file_size: 0, @@ -86,12 +85,16 @@ impl FlagTableNodeWrapper { } } -pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<FlagTable> { +pub fn create_flag_table( + container: &str, + packages: &[FlagPackage], + version: u32, +) -> Result<FlagTable> { // create table let 
num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum(); let num_buckets = get_table_size(num_flags)?; - let mut header = new_header(container, num_flags); + let mut header = new_header(container, num_flags, version); let mut buckets = vec![None; num_buckets as usize]; let mut node_wrappers = packages .iter() @@ -138,13 +141,15 @@ pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<Fl #[cfg(test)] mod tests { + use aconfig_storage_file::DEFAULT_FILE_VERSION; + use super::*; use crate::storage::{group_flags_by_package, tests::parse_all_test_flags}; fn create_test_flag_table_from_source() -> Result<FlagTable> { let caches = parse_all_test_flags(); let packages = group_flags_by_package(caches.iter()); - create_flag_table("mockup", &packages) + create_flag_table("mockup", &packages, DEFAULT_FILE_VERSION) } #[test] diff --git a/tools/aconfig/aconfig/src/storage/flag_value.rs b/tools/aconfig/aconfig/src/storage/flag_value.rs index c15ba54112..6a655b9a92 100644 --- a/tools/aconfig/aconfig/src/storage/flag_value.rs +++ b/tools/aconfig/aconfig/src/storage/flag_value.rs @@ -17,12 +17,12 @@ use crate::commands::assign_flag_ids; use crate::storage::FlagPackage; use aconfig_protos::ProtoFlagState; -use aconfig_storage_file::{FlagValueHeader, FlagValueList, StorageFileType, FILE_VERSION}; +use aconfig_storage_file::{FlagValueHeader, FlagValueList, StorageFileType}; use anyhow::{anyhow, Result}; -fn new_header(container: &str, num_flags: u32) -> FlagValueHeader { +fn new_header(container: &str, num_flags: u32, version: u32) -> FlagValueHeader { FlagValueHeader { - version: FILE_VERSION, + version, container: String::from(container), file_type: StorageFileType::FlagVal as u8, file_size: 0, @@ -31,12 +31,16 @@ fn new_header(container: &str, num_flags: u32) -> FlagValueHeader { } } -pub fn create_flag_value(container: &str, packages: &[FlagPackage]) -> Result<FlagValueList> { +pub fn create_flag_value( + container: &str, + packages: &[FlagPackage], + version: u32, +) -> Result<FlagValueList> { // create list let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum(); let mut list = FlagValueList { - header: new_header(container, num_flags), + header: new_header(container, num_flags, version), booleans: vec![false; num_flags as usize], }; @@ -61,13 +65,15 @@ pub fn create_flag_value(container: &str, packages: &[FlagPackage]) -> Result<Fl #[cfg(test)] mod tests { + use aconfig_storage_file::DEFAULT_FILE_VERSION; + use super::*; use crate::storage::{group_flags_by_package, tests::parse_all_test_flags}; pub fn create_test_flag_value_list_from_source() -> Result<FlagValueList> { let caches = parse_all_test_flags(); let packages = group_flags_by_package(caches.iter()); - create_flag_value("mockup", &packages) + create_flag_value("mockup", &packages, DEFAULT_FILE_VERSION) } #[test] diff --git a/tools/aconfig/aconfig/src/storage/mod.rs b/tools/aconfig/aconfig/src/storage/mod.rs index 73339f24b3..9e5dad5955 100644 --- a/tools/aconfig/aconfig/src/storage/mod.rs +++ b/tools/aconfig/aconfig/src/storage/mod.rs @@ -14,15 +14,16 @@ * limitations under the License. 
*/ +pub mod flag_info; pub mod flag_table; pub mod flag_value; pub mod package_table; -use anyhow::{anyhow, Result}; +use anyhow::Result; use std::collections::{HashMap, HashSet}; use crate::storage::{ - flag_table::create_flag_table, flag_value::create_flag_value, + flag_info::create_flag_info, flag_table::create_flag_table, flag_value::create_flag_value, package_table::create_package_table, }; use aconfig_protos::{ProtoParsedFlag, ProtoParsedFlags}; @@ -87,6 +88,7 @@ pub fn generate_storage_file<'a, I>( container: &str, parsed_flags_vec_iter: I, file: &StorageFileType, + version: u32, ) -> Result<Vec<u8>> where I: Iterator<Item = &'a ProtoParsedFlags>, @@ -95,18 +97,21 @@ where match file { StorageFileType::PackageMap => { - let package_table = create_package_table(container, &packages)?; + let package_table = create_package_table(container, &packages, version)?; Ok(package_table.into_bytes()) } StorageFileType::FlagMap => { - let flag_table = create_flag_table(container, &packages)?; + let flag_table = create_flag_table(container, &packages, version)?; Ok(flag_table.into_bytes()) } StorageFileType::FlagVal => { - let flag_value = create_flag_value(container, &packages)?; + let flag_value = create_flag_value(container, &packages, version)?; Ok(flag_value.into_bytes()) } - _ => Err(anyhow!("aconfig does not support the creation of this storage file type")), + StorageFileType::FlagInfo => { + let flag_info = create_flag_info(container, &packages, version)?; + Ok(flag_info.into_bytes()) + } } } diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs index c53602f9cb..56559f8daa 100644 --- a/tools/aconfig/aconfig/src/storage/package_table.rs +++ b/tools/aconfig/aconfig/src/storage/package_table.rs @@ -18,14 +18,13 @@ use anyhow::Result; use aconfig_storage_file::{ get_table_size, PackageTable, PackageTableHeader, PackageTableNode, StorageFileType, - FILE_VERSION, }; use crate::storage::FlagPackage; -fn new_header(container: &str, num_packages: u32) -> PackageTableHeader { +fn new_header(container: &str, num_packages: u32, version: u32) -> PackageTableHeader { PackageTableHeader { - version: FILE_VERSION, + version, container: String::from(container), file_type: StorageFileType::PackageMap as u8, file_size: 0, @@ -56,20 +55,26 @@ impl PackageTableNodeWrapper { } } -pub fn create_package_table(container: &str, packages: &[FlagPackage]) -> Result<PackageTable> { +pub fn create_package_table( + container: &str, + packages: &[FlagPackage], + version: u32, +) -> Result<PackageTable> { // create table let num_packages = packages.len() as u32; let num_buckets = get_table_size(num_packages)?; - let mut header = new_header(container, num_packages); + let mut header = new_header(container, num_packages, version); let mut buckets = vec![None; num_buckets as usize]; - let mut node_wrappers: Vec<_> = - packages.iter().map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets)).collect(); + let mut node_wrappers: Vec<_> = packages + .iter() + .map(|pkg: &FlagPackage<'_>| PackageTableNodeWrapper::new(pkg, num_buckets)) + .collect(); // initialize all header fields header.bucket_offset = header.into_bytes().len() as u32; header.node_offset = header.bucket_offset + num_buckets * 4; header.file_size = header.node_offset - + node_wrappers.iter().map(|x| x.node.into_bytes().len()).sum::<usize>() as u32; + + node_wrappers.iter().map(|x| x.node.into_bytes(version).len()).sum::<usize>() as u32; // sort node_wrappers by bucket index for efficiency 
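// (The per-node byte size used below comes from PackageTableNode::into_bytes(version),
// which is version-aware because newer storage file versions may serialize additional
// per-package fields; the bucket offsets therefore have to be computed with the same
// version that was used for file_size above.)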
node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index)); @@ -87,7 +92,7 @@ pub fn create_package_table(container: &str, packages: &[FlagPackage]) -> Result if buckets[node_bucket_idx as usize].is_none() { buckets[node_bucket_idx as usize] = Some(offset); } - offset += node_wrappers[i].node.into_bytes().len() as u32; + offset += node_wrappers[i].node.into_bytes(version).len() as u32; if let Some(index) = next_node_bucket_idx { if index == node_bucket_idx { @@ -106,13 +111,15 @@ pub fn create_package_table(container: &str, packages: &[FlagPackage]) -> Result #[cfg(test)] mod tests { + use aconfig_storage_file::DEFAULT_FILE_VERSION; + use super::*; use crate::storage::{group_flags_by_package, tests::parse_all_test_flags}; pub fn create_test_package_table_from_source() -> Result<PackageTable> { let caches = parse_all_test_flags(); let packages = group_flags_by_package(caches.iter()); - create_package_table("mockup", &packages) + create_package_table("mockup", &packages, DEFAULT_FILE_VERSION) } #[test] diff --git a/tools/aconfig/aconfig/templates/FeatureFlagsImpl.java.template b/tools/aconfig/aconfig/templates/FeatureFlagsImpl.java.template index bc01aa4bab..d1cf191e29 100644 --- a/tools/aconfig/aconfig/templates/FeatureFlagsImpl.java.template +++ b/tools/aconfig/aconfig/templates/FeatureFlagsImpl.java.template @@ -9,56 +9,50 @@ import android.compat.annotation.UnsupportedAppUsage; import android.provider.DeviceConfig; import android.provider.DeviceConfig.Properties; - {{ -if not library_exported }} -{{ -if allow_instrumentation }} import android.aconfig.storage.StorageInternalReader; -import android.util.Log; -{{ -endif }} +import java.nio.file.Files; +import java.nio.file.Paths; {{ -endif }} {{ -endif }} /** @hide */ public final class FeatureFlagsImpl implements FeatureFlags \{ {{ -if runtime_lookup_required }} +{{ -if not library_exported }} + private static final boolean isReadFromNew = Files.exists(Paths.get("/metadata/aconfig/boot/enable_only_new_storage")); + private static volatile boolean isCached = false; +{{ -endif }} {{ -for namespace_with_flags in namespace_flags }} private static volatile boolean {namespace_with_flags.namespace}_is_cached = false; {{ -endfor- }} {{ for flag in flag_elements }} -{{- if flag.is_read_write }} +{{ -if flag.is_read_write }} private static boolean {flag.method_name} = {flag.default_value}; {{ -endif }} {{ -endfor }} -{{ -if not library_exported }} -{{ -if allow_instrumentation }} - StorageInternalReader reader; - boolean readFromNewStorage; - - boolean useNewStorageValueAndDiscardOld = false; - - private final static String TAG = "AconfigJavaCodegen"; - private final static String SUCCESS_LOG = "success: %s value matches"; - private final static String MISMATCH_LOG = "error: %s value mismatch, new storage value is %s, old storage value is %s"; - private final static String ERROR_LOG = "error: failed to read flag value"; +{{ if not library_exported }} private void init() \{ - if (reader != null) return; - if (DeviceConfig.getBoolean("core_experiments_team_internal", "com.android.providers.settings.storage_test_mission_1", false)) \{ - readFromNewStorage = true; - try \{ - reader = new StorageInternalReader("{container}", "{package_name}"); - } catch (Exception e) \{ - reader = null; - } + StorageInternalReader reader = null; + try \{ + reader = new StorageInternalReader("{container}", "{package_name}"); +{{ for namespace_with_flags in namespace_flags }} +{{ -for flag in namespace_with_flags.flags }} +{{ if flag.is_read_write }} + 
{flag.method_name} = reader.getBooleanFlagValue({flag.flag_offset}); +{{ endif }} +{{ -endfor }} +{{ -endfor }} + } catch (Exception e) \{ + throw new RuntimeException("Cannot read flag in codegen", e); } - - useNewStorageValueAndDiscardOld = - DeviceConfig.getBoolean("core_experiments_team_internal", "com.android.providers.settings.use_new_storage_value", false); + isCached = true; } +{{ endif }} + -{{ -endif }} -{{ -endif }} {{ for namespace_with_flags in namespace_flags }} private void load_overrides_{namespace_with_flags.namespace}() \{ try \{ @@ -80,34 +74,9 @@ public final class FeatureFlagsImpl implements FeatureFlags \{ ); } {namespace_with_flags.namespace}_is_cached = true; -{{ -if not library_exported }} -{{ -if allow_instrumentation }} - init(); - if (readFromNewStorage && reader != null) \{ - boolean val; - try \{ -{{ -for flag in namespace_with_flags.flags }} -{{ -if flag.is_read_write }} - - val = reader.getBooleanFlagValue({flag.flag_offset}); - if (val != {flag.method_name}) \{ - Log.w(TAG, String.format(MISMATCH_LOG, "{flag.method_name}", val, {flag.method_name})); - } - - if (useNewStorageValueAndDiscardOld) \{ - {flag.method_name} = val; - } - -{{ -endif }} -{{ -endfor }} - } catch (Exception e) \{ - Log.e(TAG, ERROR_LOG, e); - } - } -{{ -endif }} -{{ -endif }} } {{ endfor- }} + {{ -endif }}{#- end of runtime_lookup_required #} {{ -for flag in flag_elements }} @Override @@ -116,19 +85,31 @@ public final class FeatureFlagsImpl implements FeatureFlags \{ @UnsupportedAppUsage {{ -endif }} public boolean {flag.method_name}() \{ +{{ -if not library_exported }} {{ -if flag.is_read_write }} - if (!{flag.device_config_namespace}_is_cached) \{ - load_overrides_{flag.device_config_namespace}(); + if (isReadFromNew) \{ + if (!isCached) \{ + init(); + } + } else \{ + if (!{flag.device_config_namespace}_is_cached) \{ + load_overrides_{flag.device_config_namespace}(); + } } return {flag.method_name}; {{ -else }} return {flag.default_value}; {{ -endif }} +{{ else }} + if (!{flag.device_config_namespace}_is_cached) \{ + load_overrides_{flag.device_config_namespace}(); + } + return {flag.method_name}; +{{ -endif }} } {{ endfor }} } -{{ else }} -{#- Generate only stub if in test mode #} +{{ else }} {#- Generate only stub if in test mode #} /** @hide */ public final class FeatureFlagsImpl implements FeatureFlags \{ {{ for flag in flag_elements }} diff --git a/tools/aconfig/aconfig/templates/cpp_exported_header.template b/tools/aconfig/aconfig/templates/cpp_exported_header.template index 0f7853e405..4643c9775c 100644 --- a/tools/aconfig/aconfig/templates/cpp_exported_header.template +++ b/tools/aconfig/aconfig/templates/cpp_exported_header.template @@ -27,12 +27,13 @@ public: {{ -for item in class_elements}} virtual bool {item.flag_name}() = 0; + {{ -endfor }} + {{ -if is_test_mode }} + {{ -for item in class_elements}} virtual void {item.flag_name}(bool val) = 0; - {{ -endif }} {{ -endfor }} - {{ -if is_test_mode }} virtual void reset_flags() \{} {{ -endif }} }; diff --git a/tools/aconfig/aconfig/templates/cpp_source_file.template b/tools/aconfig/aconfig/templates/cpp_source_file.template index 623034a87b..852b905f32 100644 --- a/tools/aconfig/aconfig/templates/cpp_source_file.template +++ b/tools/aconfig/aconfig/templates/cpp_source_file.template @@ -2,7 +2,7 @@ {{ if allow_instrumentation }} {{ if readwrite- }} -#include <sys/stat.h> +#include <unistd.h> #include "aconfig_storage/aconfig_storage_read_api.hpp" #include <android/log.h> #define LOG_TAG "aconfig_cpp_codegen" @@ -78,8 +78,7 @@ 
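The control flow produced by the updated FeatureFlagsImpl template can be summarized with the following hedged Python sketch; read_all_flags_from_new_storage stands in for the per-flag StorageInternalReader reads and load_namespace_overrides for the legacy DeviceConfig path, both hypothetical names.

import os

MARKER = "/metadata/aconfig/boot/enable_only_new_storage"
read_from_new = os.path.exists(MARKER)   # decided once, like the static initializer
cached = False
flag_values = {}                          # flag name -> cached boolean

def read_all_flags_from_new_storage() -> dict:
    # stand-in for StorageInternalReader.getBooleanFlagValue(offset) per flag
    return {}

def get_flag(name: str, default: bool, load_namespace_overrides) -> bool:
    # New storage is read once and cached for the whole package; otherwise the
    # per-namespace override load runs as before.
    global cached
    if read_from_new:
        if not cached:
            flag_values.update(read_all_flags_from_new_storage())
            cached = True
    else:
        load_namespace_overrides()
    return flag_values.get(name, default)
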
namespace {cpp_namespace} \{ , flag_value_file_(nullptr) , read_from_new_storage_(false) \{ - struct stat buffer; - if (stat("/metadata/aconfig/boot/enable_only_new_storage", &buffer) == 0) \{ + if (access("/metadata/aconfig/boot/enable_only_new_storage", F_OK) == 0) \{ read_from_new_storage_ = true; } diff --git a/tools/aconfig/aconfig_device_paths/Android.bp b/tools/aconfig/aconfig_device_paths/Android.bp index 932dfbfc28..bdf96ed896 100644 --- a/tools/aconfig/aconfig_device_paths/Android.bp +++ b/tools/aconfig/aconfig_device_paths/Android.bp @@ -56,3 +56,20 @@ java_library { "//apex_available:platform", ], } + +genrule { + name: "libaconfig_java_host_device_paths_src", + srcs: ["src/HostDeviceProtosTemplate.java"], + out: ["HostDeviceProtos.java"], + tool_files: [ + "partition_aconfig_flags_paths.txt", + "mainline_aconfig_flags_paths.txt", + ], + cmd: "sed -e '/TEMPLATE/{r$(location partition_aconfig_flags_paths.txt)' -e 'd}' $(in) > $(out).tmp && " + + "sed -e '/MAINLINE_T/{r$(location mainline_aconfig_flags_paths.txt)' -e 'd}' $(out).tmp > $(out)", +} + +java_library_host { + name: "aconfig_host_device_paths_java", + srcs: [":libaconfig_java_host_device_paths_src"], +} diff --git a/tools/aconfig/aconfig_device_paths/mainline_aconfig_flags_paths.txt b/tools/aconfig/aconfig_device_paths/mainline_aconfig_flags_paths.txt new file mode 100644 index 0000000000..af73a842b9 --- /dev/null +++ b/tools/aconfig/aconfig_device_paths/mainline_aconfig_flags_paths.txt @@ -0,0 +1,20 @@ +"/apex/com.android.adservices/etc/aconfig_flags.pb", +"/apex/com.android.appsearch/etc/aconfig_flags.pb", +"/apex/com.android.art/etc/aconfig_flags.pb", +"/apex/com.android.btservices/etc/aconfig_flags.pb", +"/apex/com.android.cellbroadcast/etc/aconfig_flags.pb", +"/apex/com.android.configinfrastructure/etc/aconfig_flags.pb", +"/apex/com.android.conscrypt/etc/aconfig_flags.pb", +"/apex/com.android.devicelock/etc/aconfig_flags.pb", +"/apex/com.android.healthfitness/etc/aconfig_flags.pb", +"/apex/com.android.ipsec/etc/aconfig_flags.pb", +"/apex/com.android.media/etc/aconfig_flags.pb", +"/apex/com.android.mediaprovider/etc/aconfig_flags.pb", +"/apex/com.android.ondevicepersonalization/etc/aconfig_flags.pb", +"/apex/com.android.os.statsd/etc/aconfig_flags.pb", +"/apex/com.android.permission/etc/aconfig_flags.pb", +"/apex/com.android.profiling/etc/aconfig_flags.pb", +"/apex/com.android.tethering/etc/aconfig_flags.pb", +"/apex/com.android.uwb/etc/aconfig_flags.pb", +"/apex/com.android.virt/etc/aconfig_flags.pb", +"/apex/com.android.wifi/etc/aconfig_flags.pb", diff --git a/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt index 140cd21ac8..e997e3ddfa 100644 --- a/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt +++ b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt @@ -1,4 +1,3 @@ "/system/etc/aconfig_flags.pb", -"/system_ext/etc/aconfig_flags.pb", "/product/etc/aconfig_flags.pb", "/vendor/etc/aconfig_flags.pb", diff --git a/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java b/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java new file mode 100644 index 0000000000..e7d0a76a8a --- /dev/null +++ b/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2024 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package android.aconfig; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * A host lib that can read all aconfig proto file paths on a given device. + * This lib is only available on device with root access (userdebug/eng). + */ +public class HostDeviceProtos { + /** + * An interface that executes ADB command and return the result. + */ + public static interface AdbCommandExecutor { + /** Executes the ADB command. */ + String executeAdbCommand(String command); + } + + static final String[] PATHS = { + TEMPLATE + }; + + static final String[] MAINLINE_PATHS = { + MAINLINE_T + }; + + private static final String APEX_DIR = "/apex"; + private static final String RECURSIVELY_LIST_APEX_DIR_COMMAND = + "shell su 0 find /apex | grep aconfig_flags"; + private static final String APEX_ACONFIG_PATH_SUFFIX = "/etc/aconfig_flags.pb"; + + + /** + * Returns the list of all on-device aconfig proto paths from host side. + */ + public static List<String> parsedFlagsProtoPaths(AdbCommandExecutor adbCommandExecutor) { + ArrayList<String> paths = new ArrayList(Arrays.asList(PATHS)); + + String adbCommandOutput = adbCommandExecutor.executeAdbCommand( + RECURSIVELY_LIST_APEX_DIR_COMMAND); + + if (adbCommandOutput == null || adbCommandOutput.isEmpty()) { + paths.addAll(Arrays.asList(MAINLINE_PATHS)); + return paths; + } + + Set<String> allFiles = new HashSet<>(Arrays.asList(adbCommandOutput.split("\n"))); + + Set<String> subdirs = allFiles.stream().map(file -> { + String[] filePaths = file.split("/"); + // The first element is "", the second element is "apex". + return filePaths.length > 2 ? filePaths[2] : ""; + }).collect(Collectors.toSet()); + + for (String prefix : subdirs) { + // For each mainline modules, there are two directories, one <modulepackage>/, + // and one <modulepackage>@<versioncode>/. Just read the former. 
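The path discovery done by parsedFlagsProtoPaths, including the skip of the versioned <modulepackage>@<versioncode> directories handled just below, roughly follows this Python sketch (names are illustrative):

APEX_ACONFIG_SUFFIX = "/etc/aconfig_flags.pb"

def apex_proto_paths(find_output: str) -> list:
    # find_output is the text of: adb shell su 0 find /apex | grep aconfig_flags
    files = set(find_output.splitlines())
    modules = set()
    for f in files:
        parts = f.split("/")          # "", "apex", "<module>", ...
        if len(parts) > 2:
            modules.add(parts[2])
    paths = []
    for module in sorted(modules):
        if "@" in module:             # skip the versioned duplicate directory
            continue
        candidate = "/apex/" + module + APEX_ACONFIG_SUFFIX
        if candidate in files:
            paths.append(candidate)
    return paths
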
+ if (prefix.contains("@")) { + continue; + } + + String protoPath = APEX_DIR + "/" + prefix + APEX_ACONFIG_PATH_SUFFIX; + if (allFiles.contains(protoPath)) { + paths.add(protoPath); + } + } + return paths; + } +} diff --git a/tools/aconfig/aconfig_device_paths/src/lib.rs b/tools/aconfig/aconfig_device_paths/src/lib.rs index 9ab9cea267..8871b4f8ac 100644 --- a/tools/aconfig/aconfig_device_paths/src/lib.rs +++ b/tools/aconfig/aconfig_device_paths/src/lib.rs @@ -62,13 +62,12 @@ mod tests { #[test] fn test_read_partition_paths() { - assert_eq!(read_partition_paths().len(), 4); + assert_eq!(read_partition_paths().len(), 3); assert_eq!( read_partition_paths(), vec![ PathBuf::from("/system/etc/aconfig_flags.pb"), - PathBuf::from("/system_ext/etc/aconfig_flags.pb"), PathBuf::from("/product/etc/aconfig_flags.pb"), PathBuf::from("/vendor/etc/aconfig_flags.pb") ] diff --git a/tools/aconfig/aconfig_flags/Android.bp b/tools/aconfig/aconfig_flags/Android.bp index e327ced26c..4c1fd4efcf 100644 --- a/tools/aconfig/aconfig_flags/Android.bp +++ b/tools/aconfig/aconfig_flags/Android.bp @@ -44,3 +44,8 @@ cc_aconfig_library { name: "libaconfig_flags_cc", aconfig_declarations: "aconfig_flags", } + +java_aconfig_library { + name: "aconfig_flags_java", + aconfig_declarations: "aconfig_flags", +} diff --git a/tools/aconfig/aconfig_flags/flags.aconfig b/tools/aconfig/aconfig_flags/flags.aconfig index db8b1b7904..0a004ca4e1 100644 --- a/tools/aconfig/aconfig_flags/flags.aconfig +++ b/tools/aconfig/aconfig_flags/flags.aconfig @@ -7,3 +7,10 @@ flag { bug: "312235596" description: "When enabled, aconfig flags are read from the new aconfig storage only." } + +flag { + name: "enable_aconfigd_from_mainline" + namespace: "core_experiments_team_internal" + bug: "369808805" + description: "When enabled, launch aconfigd from config infra module." +} diff --git a/tools/aconfig/aconfig_flags/src/lib.rs b/tools/aconfig/aconfig_flags/src/lib.rs index a607efb7d4..2e891273ed 100644 --- a/tools/aconfig/aconfig_flags/src/lib.rs +++ b/tools/aconfig/aconfig_flags/src/lib.rs @@ -34,6 +34,11 @@ pub mod auto_generated { pub fn enable_only_new_storage() -> bool { aconfig_flags_rust::enable_only_new_storage() } + + /// Returns the value for the enable_aconfigd_from_mainline flag. + pub fn enable_aconfigd_from_mainline() -> bool { + aconfig_flags_rust::enable_only_new_storage() + } } /// Module used when building with cargo @@ -44,4 +49,10 @@ pub mod auto_generated { // Used only to enable typechecking and testing with cargo true } + + /// Returns a placeholder value for the enable_aconfigd_from_mainline flag. + pub fn enable_aconfigd_from_mainline() -> bool { + // Used only to enable typechecking and testing with cargo + true + } } diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs index cf52bc017d..1d92ba49ab 100644 --- a/tools/aconfig/aconfig_storage_file/src/lib.rs +++ b/tools/aconfig/aconfig_storage_file/src/lib.rs @@ -57,8 +57,13 @@ use crate::AconfigStorageError::{ BytesParseFail, HashTableSizeLimit, InvalidFlagValueType, InvalidStoredFlagType, }; -/// Storage file version -pub const FILE_VERSION: u32 = 1; +/// The max storage file version from which we can safely read/write. May be +/// experimental. +pub const MAX_SUPPORTED_FILE_VERSION: u32 = 2; + +/// The newest fully-released version. Unless otherwise specified, this is the +/// version we will write. 
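A compact sketch of the intended reader behavior under the new constants, assuming the leading four bytes of every storage file remain the little-endian version field:

import struct

DEFAULT_FILE_VERSION = 1        # written unless a version is requested explicitly
MAX_SUPPORTED_FILE_VERSION = 2  # newest version this library can still parse

def check_version(storage_bytes: bytes) -> int:
    # Mirrors read_u32_from_start_of_bytes plus the per-query version checks:
    # parse the leading little-endian u32 and reject anything newer.
    (version,) = struct.unpack_from("<I", storage_bytes, 0)
    if version > MAX_SUPPORTED_FILE_VERSION:
        raise ValueError(
            "storage file version %d is newer than supported version %d"
            % (version, MAX_SUPPORTED_FILE_VERSION))
    return version
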
+pub const DEFAULT_FILE_VERSION: u32 = 1; /// Good hash table prime number pub(crate) const HASH_PRIMES: [u32; 29] = [ @@ -244,6 +249,11 @@ pub(crate) fn read_u16_from_bytes( Ok(val) } +/// Read and parse the first 4 bytes of buf as u32. +pub fn read_u32_from_start_of_bytes(buf: &[u8]) -> Result<u32, AconfigStorageError> { + read_u32_from_bytes(buf, &mut 0) +} + /// Read and parse bytes as u32 pub fn read_u32_from_bytes(buf: &[u8], head: &mut usize) -> Result<u32, AconfigStorageError> { let val = @@ -254,6 +264,16 @@ pub fn read_u32_from_bytes(buf: &[u8], head: &mut usize) -> Result<u32, AconfigS Ok(val) } +// Read and parse bytes as u64 +pub fn read_u64_from_bytes(buf: &[u8], head: &mut usize) -> Result<u64, AconfigStorageError> { + let val = + u64::from_le_bytes(buf[*head..*head + 8].try_into().map_err(|errmsg| { + BytesParseFail(anyhow!("fail to parse u64 from bytes: {}", errmsg)) + })?); + *head += 8; + Ok(val) +} + /// Read and parse bytes as string pub(crate) fn read_str_from_bytes( buf: &[u8], diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs index a5bd9e6446..af39fbc783 100644 --- a/tools/aconfig/aconfig_storage_file/src/package_table.rs +++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs @@ -17,7 +17,10 @@ //! package table module defines the package table file format and methods for serialization //! and deserialization -use crate::{get_bucket_index, read_str_from_bytes, read_u32_from_bytes, read_u8_from_bytes}; +use crate::{ + get_bucket_index, read_str_from_bytes, read_u32_from_bytes, read_u64_from_bytes, + read_u8_from_bytes, +}; use crate::{AconfigStorageError, StorageFileType}; use anyhow::anyhow; use serde::{Deserialize, Serialize}; @@ -117,7 +120,16 @@ impl fmt::Debug for PackageTableNode { impl PackageTableNode { /// Serialize to bytes - pub fn into_bytes(&self) -> Vec<u8> { + pub fn into_bytes(&self, version: u32) -> Vec<u8> { + match version { + 1 => Self::into_bytes_v1(self), + 2 => Self::into_bytes_v2(self), + // TODO(b/316357686): into_bytes should return a Result. + _ => Self::into_bytes_v2(&self), + } + } + + fn into_bytes_v1(&self) -> Vec<u8> { let mut result = Vec::new(); let name_bytes = self.package_name.as_bytes(); result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes()); @@ -128,18 +140,64 @@ impl PackageTableNode { result } - /// Deserialize from bytes - pub fn from_bytes(bytes: &[u8]) -> Result<Self, AconfigStorageError> { + fn into_bytes_v2(&self) -> Vec<u8> { + let mut result = Vec::new(); + let name_bytes = self.package_name.as_bytes(); + result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes()); + result.extend_from_slice(name_bytes); + result.extend_from_slice(&self.package_id.to_le_bytes()); + // V2 storage files have a fingerprint. Current struct (v1) does not, so + // we write 0. + result.extend_from_slice(&0u64.to_le_bytes()); + result.extend_from_slice(&self.boolean_start_index.to_le_bytes()); + result.extend_from_slice(&self.next_offset.unwrap_or(0).to_le_bytes()); + result + } + + /// Deserialize from bytes based on file version. 
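The difference between the two node encodings can be expressed in a short Python sketch mirroring into_bytes_v1/into_bytes_v2 in this hunk: v2 inserts a u64 fingerprint, currently written as zero, between package_id and boolean_start_index.

import struct

def node_to_bytes(name: str, package_id: int, boolean_start_index: int,
                  next_offset, version: int) -> bytes:
    name_bytes = name.encode()
    out = struct.pack("<I", len(name_bytes)) + name_bytes
    out += struct.pack("<I", package_id)
    if version >= 2:
        out += struct.pack("<Q", 0)             # placeholder fingerprint
    out += struct.pack("<I", boolean_start_index)
    out += struct.pack("<I", next_offset or 0)  # 0 encodes "no next node"
    return out
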
+ pub fn from_bytes(bytes: &[u8], version: u32) -> Result<Self, AconfigStorageError> { + match version { + 1 => Self::from_bytes_v1(bytes), + 2 => Self::from_bytes_v2(bytes), + _ => { + return Err(AconfigStorageError::BytesParseFail(anyhow!( + "Binary file is an unsupported version: {}", + version + ))) + } + } + } + + fn from_bytes_v1(bytes: &[u8]) -> Result<Self, AconfigStorageError> { let mut head = 0; - let node = Self { - package_name: read_str_from_bytes(bytes, &mut head)?, - package_id: read_u32_from_bytes(bytes, &mut head)?, - boolean_start_index: read_u32_from_bytes(bytes, &mut head)?, - next_offset: match read_u32_from_bytes(bytes, &mut head)? { - 0 => None, - val => Some(val), - }, + let package_name = read_str_from_bytes(bytes, &mut head)?; + let package_id = read_u32_from_bytes(bytes, &mut head)?; + let boolean_start_index = read_u32_from_bytes(bytes, &mut head)?; + let next_offset = match read_u32_from_bytes(bytes, &mut head)? { + 0 => None, + val => Some(val), + }; + + let node = Self { package_name, package_id, boolean_start_index, next_offset }; + Ok(node) + } + + fn from_bytes_v2(bytes: &[u8]) -> Result<Self, AconfigStorageError> { + let mut head = 0; + let package_name = read_str_from_bytes(bytes, &mut head)?; + let package_id = read_u32_from_bytes(bytes, &mut head)?; + + // Fingerprint is unused in the current struct (v1), but we need to read + // the bytes if the storage file type is v2 or else the subsequent + // fields will be inaccurate. + let _fingerprint = read_u64_from_bytes(bytes, &mut head)?; + let boolean_start_index = read_u32_from_bytes(bytes, &mut head)?; + let next_offset = match read_u32_from_bytes(bytes, &mut head)? { + 0 => None, + val => Some(val), }; + + let node = Self { package_name, package_id, boolean_start_index, next_offset }; Ok(node) } @@ -180,7 +238,11 @@ impl PackageTable { [ self.header.into_bytes(), self.buckets.iter().map(|v| v.unwrap_or(0).to_le_bytes()).collect::<Vec<_>>().concat(), - self.nodes.iter().map(|v| v.into_bytes()).collect::<Vec<_>>().concat(), + self.nodes + .iter() + .map(|v| v.into_bytes(self.header.version)) + .collect::<Vec<_>>() + .concat(), ] .concat() } @@ -199,8 +261,8 @@ impl PackageTable { .collect(); let nodes = (0..num_packages) .map(|_| { - let node = PackageTableNode::from_bytes(&bytes[head..])?; - head += node.into_bytes().len(); + let node = PackageTableNode::from_bytes(&bytes[head..], header.version)?; + head += node.into_bytes(header.version).len(); Ok(node) }) .collect::<Result<Vec<_>, AconfigStorageError>>() @@ -219,7 +281,8 @@ impl PackageTable { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::create_test_package_table; + use crate::read_u32_from_start_of_bytes; + use crate::{test_utils::create_test_package_table, DEFAULT_FILE_VERSION}; #[test] // this test point locks down the table serialization @@ -232,7 +295,9 @@ mod tests { let nodes: &Vec<PackageTableNode> = &package_table.nodes; for node in nodes.iter() { - let reinterpreted_node = PackageTableNode::from_bytes(&node.into_bytes()).unwrap(); + let reinterpreted_node = + PackageTableNode::from_bytes(&node.into_bytes(header.version), header.version) + .unwrap(); assert_eq!(node, &reinterpreted_node); } @@ -249,9 +314,36 @@ mod tests { fn test_version_number() { let package_table = create_test_package_table(); let bytes = &package_table.into_bytes(); - let mut head = 0; - let version = read_u32_from_bytes(bytes, &mut head).unwrap(); - assert_eq!(version, 1); + let version = read_u32_from_start_of_bytes(bytes).unwrap(); + 
assert_eq!(version, DEFAULT_FILE_VERSION); + } + + #[test] + fn test_round_trip_v1() { + let table_v1: PackageTable = create_test_package_table(); + let table_bytes_v1 = table_v1.into_bytes(); + + // Will automatically read from version 2 as the version code is encoded + // into the bytes. + let reinterpreted_table = PackageTable::from_bytes(&table_bytes_v1).unwrap(); + + assert_eq!(table_v1, reinterpreted_table); + } + + #[test] + fn test_round_trip_v2() { + // Have to fake v2 because though we will set the version to v2 + // and write the bytes as v2, we don't have the ability to actually set + // the fingerprint yet. + let mut fake_table_v2 = create_test_package_table(); + fake_table_v2.header.version = 2; + let table_bytes_v2 = fake_table_v2.into_bytes(); + + // Will automatically read from version 2 as the version code is encoded + // into the bytes. + let reinterpreted_table = PackageTable::from_bytes(&table_bytes_v2).unwrap(); + + assert_eq!(fake_table_v2, reinterpreted_table); } #[test] diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs index 106666c47f..5c364f6bc2 100644 --- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs +++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs @@ -18,7 +18,7 @@ use crate::flag_info::{FlagInfoHeader, FlagInfoList, FlagInfoNode}; use crate::flag_table::{FlagTable, FlagTableHeader, FlagTableNode}; use crate::flag_value::{FlagValueHeader, FlagValueList}; use crate::package_table::{PackageTable, PackageTableHeader, PackageTableNode}; -use crate::{AconfigStorageError, StorageFileType, StoredFlagType}; +use crate::{AconfigStorageError, StorageFileType, StoredFlagType, DEFAULT_FILE_VERSION}; use anyhow::anyhow; use std::io::Write; @@ -26,7 +26,7 @@ use tempfile::NamedTempFile; pub fn create_test_package_table() -> PackageTable { let header = PackageTableHeader { - version: 1, + version: DEFAULT_FILE_VERSION, container: String::from("mockup"), file_type: StorageFileType::PackageMap as u8, file_size: 209, @@ -78,7 +78,7 @@ impl FlagTableNode { pub fn create_test_flag_table() -> FlagTable { let header = FlagTableHeader { - version: 1, + version: DEFAULT_FILE_VERSION, container: String::from("mockup"), file_type: StorageFileType::FlagMap as u8, file_size: 321, @@ -120,7 +120,7 @@ pub fn create_test_flag_table() -> FlagTable { pub fn create_test_flag_value_list() -> FlagValueList { let header = FlagValueHeader { - version: 1, + version: DEFAULT_FILE_VERSION, container: String::from("mockup"), file_type: StorageFileType::FlagVal as u8, file_size: 35, @@ -133,7 +133,7 @@ pub fn create_test_flag_value_list() -> FlagValueList { pub fn create_test_flag_info_list() -> FlagInfoList { let header = FlagInfoHeader { - version: 1, + version: DEFAULT_FILE_VERSION, container: String::from("mockup"), file_type: StorageFileType::FlagInfo as u8, file_size: 35, diff --git a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java index 9838a7c780..757844a603 100644 --- a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java +++ b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java @@ -37,9 +37,16 @@ public class FlagTable { public Node get(int packageId, String flagName) { int numBuckets = (mHeader.mNodeOffset - mHeader.mBucketOffset) / 4; int bucketIndex = TableUtils.getBucketIndex(makeKey(packageId, flagName), numBuckets); + int newPosition = 
mHeader.mBucketOffset + bucketIndex * 4; + if (newPosition >= mHeader.mNodeOffset) { + return null; + } - mReader.position(mHeader.mBucketOffset + bucketIndex * 4); + mReader.position(newPosition); int nodeIndex = mReader.readInt(); + if (nodeIndex < mHeader.mNodeOffset || nodeIndex >= mHeader.mFileSize) { + return null; + } while (nodeIndex != -1) { mReader.position(nodeIndex); @@ -50,7 +57,7 @@ public class FlagTable { nodeIndex = node.mNextOffset; } - throw new AconfigStorageException("get cannot find flag: " + flagName); + return null; } public Header getHeader() { diff --git a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java index 773b882f4a..39b7e59d7e 100644 --- a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java +++ b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java @@ -35,23 +35,29 @@ public class PackageTable { } public Node get(String packageName) { - int numBuckets = (mHeader.mNodeOffset - mHeader.mBucketOffset) / 4; int bucketIndex = TableUtils.getBucketIndex(packageName.getBytes(UTF_8), numBuckets); - - mReader.position(mHeader.mBucketOffset + bucketIndex * 4); + int newPosition = mHeader.mBucketOffset + bucketIndex * 4; + if (newPosition >= mHeader.mNodeOffset) { + return null; + } + mReader.position(newPosition); int nodeIndex = mReader.readInt(); + if (nodeIndex < mHeader.mNodeOffset || nodeIndex >= mHeader.mFileSize) { + return null; + } + while (nodeIndex != -1) { mReader.position(nodeIndex); - Node node = Node.fromBytes(mReader); + Node node = Node.fromBytes(mReader, mHeader.mVersion); if (Objects.equals(packageName, node.mPackageName)) { return node; } nodeIndex = node.mNextOffset; } - throw new AconfigStorageException("get cannot find package: " + packageName); + return null; } public Header getHeader() { @@ -68,7 +74,7 @@ public class PackageTable { private int mBucketOffset; private int mNodeOffset; - public static Header fromBytes(ByteBufferReader reader) { + private static Header fromBytes(ByteBufferReader reader) { Header header = new Header(); header.mVersion = reader.readInt(); header.mContainer = reader.readString(); @@ -121,7 +127,29 @@ public class PackageTable { private int mBooleanStartIndex; private int mNextOffset; - public static Node fromBytes(ByteBufferReader reader) { + private static Node fromBytes(ByteBufferReader reader, int version) { + switch (version) { + case 1: + return fromBytesV1(reader); + case 2: + return fromBytesV2(reader); + default: + // Do we want to throw here? + return new Node(); + } + } + + private static Node fromBytesV1(ByteBufferReader reader) { + Node node = new Node(); + node.mPackageName = reader.readString(); + node.mPackageId = reader.readInt(); + node.mBooleanStartIndex = reader.readInt(); + node.mNextOffset = reader.readInt(); + node.mNextOffset = node.mNextOffset == 0 ? 
-1 : node.mNextOffset; + return node; + } + + private static Node fromBytesV2(ByteBufferReader reader) { Node node = new Node(); node.mPackageName = reader.readString(); node.mPackageId = reader.readInt(); diff --git a/tools/aconfig/aconfig_storage_read_api/Android.bp b/tools/aconfig/aconfig_storage_read_api/Android.bp index f96b2230d1..6ae34f3a28 100644 --- a/tools/aconfig/aconfig_storage_read_api/Android.bp +++ b/tools/aconfig/aconfig_storage_read_api/Android.bp @@ -107,31 +107,12 @@ cc_library { afdo: true, } -soong_config_module_type { - name: "aconfig_lib_cc_shared_link_defaults", - module_type: "cc_defaults", - config_namespace: "Aconfig", - bool_variables: [ - "read_from_new_storage", - ], - properties: [ - "shared_libs", - ], -} - -soong_config_bool_variable { - name: "read_from_new_storage", -} - -aconfig_lib_cc_shared_link_defaults { +cc_defaults { name: "aconfig_lib_cc_shared_link.defaults", - soong_config_variables: { - read_from_new_storage: { - shared_libs: [ - "libaconfig_storage_read_api_cc", - ], - }, - }, + shared_libs: select(release_flag("RELEASE_READ_FROM_NEW_STORAGE"), { + true: ["libaconfig_storage_read_api_cc"], + default: [], + }), } cc_defaults { diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs index 6d03377683..fe57a6dd78 100644 --- a/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs +++ b/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs @@ -16,8 +16,10 @@ //! flag value query module defines the flag value file read from mapped bytes -use crate::{AconfigStorageError, FILE_VERSION}; -use aconfig_storage_file::{flag_info::FlagInfoHeader, read_u8_from_bytes, FlagValueType}; +use crate::AconfigStorageError; +use aconfig_storage_file::{ + flag_info::FlagInfoHeader, read_u8_from_bytes, FlagValueType, MAX_SUPPORTED_FILE_VERSION, +}; use anyhow::anyhow; /// Get flag attribute bitfield @@ -27,11 +29,11 @@ pub fn find_flag_attribute( flag_index: u32, ) -> Result<u8, AconfigStorageError> { let interpreted_header = FlagInfoHeader::from_bytes(buf)?; - if interpreted_header.version > crate::FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot read storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } @@ -108,15 +110,15 @@ mod tests { // this test point locks down query error when file has a higher version fn test_higher_version_storage_file() { let mut info_list = create_test_flag_info_list(); - info_list.header.version = crate::FILE_VERSION + 1; + info_list.header.version = MAX_SUPPORTED_FILE_VERSION + 1; let flag_info = info_list.into_bytes(); let error = find_flag_attribute(&flag_info[..], FlagValueType::Boolean, 4).unwrap_err(); assert_eq!( format!("{:?}", error), format!( "HigherStorageFileVersion(Cannot read storage file with a higher version of {} with lib version {})", - crate::FILE_VERSION + 1, - crate::FILE_VERSION + MAX_SUPPORTED_FILE_VERSION + 1, + MAX_SUPPORTED_FILE_VERSION ) ); } diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs index a1a4793bc2..e9bc6041cf 100644 --- a/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs +++ b/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs @@ -16,9 +16,10 @@ //! 
flag table query module defines the flag table file read from mapped bytes -use crate::{AconfigStorageError, FILE_VERSION}; +use crate::AconfigStorageError; use aconfig_storage_file::{ flag_table::FlagTableHeader, flag_table::FlagTableNode, read_u32_from_bytes, StoredFlagType, + MAX_SUPPORTED_FILE_VERSION, }; use anyhow::anyhow; @@ -36,11 +37,11 @@ pub fn find_flag_read_context( flag: &str, ) -> Result<Option<FlagReadContext>, AconfigStorageError> { let interpreted_header = FlagTableHeader::from_bytes(buf)?; - if interpreted_header.version > crate::FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot read storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } @@ -111,15 +112,15 @@ mod tests { // this test point locks down query error when file has a higher version fn test_higher_version_storage_file() { let mut table = create_test_flag_table(); - table.header.version = crate::FILE_VERSION + 1; + table.header.version = MAX_SUPPORTED_FILE_VERSION + 1; let flag_table = table.into_bytes(); let error = find_flag_read_context(&flag_table[..], 0, "enabled_ro").unwrap_err(); assert_eq!( format!("{:?}", error), format!( "HigherStorageFileVersion(Cannot read storage file with a higher version of {} with lib version {})", - crate::FILE_VERSION + 1, - crate::FILE_VERSION + MAX_SUPPORTED_FILE_VERSION + 1, + MAX_SUPPORTED_FILE_VERSION ) ); } diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs index 9d32a16ac8..12c1e83628 100644 --- a/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs +++ b/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs @@ -16,18 +16,20 @@ //! 
flag value query module defines the flag value file read from mapped bytes -use crate::{AconfigStorageError, FILE_VERSION}; -use aconfig_storage_file::{flag_value::FlagValueHeader, read_u8_from_bytes}; +use crate::AconfigStorageError; +use aconfig_storage_file::{ + flag_value::FlagValueHeader, read_u8_from_bytes, MAX_SUPPORTED_FILE_VERSION, +}; use anyhow::anyhow; /// Query flag value pub fn find_boolean_flag_value(buf: &[u8], flag_index: u32) -> Result<bool, AconfigStorageError> { let interpreted_header = FlagValueHeader::from_bytes(buf)?; - if interpreted_header.version > crate::FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot read storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } @@ -74,15 +76,15 @@ mod tests { // this test point locks down query error when file has a higher version fn test_higher_version_storage_file() { let mut value_list = create_test_flag_value_list(); - value_list.header.version = crate::FILE_VERSION + 1; + value_list.header.version = MAX_SUPPORTED_FILE_VERSION + 1; let flag_value = value_list.into_bytes(); let error = find_boolean_flag_value(&flag_value[..], 4).unwrap_err(); assert_eq!( format!("{:?}", error), format!( "HigherStorageFileVersion(Cannot read storage file with a higher version of {} with lib version {})", - crate::FILE_VERSION + 1, - crate::FILE_VERSION + MAX_SUPPORTED_FILE_VERSION + 1, + MAX_SUPPORTED_FILE_VERSION ) ); } diff --git a/tools/aconfig/aconfig_storage_read_api/src/lib.rs b/tools/aconfig/aconfig_storage_read_api/src/lib.rs index d76cf3fe4e..988ce63adc 100644 --- a/tools/aconfig/aconfig_storage_read_api/src/lib.rs +++ b/tools/aconfig/aconfig_storage_read_api/src/lib.rs @@ -46,7 +46,7 @@ pub use aconfig_storage_file::{AconfigStorageError, FlagValueType, StorageFileTy pub use flag_table_query::FlagReadContext; pub use package_table_query::PackageReadContext; -use aconfig_storage_file::{read_u32_from_bytes, FILE_VERSION}; +use aconfig_storage_file::read_u32_from_bytes; use flag_info_query::find_flag_attribute; use flag_table_query::find_flag_read_context; use flag_value_query::find_boolean_flag_value; diff --git a/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs b/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs index 2cb854b1b1..acb60f6b3a 100644 --- a/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs +++ b/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs @@ -16,9 +16,10 @@ //! 
package table query module defines the package table file read from mapped bytes -use crate::{AconfigStorageError, FILE_VERSION}; +use crate::AconfigStorageError; use aconfig_storage_file::{ package_table::PackageTableHeader, package_table::PackageTableNode, read_u32_from_bytes, + MAX_SUPPORTED_FILE_VERSION, }; use anyhow::anyhow; @@ -35,11 +36,11 @@ pub fn find_package_read_context( package: &str, ) -> Result<Option<PackageReadContext>, AconfigStorageError> { let interpreted_header = PackageTableHeader::from_bytes(buf)?; - if interpreted_header.version > FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot read storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } @@ -55,7 +56,8 @@ pub fn find_package_read_context( } loop { - let interpreted_node = PackageTableNode::from_bytes(&buf[package_node_offset..])?; + let interpreted_node = + PackageTableNode::from_bytes(&buf[package_node_offset..], interpreted_header.version)?; if interpreted_node.package_name == package { return Ok(Some(PackageReadContext { package_id: interpreted_node.package_id, @@ -118,7 +120,7 @@ mod tests { // this test point locks down query error when file has a higher version fn test_higher_version_storage_file() { let mut table = create_test_package_table(); - table.header.version = crate::FILE_VERSION + 1; + table.header.version = MAX_SUPPORTED_FILE_VERSION + 1; let package_table = table.into_bytes(); let error = find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_1") @@ -127,8 +129,8 @@ mod tests { format!("{:?}", error), format!( "HigherStorageFileVersion(Cannot read storage file with a higher version of {} with lib version {})", - crate::FILE_VERSION + 1, - crate::FILE_VERSION + MAX_SUPPORTED_FILE_VERSION + 1, + MAX_SUPPORTED_FILE_VERSION ) ); } diff --git a/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java b/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java index 29ebee5ab4..6fbcdb354a 100644 --- a/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java +++ b/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java @@ -53,9 +53,6 @@ public class StorageInternalReader { @UnsupportedAppUsage public boolean getBooleanFlagValue(int index) { index += mPackageBooleanStartOffset; - if (index >= mFlagValueList.size()) { - throw new AconfigStorageException("Fail to get boolean flag value"); - } return mFlagValueList.getBoolean(index); } diff --git a/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp b/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp index 7b435746da..03a8fa284a 100644 --- a/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp +++ b/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp @@ -100,18 +100,4 @@ android::base::Result<void> set_flag_has_local_override( return {}; } -android::base::Result<void> create_flag_info( - std::string const& package_map, - std::string const& flag_map, - std::string const& flag_info_out) { - auto creation_cxx = create_flag_info_cxx( - rust::Str(package_map.c_str()), - rust::Str(flag_map.c_str()), - rust::Str(flag_info_out.c_str())); - if (creation_cxx.success) { - return {}; - } else { - return android::base::Error() << 
creation_cxx.error_message; - } -} } // namespace aconfig_storage diff --git a/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp b/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp index 0bba7ffcfc..50a51889b1 100644 --- a/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp +++ b/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp @@ -36,13 +36,4 @@ android::base::Result<void> set_flag_has_local_override( uint32_t offset, bool value); -/// Create flag info file based on package and flag map -/// \input package_map: package map file -/// \input flag_map: flag map file -/// \input flag_info_out: flag info file to be created -android::base::Result<void> create_flag_info( - std::string const& package_map, - std::string const& flag_map, - std::string const& flag_info_out); - } // namespace aconfig_storage diff --git a/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs b/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs index 7e6071340c..5640922f57 100644 --- a/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs +++ b/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs @@ -18,7 +18,7 @@ use aconfig_storage_file::{ read_u8_from_bytes, AconfigStorageError, FlagInfoBit, FlagInfoHeader, FlagValueType, - FILE_VERSION, + MAX_SUPPORTED_FILE_VERSION, }; use anyhow::anyhow; @@ -28,11 +28,11 @@ fn get_flag_info_offset( flag_index: u32, ) -> Result<usize, AconfigStorageError> { let interpreted_header = FlagInfoHeader::from_bytes(buf)?; - if interpreted_header.version > FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot write to storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } diff --git a/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs b/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs index dd15c996a6..06a9b15241 100644 --- a/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs +++ b/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs @@ -16,7 +16,7 @@ //! 
flag value update module defines the flag value file write to mapped bytes -use aconfig_storage_file::{AconfigStorageError, FlagValueHeader, FILE_VERSION}; +use aconfig_storage_file::{AconfigStorageError, FlagValueHeader, MAX_SUPPORTED_FILE_VERSION}; use anyhow::anyhow; /// Set flag value @@ -26,11 +26,11 @@ pub fn update_boolean_flag_value( flag_value: bool, ) -> Result<usize, AconfigStorageError> { let interpreted_header = FlagValueHeader::from_bytes(buf)?; - if interpreted_header.version > FILE_VERSION { + if interpreted_header.version > MAX_SUPPORTED_FILE_VERSION { return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!( "Cannot write to storage file with a higher version of {} with lib version {}", interpreted_header.version, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION ))); } @@ -84,15 +84,15 @@ mod tests { // this test point locks down query error when file has a higher version fn test_higher_version_storage_file() { let mut value_list = create_test_flag_value_list(); - value_list.header.version = FILE_VERSION + 1; + value_list.header.version = MAX_SUPPORTED_FILE_VERSION + 1; let mut flag_value = value_list.into_bytes(); let error = update_boolean_flag_value(&mut flag_value[..], 4, true).unwrap_err(); assert_eq!( format!("{:?}", error), format!( "HigherStorageFileVersion(Cannot write to storage file with a higher version of {} with lib version {})", - FILE_VERSION + 1, - FILE_VERSION + MAX_SUPPORTED_FILE_VERSION + 1, + MAX_SUPPORTED_FILE_VERSION ) ); } diff --git a/tools/aconfig/aconfig_storage_write_api/src/lib.rs b/tools/aconfig/aconfig_storage_write_api/src/lib.rs index 0396a63d4e..09bb41f54f 100644 --- a/tools/aconfig/aconfig_storage_write_api/src/lib.rs +++ b/tools/aconfig/aconfig_storage_write_api/src/lib.rs @@ -24,15 +24,10 @@ pub mod mapped_file; #[cfg(test)] mod test_utils; -use aconfig_storage_file::{ - AconfigStorageError, FlagInfoHeader, FlagInfoList, FlagInfoNode, FlagTable, FlagValueType, - PackageTable, StorageFileType, StoredFlagType, FILE_VERSION, -}; +use aconfig_storage_file::{AconfigStorageError, FlagValueType}; use anyhow::anyhow; use memmap2::MmapMut; -use std::fs::File; -use std::io::{Read, Write}; /// Get read write mapped storage files. 
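Relatedly, the Java FlagTable and PackageTable lookups earlier in this patch now validate offsets and return null on a miss rather than throwing; the guard pattern is roughly the following Python sketch, where read_int_at is a stand-in for the ByteBufferReader access.

def find_node_offset(buf_len: int, bucket_offset: int, node_offset: int,
                     bucket_index: int, read_int_at):
    # Bail out with None if the bucket slot or the stored node index falls
    # outside the expected ranges instead of raising.
    position = bucket_offset + bucket_index * 4
    if position >= node_offset:        # bucket slot would overlap the node area
        return None
    node_index = read_int_at(position)
    if node_index < node_offset or node_index >= buf_len:
        return None                    # dangling or out-of-range node pointer
    return node_index
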
/// @@ -104,86 +99,6 @@ pub fn set_flag_has_local_override( }) } -/// Read in storage file as bytes -fn read_file_to_bytes(file_path: &str) -> Result<Vec<u8>, AconfigStorageError> { - let mut file = File::open(file_path).map_err(|errmsg| { - AconfigStorageError::FileReadFail(anyhow!("Failed to open file {}: {}", file_path, errmsg)) - })?; - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer).map_err(|errmsg| { - AconfigStorageError::FileReadFail(anyhow!( - "Failed to read bytes from file {}: {}", - file_path, - errmsg - )) - })?; - Ok(buffer) -} - -/// Create flag info file given package map file and flag map file -/// \input package_map: package map file -/// \input flag_map: flag map file -/// \output flag_info_out: created flag info file -pub fn create_flag_info( - package_map: &str, - flag_map: &str, - flag_info_out: &str, -) -> Result<(), AconfigStorageError> { - let package_table = PackageTable::from_bytes(&read_file_to_bytes(package_map)?)?; - let flag_table = FlagTable::from_bytes(&read_file_to_bytes(flag_map)?)?; - - if package_table.header.container != flag_table.header.container { - return Err(AconfigStorageError::FileCreationFail(anyhow!( - "container for package map {} and flag map {} does not match", - package_table.header.container, - flag_table.header.container, - ))); - } - - let mut package_start_index = vec![0; package_table.header.num_packages as usize]; - for node in package_table.nodes.iter() { - package_start_index[node.package_id as usize] = node.boolean_start_index; - } - - let mut is_flag_rw = vec![false; flag_table.header.num_flags as usize]; - for node in flag_table.nodes.iter() { - let flag_index = package_start_index[node.package_id as usize] + node.flag_index as u32; - is_flag_rw[flag_index as usize] = node.flag_type == StoredFlagType::ReadWriteBoolean; - } - - let mut list = FlagInfoList { - header: FlagInfoHeader { - version: FILE_VERSION, - container: flag_table.header.container, - file_type: StorageFileType::FlagInfo as u8, - file_size: 0, - num_flags: flag_table.header.num_flags, - boolean_flag_offset: 0, - }, - nodes: is_flag_rw.iter().map(|&rw| FlagInfoNode::create(rw)).collect(), - }; - - list.header.boolean_flag_offset = list.header.into_bytes().len() as u32; - list.header.file_size = list.into_bytes().len() as u32; - - let mut file = File::create(flag_info_out).map_err(|errmsg| { - AconfigStorageError::FileCreationFail(anyhow!( - "fail to create file {}: {}", - flag_info_out, - errmsg - )) - })?; - file.write_all(&list.into_bytes()).map_err(|errmsg| { - AconfigStorageError::FileCreationFail(anyhow!( - "fail to write to file {}: {}", - flag_info_out, - errmsg - )) - })?; - - Ok(()) -} - // *************************************** // // CC INTERLOP // *************************************** // @@ -212,12 +127,6 @@ mod ffi { pub error_message: String, } - // Flag info file creation return for cc interlop - pub struct FlagInfoCreationCXX { - pub success: bool, - pub error_message: String, - } - // Rust export to c++ extern "Rust" { pub fn update_boolean_flag_value_cxx( @@ -239,12 +148,6 @@ mod ffi { offset: u32, value: bool, ) -> FlagHasLocalOverrideUpdateCXX; - - pub fn create_flag_info_cxx( - package_map: &str, - flag_map: &str, - flag_info_out: &str, - ) -> FlagInfoCreationCXX; } } @@ -329,34 +232,15 @@ pub(crate) fn update_flag_has_local_override_cxx( } } -/// Create flag info file cc interlop -pub(crate) fn create_flag_info_cxx( - package_map: &str, - flag_map: &str, - flag_info_out: &str, -) -> ffi::FlagInfoCreationCXX { - match 
create_flag_info(package_map, flag_map, flag_info_out) { - Ok(()) => ffi::FlagInfoCreationCXX { success: true, error_message: String::from("") }, - Err(errmsg) => { - ffi::FlagInfoCreationCXX { success: false, error_message: format!("{:?}", errmsg) } - } - } -} - #[cfg(test)] mod tests { use super::*; use crate::test_utils::copy_to_temp_file; - use aconfig_storage_file::test_utils::{ - create_test_flag_info_list, create_test_flag_table, create_test_package_table, - write_bytes_to_temp_file, - }; use aconfig_storage_file::FlagInfoBit; use aconfig_storage_read_api::flag_info_query::find_flag_attribute; use aconfig_storage_read_api::flag_value_query::find_boolean_flag_value; use std::fs::File; use std::io::Read; - use tempfile::NamedTempFile; fn get_boolean_flag_value_at_offset(file: &str, offset: u32) -> bool { let mut f = File::open(&file).unwrap(); @@ -439,31 +323,4 @@ mod tests { } } } - - fn create_empty_temp_file() -> Result<NamedTempFile, AconfigStorageError> { - let file = NamedTempFile::new().map_err(|_| { - AconfigStorageError::FileCreationFail(anyhow!("Failed to create temp file")) - })?; - Ok(file) - } - - #[test] - // this test point locks down the flag info creation - fn test_create_flag_info() { - let package_table = - write_bytes_to_temp_file(&create_test_package_table().into_bytes()).unwrap(); - let flag_table = write_bytes_to_temp_file(&create_test_flag_table().into_bytes()).unwrap(); - let flag_info = create_empty_temp_file().unwrap(); - - let package_table_path = package_table.path().display().to_string(); - let flag_table_path = flag_table.path().display().to_string(); - let flag_info_path = flag_info.path().display().to_string(); - - assert!(create_flag_info(&package_table_path, &flag_table_path, &flag_info_path).is_ok()); - - let flag_info = - FlagInfoList::from_bytes(&read_file_to_bytes(&flag_info_path).unwrap()).unwrap(); - let expected_flag_info = create_test_flag_info_list(); - assert_eq!(flag_info, expected_flag_info); - } } diff --git a/tools/aconfig/aflags/Android.bp b/tools/aconfig/aflags/Android.bp index 2040cc635b..a7aceeebad 100644 --- a/tools/aconfig/aflags/Android.bp +++ b/tools/aconfig/aflags/Android.bp @@ -12,7 +12,7 @@ rust_defaults { "libaconfig_device_paths", "libaconfig_flags", "libaconfig_protos", - "libaconfigd_protos", + "libaconfigd_protos_rust", "libaconfig_storage_read_api", "libaconfig_storage_file", "libanyhow", @@ -20,6 +20,10 @@ rust_defaults { "libnix", "libprotobuf", "libregex", + // TODO: b/371021174 remove this fake dependency once we find a proper strategy to + // deal with test aconfig libs are not present in storage because they are never used + // by the actual build + "libaconfig_test_rust_library", ], } diff --git a/tools/aconfig/fake_device_config/Android.bp b/tools/aconfig/fake_device_config/Android.bp index 7704742601..1f17e6b89f 100644 --- a/tools/aconfig/fake_device_config/Android.bp +++ b/tools/aconfig/fake_device_config/Android.bp @@ -15,9 +15,7 @@ java_library { name: "fake_device_config", srcs: [ - "src/android/util/Log.java", - "src/android/provider/DeviceConfig.java", - "src/android/os/StrictMode.java", + "src/**/*.java", ], sdk_version: "none", system_modules: "core-all-system-modules", diff --git a/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java b/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java new file mode 100644 index 0000000000..2f01b8c7e6 --- /dev/null +++ b/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java @@ -0,0 +1,42 @@ +/* + * 
Copyright (C) 2024 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.provider; + +/* + * This class allows generated aconfig code to compile independently of the framework. + */ +public class AconfigPackage { + + /** Flag value is true */ + public static final int FLAG_BOOLEAN_VALUE_TRUE = 1; + + /** Flag value is false */ + public static final int FLAG_BOOLEAN_VALUE_FALSE = 0; + + /** Flag value doesn't exist */ + public static final int FLAG_BOOLEAN_VALUE_NOT_EXIST = 2; + + public static int getBooleanFlagValue(String packageName, String flagName) { + return 0; + } + + public AconfigPackage(String packageName) {} + + public int getBooleanFlagValue(String flagName) { + return 0; + } +}
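For context only, a Python analogue of this build-time stub (hypothetical class name, same fixed return value): every lookup resolves to the "false" value so generated code can compile without a real flag storage backend.

class FakeAconfigPackage:
    FLAG_BOOLEAN_VALUE_TRUE = 1
    FLAG_BOOLEAN_VALUE_FALSE = 0
    FLAG_BOOLEAN_VALUE_NOT_EXIST = 2

    def __init__(self, package_name: str):
        self.package_name = package_name

    def get_boolean_flag_value(self, flag_name: str) -> int:
        # Stub behavior: always report "false", never consult storage.
        return self.FLAG_BOOLEAN_VALUE_FALSE
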
\ No newline at end of file diff --git a/tools/auto_gen_test_config.py b/tools/auto_gen_test_config.py index 8ee599a1ec..d54c4121e4 100755 --- a/tools/auto_gen_test_config.py +++ b/tools/auto_gen_test_config.py @@ -34,6 +34,7 @@ PLACEHOLDER_MODULE = '{MODULE}' PLACEHOLDER_PACKAGE = '{PACKAGE}' PLACEHOLDER_RUNNER = '{RUNNER}' PLACEHOLDER_TEST_TYPE = '{TEST_TYPE}' +PLACEHOLDER_EXTRA_TEST_RUNNER_CONFIGS = '{EXTRA_TEST_RUNNER_CONFIGS}' def main(argv): @@ -59,6 +60,7 @@ def main(argv): "instrumentation_test_config_template", help="Path to the instrumentation test config template.") parser.add_argument("--extra-configs", default="") + parser.add_argument("--extra-test-runner-configs", default="") args = parser.parse_args(argv) target_config = args.target_config @@ -66,6 +68,7 @@ def main(argv): empty_config = args.empty_config instrumentation_test_config_template = args.instrumentation_test_config_template extra_configs = '\n'.join(args.extra_configs.split('\\n')) + extra_test_runner_configs = '\n'.join(args.extra_test_runner_configs.split('\\n')) module = os.path.splitext(os.path.basename(target_config))[0] @@ -131,6 +134,7 @@ def main(argv): config = config.replace(PLACEHOLDER_PACKAGE, package) config = config.replace(PLACEHOLDER_TEST_TYPE, test_type) config = config.replace(PLACEHOLDER_EXTRA_CONFIGS, extra_configs) + config = config.replace(PLACEHOLDER_EXTRA_TEST_RUNNER_CONFIGS, extra_test_runner_configs) config = config.replace(PLACEHOLDER_RUNNER, runner) with open(target_config, 'w') as config_file: config_file.write(config) diff --git a/tools/edit_monitor/Android.bp b/tools/edit_monitor/Android.bp index 80437c00d4..34978214a9 100644 --- a/tools/edit_monitor/Android.bp +++ b/tools/edit_monitor/Android.bp @@ -19,3 +19,48 @@ package { default_applicable_licenses: ["Android-Apache-2.0"], default_team: "trendy_team_adte", } + +python_library_host { + name: "edit_event_proto", + srcs: [ + "proto/edit_event.proto", + ], + proto: { + canonical_path_from_root: false, + }, +} + +python_library_host { + name: "edit_monitor_lib", + pkg_path: "edit_monitor", + srcs: [ + "daemon_manager.py", + ], +} + +python_test_host { + name: "daemon_manager_test", + main: "daemon_manager_test.py", + pkg_path: "edit_monitor", + srcs: [ + "daemon_manager_test.py", + ], + libs: [ + "edit_monitor_lib", + ], + test_options: { + unit_test: true, + }, +} + +python_binary_host { + name: "edit_monitor", + pkg_path: "edit_monitor", + srcs: [ + "main.py", + ], + libs: [ + "edit_monitor_lib", + ], + main: "main.py", +} diff --git a/tools/edit_monitor/daemon_manager.py b/tools/edit_monitor/daemon_manager.py new file mode 100644 index 0000000000..445d849a49 --- /dev/null +++ b/tools/edit_monitor/daemon_manager.py @@ -0,0 +1,349 @@ +# Copyright 2024, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
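The auto_gen_test_config.py change above boils down to one more placeholder substitution; a hedged sketch of that step, with only the placeholders visible in this patch included:

def fill_test_config(template: str, module: str, package: str, runner: str,
                     test_type: str, extra_configs: str,
                     extra_test_runner_configs: str) -> str:
    # The extra-config arguments arrive with literal "\n" separators and are
    # expanded to real newlines before substitution, as in the script above.
    replacements = {
        "{MODULE}": module,
        "{PACKAGE}": package,
        "{TEST_TYPE}": test_type,
        "{EXTRA_CONFIGS}": extra_configs,
        "{EXTRA_TEST_RUNNER_CONFIGS}": extra_test_runner_configs,
        "{RUNNER}": runner,
    }
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)
    return template
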
+ + +import hashlib +import logging +import multiprocessing +import os +import pathlib +import signal +import subprocess +import sys +import tempfile +import time + + +DEFAULT_PROCESS_TERMINATION_TIMEOUT_SECONDS = 1 +DEFAULT_MONITOR_INTERVAL_SECONDS = 5 +DEFAULT_MEMORY_USAGE_THRESHOLD = 2000 +DEFAULT_CPU_USAGE_THRESHOLD = 200 +DEFAULT_REBOOT_TIMEOUT_SECONDS = 60 * 60 * 24 +BLOCK_SIGN_FILE = "edit_monitor_block_sign" + + +def default_daemon_target(): + """Placeholder for the default daemon target.""" + print("default daemon target") + + +class DaemonManager: + """Class to manage and monitor the daemon run as a subprocess.""" + + def __init__( + self, + binary_path: str, + daemon_target: callable = default_daemon_target, + daemon_args: tuple = (), + ): + self.binary_path = binary_path + self.daemon_target = daemon_target + self.daemon_args = daemon_args + + self.pid = os.getpid() + self.daemon_process = None + + self.max_memory_usage = 0 + self.max_cpu_usage = 0 + + pid_file_dir = pathlib.Path(tempfile.gettempdir()).joinpath("edit_monitor") + pid_file_dir.mkdir(parents=True, exist_ok=True) + self.pid_file_path = self._get_pid_file_path(pid_file_dir) + self.block_sign = pathlib.Path(tempfile.gettempdir()).joinpath( + BLOCK_SIGN_FILE + ) + + def start(self): + """Writes the pidfile and starts the daemon process.""" + if self.block_sign.exists(): + logging.warning("Block sign found, exiting...") + return + + self._stop_any_existing_instance() + self._write_pid_to_pidfile() + self._start_daemon_process() + + def monitor_daemon( + self, + interval: int = DEFAULT_MONITOR_INTERVAL_SECONDS, + memory_threshold: float = DEFAULT_MEMORY_USAGE_THRESHOLD, + cpu_threshold: float = DEFAULT_CPU_USAGE_THRESHOLD, + reboot_timeout: int = DEFAULT_REBOOT_TIMEOUT_SECONDS, + ): + """Monitors the daemon process status. + + Periodically checks the CPU/memory usage of the daemon process as long as the + process is still running and kills the process if the resource usage is above + the given thresholds. + """ + if not self.daemon_process: + return + + logging.info("start monitoring daemon process %d.", self.daemon_process.pid) + reboot_time = time.time() + reboot_timeout + while self.daemon_process.is_alive(): + if time.time() > reboot_time: + self.reboot() + try: + memory_usage = self._get_process_memory_percent(self.daemon_process.pid) + self.max_memory_usage = max(self.max_memory_usage, memory_usage) + + cpu_usage = self._get_process_cpu_percent(self.daemon_process.pid) + self.max_cpu_usage = max(self.max_cpu_usage, cpu_usage) + + time.sleep(interval) + except Exception as e: + # Log the error and continue. + logging.warning("Failed to monitor daemon process with error: %s", e) + + if ( + self.max_memory_usage >= memory_threshold + or self.max_cpu_usage >= cpu_threshold + ): + logging.error( + "Daemon process is consuming too many resources, killing..." + ) + self._terminate_process(self.daemon_process.pid) + + logging.info( + "Daemon process %d terminated.
Max memory usage: %f, Max cpu" + " usage: %f.", + self.daemon_process.pid, + self.max_memory_usage, + self.max_cpu_usage, + ) + + def stop(self): + """Stops the daemon process and removes the pidfile.""" + + logging.debug("in daemon manager cleanup.") + try: + if self.daemon_process and self.daemon_process.is_alive(): + self._terminate_process(self.daemon_process.pid) + self._remove_pidfile() + logging.debug("Successfully stopped daemon manager.") + except Exception as e: + logging.exception("Failed to stop daemon manager with error %s", e) + + def reboot(self): + """Reboots the current process. + + Stops the current daemon manager and reboots the entire process based on + the binary file. Exits directly if the binary file no longer exists. + """ + logging.debug("Rebooting process based on binary %s.", self.binary_path) + + # Stop the current daemon manager first. + self.stop() + + # If the binary no longer exists, exit directly. + if not os.path.exists(self.binary_path): + logging.info("binary %s no longer exists, exiting.", self.binary_path) + sys.exit(0) + + try: + os.execv(self.binary_path, sys.argv) + except OSError as e: + logging.exception("Failed to reboot process with error: %s.", e) + sys.exit(1) # Indicate an error occurred + + def cleanup(self): + """Wipes out all edit monitor instances in the system. + + Stops all the existing edit monitor instances and places a block sign + to prevent any edit monitor process from starting. This method is only used + in an emergency when something goes wrong with the edit monitor + that requires immediate cleanup to prevent damage to the system. + """ + logging.debug("Start cleaning up all existing instances.") + + try: + # First place a block sign to prevent any edit monitor process from starting. + self.block_sign.touch() + except (FileNotFoundError, PermissionError, OSError): + logging.exception("Failed to place the block sign") + + # Finds and kills all the existing instances of edit monitor. + existing_instances_pids = self._find_all_instances_pids() + for pid in existing_instances_pids: + logging.info( + "Found existing edit monitor instance with pid %d, killing...", pid + ) + try: + self._terminate_process(pid) + except Exception: + logging.exception("Failed to terminate process %d", pid) + + def _stop_any_existing_instance(self): + if not self.pid_file_path.exists(): + logging.debug("No existing instances.") + return + + ex_pid = self._read_pid_from_pidfile() + + if ex_pid: + logging.info("Found another instance with pid %d.", ex_pid) + self._terminate_process(ex_pid) + self._remove_pidfile() + + def _read_pid_from_pidfile(self): + with open(self.pid_file_path, "r") as f: + return int(f.read().strip()) + + def _write_pid_to_pidfile(self): + """Creates a pidfile and writes the current pid to the file. + + Raises FileExistsError if the pidfile already exists. + """ + try: + # Use the 'x' mode to open the file for exclusive creation + with open(self.pid_file_path, "x") as f: + f.write(f"{self.pid}") + except FileExistsError as e: + # This could be caused by a race condition where a user is trying + # to start two edit monitors at the same time, or because there is + # already an existing edit monitor running that we cannot kill + # for some reason.
+ logging.exception("pidfile %s already exists.", self.pid_file_path) + raise e + + def _start_daemon_process(self): + """Starts a subprocess to run the daemon.""" + p = multiprocessing.Process( + target=self.daemon_target, args=self.daemon_args + ) + p.start() + + logging.info("Start subprocess with PID %d", p.pid) + self.daemon_process = p + + def _terminate_process( + self, pid: int, timeout: int = DEFAULT_PROCESS_TERMINATION_TIMEOUT_SECONDS + ): + """Terminates a process with given pid. + + It first sends a SIGTERM to the process to allow it for proper + termination with a timeout. If the process is not terminated within + the timeout, kills it forcefully. + """ + try: + os.kill(pid, signal.SIGTERM) + if not self._wait_for_process_terminate(pid, timeout): + logging.warning( + "Process %d not terminated within timeout, try force kill", pid + ) + os.kill(pid, signal.SIGKILL) + except ProcessLookupError: + logging.info("Process with PID %d not found (already terminated)", pid) + + def _wait_for_process_terminate(self, pid: int, timeout: int) -> bool: + start_time = time.time() + + while time.time() < start_time + timeout: + if not self._is_process_alive(pid): + return True + time.sleep(1) + + logging.error("Process %d not terminated within %d seconds.", pid, timeout) + return False + + def _is_process_alive(self, pid: int) -> bool: + try: + output = subprocess.check_output( + ["ps", "-p", str(pid), "-o", "state="], text=True + ).strip() + state = output.split()[0] + return state != "Z" # Check if the state is not 'Z' (zombie) + except subprocess.CalledProcessError: + # Process not found (already dead). + return False + except (FileNotFoundError, OSError, ValueError) as e: + logging.warning( + "Unable to check the status for process %d with error: %s.", pid, e + ) + return True + + def _remove_pidfile(self): + try: + os.remove(self.pid_file_path) + except FileNotFoundError: + logging.info("pid file %s already removed.", self.pid_file_path) + + def _get_pid_file_path(self, pid_file_dir: pathlib.Path) -> pathlib.Path: + """Generates the path to store the pidfile. + + The file path should have the format of "/tmp/edit_monitor/xxxx.lock" + where xxxx is a hashed value based on the binary path that starts the + process. 
+ """ + hash_object = hashlib.sha256() + hash_object.update(self.binary_path.encode("utf-8")) + pid_file_path = pid_file_dir.joinpath(hash_object.hexdigest() + ".lock") + logging.info("pid_file_path: %s", pid_file_path) + + return pid_file_path + + def _get_process_memory_percent(self, pid: int) -> float: + try: + with open(f"/proc/{pid}/stat", "r") as f: + stat_data = f.readline().split() + # RSS is the 24th field in /proc/[pid]/stat + rss_pages = int(stat_data[23]) + return rss_pages * 4 / 1024 # Covert to MB + except (FileNotFoundError, IndexError, ValueError, IOError) as e: + logging.exception("Failed to get memory usage.") + raise e + + def _get_process_cpu_percent(self, pid: int, interval: int = 1) -> float: + try: + total_start_time = self._get_total_cpu_time(pid) + with open("/proc/uptime", "r") as f: + uptime_start = float(f.readline().split()[0]) + + time.sleep(interval) + + total_end_time = self._get_total_cpu_time(pid) + with open("/proc/uptime", "r") as f: + uptime_end = float(f.readline().split()[0]) + + return ( + (total_end_time - total_start_time) + / (uptime_end - uptime_start) + * 100 + ) + except (FileNotFoundError, IndexError, ValueError, IOError) as e: + logging.exception("Failed to get CPU usage.") + raise e + + def _get_total_cpu_time(self, pid: int) -> float: + with open(f"/proc/{str(pid)}/stat", "r") as f: + stats = f.readline().split() + # utime is the 14th field in /proc/[pid]/stat measured in clock ticks. + utime = int(stats[13]) + # stime is the 15th field in /proc/[pid]/stat measured in clock ticks. + stime = int(stats[14]) + return (utime + stime) / os.sysconf(os.sysconf_names["SC_CLK_TCK"]) + + def _find_all_instances_pids(self) -> list[int]: + pids = [] + + for file in os.listdir(self.pid_file_path.parent): + if file.endswith(".lock"): + try: + with open(self.pid_file_path.parent.joinpath(file), "r") as f: + pids.append(int(f.read().strip())) + except (FileNotFoundError, IOError, ValueError, TypeError): + logging.exception("Failed to get pid from file path: %s", file) + + return pids
\ No newline at end of file diff --git a/tools/edit_monitor/daemon_manager_test.py b/tools/edit_monitor/daemon_manager_test.py new file mode 100644 index 0000000000..d62eade361 --- /dev/null +++ b/tools/edit_monitor/daemon_manager_test.py @@ -0,0 +1,372 @@ +# Copyright 2024, The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unittests for DaemonManager.""" + +import logging +import multiprocessing +import os +import pathlib +import signal +import subprocess +import sys +import tempfile +import time +import unittest +from unittest import mock +from edit_monitor import daemon_manager + + +TEST_BINARY_FILE = '/path/to/test_binary' +TEST_PID_FILE_PATH = ( + '587239c2d1050afdf54512e2d799f3b929f86b43575eb3c7b4bab105dd9bd25e.lock' +) + + +def simple_daemon(output_file): + with open(output_file, 'w') as f: + f.write('running daemon target') + + +def long_running_daemon(): + while True: + time.sleep(1) + + +def memory_consume_daemon_target(size_mb): + try: + size_bytes = size_mb * 1024 * 1024 + dummy_data = bytearray(size_bytes) + time.sleep(10) + except MemoryError: + print(f'Process failed to allocate {size_mb} MB of memory.') + + +def cpu_consume_daemon_target(target_usage_percent): + while True: + start_time = time.time() + while time.time() - start_time < target_usage_percent / 100: + pass # Busy loop to consume CPU + + # Sleep to reduce CPU usage + time.sleep(1 - target_usage_percent / 100) + + +class DaemonManagerTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + # Configure to print logging to stdout. + logging.basicConfig(filename=None, level=logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + logging.getLogger('').addHandler(console) + + def setUp(self): + super().setUp() + self.original_tempdir = tempfile.tempdir + self.working_dir = tempfile.TemporaryDirectory() + # Sets the tempdir under the working dir so any temp files created during + # tests will be cleaned. + tempfile.tempdir = self.working_dir.name + + def tearDown(self): + # Cleans up any child processes left by the tests. + self._cleanup_child_processes() + self.working_dir.cleanup() + # Restores tempdir. + tempfile.tempdir = self.original_tempdir + super().tearDown() + + def test_start_success_with_no_existing_instance(self): + self.assert_run_simple_daemon_success() + + def test_start_success_with_existing_instance_running(self): + # Create a running daemon subprocess + p = self._create_fake_deamon_process() + + self.assert_run_simple_daemon_success() + + def test_start_success_with_existing_instance_already_dead(self): + # Create a pidfile with pid that does not exist. 
+ pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath( + 'edit_monitor' + ) + pid_file_path_dir.mkdir(parents=True, exist_ok=True) + with open(pid_file_path_dir.joinpath(TEST_PID_FILE_PATH), 'w') as f: + f.write('123456') + + self.assert_run_simple_daemon_success() + + def test_start_success_with_existing_instance_from_different_binary(self): + # First start an instance based on "some_binary_path" + existing_dm = daemon_manager.DaemonManager( + 'some_binary_path', + daemon_target=long_running_daemon, + ) + existing_dm.start() + + self.assert_run_simple_daemon_success() + existing_dm.stop() + + def test_start_return_directly_if_block_sign_exists(self): + # Creates the block sign. + pathlib.Path(self.working_dir.name).joinpath( + daemon_manager.BLOCK_SIGN_FILE + ).touch() + + dm = daemon_manager.DaemonManager(TEST_BINARY_FILE) + dm.start() + # Verify no daemon process is started. + self.assertIsNone(dm.daemon_process) + + @mock.patch('os.kill') + def test_start_failed_to_kill_existing_instance(self, mock_kill): + mock_kill.side_effect = OSError('Unknown OSError') + pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath( + 'edit_monitor' + ) + pid_file_path_dir.mkdir(parents=True, exist_ok=True) + with open(pid_file_path_dir.joinpath(TEST_PID_FILE_PATH), 'w') as f: + f.write('123456') + + with self.assertRaises(OSError) as error: + dm = daemon_manager.DaemonManager(TEST_BINARY_FILE) + dm.start() + + def test_start_failed_to_write_pidfile(self): + pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath( + 'edit_monitor' + ) + pid_file_path_dir.mkdir(parents=True, exist_ok=True) + # Makes the directory read-only so write pidfile will fail. + os.chmod(pid_file_path_dir, 0o555) + + with self.assertRaises(PermissionError) as error: + dm = daemon_manager.DaemonManager(TEST_BINARY_FILE) + dm.start() + + def test_start_failed_to_start_daemon_process(self): + with self.assertRaises(TypeError) as error: + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, daemon_target='wrong_target', daemon_args=(1) + ) + dm.start() + + def test_monitor_daemon_subprocess_killed_high_memory_usage(self): + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, + daemon_target=memory_consume_daemon_target, + daemon_args=(2,), + ) + dm.start() + dm.monitor_daemon(interval=1, memory_threshold=2) + + self.assertTrue(dm.max_memory_usage >= 2) + self.assert_no_subprocess_running() + + def test_monitor_daemon_subprocess_killed_high_cpu_usage(self): + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, + daemon_target=cpu_consume_daemon_target, + daemon_args=(20,), + ) + dm.start() + dm.monitor_daemon(interval=1, cpu_threshold=20) + + self.assertTrue(dm.max_cpu_usage >= 20) + self.assert_no_subprocess_running() + + @mock.patch('subprocess.check_output') + def test_monitor_daemon_failed_does_not_matter(self, mock_output): + mock_output.side_effect = OSError('Unknown OSError') + self.assert_run_simple_daemon_success() + + @mock.patch('os.execv') + def test_monitor_daemon_reboot_triggered(self, mock_execv): + binary_file = tempfile.NamedTemporaryFile( + dir=self.working_dir.name, delete=False + ) + + dm = daemon_manager.DaemonManager( + binary_file.name, daemon_target=long_running_daemon + ) + dm.start() + dm.monitor_daemon(reboot_timeout=0.5) + mock_execv.assert_called_once() + + def test_stop_success(self): + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, daemon_target=long_running_daemon + ) + dm.start() + dm.stop() + + self.assert_no_subprocess_running() + 
self.assertFalse(dm.pid_file_path.exists()) + + @mock.patch('os.kill') + def test_stop_failed_to_kill_daemon_process(self, mock_kill): + mock_kill.side_effect = OSError('Unknown OSError') + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, daemon_target=long_running_daemon + ) + dm.start() + dm.stop() + + self.assertTrue(dm.daemon_process.is_alive()) + self.assertTrue(dm.pid_file_path.exists()) + + @mock.patch('os.remove') + def test_stop_failed_to_remove_pidfile(self, mock_remove): + mock_remove.side_effect = OSError('Unknown OSError') + + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, daemon_target=long_running_daemon + ) + dm.start() + dm.stop() + + self.assert_no_subprocess_running() + self.assertTrue(dm.pid_file_path.exists()) + + @mock.patch('os.execv') + def test_reboot_success(self, mock_execv): + binary_file = tempfile.NamedTemporaryFile( + dir=self.working_dir.name, delete=False + ) + + dm = daemon_manager.DaemonManager( + binary_file.name, daemon_target=long_running_daemon + ) + dm.start() + dm.reboot() + + # Verifies the old process is stopped + self.assert_no_subprocess_running() + self.assertFalse(dm.pid_file_path.exists()) + + mock_execv.assert_called_once() + + @mock.patch('os.execv') + def test_reboot_binary_no_longer_exists(self, mock_execv): + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, daemon_target=long_running_daemon + ) + dm.start() + + with self.assertRaises(SystemExit) as cm: + dm.reboot() + mock_execv.assert_not_called() + self.assertEqual(cm.exception.code, 0) + + @mock.patch('os.execv') + def test_reboot_failed(self, mock_execv): + mock_execv.side_effect = OSError('Unknown OSError') + binary_file = tempfile.NamedTemporaryFile( + dir=self.working_dir.name, delete=False + ) + + dm = daemon_manager.DaemonManager( + binary_file.name, daemon_target=long_running_daemon + ) + dm.start() + + with self.assertRaises(SystemExit) as cm: + dm.reboot() + self.assertEqual(cm.exception.code, 1) + + def assert_run_simple_daemon_success(self): + damone_output_file = tempfile.NamedTemporaryFile( + dir=self.working_dir.name, delete=False + ) + dm = daemon_manager.DaemonManager( + TEST_BINARY_FILE, + daemon_target=simple_daemon, + daemon_args=(damone_output_file.name,), + ) + dm.start() + dm.monitor_daemon(interval=1) + + # Verifies the expected pid file is created. + expected_pid_file_path = pathlib.Path(self.working_dir.name).joinpath( + 'edit_monitor', TEST_PID_FILE_PATH + ) + self.assertTrue(expected_pid_file_path.exists()) + + # Verify the daemon process is executed successfully. 
+ with open(damone_output_file.name, 'r') as f: + contents = f.read() + self.assertEqual(contents, 'running daemon target') + + def assert_no_subprocess_running(self): + child_pids = self._get_child_processes(os.getpid()) + for child_pid in child_pids: + self.assertFalse( + self._is_process_alive(child_pid), f'process {child_pid} still alive' + ) + + def _get_child_processes(self, parent_pid: int) -> list[int]: + try: + output = subprocess.check_output( + ['ps', '-o', 'pid,ppid', '--no-headers'], text=True + ) + + child_processes = [] + for line in output.splitlines(): + pid, ppid = line.split() + if int(ppid) == parent_pid: + child_processes.append(int(pid)) + return child_processes + except subprocess.CalledProcessError as e: + self.fail(f'failed to get child process, error: {e}') + + def _is_process_alive(self, pid: int) -> bool: + try: + output = subprocess.check_output( + ['ps', '-p', str(pid), '-o', 'state='], text=True + ).strip() + state = output.split()[0] + return state != 'Z' # Check if the state is not 'Z' (zombie) + except subprocess.CalledProcessError: + return False + + def _cleanup_child_processes(self): + child_pids = self._get_child_processes(os.getpid()) + for child_pid in child_pids: + try: + os.kill(child_pid, signal.SIGKILL) + except ProcessLookupError: + # process already terminated + pass + + def _create_fake_deamon_process( + self, name: str = '' + ) -> multiprocessing.Process: + # Create a long running subprocess + p = multiprocessing.Process(target=long_running_daemon) + p.start() + + # Create the pidfile with the subprocess pid + pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath( + 'edit_monitor' + ) + pid_file_path_dir.mkdir(parents=True, exist_ok=True) + with open(pid_file_path_dir.joinpath(name + 'pid.lock'), 'w') as f: + f.write(str(p.pid)) + return p + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/edit_monitor/main.py b/tools/edit_monitor/main.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tools/edit_monitor/main.py diff --git a/tools/edit_monitor/proto/edit_event.proto b/tools/edit_monitor/proto/edit_event.proto new file mode 100644 index 0000000000..b3630bc944 --- /dev/null +++ b/tools/edit_monitor/proto/edit_event.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +package tools.asuite.edit_monitor; + +message EditEvent { + enum EditType { + UNSUPPORTED_TYPE = 0; + CREATE = 1; + MODIFY = 2; + DELETE = 3; + MOVE = 4; + } + + enum ErrorType { + UNKNOWN_ERROR = 0; + FAILED_TO_START_EDIT_MONITOR = 1; + FAILED_TO_STOP_EDIT_MONITOR = 2; + FAILED_TO_REBOOT_EDIT_MONITOR = 3; + KILLED_DUE_TO_EXCEEDED_RESOURCE_USAGE = 4; + FORCE_CLEANUP = 5; + } + + // Event that logs a single edit + message SingleEditEvent { + // Full path of the file that edited. + string file_path = 1; + // Type of the edit. + EditType edit_type = 2; + } + + // Event that logs aggregated info for a set of edits. + message AggregatedEditEvent { + int32 num_edits = 1; + } + + // Event that logs errors happened in the edit monitor. + message EditMonitorErrorEvent { + ErrorType error_type = 1; + string error_msg = 2; + string stack_trace = 3; + } + + // ------------------------ + // FIELDS FOR EditEvent + // ------------------------ + // Internal user name. + string user_name = 1; + // The root of Android source. + string source_root = 2; + // Name of the host workstation. 
+ string host_name = 3; + + oneof event { + SingleEditEvent single_edit_event = 4; + AggregatedEditEvent aggregated_edit_event = 5; + EditMonitorErrorEvent edit_monitor_error_event = 6; + } +} diff --git a/tools/filelistdiff/Android.bp b/tools/filelistdiff/Android.bp index ab766d6d93..3826e50ff3 100644 --- a/tools/filelistdiff/Android.bp +++ b/tools/filelistdiff/Android.bp @@ -24,4 +24,9 @@ python_binary_host { prebuilt_etc_host { name: "system_image_diff_allowlist", src: "allowlist", -}
\ No newline at end of file +} + +prebuilt_etc_host { + name: "system_image_diff_allowlist_next", + src: "allowlist_next", +} diff --git a/tools/filelistdiff/OWNERS b/tools/filelistdiff/OWNERS new file mode 100644 index 0000000000..690fb178fc --- /dev/null +++ b/tools/filelistdiff/OWNERS @@ -0,0 +1 @@ +per-file allowlist = justinyun@google.com, jeongik@google.com, kiyoungkim@google.com, inseob@google.com diff --git a/tools/filelistdiff/allowlist b/tools/filelistdiff/allowlist index c4a464dd16..eb785872cf 100644 --- a/tools/filelistdiff/allowlist +++ b/tools/filelistdiff/allowlist @@ -1,49 +1,5 @@ -# Known diffs only in the KATI system image -etc/NOTICE.xml.gz -framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.odex -framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.odex.fsv_meta -framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex -framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex.fsv_meta -lib/aaudio-aidl-cpp.so -lib/android.hardware.biometrics.fingerprint@2.1.so -lib/android.hardware.radio.config@1.0.so -lib/android.hardware.radio.deprecated@1.0.so -lib/android.hardware.radio@1.0.so -lib/android.hardware.radio@1.1.so -lib/android.hardware.radio@1.2.so -lib/android.hardware.radio@1.3.so -lib/android.hardware.radio@1.4.so -lib/android.hardware.secure_element@1.0.so -lib/com.android.media.aaudio-aconfig-cc.so -lib/heapprofd_client.so -lib/heapprofd_client_api.so -lib/libaaudio.so -lib/libaaudio_internal.so -lib/libalarm_jni.so -lib/libamidi.so -lib/libcups.so -lib/libjni_deviceAsWebcam.so -lib/libprintspooler_jni.so -lib/libvendorsupport.so -lib/libwfds.so -lib/libyuv.so - -# b/351258461 -adb_keys +# Known diffs that are installed in either system image with the configuration +# b/353429422 init.environ.rc - -# Known diffs only in the Soong system image -lib/libhidcommand_jni.so -lib/libuinputcommand_jni.so - -# Known diffs in internal source -bin/uprobestats -etc/aconfig/flag.map -etc/aconfig/flag.val -etc/aconfig/package.map -etc/bpf/uprobestats/BitmapAllocation.o -etc/bpf/uprobestats/GenericInstrumentation.o -etc/init/UprobeStats.rc -lib/libuprobestats_client.so -lib64/libuprobestats_client.so -priv-app/DeviceDiagnostics/DeviceDiagnostics.apk
\ No newline at end of file +# b/338342381 +etc/NOTICE.xml.gz diff --git a/tools/filelistdiff/allowlist_next b/tools/filelistdiff/allowlist_next new file mode 100644 index 0000000000..d7078f5d11 --- /dev/null +++ b/tools/filelistdiff/allowlist_next @@ -0,0 +1,15 @@ +# Allowlist only for the next release configuration. +# TODO(b/369678122): The list will be cleared when the trunk configurations are +# available to the next. + +# KATI only installed files +framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.odex +framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.odex.fsv_meta +framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex +framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex.fsv_meta + +# Soong only installed files +etc/aconfig/flag.info +etc/aconfig/flag.map +etc/aconfig/flag.val +etc/aconfig/package.map diff --git a/tools/filelistdiff/file_list_diff.py b/tools/filelistdiff/file_list_diff.py index cdc5b2ee41..951325f431 100644 --- a/tools/filelistdiff/file_list_diff.py +++ b/tools/filelistdiff/file_list_diff.py @@ -19,13 +19,16 @@ COLOR_WARNING = '\033[93m' COLOR_ERROR = '\033[91m' COLOR_NORMAL = '\033[0m' -def find_unique_items(kati_installed_files, soong_installed_files, allowlist, system_module_name): +def find_unique_items(kati_installed_files, soong_installed_files, system_module_name, allowlists): with open(kati_installed_files, 'r') as kati_list_file, \ - open(soong_installed_files, 'r') as soong_list_file, \ - open(allowlist, 'r') as allowlist_file: + open(soong_installed_files, 'r') as soong_list_file: kati_files = set(kati_list_file.read().split()) soong_files = set(soong_list_file.read().split()) - allowed_files = set(filter(lambda x: len(x), map(lambda x: x.lstrip().split('#',1)[0].rstrip() , allowlist_file.read().split('\n')))) + + allowed_files = set() + for allowlist in allowlists: + with open(allowlist, 'r') as allowlist_file: + allowed_files.update(set(filter(lambda x: len(x), map(lambda x: x.lstrip().split('#',1)[0].rstrip() , allowlist_file.read().split('\n'))))) def is_unknown_diff(filepath): return not filepath in allowed_files @@ -34,23 +37,24 @@ def find_unique_items(kati_installed_files, soong_installed_files, allowlist, sy unique_in_soong = set(filter(is_unknown_diff, soong_files - kati_files)) if unique_in_kati: - print(f'{COLOR_ERROR}Please add following modules into system image module {system_module_name}.{COLOR_NORMAL}') - print(f'{COLOR_WARNING}KATI only module(s):{COLOR_NORMAL}') + print('') + print(f'{COLOR_ERROR}Missing required modules in {system_module_name} module.{COLOR_NORMAL}') + print(f'To resolve this issue, please add the modules to the Android.bp file for the {system_module_name} to install the following KATI only installed files.') + print(f'You can find the correct Android.bp file using the command "gomod {system_module_name}".') + print(f'{COLOR_WARNING}KATI only installed file(s):{COLOR_NORMAL}') for item in sorted(unique_in_kati): - print(item) + print(' '+item) if unique_in_soong: - if unique_in_kati: - print('') - - print(f'{COLOR_ERROR}Please add following modules into build/make/target/product/base_system.mk.{COLOR_NORMAL}') - print(f'{COLOR_WARNING}Soong only module(s):{COLOR_NORMAL}') + print('') + print(f'{COLOR_ERROR}Missing packages in base_system.mk.{COLOR_NORMAL}') + print('Please add packages into build/make/target/product/base_system.mk or build/make/tools/filelistdiff/allowlist to install or skip the 
following Soong only installed files.') + print(f'{COLOR_WARNING}Soong only installed file(s):{COLOR_NORMAL}') for item in sorted(unique_in_soong): - print(item) + print(' '+item) if unique_in_kati or unique_in_soong: print('') - print(f'{COLOR_ERROR}FAILED: System image from KATI and SOONG differs from installed file list.{COLOR_NORMAL}') sys.exit(1) @@ -59,8 +63,8 @@ if __name__ == '__main__': parser.add_argument('kati_installed_file_list') parser.add_argument('soong_installed_file_list') - parser.add_argument('allowlist') parser.add_argument('system_module_name') + parser.add_argument('--allowlists', nargs='+') args = parser.parse_args() - find_unique_items(args.kati_installed_file_list, args.soong_installed_file_list, args.allowlist, args.system_module_name)
\ No newline at end of file + find_unique_items(args.kati_installed_file_list, args.soong_installed_file_list, args.system_module_name, args.allowlists)
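# A sketch of the new calling convention for find_unique_items after the --allowlists
# change: the single positional allowlist argument is replaced by a list of allowlist
# files passed behind system_module_name. The concrete paths and module name below are
# illustrative assumptions.
find_unique_items(
    'out/kati_installed_files.txt',
    'out/soong_installed_files.txt',
    'system_image',
    ['build/make/tools/filelistdiff/allowlist',
     'build/make/tools/filelistdiff/allowlist_next'],
)
# Roughly equivalent command line:
#   file_list_diff.py out/kati_installed_files.txt out/soong_installed_files.txt \
#       system_image --allowlists allowlist allowlist_next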
\ No newline at end of file diff --git a/tools/ide_query/ide_query.go b/tools/ide_query/ide_query.go index 23c7abd2a0..89ac78fa5f 100644 --- a/tools/ide_query/ide_query.go +++ b/tools/ide_query/ide_query.go @@ -363,6 +363,7 @@ func getJavaInputs(env Env, modulesByPath map[string]string, modules map[string] Id: name, SourceFilePaths: mod.Srcs, GeneratedFiles: genFiles(env, paths), + DependencyIds: mod.Deps, } for _, d := range mod.Deps { diff --git a/tools/ide_query/ide_query.sh b/tools/ide_query/ide_query.sh index 6f9b0c4b8b..8dfffc1cfa 100755 --- a/tools/ide_query/ide_query.sh +++ b/tools/ide_query/ide_query.sh @@ -19,7 +19,7 @@ source $(pwd)/../../shell_utils.sh require_top # Ensure cogsetup (out/ will be symlink outside the repo) -. ${TOP}/build/make/cogsetup.sh +setup_cog_env_if_needed case $(uname -s) in Linux) diff --git a/tools/sbom/Android.bp b/tools/sbom/Android.bp index 6901b06720..4f6d3b7863 100644 --- a/tools/sbom/Android.bp +++ b/tools/sbom/Android.bp @@ -33,6 +33,13 @@ python_binary_host { ], } +python_library_host { + name: "compliance_metadata", + srcs: [ + "compliance_metadata.py", + ], +} + python_binary_host { name: "gen_sbom", srcs: [ @@ -44,6 +51,7 @@ python_binary_host { }, }, libs: [ + "compliance_metadata", "metadata_file_proto_py", "libprotobuf-python", "sbom_lib", @@ -109,3 +117,17 @@ python_binary_host { "sbom_lib", ], } + +python_binary_host { + name: "gen_notice_xml", + srcs: [ + "gen_notice_xml.py", + ], + version: { + py3: { + embedded_launcher: true, + }, + }, + libs: [ + ], +} diff --git a/tools/sbom/compliance_metadata.py b/tools/sbom/compliance_metadata.py new file mode 100644 index 0000000000..9910217bbe --- /dev/null +++ b/tools/sbom/compliance_metadata.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sqlite3 + +class MetadataDb: + def __init__(self, db): + self.conn = sqlite3.connect(':memory:') + self.conn.row_factory = sqlite3.Row + with sqlite3.connect(db) as c: + c.backup(self.conn) + self.reorg() + + def reorg(self): + # package_license table + self.conn.execute("create table package_license as " + "select name as package, pkg_default_applicable_licenses as license " + "from modules " + "where module_type = 'package' ") + cursor = self.conn.execute("select package,license from package_license where license like '% %'") + multi_licenses_packages = cursor.fetchall() + cursor.close() + rows = [] + for p in multi_licenses_packages: + licenses = p['license'].strip().split(' ') + for lic in licenses: + rows.append((p['package'], lic)) + self.conn.executemany('insert into package_license values (?, ?)', rows) + self.conn.commit() + + self.conn.execute("delete from package_license where license like '% %'") + self.conn.commit() + + # module_license table + self.conn.execute("create table module_license as " + "select distinct name as module, package, licenses as license " + "from modules " + "where licenses != '' ") + cursor = self.conn.execute("select module,package,license from module_license where license like '% %'") + multi_licenses_modules = cursor.fetchall() + cursor.close() + rows = [] + for m in multi_licenses_modules: + licenses = m['license'].strip().split(' ') + for lic in licenses: + rows.append((m['module'], m['package'],lic)) + self.conn.executemany('insert into module_license values (?, ?, ?)', rows) + self.conn.commit() + + self.conn.execute("delete from module_license where license like '% %'") + self.conn.commit() + + # module_installed_file table + self.conn.execute("create table module_installed_file as " + "select id as module_id, name as module_name, package, installed_files as installed_file " + "from modules " + "where installed_files != '' ") + cursor = self.conn.execute("select module_id, module_name, package, installed_file " + "from module_installed_file where installed_file like '% %'") + multi_installed_file_modules = cursor.fetchall() + cursor.close() + rows = [] + for m in multi_installed_file_modules: + installed_files = m['installed_file'].strip().split(' ') + for f in installed_files: + rows.append((m['module_id'], m['module_name'], m['package'], f)) + self.conn.executemany('insert into module_installed_file values (?, ?, ?, ?)', rows) + self.conn.commit() + + self.conn.execute("delete from module_installed_file where installed_file like '% %'") + self.conn.commit() + + # module_built_file table + self.conn.execute("create table module_built_file as " + "select id as module_id, name as module_name, package, built_files as built_file " + "from modules " + "where built_files != '' ") + cursor = self.conn.execute("select module_id, module_name, package, built_file " + "from module_built_file where built_file like '% %'") + multi_built_file_modules = cursor.fetchall() + cursor.close() + rows = [] + for m in multi_built_file_modules: + built_files = m['built_file'].strip().split(' ') + for f in built_files: + rows.append((m['module_id'], m['module_name'], m['package'], f)) + self.conn.executemany('insert into module_built_file values (?, ?, ?, ?)', rows) + self.conn.commit() + + self.conn.execute("delete from module_built_file where built_file like '% %'") + self.conn.commit() + + + # Indexes + self.conn.execute('create index idx_modules_id on modules (id)') + self.conn.execute('create index idx_modules_name on modules (name)') +
self.conn.execute('create index idx_package_licnese_package on package_license (package)') + self.conn.execute('create index idx_package_licnese_license on package_license (license)') + self.conn.execute('create index idx_module_licnese_module on module_license (module)') + self.conn.execute('create index idx_module_licnese_license on module_license (license)') + self.conn.execute('create index idx_module_installed_file_module_id on module_installed_file (module_id)') + self.conn.execute('create index idx_module_installed_file_installed_file on module_installed_file (installed_file)') + self.conn.execute('create index idx_module_built_file_module_id on module_built_file (module_id)') + self.conn.execute('create index idx_module_built_file_built_file on module_built_file (built_file)') + self.conn.commit() + + def dump_debug_db(self, debug_db): + with sqlite3.connect(debug_db) as c: + self.conn.backup(c) + + def get_installed_files(self): + # Get all records from table make_metadata, which contains all installed files and corresponding make modules' metadata + cursor = self.conn.execute('select installed_file, module_path, is_prebuilt_make_module, product_copy_files, kernel_module_copy_files, is_platform_generated, license_text from make_metadata') + rows = cursor.fetchall() + cursor.close() + installed_files_metadata = [] + for row in rows: + metadata = dict(zip(row.keys(), row)) + installed_files_metadata.append(metadata) + return installed_files_metadata + + def get_soong_modules(self): + # Get all records from table modules, which contains metadata of all soong modules + cursor = self.conn.execute('select name, package, package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files from modules') + rows = cursor.fetchall() + cursor.close() + soong_modules = [] + for row in rows: + soong_module = dict(zip(row.keys(), row)) + soong_modules.append(soong_module) + return soong_modules + + def get_package_licenses(self, package): + cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' + 'from package_license pl join modules m on pl.license = m.name ' + 'where pl.package = ?', + ('//' + package,)) + rows = cursor.fetchall() + licenses = {} + for r in rows: + licenses[r['name']] = r['license_text'] + return licenses + + def get_module_licenses(self, module_name, package): + licenses = {} + # If property "licenses" is defined on module + cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' + 'from module_license ml join modules m on ml.license = m.name ' + 'where ml.module = ? 
and ml.package = ?', + (module_name, package)) + rows = cursor.fetchall() + for r in rows: + licenses[r['name']] = r['license_text'] + if len(licenses) > 0: + return licenses + + # Use default package license + cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' + 'from package_license pl join modules m on pl.license = m.name ' + 'where pl.package = ?', + ('//' + package,)) + rows = cursor.fetchall() + for r in rows: + licenses[r['name']] = r['license_text'] + return licenses + + def get_soong_module_of_installed_file(self, installed_file): + cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files ' + 'from modules m join module_installed_file mif on m.id = mif.module_id ' + 'where mif.installed_file = ?', + (installed_file,)) + rows = cursor.fetchall() + cursor.close() + if rows: + soong_module = dict(zip(rows[0].keys(), rows[0])) + return soong_module + + return None + + def get_soong_module_of_built_file(self, built_file): + cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files ' + 'from modules m join module_built_file mbf on m.id = mbf.module_id ' + 'where mbf.built_file = ?', + (built_file,)) + rows = cursor.fetchall() + cursor.close() + if rows: + soong_module = dict(zip(rows[0].keys(), rows[0])) + return soong_module + + return None
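# A minimal sketch of how the extracted compliance_metadata.MetadataDb is meant to be
# consumed; gen_sbom.py below switches to this entry point. The database path reuses the
# example from the gen_notice_xml.py usage string, and the lookup flow is illustrative,
# not a prescribed API contract.
import compliance_metadata

db = compliance_metadata.MetadataDb(
    'out/soong/compliance-metadata/aosp_cf_x86_64_phone/compliance-metadata.db')
for file_metadata in db.get_installed_files():
    installed_file = file_metadata['installed_file']
    soong_module = db.get_soong_module_of_installed_file(installed_file)
    if soong_module:
        licenses = db.get_module_licenses(soong_module['name'], soong_module['package'])
        print(installed_file, sorted(licenses.keys()))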
\ No newline at end of file diff --git a/tools/sbom/gen_notice_xml.py b/tools/sbom/gen_notice_xml.py new file mode 100644 index 0000000000..eaa6e5a74d --- /dev/null +++ b/tools/sbom/gen_notice_xml.py @@ -0,0 +1,81 @@ +# !/usr/bin/env python3 +# +# Copyright (C) 2024 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Generate NOTICE.xml.gz of a partition. +Usage example: + gen_notice_xml.py --output_file out/soong/.intermediate/.../NOTICE.xml.gz \ + --metadata out/soong/compliance-metadata/aosp_cf_x86_64_phone/compliance-metadata.db \ + --partition system \ + --product_out out/target/vsoc_x86_64 \ + --soong_out out/soong +""" + +import argparse + + +FILE_HEADER = '''\ +<?xml version="1.0" encoding="utf-8"?> +<licenses> +''' +FILE_FOOTER = '''\ +</licenses> +''' + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Print more information.') + parser.add_argument('-d', '--debug', action='store_true', default=True, help='Debug mode') + parser.add_argument('--output_file', required=True, help='The path of the generated NOTICE.xml.gz file.') + parser.add_argument('--partition', required=True, help='The name of partition for which the NOTICE.xml.gz is generated.') + parser.add_argument('--metadata', required=True, help='The path of compliance metadata DB file.') + parser.add_argument('--product_out', required=True, help='The path of PRODUCT_OUT, e.g. out/target/product/vsoc_x86_64.') + parser.add_argument('--soong_out', required=True, help='The path of Soong output directory, e.g. 
out/soong') + + return parser.parse_args() + + +def log(*info): + if args.verbose: + for i in info: + print(i) + + +def new_file_name_tag(file_metadata, package_name): + file_path = file_metadata['installed_file'].removeprefix(args.product_out) + lib = 'Android' + if package_name: + lib = package_name + return f'<file-name contentId="" lib="{lib}">{file_path}</file-name>\n' + + +def new_file_content_tag(): + pass + + +def main(): + global args + args = get_args() + log('Args:', vars(args)) + + with open(args.output_file, 'w', encoding="utf-8") as notice_xml_file: + notice_xml_file.write(FILE_HEADER) + notice_xml_file.write(FILE_FOOTER) + + +if __name__ == '__main__': + main() diff --git a/tools/sbom/gen_sbom.py b/tools/sbom/gen_sbom.py index a203258b96..9c3a8be9ef 100644 --- a/tools/sbom/gen_sbom.py +++ b/tools/sbom/gen_sbom.py @@ -26,6 +26,7 @@ Usage example: """ import argparse +import compliance_metadata import datetime import google.protobuf.text_format as text_format import hashlib @@ -35,7 +36,6 @@ import queue import metadata_file_pb2 import sbom_data import sbom_writers -import sqlite3 # Package type PKG_SOURCE = 'SOURCE' @@ -568,202 +568,16 @@ def get_all_transitive_static_dep_files_of_installed_files(installed_files_metad return sorted(all_static_dep_files.keys()) -class MetadataDb: - def __init__(self, db): - self.conn = sqlite3.connect(':memory') - self.conn.row_factory = sqlite3.Row - with sqlite3.connect(db) as c: - c.backup(self.conn) - self.reorg() - - def reorg(self): - # package_license table - self.conn.execute("create table package_license as " - "select name as package, pkg_default_applicable_licenses as license " - "from modules " - "where module_type = 'package' ") - cursor = self.conn.execute("select package,license from package_license where license like '% %'") - multi_licenses_packages = cursor.fetchall() - cursor.close() - rows = [] - for p in multi_licenses_packages: - licenses = p['license'].strip().split(' ') - for lic in licenses: - rows.append((p['package'], lic)) - self.conn.executemany('insert into package_license values (?, ?)', rows) - self.conn.commit() - - self.conn.execute("delete from package_license where license like '% %'") - self.conn.commit() - - # module_license table - self.conn.execute("create table module_license as " - "select distinct name as module, package, licenses as license " - "from modules " - "where licenses != '' ") - cursor = self.conn.execute("select module,package,license from module_license where license like '% %'") - multi_licenses_modules = cursor.fetchall() - cursor.close() - rows = [] - for m in multi_licenses_modules: - licenses = m['license'].strip().split(' ') - for lic in licenses: - rows.append((m['module'], m['package'],lic)) - self.conn.executemany('insert into module_license values (?, ?, ?)', rows) - self.conn.commit() - - self.conn.execute("delete from module_license where license like '% %'") - self.conn.commit() - - # module_installed_file table - self.conn.execute("create table module_installed_file as " - "select id as module_id, name as module_name, package, installed_files as installed_file " - "from modules " - "where installed_files != '' ") - cursor = self.conn.execute("select module_id, module_name, package, installed_file " - "from module_installed_file where installed_file like '% %'") - multi_installed_file_modules = cursor.fetchall() - cursor.close() - rows = [] - for m in multi_installed_file_modules: - installed_files = m['installed_file'].strip().split(' ') - for f in installed_files: - 
rows.append((m['module_id'], m['module_name'], m['package'], f)) - self.conn.executemany('insert into module_installed_file values (?, ?, ?, ?)', rows) - self.conn.commit() - - self.conn.execute("delete from module_installed_file where installed_file like '% %'") - self.conn.commit() - - # module_built_file table - self.conn.execute("create table module_built_file as " - "select id as module_id, name as module_name, package, built_files as built_file " - "from modules " - "where built_files != '' ") - cursor = self.conn.execute("select module_id, module_name, package, built_file " - "from module_built_file where built_file like '% %'") - multi_built_file_modules = cursor.fetchall() - cursor.close() - rows = [] - for m in multi_built_file_modules: - built_files = m['installed_file'].strip().split(' ') - for f in built_files: - rows.append((m['module_id'], m['module_name'], m['package'], f)) - self.conn.executemany('insert into module_built_file values (?, ?, ?, ?)', rows) - self.conn.commit() - - self.conn.execute("delete from module_built_file where built_file like '% %'") - self.conn.commit() - - - # Indexes - self.conn.execute('create index idx_modules_id on modules (id)') - self.conn.execute('create index idx_modules_name on modules (name)') - self.conn.execute('create index idx_package_licnese_package on package_license (package)') - self.conn.execute('create index idx_package_licnese_license on package_license (license)') - self.conn.execute('create index idx_module_licnese_module on module_license (module)') - self.conn.execute('create index idx_module_licnese_license on module_license (license)') - self.conn.execute('create index idx_module_installed_file_module_id on module_installed_file (module_id)') - self.conn.execute('create index idx_module_installed_file_installed_file on module_installed_file (installed_file)') - self.conn.execute('create index idx_module_built_file_module_id on module_built_file (module_id)') - self.conn.execute('create index idx_module_built_file_built_file on module_built_file (built_file)') - self.conn.commit() - - if args.debug: - with sqlite3.connect(os.path.dirname(args.metadata) + '/compliance-metadata-debug.db') as c: - self.conn.backup(c) - - - def get_installed_files(self): - # Get all records from table make_metadata, which contains all installed files and corresponding make modules' metadata - cursor = self.conn.execute('select installed_file, module_path, is_prebuilt_make_module, product_copy_files, kernel_module_copy_files, is_platform_generated, license_text from make_metadata') - rows = cursor.fetchall() - cursor.close() - installed_files_metadata = [] - for row in rows: - metadata = dict(zip(row.keys(), row)) - installed_files_metadata.append(metadata) - return installed_files_metadata - - def get_soong_modules(self): - # Get all records from table modules, which contains metadata of all soong modules - cursor = self.conn.execute('select name, package, package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files from modules') - rows = cursor.fetchall() - cursor.close() - soong_modules = [] - for row in rows: - soong_module = dict(zip(row.keys(), row)) - soong_modules.append(soong_module) - return soong_modules - - def get_package_licenses(self, package): - cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' - 'from package_license pl join modules m on pl.license = m.name ' - 'where pl.package = ?', - ('//' + package,)) - rows = 
cursor.fetchall() - licenses = {} - for r in rows: - licenses[r['name']] = r['license_text'] - return licenses - - def get_module_licenses(self, module_name, package): - licenses = {} - # If property "licenses" is defined on module - cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' - 'from module_license ml join modules m on ml.license = m.name ' - 'where ml.module = ? and ml.package = ?', - (module_name, package)) - rows = cursor.fetchall() - for r in rows: - licenses[r['name']] = r['license_text'] - if len(licenses) > 0: - return licenses - - # Use default package license - cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text ' - 'from package_license pl join modules m on pl.license = m.name ' - 'where pl.package = ?', - ('//' + package,)) - rows = cursor.fetchall() - for r in rows: - licenses[r['name']] = r['license_text'] - return licenses - - def get_soong_module_of_installed_file(self, installed_file): - cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files ' - 'from modules m join module_installed_file mif on m.id = mif.module_id ' - 'where mif.installed_file = ?', - (installed_file,)) - rows = cursor.fetchall() - cursor.close() - if rows: - soong_module = dict(zip(rows[0].keys(), rows[0])) - return soong_module - - return None - - def get_soong_module_of_built_file(self, built_file): - cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files ' - 'from modules m join module_built_file mbf on m.id = mbf.module_id ' - 'where mbf.built_file = ?', - (built_file,)) - rows = cursor.fetchall() - cursor.close() - if rows: - soong_module = dict(zip(rows[0].keys(), rows[0])) - return soong_module - - return None - - def main(): global args args = get_args() log('Args:', vars(args)) global db - db = MetadataDb(args.metadata) + db = compliance_metadata.MetadataDb(args.metadata) + if args.debug: + db.dump_debug_db(os.path.dirname(args.output_file) + '/compliance-metadata-debug.db') + global metadata_file_protos metadata_file_protos = {} global licenses_text diff --git a/tools/signapk/src/com/android/signapk/SignApk.java b/tools/signapk/src/com/android/signapk/SignApk.java index 6b2341bc80..654e19675d 100644 --- a/tools/signapk/src/com/android/signapk/SignApk.java +++ b/tools/signapk/src/com/android/signapk/SignApk.java @@ -302,7 +302,6 @@ class SignApk { final KeyStore keyStore, final String keyName) throws CertificateException, KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException, UnrecoverableEntryException { - final Key key = keyStore.getKey(keyName, readPassword(keyName)); final PrivateKeyEntry privateKeyEntry = (PrivateKeyEntry) keyStore.getEntry(keyName, null); if (privateKeyEntry == null) { throw new Error( |