32 files changed, 747 insertions, 280 deletions
diff --git a/core/Makefile b/core/Makefile
index b3870e5920..94fc88e80d 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -287,6 +287,11 @@ endif
 endif
 endif
 
+# Do this early because sysprop.mk depends on BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC.
+ifeq (default,$(ENABLE_UFFD_GC))
+BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC := $(OUT_DIR)/soong/dexpreopt/kernel_version_for_uffd_gc.txt
+endif # ENABLE_UFFD_GC
+
 include $(BUILD_SYSTEM)/sysprop.mk
 
 # ----------------------------------------------------------------
@@ -5256,6 +5261,34 @@ my_board_extracted_kernel :=
 
 endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
 
+ifeq (default,$(ENABLE_UFFD_GC))
+
+ifneq (,$(BUILT_KERNEL_VERSION_FILE))
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC): $(BUILT_KERNEL_VERSION_FILE)
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC):
+	cp $(BUILT_KERNEL_VERSION_FILE) $(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)
+else
+# We make this a warning rather than an error to avoid breaking too many builds. When it happens,
+# we use a placeholder as the kernel version, which is consumed by uffd_gc_utils.py.
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC):
+	echo $$'\
+Unable to determine UFFD GC flag because the kernel version is not available and\n\
+PRODUCT_ENABLE_UFFD_GC is "default".\n\
+You can fix this by:\n\
+  1. [Recommended] Making the kernel version available.\n\
+    (1). Set PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS to "true".\n\
+    (2). If you are still getting this message after doing so, see the warning about\n\
+      PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS in the build logs.\n\
+  or\n\
+  2. Explicitly setting PRODUCT_ENABLE_UFFD_GC to "true" or "false" based on the kernel version.\n\
+    (1). Set PRODUCT_ENABLE_UFFD_GC to "true" if the kernel is a GKI kernel and is android12-5.4\n\
+      or above, or a non-GKI kernel that supports userfaultfd(2) and MREMAP_DONTUNMAP.\n\
+    (2). Set PRODUCT_ENABLE_UFFD_GC to "false" otherwise.'\
+	&& echo '<unknown-kernel>' > $@
+endif # BUILT_KERNEL_VERSION_FILE
+
+endif # ENABLE_UFFD_GC == "default"
+
 # -- Check VINTF compatibility of build.
 # Skip partial builds; only check full builds. Only check if:
 #   - PRODUCT_ENFORCE_VINTF_MANIFEST is true
diff --git a/core/OWNERS b/core/OWNERS
index 88f6d068bb..c98196a7c1 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -3,7 +3,7 @@ per-file proguard*.flags = jdduke@google.com
 
 # For version updates
-per-file version_defaults.mk = aseaton@google.com,lubomir@google.com,pscovanner@google.com,bkhalife@google.com,jainne@google.com
+per-file version_defaults.mk = ankurbakshi@google.com,bkhalife@google.com,jainne@google.com,lokeshgoel@google.com,lubomir@google.com,pscovanner@google.com
 # For sdk extensions version updates
 per-file version_defaults.mk = amhk@google.com,gurpreetgs@google.com,mkhokhlova@google.com,robertogil@google.com
diff --git a/core/art_config.mk b/core/art_config.mk
index f47a8e21f7..54bfd6b728 100644
--- a/core/art_config.mk
+++ b/core/art_config.mk
@@ -12,38 +12,15 @@
 # ENABLE_UFFD_GC: Whether to use userfaultfd GC.
 
 config_enable_uffd_gc := \
-  $(firstword $(OVERRIDE_ENABLE_UFFD_GC) $(PRODUCT_ENABLE_UFFD_GC))
+  $(firstword $(OVERRIDE_ENABLE_UFFD_GC) $(PRODUCT_ENABLE_UFFD_GC) default)
 
-ifeq (,$(filter-out default,$(config_enable_uffd_gc)))
-  ENABLE_UFFD_GC := true
-
-  # Disable userfaultfd GC if the device doesn't support it (i.e., if
-  # `min(ro.board.api_level ?? ro.board.first_api_level ?? MAX_VALUE,
-  #      ro.product.first_api_level ?? ro.build.version.sdk ?? MAX_VALUE) < 31`)
-  # This logic aligns with how `ro.vendor.api_level` is calculated in
-  # `system/core/init/property_service.cpp`.
-  # We omit the check on `ro.build.version.sdk` here because we are on the latest build system.
-  board_api_level := $(firstword $(BOARD_API_LEVEL) $(BOARD_SHIPPING_API_LEVEL))
-  ifneq (,$(board_api_level))
-    ifeq (true,$(call math_lt,$(board_api_level),31))
-      ENABLE_UFFD_GC := false
-    endif
-  endif
-
-  ifneq (,$(PRODUCT_SHIPPING_API_LEVEL))
-    ifeq (true,$(call math_lt,$(PRODUCT_SHIPPING_API_LEVEL),31))
-      ENABLE_UFFD_GC := false
-    endif
-  endif
-else ifeq (true,$(config_enable_uffd_gc))
-  ENABLE_UFFD_GC := true
-else ifeq (false,$(config_enable_uffd_gc))
-  ENABLE_UFFD_GC := false
-else
+ifeq (,$(filter default true false,$(config_enable_uffd_gc)))
   $(error Unknown PRODUCT_ENABLE_UFFD_GC value: $(config_enable_uffd_gc))
 endif
 
-ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(ENABLE_UFFD_GC)
+ENABLE_UFFD_GC := $(config_enable_uffd_gc)
+
+# If the value is "default", it will be mangled by post_process_props.py.
+ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(config_enable_uffd_gc)
 
 # Create APEX_BOOT_JARS_EXCLUDED which is a list of jars to be removed from
 # ApexBoorJars when built from mainline prebuilts.
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index 37a389f25e..08311ca481 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -57,6 +57,7 @@ my_boot_image_module :=
 # Build the boot.zip which contains the boot jars and their compilation output
 # We can do this only if preopt is enabled and if the product uses libart config (which sets the
 # default properties for preopting).
+# At the time of writing, this is only for ART Cloud.
 ifeq ($(WITH_DEXPREOPT), true)
 ifneq ($(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY), true)
 ifeq ($(PRODUCT_USES_DEFAULT_ART_CONFIG), true)
@@ -95,15 +96,16 @@ bootclasspath_arg := $(subst $(space),:,$(patsubst $(dexpreopt_root_dir)%,%,$(DE
 bootclasspath_locations_arg := $(subst $(space),:,$(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS))
 boot_images := $(subst :,$(space),$(DEXPREOPT_IMAGE_LOCATIONS_ON_DEVICE$(DEXPREOPT_INFIX)))
 boot_image_arg := $(subst $(space),:,$(patsubst /%,%,$(boot_images)))
-dex2oat_extra_args := $(if $(filter true,$(ENABLE_UFFD_GC)),--runtime-arg -Xgc:CMC)
+uffd_gc_flag_txt := $(OUT_DIR)/soong/dexpreopt/uffd_gc_flag.txt
 
 boot_zip_metadata_txt := $(dir $(boot_zip))boot_zip/METADATA.txt
+$(boot_zip_metadata_txt): $(uffd_gc_flag_txt)
 $(boot_zip_metadata_txt):
	rm -f $@
	echo "bootclasspath = $(bootclasspath_arg)" >> $@
	echo "bootclasspath-locations = $(bootclasspath_locations_arg)" >> $@
	echo "boot-image = $(boot_image_arg)" >> $@
-	echo "extra-args = $(dex2oat_extra_args)" >> $@
+	echo "extra-args = `cat $(uffd_gc_flag_txt)`" >> $@
 
 $(call dist-for-goals, droidcore, $(boot_zip_metadata_txt))
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 10fbe8f184..d51de33273 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -122,7 +122,7 @@ ifeq ($(WRITE_SOONG_VARIABLES),true)
   $(call add_json_str, Dex2oatXmx, $(DEX2OAT_XMX))
   $(call add_json_str, Dex2oatXms, $(DEX2OAT_XMS))
   $(call add_json_str, EmptyDirectory, $(OUT_DIR)/empty)
-  $(call add_json_bool, EnableUffdGc, $(filter true,$(ENABLE_UFFD_GC)))
+  $(call add_json_str, EnableUffdGc, $(ENABLE_UFFD_GC))
 
   ifdef TARGET_ARCH
     $(call add_json_map, CpuVariant)
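With this change the build no longer hard-codes true/false: when the configured value is "default", post_process_props.py resolves the property from the kernel version file written by the Makefile rule above. A rough sketch of what such a helper could look like, assuming the android12-5.4 GKI rule quoted in the Makefile warning; the real logic lives in uffd_gc_utils.py, which is not part of this diff, and the version parsing below is illustrative only:

# Hedged sketch of the helper post_process_props.py imports from
# uffd_gc_utils.py; that module is not shown in this diff, and the
# version-string parsing here is an assumption.
import re

def should_enable_uffd_gc(kernel_version_file):
    with open(kernel_version_file) as f:
        version = f.read().strip()
    if version == "<unknown-kernel>":
        # Placeholder written by the Makefile rule when the kernel version
        # could not be determined; surface the same guidance rather than guess.
        raise RuntimeError(
            "Kernel version unavailable; set PRODUCT_ENABLE_UFFD_GC explicitly")
    # GKI kernel releases look roughly like "5.10.107-android12-9-...".
    # The Makefile warning says android12-5.4 and above qualify.
    m = re.match(r"(\d+)\.(\d+)\.\S+-android(\d+)", version)
    if m:
        major, minor, release = (int(g) for g in m.groups())
        return release >= 12 and (major, minor) >= (5, 4)
    # Non-GKI kernels must support userfaultfd(2) and MREMAP_DONTUNMAP;
    # without that knowledge, stay conservative.
    return False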
diff --git a/core/instrumentation_test_config_template.xml b/core/instrumentation_test_config_template.xml
index 9dfc001d10..379126c6de 100644
--- a/core/instrumentation_test_config_template.xml
+++ b/core/instrumentation_test_config_template.xml
@@ -24,7 +24,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.{TEST_TYPE}" >
-        <option name="package" value="{PACKAGE}" />
+        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="{PACKAGE}" />
         <option name="runner" value="{RUNNER}" />
     </test>
 </configuration>
diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk
index 6fc1e4c839..62ef3df7db 100644
--- a/core/packaging/flags.mk
+++ b/core/packaging/flags.mk
@@ -97,6 +97,20 @@ $(foreach partition, $(_FLAG_PARTITIONS), \
 	)) \
 )
 
+# Collect the on-device flags into a single file, similar to all_aconfig_declarations.
+required_aconfig_flags_files := \
+		$(sort $(foreach partition, $(filter $(IMAGES_TO_BUILD), $(_FLAG_PARTITIONS)), \
+			$(aconfig_flag_summaries_protobuf.$(partition)) \
+		))
+
+.PHONY: device_aconfig_declarations
+device_aconfig_declarations: $(PRODUCT_OUT)/device_aconfig_declarations.pb
+$(eval $(call generate-partition-aconfig-flag-file, \
+			$(TARGET_OUT_FLAGS)/device_aconfig_declarations.pb, \
+			$(PRODUCT_OUT)/device_aconfig_declarations.pb, \
+			$(sort $(required_aconfig_flags_files)) \
+)) \
+
 # Create a set of storage file for each partition
 # $(1): built aconfig flags storage package map file (out)
 # $(2): built aconfig flags storage flag map file (out)
@@ -178,6 +192,7 @@ flag-files: $(required_flags_files)
 
 # Clean up
 required_flags_files:=
+required_aconfig_flags_files:=
 $(foreach partition, $(_FLAG_PARTITIONS), \
 	$(eval build_flag_summaries.$(partition):=) \
 	$(eval aconfig_flag_summaries_protobuf.$(partition):=) \
diff --git a/core/release_config.mk b/core/release_config.mk
index 1fb574756f..8d19bc7b2a 100644
--- a/core/release_config.mk
+++ b/core/release_config.mk
@@ -59,15 +59,51 @@ $(foreach map,$(PRODUCT_RELEASE_CONFIG_MAPS), \
     $(if $(filter $(map),$(config_map_files)),,$(eval config_map_files += $(map))) \
 )
 
+# Declare an alias release-config
+#
+# This should be used to declare a release as an alias of another, meaning no
+# release config files should be present.
+#
+# $1 config name
+# $2 release config for which it is an alias
+define alias-release-config
+    $(call _declare-release-config,$(1),,$(2),true)
+endef
+
 # Declare or extend a release-config.
 #
+# The order of processing is:
+# 1. Recursively apply any overridden release configs.  Only apply each config
+#    the first time we reach it.
+# 2. Apply any files for this release config, in the order they were added to
+#    the declaration.
+#
+# Example:
+#   With these declarations:
+#     $(declare-release-config foo, foo.scl)
+#     $(declare-release-config bar, bar.scl, foo)
+#     $(declare-release-config baz, baz.scl, bar)
+#     $(declare-release-config bif, bif.scl, foo baz)
+#     $(declare-release-config bop, bop.scl, bar baz)
+#
+#   TARGET_RELEASE:
+#     - bar will use: foo.scl bar.scl
+#     - baz will use: foo.scl bar.scl baz.scl
+#     - bif will use: foo.scl bar.scl baz.scl bif.scl
+#     - bop will use: foo.scl bar.scl baz.scl bop.scl
+#
 # $1 config name
 # $2 release config files
-# $3 overridden release config. Only applied for $(TARGET_RELEASE), not in depth.
+# $3 overridden release config
 define declare-release-config
-    $(if $(strip $(2)),,  \
-        $(error declare-release-config: config $(strip $(1)) must have release config files) \
+    $(call _declare-release-config,$(1),$(2),$(3),)
+endef
+
+define _declare-release-config
+    $(if $(strip $(2)$(3)),,  \
+        $(error declare-release-config: config $(strip $(1)) must have release config files, override another release config, or both) \
     )
+    $(if $(strip $(4)),$(eval _all_release_configs.$(strip $(1)).ALIAS := true))
     $(eval _all_release_configs := $(sort $(_all_release_configs) $(strip $(1))))
     $(if $(strip $(3)), \
         $(if $(filter $(_all_release_configs), $(strip $(3))),
@@ -113,16 +149,30 @@
 # Don't sort this, use it in the order they gave us.
 # Do allow duplicate entries, retaining only the first usage.
 flag_value_files :=
-$(foreach r,$(_all_release_configs.$(TARGET_RELEASE).OVERRIDES) $(TARGET_RELEASE), \
+
+# Apply overrides recursively
+#
+# $1 release config that we override
+applied_releases :=
+define _apply-release-config-overrides
+$(foreach r,$(1), \
+  $(if $(filter $(r),$(applied_releases)),, \
+    $(foreach o,$(_all_release_configs.$(r).OVERRIDES),$(call _apply-release-config-overrides,$(o)))\
+    $(eval applied_releases += $(r))\
     $(foreach f,$(_all_release_configs.$(r).FILES), \
       $(if $(filter $(f),$(flag_value_files)),,$(eval flag_value_files += $(f)))\
     )\
+  )\
 )
-
+endef
+$(call _apply-release-config-overrides,$(TARGET_RELEASE))
 # Unset variables so they can't use them
 define declare-release-config
 $(error declare-release-config can only be called from inside release_config_map.mk files)
 endef
+define apply-release-config-overrides
+$(error invalid use of apply-release-config-overrides)
+endef
 
 # TODO: Remove this check after enough people have sourced lunch that we don't
 # need to worry about it trying to do get_build_vars TARGET_RELEASE. Maybe after ~9/2023
@@ -135,6 +185,11 @@
 TARGET_RELEASE:=
 endif
 .KATI_READONLY := TARGET_RELEASE
 
+# Verify that alias configs do not have config files.
+$(foreach r,$(_all_release_configs),\
+  $(if $(_all_release_configs.$(r).ALIAS),$(if $(_all_release_configs.$(r).FILES),\
+    $(error Alias release config "$(r)" may not specify release config files $(_all_release_configs.$(r).FILES))\
+)))
 
 $(foreach config, $(_all_release_configs), \
     $(eval _all_release_configs.$(config).DECLARED_IN:= ) \
@@ -142,6 +197,7 @@ $(foreach config, $(_all_release_configs), \
 )
 _all_release_configs:=
 config_map_files:=
+applied_releases:=
 
 # -----------------------------------------------------------------
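The new _apply-release-config-overrides walks overridden configs depth-first, applies each config only the first time it is reached, and keeps the first occurrence of each flag file. The same algorithm in Python, using the foo/bar/baz example from the comment above (the dict model is a stand-in for the _all_release_configs variables):

# Sketch of _apply-release-config-overrides; the configs dict models the
# per-config OVERRIDES and FILES variables from release_config.mk.
configs = {
    "foo": {"overrides": [], "files": ["foo.scl"]},
    "bar": {"overrides": ["foo"], "files": ["bar.scl"]},
    "baz": {"overrides": ["bar"], "files": ["baz.scl"]},
    "bif": {"overrides": ["foo", "baz"], "files": ["bif.scl"]},
    "bop": {"overrides": ["bar", "baz"], "files": ["bop.scl"]},
}

def flag_value_files(target_release):
    applied, files = set(), []
    def visit(name):
        if name in applied:
            return  # apply each config only the first time we reach it
        for overridden in configs[name]["overrides"]:
            visit(overridden)  # overridden configs are applied first
        applied.add(name)
        for f in configs[name]["files"]:
            if f not in files:  # retain only the first usage of a file
                files.append(f)
    visit(target_release)
    return files

# Matches the table in the comment above:
assert flag_value_files("bif") == ["foo.scl", "bar.scl", "baz.scl", "bif.scl"]
assert flag_value_files("bop") == ["foo.scl", "bar.scl", "baz.scl", "bop.scl"]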
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 4e8e9760f0..652ca9757e 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -124,7 +124,7 @@ $(if $(filter true,$(BUILD_BROKEN_DUP_SYSPROP)),\
     $(eval _option := --allow-dup)\
 )
 
-$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6)
+$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6) $(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)
	$(hide) echo Building $$@
	$(hide) mkdir -p $$(dir $$@)
	$(hide) rm -f $$@ && touch $$@
@@ -148,7 +148,10 @@ endif
 			echo "$$(line)" >> $$@;\
 		)\
 	)
-	$(hide) $(POST_PROCESS_PROPS) $$(_option) --sdk-version $(PLATFORM_SDK_VERSION) $$@ $(5)
+	$(hide) $(POST_PROCESS_PROPS) $$(_option) \
+	  --sdk-version $(PLATFORM_SDK_VERSION) \
+	  --kernel-version-file-for-uffd-gc "$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)" \
+	  $$@ $(5)
	$(hide) $(foreach file,$(strip $(6)),\
	    if [ -f "$(file)" ]; then\
	        cat $(file) >> $$@;\
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 0d88046ebe..7d2b3badd4 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -441,6 +441,7 @@ PRODUCT_PACKAGES_DEBUG := \
     logpersist.start \
     logtagd.rc \
     ot-cli-ftd \
+    ot-ctl \
     procrank \
     profcollectd \
     profcollectctl \
diff --git a/tools/Android.bp b/tools/Android.bp
index 5c54fcf315..0a55ed4b85 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -18,56 +18,65 @@ package {
 }
 
 python_binary_host {
-  name: "generate-self-extracting-archive",
-  srcs: ["generate-self-extracting-archive.py"],
+    name: "generate-self-extracting-archive",
+    srcs: ["generate-self-extracting-archive.py"],
 }
 
 python_binary_host {
-  name: "post_process_props",
-  srcs: ["post_process_props.py"],
+    name: "post_process_props",
+    srcs: ["post_process_props.py"],
+    libs: [
+        "uffd_gc_utils",
+    ],
 }
 
 python_test_host {
-  name: "post_process_props_unittest",
-  main: "test_post_process_props.py",
-  srcs: [
-    "post_process_props.py",
-    "test_post_process_props.py",
-  ],
-  test_config: "post_process_props_unittest.xml",
-  test_suites: ["general-tests"],
+    name: "post_process_props_unittest",
+    main: "test_post_process_props.py",
+    srcs: [
+        "post_process_props.py",
+        "test_post_process_props.py",
+    ],
+    libs: [
+        "uffd_gc_utils",
+    ],
+    test_config: "post_process_props_unittest.xml",
+    test_suites: ["general-tests"],
 }
 
 python_binary_host {
-  name: "extract_kernel",
-  srcs: ["extract_kernel.py"],
+    name: "extract_kernel",
+    srcs: ["extract_kernel.py"],
 }
 
 genrule_defaults {
-  name: "extract_kernel_release_defaults",
-  tools: ["extract_kernel", "lz4"],
-  out: ["kernel_release.txt"],
-  cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)"
+    name: "extract_kernel_release_defaults",
+    tools: [
+        "extract_kernel",
+        "lz4",
+    ],
+    out: ["kernel_release.txt"],
+    cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)",
 }
 
 cc_binary_host {
-  name: "build-runfiles",
-  srcs: ["build-runfiles.cc"],
+    name: "build-runfiles",
+    srcs: ["build-runfiles.cc"],
 }
 
 python_binary_host {
-  name: "check_radio_versions",
-  srcs: ["check_radio_versions.py"],
+    name: "check_radio_versions",
+    srcs: ["check_radio_versions.py"],
 }
 
 python_binary_host {
-  name: "check_elf_file",
-  srcs: ["check_elf_file.py"],
+    name: "check_elf_file",
+    srcs: ["check_elf_file.py"],
 }
 
 python_binary_host {
-  name: "generate_gts_shared_report",
-  srcs: ["generate_gts_shared_report.py"],
+    name: "generate_gts_shared_report",
+    srcs: ["generate_gts_shared_report.py"],
 }
 
 python_binary_host {
@@ -77,10 +86,10 @@ python_binary_host {
         "list_files.py",
     ],
     version: {
-    py3: {
-      embedded_launcher: true,
-    }
-  }
+        py3: {
+            embedded_launcher: true,
+        },
+    },
 }
 
 python_test_host {
@@ -98,11 +107,11 @@ python_test_host {
 }
 
 python_binary_host {
-  name: "characteristics_rro_generator",
-  srcs: ["characteristics_rro_generator.py"],
-  version: {
-    py3: {
-      embedded_launcher: true,
+    name: "characteristics_rro_generator",
+    srcs: ["characteristics_rro_generator.py"],
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
     },
-  },
 }
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index bebac890a0..1381e89762 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -32,40 +32,52 @@ fn new_header(container: &str, num_flags: u32) -> FlagTableHeader {
     }
 }
 
-fn new_node(
-    package_id: u32,
-    flag_name: &str,
-    flag_type: u16,
-    flag_id: u16,
-    num_buckets: u32,
-) -> FlagTableNode {
-    let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
-    FlagTableNode {
-        package_id,
-        flag_name: flag_name.to_string(),
-        flag_type,
-        flag_id,
-        next_offset: None,
-        bucket_index,
-    }
+// a struct that contains FlagTableNode and a bunch of other information to help
+// flag table creation
+#[derive(PartialEq, Debug, Clone)]
+struct FlagTableNodeWrapper {
+    pub node: FlagTableNode,
+    pub bucket_index: u32,
 }
 
-fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<FlagTableNode>> {
-    let flag_ids = assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
-    package
-        .boolean_flags
-        .iter()
-        .map(|&pf| {
-            let fid = flag_ids
-                .get(pf.name())
-                .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
-            // all flags are boolean value at the moment, thus using the last bit. When more
-            // flag value types are supported, flag value type information should come from the
-            // parsed flag, and we will set the flag_type bit mask properly.
-            let flag_type = 1;
-            Ok(new_node(package.package_id, pf.name(), flag_type, *fid, num_buckets))
-        })
-        .collect::<Result<Vec<_>>>()
+impl FlagTableNodeWrapper {
+    fn new(
+        package_id: u32,
+        flag_name: &str,
+        flag_type: u16,
+        flag_id: u16,
+        num_buckets: u32,
+    ) -> Self {
+        let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
+        let node = FlagTableNode {
+            package_id,
+            flag_name: flag_name.to_string(),
+            flag_type,
+            flag_id,
+            next_offset: None,
+        };
+        Self { node, bucket_index }
+    }
+
+    fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<Self>> {
+        let flag_ids =
+            assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
+        package
+            .boolean_flags
+            .iter()
+            .map(|&pf| {
+                let fid = flag_ids
+                    .get(pf.name())
+                    .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
+                // all flags are boolean value at the moment, thus using the last bit.
+                // When more flag value types are supported, flag value type information
+                // should come from the parsed flag, and we will set the flag_type bit
+                // mask properly.
+                let flag_type = 1;
+                Ok(Self::new(package.package_id, pf.name(), flag_type, *fid, num_buckets))
+            })
+            .collect::<Result<Vec<_>>>()
+    }
 }
 
 pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<FlagTable> {
@@ -73,44 +85,48 @@ pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<Fl
     let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum();
     let num_buckets = get_table_size(num_flags)?;
 
-    let mut table = FlagTable {
-        header: new_header(container, num_flags),
-        buckets: vec![None; num_buckets as usize],
-        nodes: packages
-            .iter()
-            .map(|pkg| create_nodes(pkg, num_buckets))
-            .collect::<Result<Vec<_>>>()?
-            .concat(),
-    };
+    let mut header = new_header(container, num_flags);
+    let mut buckets = vec![None; num_buckets as usize];
+    let mut node_wrappers = packages
+        .iter()
+        .map(|pkg| FlagTableNodeWrapper::create_nodes(pkg, num_buckets))
+        .collect::<Result<Vec<_>>>()?
+        .concat();
 
     // initialize all header fields
-    table.header.bucket_offset = table.header.as_bytes().len() as u32;
-    table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
-    table.header.file_size = table.header.node_offset
-        + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+    header.bucket_offset = header.as_bytes().len() as u32;
+    header.node_offset = header.bucket_offset + num_buckets * 4;
+    header.file_size = header.node_offset
+        + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
 
     // sort nodes by bucket index for efficiency
-    table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+    node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
 
     // fill all node offset
-    let mut offset = table.header.node_offset;
-    for i in 0..table.nodes.len() {
-        let node_bucket_idx = table.nodes[i].bucket_index;
-        let next_node_bucket_idx =
-            if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
-
-        if table.buckets[node_bucket_idx as usize].is_none() {
-            table.buckets[node_bucket_idx as usize] = Some(offset);
+    let mut offset = header.node_offset;
+    for i in 0..node_wrappers.len() {
+        let node_bucket_idx = node_wrappers[i].bucket_index;
+        let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+            Some(node_wrappers[i + 1].bucket_index)
+        } else {
+            None
+        };
+
+        if buckets[node_bucket_idx as usize].is_none() {
+            buckets[node_bucket_idx as usize] = Some(offset);
         }
-        offset += table.nodes[i].as_bytes().len() as u32;
+        offset += node_wrappers[i].node.as_bytes().len() as u32;
 
         if let Some(index) = next_node_bucket_idx {
             if index == node_bucket_idx {
-                table.nodes[i].next_offset = Some(offset);
+                node_wrappers[i].node.next_offset = Some(offset);
             }
         }
     }
 
+    let table =
+        FlagTable { header, buckets, nodes: node_wrappers.into_iter().map(|nw| nw.node).collect() };
+
     Ok(table)
 }
 
@@ -126,7 +142,6 @@ mod tests {
         flag_type: u16,
         flag_id: u16,
         next_offset: Option<u32>,
-        bucket_index: u32,
     ) -> FlagTableNode {
         FlagTableNode {
             package_id,
@@ -134,7 +149,6 @@ mod tests {
             flag_type,
             flag_id,
             next_offset,
-            bucket_index,
         }
     }
 
@@ -186,13 +200,13 @@ mod tests {
         let nodes: &Vec<FlagTableNode> = &flag_table.as_ref().unwrap().nodes;
         assert_eq!(nodes.len(), 8);
-        assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None, 0));
-        assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150), 1));
-        assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None, 1));
-        assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None, 5));
-        assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235), 7));
-        assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None, 7));
-        assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None, 9));
-        assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None, 15));
+        assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None));
+        assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150)));
+        assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None));
+        assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None));
+        assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235)));
+        assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None));
+        assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None));
+        assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None));
     }
 }
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index f82e932b0e..4c08129209 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -17,8 +17,7 @@
 use anyhow::Result;
 
 use aconfig_storage_file::{
-    get_bucket_index, get_table_size, PackageTable, PackageTableHeader, PackageTableNode,
-    FILE_VERSION,
+    get_table_size, PackageTable, PackageTableHeader, PackageTableNode, FILE_VERSION,
 };
 
 use crate::storage::FlagPackage;
@@ -34,14 +33,24 @@ fn new_header(container: &str, num_packages: u32) -> PackageTableHeader {
     }
 }
 
-fn new_node(package: &FlagPackage, num_buckets: u32) -> PackageTableNode {
-    let bucket_index = get_bucket_index(&package.package_name.to_string(), num_buckets);
-    PackageTableNode {
-        package_name: String::from(package.package_name),
-        package_id: package.package_id,
-        boolean_offset: package.boolean_offset,
-        next_offset: None,
-        bucket_index,
+// a struct that contains PackageTableNode and a bunch of other information to help
+// package table creation
+#[derive(PartialEq, Debug)]
+struct PackageTableNodeWrapper {
+    pub node: PackageTableNode,
+    pub bucket_index: u32,
+}
+
+impl PackageTableNodeWrapper {
+    fn new(package: &FlagPackage, num_buckets: u32) -> Self {
+        let node = PackageTableNode {
+            package_name: String::from(package.package_name),
+            package_id: package.package_id,
+            boolean_offset: package.boolean_offset,
+            next_offset: None,
+        };
+        let bucket_index = PackageTableNode::find_bucket_index(package.package_name, num_buckets);
+        Self { node, bucket_index }
     }
 }
 
@@ -49,40 +58,47 @@ pub fn create_package_table(container: &str, packages: &[FlagPackage]) -> Result
     // create table
     let num_packages = packages.len() as u32;
     let num_buckets = get_table_size(num_packages)?;
-    let mut table = PackageTable {
-        header: new_header(container, num_packages),
-        buckets: vec![None; num_buckets as usize],
-        nodes: packages.iter().map(|pkg| new_node(pkg, num_buckets)).collect(),
-    };
+    let mut header = new_header(container, num_packages);
+    let mut buckets = vec![None; num_buckets as usize];
+    let mut node_wrappers: Vec<_> =
+        packages.iter().map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets)).collect();
 
     // initialize all header fields
-    table.header.bucket_offset = table.header.as_bytes().len() as u32;
-    table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
-    table.header.file_size = table.header.node_offset
-        + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+    header.bucket_offset = header.as_bytes().len() as u32;
+    header.node_offset = header.bucket_offset + num_buckets * 4;
+    header.file_size = header.node_offset
+        + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
 
-    // sort nodes by bucket index for efficiency
-    table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+    // sort node_wrappers by bucket index for efficiency
+    node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
 
     // fill all node offset
-    let mut offset = table.header.node_offset;
-    for i in 0..table.nodes.len() {
-        let node_bucket_idx = table.nodes[i].bucket_index;
-        let next_node_bucket_idx =
-            if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
-
-        if table.buckets[node_bucket_idx as usize].is_none() {
-            table.buckets[node_bucket_idx as usize] = Some(offset);
+    let mut offset = header.node_offset;
+    for i in 0..node_wrappers.len() {
+        let node_bucket_idx = node_wrappers[i].bucket_index;
+        let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+            Some(node_wrappers[i + 1].bucket_index)
+        } else {
+            None
+        };
+
+        if buckets[node_bucket_idx as usize].is_none() {
+            buckets[node_bucket_idx as usize] = Some(offset);
         }
-        offset += table.nodes[i].as_bytes().len() as u32;
+        offset += node_wrappers[i].node.as_bytes().len() as u32;
 
         if let Some(index) = next_node_bucket_idx {
             if index == node_bucket_idx {
-                table.nodes[i].next_offset = Some(offset);
+                node_wrappers[i].node.next_offset = Some(offset);
             }
         }
     }
 
+    let table = PackageTable {
+        header,
+        buckets,
+        nodes: node_wrappers.into_iter().map(|nw| nw.node).collect(),
+    };
+
     Ok(table)
 }
 
@@ -125,7 +141,6 @@ mod tests {
             package_id: 1,
             boolean_offset: 3,
             next_offset: None,
-            bucket_index: 0,
         };
         assert_eq!(nodes[0], first_node_expected);
         let second_node_expected = PackageTableNode {
@@ -133,7 +148,6 @@ mod tests {
             package_id: 0,
            boolean_offset: 0,
            next_offset: Some(158),
-            bucket_index: 3,
         };
         assert_eq!(nodes[1], second_node_expected);
         let third_node_expected = PackageTableNode {
@@ -141,7 +155,6 @@ mod tests {
             package_id: 2,
             boolean_offset: 6,
             next_offset: None,
-            bucket_index: 3,
         };
         assert_eq!(nodes[2], third_node_expected);
     }
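Both table builders follow the same layout algorithm: hash each node to a bucket, sort nodes by bucket index, point each bucket slot at the byte offset of its first node, and chain same-bucket collisions through next_offset. A compact Python sketch of that layout (crc32 and the node size are placeholders for the real hashing and serialization, which this diff does not define):

# Illustrative layout sketch for the bucketed tables above; crc32 and
# node_size stand in for the real hash and node.as_bytes().len().
from zlib import crc32

def node_size(node):
    return 8 + len(node["key"])  # placeholder encoding size

def build_table(keys, num_buckets, node_offset):
    nodes = [{"key": k, "bucket": crc32(k.encode()) % num_buckets, "next": None}
             for k in keys]
    nodes.sort(key=lambda n: n["bucket"])   # group same-bucket nodes together
    buckets = [None] * num_buckets          # bucket -> offset of its first node
    nodes_by_offset = {}
    offset = node_offset
    for i, node in enumerate(nodes):
        if buckets[node["bucket"]] is None:
            buckets[node["bucket"]] = offset    # first node claims the bucket slot
        nodes_by_offset[offset] = node
        offset += node_size(node)
        nxt = nodes[i + 1] if i + 1 < len(nodes) else None
        if nxt and nxt["bucket"] == node["bucket"]:
            node["next"] = offset               # chain the collision to its neighbor
    return buckets, nodes_by_offset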
diff --git a/tools/aconfig/aconfig_protos/src/lib.rs b/tools/aconfig/aconfig_protos/src/lib.rs
index ef16e06702..8f5667fc02 100644
--- a/tools/aconfig/aconfig_protos/src/lib.rs
+++ b/tools/aconfig/aconfig_protos/src/lib.rs
@@ -150,10 +150,7 @@ pub mod flag_declarations {
         ensure_required_fields!("flag declarations", pdf, "package");
 
         // TODO(b/312769710): Make the container field required.
-        ensure!(
-            is_valid_package_ident(pdf.package()),
-            "bad flag declarations: bad package"
-        );
+        ensure!(is_valid_package_ident(pdf.package()), "bad flag declarations: bad package");
         ensure!(
             !pdf.has_container() || is_valid_container_ident(pdf.container()),
             "bad flag declarations: bad container"
@@ -898,10 +895,7 @@ parsed_flag {
 "#;
         let parsed_flags = try_from_binary_proto_from_text_proto(text_proto).unwrap();
         let parsed_flag = &parsed_flags.parsed_flag[0];
-        assert_eq!(
-            crate::parsed_flag::path_to_declaration(parsed_flag),
-            "flags.declarations"
-        );
+        assert_eq!(crate::parsed_flag::path_to_declaration(parsed_flag), "flags.declarations");
     }
 
     #[test]
diff --git a/tools/aconfig/aconfig_storage_file/Android.bp b/tools/aconfig/aconfig_storage_file/Android.bp
index a2650d8d40..53b693f17b 100644
--- a/tools/aconfig/aconfig_storage_file/Android.bp
+++ b/tools/aconfig/aconfig_storage_file/Android.bp
@@ -12,6 +12,7 @@ rust_defaults {
         "libaconfig_storage_protos",
         "libonce_cell",
         "libprotobuf",
+        "libtempfile",
     ],
 }
diff --git a/tools/aconfig/aconfig_storage_file/Cargo.toml b/tools/aconfig/aconfig_storage_file/Cargo.toml
index e65b1bfe45..54ba6c7840 100644
--- a/tools/aconfig/aconfig_storage_file/Cargo.toml
+++ b/tools/aconfig/aconfig_storage_file/Cargo.toml
@@ -9,7 +9,10 @@ cargo = []
 
 [dependencies]
 anyhow = "1.0.69"
+memmap2 = "0.8.0"
 protobuf = "3.2.0"
+once_cell = "1.19.0"
+tempfile = "3.9.0"
 
 [build-dependencies]
 protobuf-codegen = "3.2.0"
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index 421f84705d..dfbd9de830 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -17,7 +17,7 @@
 //! flag table module defines the flag table file format and methods for serialization
 //! and deserialization
 
-use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes, get_bucket_index};
+use crate::{get_bucket_index, read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
 use anyhow::{anyhow, Result};
 
 /// Flag table header struct
@@ -68,7 +68,6 @@ pub struct FlagTableNode {
     pub flag_type: u16,
     pub flag_id: u16,
     pub next_offset: Option<u32>,
-    pub bucket_index: u32,
 }
 
 impl FlagTableNode {
@@ -97,7 +96,6 @@ impl FlagTableNode {
                 0 => None,
                 val => Some(val),
             },
-            bucket_index: 0,
         };
         Ok(node)
     }
@@ -142,13 +140,11 @@ impl FlagTable {
             .collect();
         let nodes = (0..num_flags)
             .map(|_| {
-                let mut node = FlagTableNode::from_bytes(&bytes[head..]).unwrap();
+                let node = FlagTableNode::from_bytes(&bytes[head..])?;
                 head += node.as_bytes().len();
-                node.bucket_index = FlagTableNode::find_bucket_index(
-                    node.package_id, &node.flag_name, num_buckets);
-                node
+                Ok(node)
             })
-            .collect();
+            .collect::<Result<Vec<_>>>()?;
 
         let table = Self { header, buckets, nodes };
         Ok(table)
@@ -179,8 +175,7 @@ pub fn find_flag_offset(buf: &[u8], package_id: u32, flag: &str) -> Result<Optio
 
     loop {
         let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
-        if interpreted_node.package_id == package_id &&
-            interpreted_node.flag_name == flag {
+        if interpreted_node.package_id == package_id && interpreted_node.flag_name == flag {
             return Ok(Some(interpreted_node.flag_id));
         }
         match interpreted_node.next_offset {
@@ -188,7 +183,6 @@ pub fn find_flag_offset(buf: &[u8], package_id: u32, flag: &str) -> Result<Optio
             None => return Ok(None),
         }
     }
-
 }
 
 #[cfg(test)]
@@ -203,16 +197,8 @@ mod tests {
         flag_type: u16,
         flag_id: u16,
         next_offset: Option<u32>,
-        bucket_index: u32,
     ) -> Self {
-        Self {
-            package_id,
-            flag_name: flag_name.to_string(),
-            flag_type,
-            flag_id,
-            next_offset,
-            bucket_index,
-        }
+        Self { package_id, flag_name: flag_name.to_string(), flag_type, flag_id, next_offset }
     }
 }
 
@@ -245,14 +231,14 @@ mod tests {
         None,
     ];
     let nodes = vec![
-        FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None, 0),
-        FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150), 1),
-        FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None, 1),
-        FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None, 5),
-        FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235), 7),
-        FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None, 7),
-        FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None, 9),
-        FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None, 15),
+        FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None),
+        FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150)),
+        FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None),
+        FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None),
+        FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235)),
+        FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None),
+        FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None),
+        FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None),
     ];
     Ok(FlagTable { header, buckets, nodes })
 }
@@ -268,14 +254,8 @@ mod tests {
     assert_eq!(header, &reinterpreted_header.unwrap());
 
     let nodes: &Vec<FlagTableNode> = &flag_table.nodes;
-    let num_buckets = crate::get_table_size(header.num_flags).unwrap();
     for node in nodes.iter() {
-        let mut reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
-        reinterpreted_node.bucket_index = FlagTableNode::find_bucket_index(
-            reinterpreted_node.package_id,
-            &reinterpreted_node.flag_name,
-            num_buckets
-        );
+        let reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
         assert_eq!(node, &reinterpreted_node);
     }
 
@@ -300,9 +280,7 @@ mod tests {
     ];
     for (package_id, flag_name, expected_offset) in baseline.into_iter() {
         let flag_offset =
-            find_flag_offset(&flag_table[..], package_id, flag_name)
-                .unwrap()
-                .unwrap();
+            find_flag_offset(&flag_table[..], package_id, flag_name).unwrap().unwrap();
         assert_eq!(flag_offset, expected_offset);
     }
 }
@@ -311,11 +289,9 @@ mod tests {
 // this test point locks down table query of a non exist flag
 fn test_not_existed_flag_query() {
     let flag_table = create_test_flag_table().unwrap().as_bytes();
-    let flag_offset =
-        find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+    let flag_offset = find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
     assert_eq!(flag_offset, None);
-    let flag_offset =
-        find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
+    let flag_offset = find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
     assert_eq!(flag_offset, None);
 }
 
@@ -325,8 +301,7 @@ mod tests {
     let mut table = create_test_flag_table().unwrap();
     table.header.version = crate::FILE_VERSION + 1;
     let flag_table = table.as_bytes();
-    let error = find_flag_offset(&flag_table[..], 0, "enabled_ro")
-        .unwrap_err();
+    let error = find_flag_offset(&flag_table[..], 0, "enabled_ro").unwrap_err();
     assert_eq!(
         format!("{:?}", error),
         format!(
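find_flag_offset is the read-side mirror of that layout: it re-derives the bucket index with the same hash, reads the bucket's stored offset, and walks the next_offset chain until the (package_id, flag_name) key matches. Continuing the build_table sketch above (same placeholder structures, not the real byte format):

# Query-side counterpart to build_table above; reuses its return values.
from zlib import crc32

def find_flag(buckets, nodes_by_offset, num_buckets, key):
    offset = buckets[crc32(key.encode()) % num_buckets]  # same hash as the writer
    while offset is not None:
        node = nodes_by_offset[offset]
        if node["key"] == key:
            return node        # hit: the real code returns the node's flag_id
        offset = node["next"]  # follow the collision chain
    return None                # chain exhausted: the flag does not exist

buckets, nodes_by_offset = build_table(["pkg.a/flag1", "pkg.b/flag2"], 7, 16)
assert find_flag(buckets, nodes_by_offset, 7, "pkg.a/flag1") is not None
assert find_flag(buckets, nodes_by_offset, 7, "pkg.c/missing") is None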
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_value.rs b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
index c8b52a998f..bb8892d5ca 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_value.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
@@ -146,9 +146,7 @@ mod tests {
         let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
         let baseline: Vec<bool> = vec![false, true, false, false, true, true, false, true];
         for (offset, expected_value) in baseline.into_iter().enumerate() {
-            let flag_value =
-                get_boolean_flag_value(&flag_value_list[..], offset as u32)
-                    .unwrap();
+            let flag_value = get_boolean_flag_value(&flag_value_list[..], offset as u32).unwrap();
             assert_eq!(flag_value, expected_value);
         }
     }
@@ -157,12 +155,8 @@ mod tests {
     // this test point locks down query beyond the end of boolean section
     fn test_boolean_out_of_range() {
         let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
-        let error = get_boolean_flag_value(&flag_value_list[..], 8)
-            .unwrap_err();
-        assert_eq!(
-            format!("{:?}", error),
-            "Flag value offset goes beyond the end of the file."
-        );
+        let error = get_boolean_flag_value(&flag_value_list[..], 8).unwrap_err();
+        assert_eq!(format!("{:?}", error), "Flag value offset goes beyond the end of the file.");
     }
 
     #[test]
@@ -171,8 +165,7 @@ mod tests {
         let mut value_list = create_test_flag_value_list().unwrap();
         value_list.header.version = crate::FILE_VERSION + 1;
         let flag_value = value_list.as_bytes();
-        let error = get_boolean_flag_value(&flag_value[..], 4)
-            .unwrap_err();
+        let error = get_boolean_flag_value(&flag_value[..], 4).unwrap_err();
         assert_eq!(
             format!("{:?}", error),
             format!(
diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs
index f5aecffa5c..a9f5e21e5c 100644
--- a/tools/aconfig/aconfig_storage_file/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_file/src/lib.rs
@@ -21,6 +21,9 @@ pub mod flag_table;
 pub mod flag_value;
 pub mod package_table;
 
+#[cfg(feature = "cargo")]
+pub mod mapped_file;
+
 mod protos;
 #[cfg(test)]
 mod test_utils;
diff --git a/tools/aconfig/aconfig_storage_file/src/mapped_file.rs b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
new file mode 100644
index 0000000000..4f65df0197
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::{BufReader, Read};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, ensure, Result};
+use memmap2::Mmap;
+use once_cell::sync::Lazy;
+
+use crate::protos::{
+    storage_files::try_from_binary_proto, ProtoStorageFileInfo, ProtoStorageFiles,
+};
+use crate::StorageFileSelection;
+
+/// Cache for already mapped files
+static ALL_MAPPED_FILES: Lazy<Mutex<HashMap<String, MappedStorageFileSet>>> = Lazy::new(|| {
+    let mapped_files = HashMap::new();
+    Mutex::new(mapped_files)
+});
+
+/// Mapped storage files for a particular container
+#[derive(Debug)]
+struct MappedStorageFileSet {
+    package_map: Arc<Mmap>,
+    flag_map: Arc<Mmap>,
+    flag_val: Arc<Mmap>,
+}
+
+/// Find where storage files are stored for a particular container
+fn find_container_storage_location(
+    location_pb_file: &str,
+    container: &str,
+) -> Result<ProtoStorageFileInfo> {
+    let file = File::open(location_pb_file)?;
+    let mut reader = BufReader::new(file);
+    let mut bytes = Vec::new();
+    reader.read_to_end(&mut bytes)?;
+
+    let storage_locations: ProtoStorageFiles = try_from_binary_proto(&bytes)?;
+    for location_info in storage_locations.files.iter() {
+        if location_info.container() == container {
+            return Ok(location_info.clone());
+        }
+    }
+    bail!("Storage file does not exist for {}", container)
+}
+
+/// Verify the file is read only and then map it
+fn verify_read_only_and_map(file_path: &str) -> Result<Mmap> {
+    let file = File::open(file_path)?;
+    let metadata = file.metadata()?;
+    ensure!(
+        metadata.permissions().readonly(),
+        "Cannot mmap file {} as it is not read only",
+        file_path
+    );
+    // SAFETY:
+    //
+    // Mmap constructors are unsafe as it would have undefined behaviors if the file
+    // is modified after mapped (https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html).
+    //
+    // We either have to make this api unsafe or ensure that the file will not be modified
+    // which means it is read only. Here in the code, we check explicitly that the file
+    // being mapped must only have read permission, otherwise, error out, thus making sure
+    // it is safe.
+    //
+    // We should remove this restriction if we need to support mmap non read only file in
+    // the future (by making this api unsafe). But for now, all flags are boot stable, so
+    // the boot flag file copy should be readonly.
+    unsafe { Ok(Mmap::map(&file)?) }
+}
+
+/// Map all storage files for a particular container
+fn map_container_storage_files(
+    location_pb_file: &str,
+    container: &str,
+) -> Result<MappedStorageFileSet> {
+    let files_location = find_container_storage_location(location_pb_file, container)?;
+    let package_map = Arc::new(verify_read_only_and_map(files_location.package_map())?);
+    let flag_map = Arc::new(verify_read_only_and_map(files_location.flag_map())?);
+    let flag_val = Arc::new(verify_read_only_and_map(files_location.flag_val())?);
+    Ok(MappedStorageFileSet { package_map, flag_map, flag_val })
+}
+
+/// Get a mapped storage file given the container and file type
+pub fn get_mapped_file(
+    location_pb_file: &str,
+    container: &str,
+    file_selection: StorageFileSelection,
+) -> Result<Arc<Mmap>> {
+    let mut all_mapped_files = ALL_MAPPED_FILES.lock().unwrap();
+    match all_mapped_files.get(container) {
+        Some(mapped_files) => Ok(match file_selection {
+            StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+            StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+            StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+        }),
+        None => {
+            let mapped_files = map_container_storage_files(location_pb_file, container)?;
+            let file_ptr = match file_selection {
+                StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+                StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+                StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+            };
+            all_mapped_files.insert(container.to_string(), mapped_files);
+            Ok(file_ptr)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{get_binary_storage_proto_bytes, write_bytes_to_temp_file};
+
+    #[test]
+    fn test_find_storage_file_location() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "/system/etc/package.map"
+    flag_map: "/system/etc/flag.map"
+    flag_val: "/metadata/aconfig/system.val"
+    timestamp: 12345
+}
+files {
+    version: 1
+    container: "product"
+    package_map: "/product/etc/package.map"
+    flag_map: "/product/etc/flag.map"
+    flag_val: "/metadata/aconfig/product.val"
+    timestamp: 54321
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let file_info = find_container_storage_location(&file_full_path, "system").unwrap();
+        assert_eq!(file_info.version(), 0);
+        assert_eq!(file_info.container(), "system");
+        assert_eq!(file_info.package_map(), "/system/etc/package.map");
+        assert_eq!(file_info.flag_map(), "/system/etc/flag.map");
+        assert_eq!(file_info.flag_val(), "/metadata/aconfig/system.val");
+        assert_eq!(file_info.timestamp(), 12345);
+
+        let file_info = find_container_storage_location(&file_full_path, "product").unwrap();
+        assert_eq!(file_info.version(), 1);
+        assert_eq!(file_info.container(), "product");
+        assert_eq!(file_info.package_map(), "/product/etc/package.map");
+        assert_eq!(file_info.flag_map(), "/product/etc/flag.map");
+        assert_eq!(file_info.flag_val(), "/metadata/aconfig/product.val");
+        assert_eq!(file_info.timestamp(), 54321);
+
+        let err = find_container_storage_location(&file_full_path, "vendor").unwrap_err();
+        assert_eq!(format!("{:?}", err), "Storage file does not exist for vendor");
+    }
+
+    fn map_and_verify(
+        location_pb_file: &str,
+        file_selection: StorageFileSelection,
+        actual_file: &str,
+    ) {
+        let mut opened_file = File::open(actual_file).unwrap();
+        let mut content = Vec::new();
+        opened_file.read_to_end(&mut content).unwrap();
+
+        let mmaped_file = get_mapped_file(location_pb_file, "system", file_selection).unwrap();
+        assert_eq!(mmaped_file[..], content[..]);
+    }
+
+    #[test]
+    fn test_mapped_file_contents() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/flag.map"
+    flag_val: "./tests/flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        map_and_verify(&file_full_path, StorageFileSelection::PackageMap, "./tests/package.map");
+
+        map_and_verify(&file_full_path, StorageFileSelection::FlagMap, "./tests/flag.map");
+
+        map_and_verify(&file_full_path, StorageFileSelection::FlagVal, "./tests/flag.val");
+    }
+
+    #[test]
+    fn test_map_non_read_only_file() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/rw.package.map"
+    flag_map: "./tests/rw.flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.package.map as it is not read only"
+        );
+
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/rw.flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.flag.map as it is not read only"
+        );
+
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.flag.val as it is not read only"
+        );
+    }
+}
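get_mapped_file keeps one MappedStorageFileSet per container in a process-wide cache behind a Mutex, after verifying that each file is read-only so mapping it is sound. A rough Python analogue of the cache-and-verify pattern (Python's mmap stands in for memmap2, and the permission check is simplified):

# Rough Python analogue of the per-container mmap cache above; the
# read-only check and keying are simplifications, not the Rust API.
import mmap
import os
import threading

_lock = threading.Lock()
_cache = {}  # path -> mmap.mmap

def get_mapped_file(path):
    with _lock:
        cached = _cache.get(path)
        if cached is None:
            if os.stat(path).st_mode & 0o222:  # any write bit set
                raise PermissionError(
                    f"Cannot mmap file {path} as it is not read only")
            with open(path, "rb") as f:
                cached = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            _cache[path] = cached  # later callers share the same mapping
        return cached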
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 8fd57e6061..a3ad6ec5b0 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -69,7 +69,6 @@ pub struct PackageTableNode {
     // boolean flag value array in the flag value file
     pub boolean_offset: u32,
     pub next_offset: Option<u32>,
-    pub bucket_index: u32,
 }
 
 impl PackageTableNode {
@@ -96,10 +95,16 @@ impl PackageTableNode {
                 0 => None,
                 val => Some(val),
             },
-            bucket_index: 0,
         };
         Ok(node)
     }
+
+    /// Get the bucket index for a package table node, defined it here so the
+    /// construction side (aconfig binary) and consumption side (flag read lib)
+    /// use the same method of hashing
+    pub fn find_bucket_index(package: &str, num_buckets: u32) -> u32 {
+        get_bucket_index(&package, num_buckets)
+    }
 }
 
 /// Package table struct
@@ -135,12 +140,11 @@ impl PackageTable {
             .collect();
         let nodes = (0..num_packages)
             .map(|_| {
-                let mut node = PackageTableNode::from_bytes(&bytes[head..]).unwrap();
+                let node = PackageTableNode::from_bytes(&bytes[head..])?;
                 head += node.as_bytes().len();
-                node.bucket_index = get_bucket_index(&node.package_name, num_buckets);
-                node
+                Ok(node)
             })
-            .collect();
+            .collect::<Result<Vec<_>>>()?;
 
         let table = Self { header, buckets, nodes };
         Ok(table)
@@ -166,7 +170,7 @@ pub fn find_package_offset(buf: &[u8], package: &str) -> Result<Option<PackageOf
     }
 
     let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
-    let bucket_index = get_bucket_index(&package, num_buckets);
+    let bucket_index = PackageTableNode::find_bucket_index(&package, num_buckets);
 
     let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
     let mut package_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
@@ -210,21 +214,18 @@ mod tests {
             package_id: 1,
             boolean_offset: 3,
             next_offset: None,
-            bucket_index: 0,
         };
         let second_node = PackageTableNode {
             package_name: String::from("com.android.aconfig.storage.test_1"),
             package_id: 0,
             boolean_offset: 0,
             next_offset: Some(158),
-            bucket_index: 3,
         };
         let third_node = PackageTableNode {
             package_name: String::from("com.android.aconfig.storage.test_4"),
             package_id: 2,
             boolean_offset: 6,
             next_offset: None,
-            bucket_index: 3,
         };
         let nodes = vec![first_node, second_node, third_node];
         Ok(PackageTable { header, buckets, nodes })
@@ -241,11 +242,8 @@ mod tests {
         assert_eq!(header, &reinterpreted_header.unwrap());
 
         let nodes: &Vec<PackageTableNode> = &package_table.nodes;
-        let num_buckets = crate::get_table_size(header.num_packages).unwrap();
         for node in nodes.iter() {
-            let mut reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
-            reinterpreted_node.bucket_index =
-                get_bucket_index(&reinterpreted_node.package_name, num_buckets);
+            let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
             assert_eq!(node, &reinterpreted_node);
         }
 
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index e1fb6c76e8..c468683407 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -14,9 +14,11 @@
  * limitations under the License.
  */
 
+use crate::protos::ProtoStorageFiles;
 use anyhow::Result;
 use protobuf::Message;
-use crate::protos::ProtoStorageFiles;
+use std::io::Write;
+use tempfile::NamedTempFile;
 
 pub fn get_binary_storage_proto_bytes(text_proto: &str) -> Result<Vec<u8>> {
     let storage_files: ProtoStorageFiles = protobuf::text_format::parse_from_str(text_proto)?;
@@ -24,3 +26,9 @@ pub fn get_binary_storage_proto_bytes(text_proto: &str) -> Result<Vec<u8>> {
     storage_files.write_to_vec(&mut binary_proto)?;
     Ok(binary_proto)
 }
+
+pub fn write_bytes_to_temp_file(bytes: &[u8]) -> Result<NamedTempFile> {
+    let mut file = NamedTempFile::new()?;
+    let _ = file.write_all(&bytes);
+    Ok(file)
+}
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.map b/tools/aconfig/aconfig_storage_file/tests/flag.map
new file mode 100644
index 0000000000..43b6f9a640
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.val b/tools/aconfig/aconfig_storage_file/tests/flag.val
new file mode 100644
index 0000000000..f39f8d3aaf
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/package.map b/tools/aconfig/aconfig_storage_file/tests/package.map
new file mode 100644
index 0000000000..8ed4767a56
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/package.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.map b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
new file mode 100644
index 0000000000..43b6f9a640
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.val b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
new file mode 100644
index 0000000000..f39f8d3aaf
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.package.map b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
new file mode 100644
index 0000000000..8ed4767a56
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
Binary files differ
diff --git a/tools/aconfig/printflags/src/main.rs b/tools/aconfig/printflags/src/main.rs
index 7fcde61273..a0c9ee8b9b 100644
--- a/tools/aconfig/printflags/src/main.rs
+++ b/tools/aconfig/printflags/src/main.rs
@@ -17,7 +17,7 @@
 //! `printflags` is a device binary to print feature flags.
 
 use aconfig_protos::ProtoFlagState as State;
-use aconfig_protos::ProtoParsedFlags as ProtoParsedFlags;
+use aconfig_protos::ProtoParsedFlags;
 use anyhow::{bail, Context, Result};
 use regex::Regex;
 use std::collections::BTreeMap;
diff --git a/tools/perf/benchmarks b/tools/perf/benchmarks
index acc53bb7b0..ad34586ac0 100755
--- a/tools/perf/benchmarks
+++ b/tools/perf/benchmarks
@@ -29,6 +29,7 @@ import shutil
 import subprocess
 import time
 import uuid
+from typing import Optional
 
 import pretty
 import utils
@@ -80,6 +81,33 @@ class Change:
     undo: callable
     "Function to revert the source tree to its previous condition in the most minimal way possible."
 
+_DUMPVARS_VARS=[
+    "COMMON_LUNCH_CHOICES",
+    "HOST_PREBUILT_TAG",
+    "print",
+    "PRODUCT_OUT",
+    "report_config",
+    "TARGET_ARCH",
+    "TARGET_BUILD_VARIANT",
+    "TARGET_DEVICE",
+    "TARGET_PRODUCT",
+]
+
+_DUMPVARS_ABS_VARS =[
+    "ANDROID_CLANG_PREBUILTS",
+    "ANDROID_JAVA_HOME",
+    "ANDROID_JAVA_TOOLCHAIN",
+    "ANDROID_PREBUILTS",
+    "HOST_OUT",
+    "HOST_OUT_EXECUTABLES",
+    "HOST_OUT_TESTCASES",
+    "OUT_DIR",
+    "print",
+    "PRODUCT_OUT",
+    "SOONG_HOST_OUT",
+    "SOONG_HOST_OUT_EXECUTABLES",
+    "TARGET_OUT_TESTCASES",
+]
 
 @dataclasses.dataclass(frozen=True)
 class Benchmark:
@@ -94,15 +122,47 @@ class Benchmark:
     change: Change
     "Source tree modification for the benchmark that will be measured"
 
-    modules: list[str]
+    dumpvars: Optional[bool] = False
+    "If specified, soong will run in dumpvars mode rather than build-mode."
+
+    modules: Optional[list[str]] = None
     "Build modules to build on soong command line"
 
-    preroll: int
+    preroll: Optional[int] = 0
     "Number of times to run the build command to stabilize"
 
-    postroll: int
+    postroll: Optional[int] = 3
     "Number of times to run the build command after reverting the action to stabilize"
 
+    def build_description(self):
+        "Short description of the benchmark's Soong invocation."
+        if self.dumpvars:
+            return "dumpvars"
+        elif self.modules:
+            return " ".join(self.modules)
+        return ""
+
+
+    def soong_command(self, root):
+        "Command line args to soong_ui for this benchmark."
+        if self.dumpvars:
+            return [
+                "--dumpvars-mode",
+                f"--vars=\"{' '.join(_DUMPVARS_VARS)}\"",
+                f"--abs-vars=\"{' '.join(_DUMPVARS_ABS_VARS)}\"",
+                "--var-prefix=var_cache_",
+                "--abs-var-prefix=abs_var_cache_",
+            ]
+        elif self.modules:
+            return [
+                "--build-mode",
+                "--all-modules",
+                f"--dir={root}",
+                "--skip-metrics-upload",
+            ] + self.modules
+        else:
+            raise Exception("Benchmark must specify dumpvars or modules")
+
 
 @dataclasses.dataclass(frozen=True)
 class FileSnapshot:
@@ -131,8 +191,14 @@ def Clean():
     """Remove the out directory."""
     def remove_out():
         out_dir = utils.get_out_dir()
+        #only remove actual contents, in case out is a symlink (as is the case for cog)
         if os.path.exists(out_dir):
-            shutil.rmtree(out_dir)
+            for filename in os.listdir(out_dir):
+                p = os.path.join(out_dir, filename)
+                if os.path.isfile(p) or os.path.islink(p):
+                    os.remove(p)
+                elif os.path.isdir(p):
+                    shutil.rmtree(p)
     return Change(label="Remove out", change=remove_out, undo=lambda: None)
 
@@ -236,6 +302,7 @@ class BenchmarkReport():
             "id": self.benchmark.id,
             "title": self.benchmark.title,
             "modules": self.benchmark.modules,
+            "dumpvars": self.benchmark.dumpvars,
             "change": self.benchmark.change.label,
             "iteration": self.iteration,
             "log_dir": self.log_dir,
@@ -284,7 +351,7 @@ class Runner():
 
         # Preroll builds
         for i in range(benchmark.preroll):
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark.modules)
+            ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark)
             report.preroll_duration_ns.append(ns)
 
         sys.stderr.write(f"PERFORMING CHANGE: {benchmark.change.label}\n")
@@ -293,18 +360,19 @@ class Runner():
 
         try:
             # Measured build
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark.modules)
+            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark)
             report.duration_ns = ns
 
             dist_one = self._options.DistOne()
             if dist_one:
                 # If we're disting just one benchmark, save the logs and we can stop here.
-                self._dist(utils.get_dist_dir())
+                self._dist(utils.get_dist_dir(), benchmark.dumpvars)
             else:
+                self._dist(benchmark_log_dir, benchmark.dumpvars, store_metrics_only=True)
                 # Postroll builds
-                for i in range(benchmark.preroll):
+                for i in range(benchmark.postroll):
                     ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"post_{i}"),
-                                        benchmark.modules)
+                                        benchmark)
                     report.postroll_duration_ns.append(ns)
 
         finally:
@@ -323,21 +391,17 @@ class Runner():
             path += ("/%0" + str(len(str(self._options.Iterations()))) + "d") % iteration
         return path
 
-    def _run_build(self, lunch, build_log_dir, modules):
+    def _run_build(self, lunch, build_log_dir, benchmark):
         """Builds the modules. Saves interesting log files to log_dir. Raises FatalError
         if the build fails.
         """
-        sys.stderr.write(f"STARTING BUILD {modules}\n")
+        sys.stderr.write(f"STARTING BUILD {benchmark.build_description()}\n")
 
         before_ns = time.perf_counter_ns()
         if not self._options.DryRun():
             cmd = [
                 "build/soong/soong_ui.bash",
-                "--build-mode",
-                "--all-modules",
-                f"--dir={self._options.root}",
-                "--skip-metrics-upload",
-            ] + modules
+            ] + benchmark.soong_command(self._options.root)
             env = dict(os.environ)
             env["TARGET_PRODUCT"] = lunch.target_product
             env["TARGET_RELEASE"] = lunch.target_release
@@ -351,20 +415,25 @@ class Runner():
 
         # TODO: Copy some log files.
 
-        sys.stderr.write(f"FINISHED BUILD {modules}\n")
+        sys.stderr.write(f"FINISHED BUILD {benchmark.build_description()}\n")
 
         return after_ns - before_ns
 
-    def _dist(self, dist_dir):
+    def _dist(self, dist_dir, dumpvars, store_metrics_only=False):
         out_dir = utils.get_out_dir()
         dest_dir = dist_dir.joinpath("logs")
         os.makedirs(dest_dir, exist_ok=True)
         basenames = [
-            "build.trace.gz",
-            "soong.log",
             "soong_build_metrics.pb",
             "soong_metrics",
         ]
+        if not store_metrics_only:
+            basenames.extend([
+                "build.trace.gz",
+                "soong.log",
+            ])
+        if dumpvars:
+            basenames = ['dumpvars-'+b for b in basenames]
         for base in basenames:
             src = out_dir.joinpath(base)
             if src.exists():
@@ -387,7 +456,7 @@ class Runner():
 
 def benchmark_table(benchmarks):
     rows = [("ID", "DESCRIPTION", "REBUILD"),]
-    rows += [(benchmark.id, benchmark.title, " ".join(benchmark.modules)) for benchmark in
+    rows += [(benchmark.id, benchmark.title, benchmark.build_description()) for benchmark in
              benchmarks]
     return rows
 
@@ -571,6 +640,22 @@ benchmarks:
         """Initialize the list of benchmarks."""
         # Assumes that we've already chdired to the root of the tree.
         self._benchmarks = [
+            Benchmark(
+                id="full_lunch",
+                title="Lunch from clean out",
+                change=Clean(),
+                dumpvars=True,
+                preroll=0,
+                postroll=0,
+            ),
+            Benchmark(
+                id="noop_lunch",
+                title="Lunch with no change",
+                change=NoChange(),
+                dumpvars=True,
+                preroll=1,
+                postroll=0,
+            ),
             Benchmark(id="full",
                       title="Full build",
                       change=Clean(),
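In dumpvars mode the runner measures only soong_ui's variable-dump phase (effectively what `lunch` exercises) instead of a full ninja build. An illustration of the argv the new code assembles for the `full_lunch` benchmark, using the Benchmark and Clean definitions from this file (values abbreviated):

# Illustrative only: what Runner._run_build ends up executing for a
# dumpvars benchmark, per the soong_command() method above.
bench = Benchmark(id="full_lunch", title="Lunch from clean out",
                  change=Clean(), dumpvars=True, preroll=0, postroll=0)
cmd = ["build/soong/soong_ui.bash"] + bench.soong_command(root=".")
# -> ['build/soong/soong_ui.bash', '--dumpvars-mode',
#     '--vars="COMMON_LUNCH_CHOICES HOST_PREBUILT_TAG ..."',
#     '--abs-vars="ANDROID_CLANG_PREBUILTS ..."',
#     '--var-prefix=var_cache_', '--abs-var-prefix=abs_var_cache_']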
diff --git a/tools/perf/utils.py b/tools/perf/utils.py
index 934130dc86..0e66d4cd24 100644
--- a/tools/perf/utils.py
+++ b/tools/perf/utils.py
@@ -19,9 +19,11 @@ DEFAULT_REPORT_DIR = "benchmarks"
 
 def get_root():
     top_dir = os.environ.get("ANDROID_BUILD_TOP")
-    if top_dir:
-        return pathlib.Path(top_dir).resolve()
     d = pathlib.Path.cwd()
+    # with cog, someone may have a new workspace and new source tree top, but
+    # not run lunch yet, resulting in a misleading ANDROID_BUILD_TOP value
+    if top_dir and d.is_relative_to(top_dir):
+        return pathlib.Path(top_dir).resolve()
     while True:
         if d.joinpath("build", "soong", "soong_ui.bash").exists():
             return d.resolve().absolute()
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 32829c1c9f..6f429fa23e 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -17,6 +17,8 @@
 import argparse
 import sys
 
+from uffd_gc_utils import should_enable_uffd_gc
+
 # Usage: post_process_props.py file.prop [disallowed_key, ...]
 # Disallowed keys are removed from the property file, if present
 
@@ -27,7 +29,7 @@ PROP_VALUE_MAX = 91
 
 # Put the modifications that you need to make into the */build.prop into this
 # function.
-def mangle_build_prop(prop_list):
+def mangle_build_prop(prop_list, kernel_version_file_for_uffd_gc):
   # If ro.debuggable is 1, then enable adb on USB by default
   # (this is for userdebug builds)
   if prop_list.get_value("ro.debuggable") == "1":
@@ -38,6 +40,11 @@ def mangle_build_prop(prop_list):
     else:
       val = val + ",adb"
     prop_list.put("persist.sys.usb.config", val)
+  if prop_list.get_value("ro.dalvik.vm.enable_uffd_gc") == "default":
+    assert kernel_version_file_for_uffd_gc != ""
+    enable_uffd_gc = should_enable_uffd_gc(kernel_version_file_for_uffd_gc)
+    prop_list.put("ro.dalvik.vm.enable_uffd_gc",
+                  "true" if enable_uffd_gc else "false")
 
 def validate_grf_props(prop_list):
   """Validate GRF properties if exist.
@@ -233,6 +240,7 @@ def main(argv):
   parser.add_argument("filename")
   parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*")
   parser.add_argument("--sdk-version", type=int, required=True)
+  parser.add_argument("--kernel-version-file-for-uffd-gc", required=True)
   args = parser.parse_args()
 
   if not args.filename.endswith("/build.prop"):
@@ -240,7 +248,7 @@ def main(argv):
     sys.exit(1)
 
   props = PropList(args.filename)
-  mangle_build_prop(props)
+  mangle_build_prop(props, args.kernel_version_file_for_uffd_gc)
   if not override_optional_props(props, args.allow_dup):
     sys.exit(1)
   if not validate_grf_props(props):
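End to end: sysprop.mk now passes the kernel version file on every build.prop post-processing step, so a ro.dalvik.vm.enable_uffd_gc=default property never survives into the image. A hypothetical invocation of the updated tool (the SDK version and output paths below are illustrative, not taken from this change):

# Hypothetical end-to-end run of the new pipeline; paths and the SDK
# version are illustrative placeholders.
import subprocess

subprocess.run(
    [
        "build/make/tools/post_process_props.py",
        "--sdk-version", "34",
        "--kernel-version-file-for-uffd-gc",
        "out/soong/dexpreopt/kernel_version_for_uffd_gc.txt",
        "out/target/product/generic/system/build.prop",
    ],
    check=True,
)
# Afterwards build.prop contains ro.dalvik.vm.enable_uffd_gc=true (or false),
# never the literal "default" placeholder.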