Diffstat (limited to 'tools')
21 files changed, 1080 insertions, 453 deletions
diff --git a/tools/droiddoc/Android.bp b/tools/droiddoc/Android.bp new file mode 100644 index 0000000000..042806850a --- /dev/null +++ b/tools/droiddoc/Android.bp @@ -0,0 +1,18 @@ +// Copyright (C) 2013 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +droiddoc_exported_dir { + name: "droiddoc-templates-pdk", + path: "templates-pdk", +} diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk index 96db0f39fb..af0da46b29 100644 --- a/tools/fs_config/Android.mk +++ b/tools/fs_config/Android.mk @@ -99,7 +99,7 @@ $(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_G --aid-header $(PRIVATE_ANDROID_FS_HDR) \ --capability-header $(PRIVATE_ANDROID_CAP_HDR) \ --partition system \ - --all-partitions $(subst $(space),$(comma),$(PRIVATE_PARTITION_LIST)) \ + --all-partitions "$(subst $(space),$(comma),$(PRIVATE_PARTITION_LIST))" \ --dirs \ --out_file $@ \ $(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null) @@ -124,7 +124,7 @@ $(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_G --aid-header $(PRIVATE_ANDROID_FS_HDR) \ --capability-header $(PRIVATE_ANDROID_CAP_HDR) \ --partition system \ - --all-partitions $(subst $(space),$(comma),$(PRIVATE_PARTITION_LIST)) \ + --all-partitions "$(subst $(space),$(comma),$(PRIVATE_PARTITION_LIST))" \ --files \ --out_file $@ \ $(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null) diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py index dccff928f9..4400466084 100755 --- a/tools/fs_config/fs_config_generator.py +++ b/tools/fs_config/fs_config_generator.py @@ -1004,10 +1004,6 @@ class FSConfigGen(BaseGenerator): self._partition = args['partition'] self._all_partitions = args['all_partitions'] - if self._partition == 'system' and self._all_partitions is None: - sys.exit( - 'All other partitions must be provided if generating output' - ' for the system partition') self._out_file = args['out_file'] diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py index 4af10ca53d..ebaa867bc9 100755 --- a/tools/releasetools/add_img_to_target_files.py +++ b/tools/releasetools/add_img_to_target_files.py @@ -28,7 +28,8 @@ Usage: add_img_to_target_files [flag] target_files -r (--rebuild_recovery) Rebuild the recovery patch and write it to the system image. Only - meaningful when system image needs to be rebuilt. + meaningful when system image needs to be rebuilt and there're separate + boot / recovery images. --replace_verity_private_key Replace the private key used for verity signing. (same as the option @@ -113,7 +114,7 @@ def GetCareMap(which, imgname): Returns: (which, care_map_ranges): care_map_ranges is the raw string of the care_map - RangeSet; or an empty list. + RangeSet; or None. """ assert which in common.PARTITIONS_WITH_CARE_MAP @@ -123,7 +124,7 @@ def GetCareMap(which, imgname): # invalid reads. 
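The GetCareMap() change above makes an absent image size yield None rather than an empty list, so callers can skip such partitions explicitly. A minimal sketch of that pattern, with the sparse-image details elided and `//` standing in for Python 2's integer `/`:

```python
def get_care_map(which, image_size):
    """Return [name, care_map_ranges] or None when no image size is known.

    Simplified from GetCareMap(): callers now test the result for
    truthiness instead of extending with a possibly-empty list.
    """
    if not image_size:
        return None  # previously `[]`
    # Assume 4096-byte blocks; the last block is excluded since reading it
    # may trigger invalid reads (see the comment in the diff above).
    image_blocks = int(image_size) // 4096 - 1
    assert image_blocks > 0, "blocks for {} must be positive".format(which)
    return [which, "0-{}".format(image_blocks)]

care_map_list = []
for partition, size in (("system", 1024 * 4096), ("odm", None)):
    care_map = get_care_map(partition, size)
    if not care_map:
        continue  # partitions without a size contribute nothing
    care_map_list += care_map
print(care_map_list)  # ['system', '0-1023']
```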
image_size = OPTIONS.info_dict.get(which + "_image_size") if not image_size: - return [] + return None image_blocks = int(image_size) / 4096 - 1 assert image_blocks > 0, "blocks for {} must be positive".format(which) @@ -164,7 +165,8 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None): else: common.ZipWrite(output_zip, ofile.name, arc_name) - if OPTIONS.rebuild_recovery: + if (OPTIONS.rebuild_recovery and recovery_img is not None and + boot_img is not None): logger.info("Building new recovery patch") common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, boot_img, info_dict=OPTIONS.info_dict) @@ -592,7 +594,11 @@ def AddCareMapForAbOta(output_zip, ab_partitions, image_paths): OPTIONS.info_dict.get(avb_hashtree_enable) == "true"): image_path = image_paths[partition] assert os.path.exists(image_path) - care_map_list += GetCareMap(partition, image_path) + + care_map = GetCareMap(partition, image_path) + if not care_map: + continue + care_map_list += care_map # adds fingerprint field to the care_map build_props = OPTIONS.info_dict.get(partition + ".build.prop", {}) @@ -726,6 +732,7 @@ def AddImagesToTargetFiles(filename): OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True) has_recovery = OPTIONS.info_dict.get("no_recovery") != "true" + has_boot = OPTIONS.info_dict.get("no_boot") != "true" # {vendor,odm,product,product_services}.img are unlike system.img or # system_other.img. Because it could be built from source, or dropped into @@ -773,17 +780,19 @@ def AddImagesToTargetFiles(filename): def banner(s): logger.info("\n\n++++ " + s + " ++++\n\n") - banner("boot") - # common.GetBootableImage() returns the image directly if present. - boot_image = common.GetBootableImage( - "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") - # boot.img may be unavailable in some targets (e.g. aosp_arm64). - if boot_image: - partitions['boot'] = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img") - if not os.path.exists(partitions['boot']): - boot_image.WriteToDir(OPTIONS.input_tmp) - if output_zip: - boot_image.AddToZip(output_zip) + boot_image = None + if has_boot: + banner("boot") + # common.GetBootableImage() returns the image directly if present. + boot_image = common.GetBootableImage( + "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") + # boot.img may be unavailable in some targets (e.g. aosp_arm64). + if boot_image: + partitions['boot'] = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img") + if not os.path.exists(partitions['boot']): + boot_image.WriteToDir(OPTIONS.input_tmp) + if output_zip: + boot_image.AddToZip(output_zip) recovery_image = None if has_recovery: diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py index d14c94f7dc..fb4ca7667f 100644 --- a/tools/releasetools/apex_utils.py +++ b/tools/releasetools/apex_utils.py @@ -19,11 +19,14 @@ import os.path import re import shlex import sys +import zipfile import common logger = logging.getLogger(__name__) +OPTIONS = common.OPTIONS + class ApexInfoError(Exception): """An Exception raised during Apex Information command.""" @@ -145,3 +148,72 @@ def ParseApexPayloadInfo(payload_path): 'Failed to find {} prop in {}'.format(key, payload_path)) return payload_info + + +def SignApex(apex_data, payload_key, container_key, container_pw, + codename_to_api_level_map, signing_args=None): + """Signs the current APEX with the given payload/container keys. + + Args: + apex_data: Raw APEX data. + payload_key: The path to payload signing key (w/ extension). 
+ container_key: The path to container signing key (w/o extension). + container_pw: The matching password of the container_key, or None. + codename_to_api_level_map: A dict that maps from codename to API level. + signing_args: Additional args to be passed to the payload signer. + + Returns: + The path to the signed APEX file. + """ + apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex') + with open(apex_file, 'wb') as apex_fp: + apex_fp.write(apex_data) + + APEX_PAYLOAD_IMAGE = 'apex_payload.img' + APEX_PUBKEY = 'apex_pubkey' + + # 1a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given + # payload_key. + payload_dir = common.MakeTempDir(prefix='apex-payload-') + with zipfile.ZipFile(apex_file) as apex_fd: + payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir) + + payload_info = ParseApexPayloadInfo(payload_file) + SignApexPayload( + payload_file, + payload_key, + payload_info['apex.key'], + payload_info['Algorithm'], + payload_info['Salt'], + signing_args) + + # 1b. Update the embedded payload public key. + payload_public_key = common.ExtractAvbPublicKey(payload_key) + + common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE) + common.ZipDelete(apex_file, APEX_PUBKEY) + apex_zip = zipfile.ZipFile(apex_file, 'a') + common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE) + common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY) + common.ZipClose(apex_zip) + + # 2. Align the files at page boundary (same as in apexer). + aligned_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex') + common.RunAndCheckOutput(['zipalign', '-f', '4096', apex_file, aligned_apex]) + + # 3. Sign the APEX container with container_key. + signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex') + + # Specify the 4K alignment when calling SignApk. + extra_signapk_args = OPTIONS.extra_signapk_args[:] + extra_signapk_args.extend(['-a', '4096']) + + common.SignFile( + aligned_apex, + signed_apex, + container_key, + container_pw, + codename_to_api_level_map=codename_to_api_level_map, + extra_signapk_args=extra_signapk_args) + + return signed_apex diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py index 4136ed432e..ba0465151e 100755 --- a/tools/releasetools/build_image.py +++ b/tools/releasetools/build_image.py @@ -519,7 +519,6 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): common_props = ( "extfs_sparse_flag", "squashfs_sparse_flag", - "selinux_fc", "skip_fsck", "ext_mkuserimg", "verity", @@ -564,6 +563,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("system_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("system_reserved_size", "partition_reserved_size") + copy_prop("system_selinux_fc", "selinux_fc") elif mount_point == "system_other": # We inherit the selinux policies of /system since we contain some of its # files. @@ -587,6 +587,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("system_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("system_reserved_size", "partition_reserved_size") + copy_prop("system_selinux_fc", "selinux_fc") elif mount_point == "data": # Copy the generic fs type first, override with specific one if available. 
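SignApex (above) swaps the re-signed payload image and its public key back into the APEX via common.ZipDelete() and ZipWrite() in append mode, then zipaligns to 4096 and signs the container with SignApk's `-a 4096`. A stdlib-only approximation of the entry-swap step, assuming none of the common.py helpers are available:

```python
import os
import shutil
import tempfile
import zipfile

APEX_PAYLOAD_IMAGE = 'apex_payload.img'
APEX_PUBKEY = 'apex_pubkey'

def replace_zip_entries(zip_path, replacements):
    """Rewrite a zip archive with selected entries replaced.

    stdlib zipfile cannot delete entries in place, so this sketch copies
    the surviving entries into a fresh archive and moves it over the
    original; `replacements` maps arcname -> source file on disk.
    """
    fd, tmp = tempfile.mkstemp(suffix='.zip')
    os.close(fd)
    with zipfile.ZipFile(zip_path) as zin, \
         zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as zout:
        for item in zin.infolist():
            if item.filename not in replacements:
                zout.writestr(item, zin.read(item.filename))
        for arcname, src in replacements.items():
            zout.write(src, arcname)
    shutil.move(tmp, zip_path)
```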
copy_prop("fs_type", "fs_type") @@ -594,9 +595,11 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): copy_prop("userdata_size", "partition_size") copy_prop("flash_logical_block_size", "flash_logical_block_size") copy_prop("flash_erase_block_size", "flash_erase_block_size") + copy_prop("userdata_selinux_fc", "selinux_fc") elif mount_point == "cache": copy_prop("cache_fs_type", "fs_type") copy_prop("cache_size", "partition_size") + copy_prop("cache_selinux_fc", "selinux_fc") elif mount_point == "vendor": copy_prop("avb_vendor_hashtree_enable", "avb_hashtree_enable") copy_prop("avb_vendor_add_hashtree_footer_args", @@ -618,6 +621,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("vendor_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("vendor_reserved_size", "partition_reserved_size") + copy_prop("vendor_selinux_fc", "selinux_fc") elif mount_point == "product": copy_prop("avb_product_hashtree_enable", "avb_hashtree_enable") copy_prop("avb_product_add_hashtree_footer_args", @@ -639,6 +643,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("product_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("product_reserved_size", "partition_reserved_size") + copy_prop("product_selinux_fc", "selinux_fc") elif mount_point == "product_services": copy_prop("avb_product_services_hashtree_enable", "avb_hashtree_enable") copy_prop("avb_product_services_add_hashtree_footer_args", @@ -662,6 +667,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("product_services_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("product_services_reserved_size", "partition_reserved_size") + copy_prop("product_services_selinux_fc", "selinux_fc") elif mount_point == "odm": copy_prop("avb_odm_hashtree_enable", "avb_hashtree_enable") copy_prop("avb_odm_add_hashtree_footer_args", @@ -683,6 +689,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): if not copy_prop("odm_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" copy_prop("odm_reserved_size", "partition_reserved_size") + copy_prop("odm_selinux_fc", "selinux_fc") elif mount_point == "oem": copy_prop("fs_type", "fs_type") copy_prop("oem_size", "partition_size") @@ -692,6 +699,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point): copy_prop("ext4_share_dup_blocks", "ext4_share_dup_blocks") if not copy_prop("oem_extfs_rsv_pct", "extfs_rsv_pct"): d["extfs_rsv_pct"] = "0" + copy_prop("oem_selinux_fc", "selinux_fc") d["partition_name"] = mount_point return d diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py index 495fec30b9..80f80029f0 100644 --- a/tools/releasetools/common.py +++ b/tools/releasetools/common.py @@ -99,6 +99,9 @@ SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'product_services', 'recovery', 'system', 'vendor') +# Chained VBMeta partitions. +AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor') + # Partitions that should have their care_map added to META/care_map.pb PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services', 'odm') @@ -332,15 +335,15 @@ def LoadInfoDict(input_file, repacking=False): raise ValueError("Failed to find 'fstab_version'") if repacking: - # We carry a copy of file_contexts.bin under META/. If not available, search - # BOOT/RAMDISK/. 
Note that sometimes we may need a different file to build - # images than the one running on device, in that case, we must have the one - # for image generation copied to META/. - fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts")) - fc_config = os.path.join(input_file, "META", fc_basename) - assert os.path.exists(fc_config) + # "selinux_fc" properties should point to the file_contexts files + # (file_contexts.bin) under META/. + for key in d: + if key.endswith("selinux_fc"): + fc_basename = os.path.basename(d[key]) + fc_config = os.path.join(input_file, "META", fc_basename) + assert os.path.exists(fc_config) - d["selinux_fc"] = fc_config + d[key] = fc_config # Similarly we need to redirect "root_dir", and "root_fs_config". d["root_dir"] = os.path.join(input_file, "ROOT") @@ -417,8 +420,14 @@ def LoadInfoDict(input_file, repacking=False): # Tries to load the build props for all partitions with care_map, including # system and vendor. for partition in PARTITIONS_WITH_CARE_MAP: - d["{}.build.prop".format(partition)] = LoadBuildProp( + partition_prop = "{}.build.prop".format(partition) + d[partition_prop] = LoadBuildProp( read_helper, "{}/build.prop".format(partition.upper())) + # Some partition might use /<partition>/etc/build.prop as the new path. + # TODO: try new path first when majority of them switch to the new path. + if not d[partition_prop]: + d[partition_prop] = LoadBuildProp( + read_helper, "{}/etc/build.prop".format(partition.upper())) d["build.prop"] = d["system.build.prop"] # Set up the salt (based on fingerprint or thumbprint) that will be used when diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py index e1105bb362..8fb98717b5 100755 --- a/tools/releasetools/img_from_target_files.py +++ b/tools/releasetools/img_from_target_files.py @@ -22,7 +22,8 @@ Usage: img_from_target_files [flags] input_target_files output_image_zip input_target_files: one of the following: - directory containing extracted target files. It will load info from - OTA/android-info.txt and build the image zipfile using images from IMAGES/. + OTA/android-info.txt, META/misc_info.txt and build the image zipfile using + images from IMAGES/. - target files package. Same as above, but extracts the archive before building the image zipfile. @@ -42,6 +43,7 @@ import sys import zipfile import common +from build_super_image import BuildSuperImage if sys.hexversion < 0x02070000: print("Python 2.7 or newer is required.", file=sys.stderr) @@ -52,13 +54,102 @@ logger = logging.getLogger(__name__) OPTIONS = common.OPTIONS -def CopyInfo(output_zip): +def LoadOptions(input_file): + """ + Load information from input_file to OPTIONS. + + Args: + input_file: A Zipfile instance of input zip file, or path to the directory + of extracted zip. 
+ """ + info = OPTIONS.info_dict = common.LoadInfoDict(input_file) + + OPTIONS.put_super = info.get("super_image_in_update_package") == "true" + OPTIONS.dynamic_partition_list = info.get("dynamic_partition_list", + "").strip().split() + OPTIONS.super_device_list = info.get("super_block_devices", + "").strip().split() + OPTIONS.retrofit_dap = info.get("dynamic_partition_retrofit") == "true" + OPTIONS.build_super = info.get("build_super_partition") == "true" + OPTIONS.sparse_userimages = bool(info.get("extfs_sparse_flag")) + + +def CopyInfo(input_tmp, output_zip): """Copy the android-info.txt file from the input to the output.""" common.ZipWrite( - output_zip, os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"), + output_zip, os.path.join(input_tmp, "OTA", "android-info.txt"), "android-info.txt") +def CopyUserImages(input_tmp, output_zip): + """ + Copy user images from the unzipped input and write to output_zip. + + Args: + input_tmp: path to the unzipped input. + output_zip: a ZipFile instance to write images to. + """ + dynamic_images = [p + ".img" for p in OPTIONS.dynamic_partition_list] + + # Filter out system_other for launch DAP devices because it is in super image. + if not OPTIONS.retrofit_dap and "system" in OPTIONS.dynamic_partition_list: + dynamic_images.append("system_other.img") + + images_path = os.path.join(input_tmp, "IMAGES") + # A target-files zip must contain the images since Lollipop. + assert os.path.exists(images_path) + for image in sorted(os.listdir(images_path)): + if OPTIONS.bootable_only and image not in ("boot.img", "recovery.img"): + continue + if not image.endswith(".img"): + continue + if image == "recovery-two-step.img": + continue + if OPTIONS.put_super: + if image == "super_empty.img": + continue + if image in dynamic_images: + continue + logger.info("writing %s to archive...", os.path.join("IMAGES", image)) + common.ZipWrite(output_zip, os.path.join(images_path, image), image) + + +def WriteSuperImages(input_tmp, output_zip): + """ + Write super images from the unzipped input and write to output_zip. This is + only done if super_image_in_update_package is set to "true". + + - For retrofit dynamic partition devices, copy split super images from target + files package. + - For devices launched with dynamic partitions, build super image from target + files package. + + Args: + input_tmp: path to the unzipped input. + output_zip: a ZipFile instance to write images to. + """ + if not OPTIONS.build_super or not OPTIONS.put_super: + return + + if OPTIONS.retrofit_dap: + # retrofit devices already have split super images under OTA/ + images_path = os.path.join(input_tmp, "OTA") + for device in OPTIONS.super_device_list: + image = "super_%s.img" % device + image_path = os.path.join(images_path, image) + assert os.path.exists(image_path) + logger.info("writing %s to archive...", os.path.join("OTA", image)) + common.ZipWrite(output_zip, image_path, image) + else: + # super image for non-retrofit devices aren't in target files package, + # so build it. + super_file = common.MakeTempFile("super_", ".img") + logger.info("building super image %s...", super_file) + BuildSuperImage(input_tmp, super_file) + logger.info("writing super.img to archive...") + common.ZipWrite(output_zip, super_file, "super.img") + + def main(argv): # This allows modifying the value from inner function. 
bootable_only_array = [False] @@ -75,7 +166,7 @@ def main(argv): extra_long_opts=["bootable_zip"], extra_option_handler=option_handler) - bootable_only = bootable_only_array[0] + OPTIONS.bootable_only = bootable_only_array[0] if len(args) != 2: common.Usage(__doc__) @@ -89,26 +180,21 @@ def main(argv): OPTIONS.input_tmp = target_files elif zipfile.is_zipfile(target_files): logger.info("Building image zip from target files zip.") - OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"]) + # We need files under IMAGES/, OTA/, META/ for img_from_target_files.py. + # However, common.LoadInfoDict() may read additional files under BOOT/, + # RECOVERY/ and ROOT/. So unzip everything from the target_files.zip. + OPTIONS.input_tmp = common.UnzipTemp(target_files) else: raise ValueError("%s is not a valid path." % target_files) - output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED) - CopyInfo(output_zip) + LoadOptions(OPTIONS.input_tmp) + output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED, + allowZip64=not OPTIONS.sparse_userimages) try: - images_path = os.path.join(OPTIONS.input_tmp, "IMAGES") - # A target-files zip must contain the images since Lollipop. - assert os.path.exists(images_path) - for image in sorted(os.listdir(images_path)): - if bootable_only and image not in ("boot.img", "recovery.img"): - continue - if not image.endswith(".img"): - continue - if image == "recovery-two-step.img": - continue - common.ZipWrite(output_zip, os.path.join(images_path, image), image) - + CopyInfo(OPTIONS.input_tmp, output_zip) + CopyUserImages(OPTIONS.input_tmp, output_zip) + WriteSuperImages(OPTIONS.input_tmp, output_zip) finally: logger.info("cleaning up...") common.ZipClose(output_zip) diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py index 689e0951cc..f37c0ee1bc 100755 --- a/tools/releasetools/merge_target_files.py +++ b/tools/releasetools/merge_target_files.py @@ -15,31 +15,31 @@ # the License. """This script merges two partial target files packages. -One package contains system files, and the other contains non-system files. +One package contains framework files, and the other contains vendor files. It produces a complete target files package that can be used to generate an OTA package. Usage: merge_target_files.py [args] - --system-target-files system-target-files-zip-archive - The input target files package containing system bits. This is a zip + --framework-target-files framework-target-files-zip-archive + The input target files package containing framework bits. This is a zip archive. - --system-item-list system-item-list-file + --framework-item-list framework-item-list-file The optional path to a newline-separated config file that replaces the - contents of default_system_item_list if provided. + contents of DEFAULT_FRAMEWORK_ITEM_LIST if provided. - --system-misc-info-keys system-misc-info-keys-file + --framework-misc-info-keys framework-misc-info-keys-file The optional path to a newline-separated config file that replaces the - contents of default_system_misc_info_keys if provided. + contents of DEFAULT_FRAMEWORK_MISC_INFO_KEYS if provided. - --other-target-files other-target-files-zip-archive - The input target files package containing other bits. This is a zip + --vendor-target-files vendor-target-files-zip-archive + The input target files package containing vendor bits. This is a zip archive. 
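CopyUserImages (above) drops images that already live inside super.img when the update package carries one. A reduced sketch of that filter; the partition lists below are hypothetical:

```python
def select_images(images, dynamic_partition_list, put_super, retrofit_dap,
                  bootable_only=False):
    """Pick which IMAGES/ entries belong in the image zip."""
    dynamic_images = [p + ".img" for p in dynamic_partition_list]
    # On launch (non-retrofit) DAP devices system_other also lives in super.
    if not retrofit_dap and "system" in dynamic_partition_list:
        dynamic_images.append("system_other.img")

    selected = []
    for image in sorted(images):
        if bootable_only and image not in ("boot.img", "recovery.img"):
            continue
        if not image.endswith(".img") or image == "recovery-two-step.img":
            continue
        if put_super and (image == "super_empty.img" or
                          image in dynamic_images):
            continue
        selected.append(image)
    return selected

print(select_images(
    ["boot.img", "system.img", "vendor.img", "super_empty.img", "vbmeta.img"],
    dynamic_partition_list=["system", "vendor"],
    put_super=True, retrofit_dap=False))
# -> ['boot.img', 'vbmeta.img']
```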
- --other-item-list other-item-list-file + --vendor-item-list vendor-item-list-file The optional path to a newline-separated config file that replaces the - contents of default_other_item_list if provided. + contents of DEFAULT_VENDOR_ITEM_LIST if provided. --output-target-files output-target-files-package If provided, the output merged target files package. Also a zip archive. @@ -93,11 +93,11 @@ import ota_from_target_files logger = logging.getLogger(__name__) OPTIONS = common.OPTIONS OPTIONS.verbose = True -OPTIONS.system_target_files = None -OPTIONS.system_item_list = None -OPTIONS.system_misc_info_keys = None -OPTIONS.other_target_files = None -OPTIONS.other_item_list = None +OPTIONS.framework_target_files = None +OPTIONS.framework_item_list = None +OPTIONS.framework_misc_info_keys = None +OPTIONS.vendor_target_files = None +OPTIONS.vendor_item_list = None OPTIONS.output_target_files = None OPTIONS.output_dir = None OPTIONS.output_item_list = None @@ -107,12 +107,12 @@ OPTIONS.output_super_empty = None OPTIONS.rebuild_recovery = False OPTIONS.keep_tmp = False -# default_system_item_list is a list of items to extract from the partial -# system target files package as is, meaning these items will land in the +# DEFAULT_FRAMEWORK_ITEM_LIST is a list of items to extract from the partial +# framework target files package as is, meaning these items will land in the # output target files package exactly as they appear in the input partial -# system target files package. +# framework target files package. -default_system_item_list = [ +DEFAULT_FRAMEWORK_ITEM_LIST = ( 'META/apkcerts.txt', 'META/filesystem_config.txt', 'META/root_filesystem_config.txt', @@ -122,21 +122,20 @@ default_system_item_list = [ 'PRODUCT/*', 'ROOT/*', 'SYSTEM/*', -] +) -# system_extract_special_item_list is a list of items to extract from the -# partial system target files package that need some special processing, such -# as some sort of combination with items from the partial other target files +# FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST is a list of items to extract from the +# partial framework target files package that need some special processing, such +# as some sort of combination with items from the partial vendor target files # package. -system_extract_special_item_list = [ - 'META/*', -] +FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST = ('META/*',) -# default_system_misc_info_keys is a list of keys to obtain from the system -# instance of META/misc_info.txt. The remaining keys from the other instance. +# DEFAULT_FRAMEWORK_MISC_INFO_KEYS is a list of keys to obtain from the +# framework instance of META/misc_info.txt. The remaining keys from the +# vendor instance. -default_system_misc_info_keys = [ +DEFAULT_FRAMEWORK_MISC_INFO_KEYS = ( 'avb_system_hashtree_enable', 'avb_system_add_hashtree_footer_args', 'avb_system_key_path', @@ -151,14 +150,14 @@ default_system_misc_info_keys = [ 'ab_update', 'default_system_dev_certificate', 'system_size', -] +) -# default_other_item_list is a list of items to extract from the partial -# other target files package as is, meaning these items will land in the output -# target files package exactly as they appear in the input partial other target +# DEFAULT_VENDOR_ITEM_LIST is a list of items to extract from the partial +# vendor target files package as is, meaning these items will land in the output +# target files package exactly as they appear in the input partial vendor target # files package. 
-default_other_item_list = [ +DEFAULT_VENDOR_ITEM_LIST = ( 'META/boot_filesystem_config.txt', 'META/otakeys.txt', 'META/releasetools.py', @@ -172,16 +171,51 @@ default_other_item_list = [ 'PREBUILT_IMAGES/*', 'RADIO/*', 'VENDOR/*', -] +) -# other_extract_special_item_list is a list of items to extract from the -# partial other target files package that need some special processing, such as -# some sort of combination with items from the partial system target files +# VENDOR_EXTRACT_SPECIAL_ITEM_LIST is a list of items to extract from the +# partial vendor target files package that need some special processing, such as +# some sort of combination with items from the partial framework target files # package. -other_extract_special_item_list = [ - 'META/*', -] +VENDOR_EXTRACT_SPECIAL_ITEM_LIST = ('META/*',) + +# The merge config lists should not attempt to extract items from both +# builds for any of the following partitions. The partitions in +# SINGLE_BUILD_PARTITIONS should come entirely from a single build (either +# framework or vendor, but not both). + +SINGLE_BUILD_PARTITIONS = ( + 'BOOT/', + 'DATA/', + 'ODM/', + 'PRODUCT/', + 'PRODUCT_SERVICES/', + 'RADIO/', + 'RECOVERY/', + 'ROOT/', + 'SYSTEM/', + 'SYSTEM_OTHER/', + 'VENDOR/', +) + + +def write_sorted_data(data, path): + """Write the sorted contents of either a list or dict to file. + + This function sorts the contents of the list or dict and then + writes the resulting sorted contents to a file specified by path. + + Args: + data: The list or dict to sort and write. + path: Path to the file to write the sorted values to. The file at path will + be overridden if it exists. + """ + with open(path, 'w') as output: + for entry in sorted(data): + out_str = '{}={}\n'.format(entry, data[entry]) if isinstance( + data, dict) else '{}\n'.format(entry) + output.write(out_str) def extract_items(target_files, target_files_temp_dir, extract_item_list): @@ -260,26 +294,29 @@ def read_config_list(config_file_path): return config_file.read().splitlines() -def validate_config_lists(system_item_list, system_misc_info_keys, - other_item_list): +def validate_config_lists(framework_item_list, framework_misc_info_keys, + vendor_item_list): """Performs validations on the merge config lists. Args: - system_item_list: The list of items to extract from the partial system + framework_item_list: The list of items to extract from the partial framework + target files package as is. + framework_misc_info_keys: A list of keys to obtain from the framework + instance of META/misc_info.txt. The remaining keys from the vendor + instance. + vendor_item_list: The list of items to extract from the partial vendor target files package as is. - system_misc_info_keys: A list of keys to obtain from the system instance of - META/misc_info.txt. The remaining keys from the other instance. - other_item_list: The list of items to extract from the partial other target - files package as is. Returns: False if a validation fails, otherwise true. 
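validate_config_lists now collects all failures before returning and enforces the new SINGLE_BUILD_PARTITIONS rule. A cut-down sketch of the ownership check, with a trimmed partition tuple and hypothetical item lists:

```python
SINGLE_BUILD_PARTITIONS = ('BOOT/', 'SYSTEM/', 'VENDOR/')  # trimmed

def check_single_build_partitions(framework_item_list, vendor_item_list):
    """Return the partitions that both item lists try to extract;
    each must come entirely from one build."""
    errors = []
    for partition in SINGLE_BUILD_PARTITIONS:
        in_framework = any(
            item.startswith(partition) for item in framework_item_list)
        in_vendor = any(
            item.startswith(partition) for item in vendor_item_list)
        if in_framework and in_vendor:
            errors.append(partition)
    return errors

print(check_single_build_partitions(
    ['SYSTEM/*', 'VENDOR/etc/foo'], ['VENDOR/*']))  # -> ['VENDOR/']
```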
""" - default_combined_item_set = set(default_system_item_list) - default_combined_item_set.update(default_other_item_list) + has_error = False - combined_item_set = set(system_item_list) - combined_item_set.update(other_item_list) + default_combined_item_set = set(DEFAULT_FRAMEWORK_ITEM_LIST) + default_combined_item_set.update(DEFAULT_VENDOR_ITEM_LIST) + + combined_item_set = set(framework_item_list) + combined_item_set.update(vendor_item_list) # Check that the merge config lists are not missing any item specified # by the default config lists. @@ -287,26 +324,37 @@ def validate_config_lists(system_item_list, system_misc_info_keys, if difference: logger.error('Missing merge config items: %s', list(difference)) logger.error('Please ensure missing items are in either the ' - 'system-item-list or other-item-list files provided to ' + 'framework-item-list or vendor-item-list files provided to ' 'this script.') - return False - - if ('dynamic_partition_list' in system_misc_info_keys) or ( - 'super_partition_groups' in system_misc_info_keys): + has_error = True + + for partition in SINGLE_BUILD_PARTITIONS: + in_framework = any( + item.startswith(partition) for item in framework_item_list) + in_vendor = any(item.startswith(partition) for item in vendor_item_list) + if in_framework and in_vendor: + logger.error( + 'Cannot extract items from {0} for both the framework and vendor builds. ' + 'Please ensure only one merge config item list includes {0}.'.format( + partition)) + has_error = True + + if ('dynamic_partition_list' in framework_misc_info_keys) or ( + 'super_partition_groups' in framework_misc_info_keys): logger.error('Dynamic partition misc info keys should come from ' - 'the other instance of META/misc_info.txt.') - return False + 'the vendor instance of META/misc_info.txt.') + has_error = True - return True + return not has_error -def process_ab_partitions_txt(system_target_files_temp_dir, - other_target_files_temp_dir, +def process_ab_partitions_txt(framework_target_files_temp_dir, + vendor_target_files_temp_dir, output_target_files_temp_dir): """Perform special processing for META/ab_partitions.txt. This function merges the contents of the META/ab_partitions.txt files from - the system directory and the other directory, placing the merged result in + the framework directory and the vendor directory, placing the merged result in the output directory. The precondition in that the files are already extracted. The post condition is that the output META/ab_partitions.txt contains the merged content. The format for each ab_partitions.txt a one @@ -314,35 +362,33 @@ def process_ab_partitions_txt(system_target_files_temp_dir, names. Args: - system_target_files_temp_dir: The name of a directory containing the special - items extracted from the system target files package. - other_target_files_temp_dir: The name of a directory containing the special - items extracted from the other target files package. + framework_target_files_temp_dir: The name of a directory containing the + special items extracted from the framework target files package. + vendor_target_files_temp_dir: The name of a directory containing the special + items extracted from the vendor target files package. output_target_files_temp_dir: The name of a directory that will be used to create the output target files package after all the special cases are processed. 
""" - system_ab_partitions_txt = os.path.join(system_target_files_temp_dir, 'META', - 'ab_partitions.txt') + framework_ab_partitions_txt = os.path.join(framework_target_files_temp_dir, + 'META', 'ab_partitions.txt') - other_ab_partitions_txt = os.path.join(other_target_files_temp_dir, 'META', - 'ab_partitions.txt') + vendor_ab_partitions_txt = os.path.join(vendor_target_files_temp_dir, 'META', + 'ab_partitions.txt') - with open(system_ab_partitions_txt) as f: - system_ab_partitions = f.read().splitlines() + with open(framework_ab_partitions_txt) as f: + framework_ab_partitions = f.read().splitlines() - with open(other_ab_partitions_txt) as f: - other_ab_partitions = f.read().splitlines() + with open(vendor_ab_partitions_txt) as f: + vendor_ab_partitions = f.read().splitlines() - output_ab_partitions = set(system_ab_partitions + other_ab_partitions) + output_ab_partitions = set(framework_ab_partitions + vendor_ab_partitions) output_ab_partitions_txt = os.path.join(output_target_files_temp_dir, 'META', 'ab_partitions.txt') - with open(output_ab_partitions_txt, 'w') as output: - for partition in sorted(output_ab_partitions): - output.write('%s\n' % partition) + write_sorted_data(data=output_ab_partitions, path=output_ab_partitions_txt) def append_recovery_to_filesystem_config(output_target_files_temp_dir): @@ -371,27 +417,87 @@ def append_recovery_to_filesystem_config(output_target_files_temp_dir): 'selabel=u:object_r:install_recovery_exec:s0 capabilities=0x0\n') -def process_misc_info_txt(system_target_files_temp_dir, - other_target_files_temp_dir, - output_target_files_temp_dir, system_misc_info_keys): +def merge_dynamic_partition_info_dicts(framework_dict, + vendor_dict, + include_dynamic_partition_list=True, + size_prefix='', + size_suffix='', + list_prefix='', + list_suffix=''): + """Merges dynamic partition info variables. + + Args: + framework_dict: The dictionary of dynamic partition info variables from the + partial framework target files. + vendor_dict: The dictionary of dynamic partition info variables from the + partial vendor target files. + include_dynamic_partition_list: If true, merges the dynamic_partition_list + variable. Not all use cases need this variable merged. + size_prefix: The prefix in partition group size variables that precedes the + name of the partition group. For example, partition group 'group_a' with + corresponding size variable 'super_group_a_group_size' would have the + size_prefix 'super_'. + size_suffix: Similar to size_prefix but for the variable's suffix. For + example, 'super_group_a_group_size' would have size_suffix '_group_size'. + list_prefix: Similar to size_prefix but for the partition group's + partition_list variable. + list_suffix: Similar to size_suffix but for the partition group's + partition_list variable. + + Returns: + The merged dynamic partition info dictionary. + """ + merged_dict = {} + # Partition groups and group sizes are defined by the vendor dict because + # these values may vary for each board that uses a shared system image. 
+ merged_dict['super_partition_groups'] = vendor_dict['super_partition_groups'] + if include_dynamic_partition_list: + framework_dynamic_partition_list = framework_dict.get( + 'dynamic_partition_list', '') + vendor_dynamic_partition_list = vendor_dict.get('dynamic_partition_list', + '') + merged_dict['dynamic_partition_list'] = ( + '%s %s' % (framework_dynamic_partition_list, + vendor_dynamic_partition_list)).strip() + for partition_group in merged_dict['super_partition_groups'].split(' '): + # Set the partition group's size using the value from the vendor dict. + key = '%s%s%s' % (size_prefix, partition_group, size_suffix) + if key not in vendor_dict: + raise ValueError('Vendor dict does not contain required key %s.' % key) + merged_dict[key] = vendor_dict[key] + + # Set the partition group's partition list using a concatenation of the + # framework and vendor partition lists. + key = '%s%s%s' % (list_prefix, partition_group, list_suffix) + merged_dict[key] = ( + '%s %s' % + (framework_dict.get(key, ''), vendor_dict.get(key, ''))).strip() + return merged_dict + + +def process_misc_info_txt(framework_target_files_temp_dir, + vendor_target_files_temp_dir, + output_target_files_temp_dir, + framework_misc_info_keys): """Perform special processing for META/misc_info.txt. This function merges the contents of the META/misc_info.txt files from the - system directory and the other directory, placing the merged result in the + framework directory and the vendor directory, placing the merged result in the output directory. The precondition in that the files are already extracted. The post condition is that the output META/misc_info.txt contains the merged content. Args: - system_target_files_temp_dir: The name of a directory containing the special - items extracted from the system target files package. - other_target_files_temp_dir: The name of a directory containing the special - items extracted from the other target files package. + framework_target_files_temp_dir: The name of a directory containing the + special items extracted from the framework target files package. + vendor_target_files_temp_dir: The name of a directory containing the special + items extracted from the vendor target files package. output_target_files_temp_dir: The name of a directory that will be used to create the output target files package after all the special cases are processed. - system_misc_info_keys: A list of keys to obtain from the system instance of - META/misc_info.txt. The remaining keys from the other instance. + framework_misc_info_keys: A list of keys to obtain from the framework + instance of META/misc_info.txt. The remaining keys from the vendor + instance. """ def read_helper(d): @@ -399,200 +505,269 @@ def process_misc_info_txt(system_target_files_temp_dir, with open(misc_info_txt) as f: return list(f.read().splitlines()) - system_info_dict = common.LoadDictionaryFromLines( - read_helper(system_target_files_temp_dir)) + framework_dict = common.LoadDictionaryFromLines( + read_helper(framework_target_files_temp_dir)) - # We take most of the misc info from the other target files. + # We take most of the misc info from the vendor target files. - merged_info_dict = common.LoadDictionaryFromLines( - read_helper(other_target_files_temp_dir)) + merged_dict = common.LoadDictionaryFromLines( + read_helper(vendor_target_files_temp_dir)) - # Replace certain values in merged_info_dict with values from - # system_info_dict. + # Replace certain values in merged_dict with values from + # framework_dict. 
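A worked example of merge_dynamic_partition_info_dicts' affix scheme as used for misc_info.txt (size key `super_<group>_group_size`, list key `super_<group>_partition_list`); all dictionary values here are hypothetical:

```python
framework = {
    'dynamic_partition_list': 'system product_services',
    'super_group_a_partition_list': 'system product_services',
}
vendor = {
    'super_partition_groups': 'group_a',
    'dynamic_partition_list': 'vendor odm',
    'super_group_a_group_size': '4294967296',
    'super_group_a_partition_list': 'vendor odm',
}

def merge(framework_dict, vendor_dict):
    # Trimmed restatement with the misc_info.txt affixes baked in: groups
    # and group sizes come from the vendor dict, partition lists are the
    # concatenation of both builds' lists.
    merged = {'super_partition_groups': vendor_dict['super_partition_groups']}
    merged['dynamic_partition_list'] = ('%s %s' % (
        framework_dict.get('dynamic_partition_list', ''),
        vendor_dict.get('dynamic_partition_list', ''))).strip()
    for group in merged['super_partition_groups'].split(' '):
        merged['super_%s_group_size' % group] = vendor_dict[
            'super_%s_group_size' % group]
        key = 'super_%s_partition_list' % group
        merged[key] = ('%s %s' % (framework_dict.get(key, ''),
                                  vendor_dict.get(key, ''))).strip()
    return merged

print(merge(framework, vendor)['super_group_a_partition_list'])
# -> 'system product_services vendor odm'
```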
- for key in system_misc_info_keys: - merged_info_dict[key] = system_info_dict[key] + for key in framework_misc_info_keys: + merged_dict[key] = framework_dict[key] # Merge misc info keys used for Dynamic Partitions. - if (merged_info_dict.get('use_dynamic_partitions') == 'true') and ( - system_info_dict.get('use_dynamic_partitions') == 'true'): - merged_info_dict['dynamic_partition_list'] = '%s %s' % ( - system_info_dict.get('dynamic_partition_list', ''), - merged_info_dict.get('dynamic_partition_list', '')) - # Partition groups and group sizes are defined by the other (non-system) - # misc info file because these values may vary for each board that uses - # a shared system image. - for partition_group in merged_info_dict['super_partition_groups'].split( - ' '): - if ('super_%s_group_size' % partition_group) not in merged_info_dict: - raise ValueError( - 'Other META/misc_info.txt does not contain required key ' - 'super_%s_group_size.' % partition_group) - key = 'super_%s_partition_list' % partition_group - merged_info_dict[key] = '%s %s' % (system_info_dict.get( - key, ''), merged_info_dict.get(key, '')) + if (merged_dict.get('use_dynamic_partitions') == 'true') and ( + framework_dict.get('use_dynamic_partitions') == 'true'): + merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts( + framework_dict=framework_dict, + vendor_dict=merged_dict, + size_prefix='super_', + size_suffix='_group_size', + list_prefix='super_', + list_suffix='_partition_list') + merged_dict.update(merged_dynamic_partitions_dict) + + # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin + # depending on which dictionary the key came from. + # Only the file basename is required because all selinux_fc properties are + # replaced with the full path to the file under META/ when misc_info.txt is + # loaded from target files for repacking. See common.py LoadInfoDict(). + for key in merged_dict: + if key.endswith('_selinux_fc'): + merged_dict[key] = 'vendor_file_contexts.bin' + for key in framework_dict: + if key.endswith('_selinux_fc'): + merged_dict[key] = 'framework_file_contexts.bin' output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META', 'misc_info.txt') + write_sorted_data(data=merged_dict, path=output_misc_info_txt) - sorted_keys = sorted(merged_info_dict.keys()) - with open(output_misc_info_txt, 'w') as output: - for key in sorted_keys: - output.write('{}={}\n'.format(key, merged_info_dict[key])) +def process_dynamic_partitions_info_txt(framework_target_files_dir, + vendor_target_files_dir, + output_target_files_dir): + """Perform special processing for META/dynamic_partitions_info.txt. + This function merges the contents of the META/dynamic_partitions_info.txt + files from the framework directory and the vendor directory, placing the + merged result in the output directory. -def process_file_contexts_bin(temp_dir, output_target_files_temp_dir): - """Perform special processing for META/file_contexts.bin. - - This function combines plat_file_contexts and vendor_file_contexts, which are - expected to already be extracted in temp_dir, to produce a merged - file_contexts.bin that will land in temp_dir at META/file_contexts.bin. + This function does nothing if META/dynamic_partitions_info.txt from the vendor + directory does not exist. Args: - temp_dir: The name of a scratch directory that this function can use for - intermediate files generated during processing. 
- output_target_files_temp_dir: The name of the working directory that must - already contain plat_file_contexts and vendor_file_contexts (in the - appropriate sub directories), and to which META/file_contexts.bin will be - written. + framework_target_files_dir: The name of a directory containing the special + items extracted from the framework target files package. + vendor_target_files_dir: The name of a directory containing the special + items extracted from the vendor target files package. + output_target_files_dir: The name of a directory that will be used to create + the output target files package after all the special cases are processed. """ - # To create a merged file_contexts.bin file, we use the system and vendor - # file contexts files as input, the m4 tool to combine them, the sorting tool - # to sort, and finally the sefcontext_compile tool to generate the final - # output. We currently omit a checkfc step since the files had been checked - # as part of the build. - - # The m4 step concatenates the two input files contexts files. Since m4 - # writes to stdout, we receive that into an array of bytes, and then write it - # to a file. - - # Collect the file contexts that we're going to combine from SYSTEM, VENDOR, - # PRODUCT, and ODM. We require SYSTEM and VENDOR, but others are optional. - - file_contexts_list = [] - - for partition in ['SYSTEM', 'VENDOR', 'PRODUCT', 'ODM']: - prefix = 'plat' if partition == 'SYSTEM' else partition.lower() - - file_contexts = os.path.join(output_target_files_temp_dir, partition, 'etc', - 'selinux', prefix + '_file_contexts') - - mandatory = partition in ['SYSTEM', 'VENDOR'] - - if mandatory or os.path.isfile(file_contexts): - file_contexts_list.append(file_contexts) - else: - logger.warning('file not found: %s', file_contexts) - - command = ['m4', '--fatal-warnings', '-s'] + file_contexts_list - - merged_content = common.RunAndCheckOutput(command, verbose=False) - - merged_file_contexts_txt = os.path.join(temp_dir, 'merged_file_contexts.txt') - - with open(merged_file_contexts_txt, 'wb') as f: - f.write(merged_content) - - # The sort step sorts the concatenated file. - - sorted_file_contexts_txt = os.path.join(temp_dir, 'sorted_file_contexts.txt') - command = ['fc_sort', merged_file_contexts_txt, sorted_file_contexts_txt] - common.RunAndWait(command, verbose=True) - - # Finally, the compile step creates the final META/file_contexts.bin. - - file_contexts_bin = os.path.join(output_target_files_temp_dir, 'META', - 'file_contexts.bin') + if not os.path.exists( + os.path.join(vendor_target_files_dir, 'META', + 'dynamic_partitions_info.txt')): + return - command = [ - 'sefcontext_compile', - '-o', - file_contexts_bin, - sorted_file_contexts_txt, - ] + def read_helper(d): + dynamic_partitions_info_txt = os.path.join(d, 'META', + 'dynamic_partitions_info.txt') + with open(dynamic_partitions_info_txt) as f: + return list(f.read().splitlines()) - common.RunAndWait(command, verbose=True) + framework_dynamic_partitions_dict = common.LoadDictionaryFromLines( + read_helper(framework_target_files_dir)) + vendor_dynamic_partitions_dict = common.LoadDictionaryFromLines( + read_helper(vendor_target_files_dir)) + + merged_dynamic_partitions_dict = merge_dynamic_partition_info_dicts( + framework_dict=framework_dynamic_partitions_dict, + vendor_dict=vendor_dynamic_partitions_dict, + # META/dynamic_partitions_info.txt does not use dynamic_partition_list. 
+ include_dynamic_partition_list=False, + size_suffix='_size', + list_suffix='_partition_list') + + output_dynamic_partitions_info_txt = os.path.join( + output_target_files_dir, 'META', 'dynamic_partitions_info.txt') + write_sorted_data( + data=merged_dynamic_partitions_dict, + path=output_dynamic_partitions_info_txt) + + +def process_apex_keys_apk_certs_common(framework_target_files_dir, + vendor_target_files_dir, + output_target_files_dir, file_name): + """Perform special processing for META/apexkeys.txt or META/apkcerts.txt. + + This function merges the contents of the META/apexkeys.txt or + META/apkcerts.txt files from the framework directory and the vendor + directory, placing the merged result in the output directory. The + precondition in that the files are already extracted. The post condition + is that the output META/apexkeys.txt or META/apkcerts.txt contains the + merged content. + Args: + framework_target_files_dir: The name of a directory containing the special + items extracted from the framework target files package. + vendor_target_files_dir: The name of a directory containing the special + items extracted from the vendor target files package. + output_target_files_dir: The name of a directory that will be used to create + the output target files package after all the special cases are processed. + file_name: The name of the file to merge. One of apkcerts.txt or + apexkeys.txt. + """ -def process_special_cases(temp_dir, system_target_files_temp_dir, - other_target_files_temp_dir, - output_target_files_temp_dir, system_misc_info_keys, - rebuild_recovery): + def read_helper(d): + temp = {} + file_path = os.path.join(d, 'META', file_name) + with open(file_path) as f: + for line in f: + if line.strip(): + temp[line.split()[0]] = line.strip() + return temp + + framework_dict = read_helper(framework_target_files_dir) + vendor_dict = read_helper(vendor_target_files_dir) + + for key in framework_dict: + if key in vendor_dict and vendor_dict[key] != framework_dict[key]: + raise ValueError('Conflicting entries found in %s:\n %s and\n %s' % + (file_name, framework_dict[key], vendor_dict[key])) + vendor_dict[key] = framework_dict[key] + + output_file = os.path.join(output_target_files_dir, 'META', file_name) + + write_sorted_data(data=vendor_dict.values(), path=output_file) + + +def copy_file_contexts(framework_target_files_dir, vendor_target_files_dir, + output_target_files_dir): + """Creates named copies of each build's file_contexts.bin in output META/.""" + framework_fc_path = os.path.join(framework_target_files_dir, 'META', + 'framework_file_contexts.bin') + if not os.path.exists(framework_fc_path): + framework_fc_path = os.path.join(framework_target_files_dir, 'META', + 'file_contexts.bin') + if not os.path.exists(framework_fc_path): + raise ValueError('Missing framework file_contexts.bin.') + shutil.copyfile( + framework_fc_path, + os.path.join(output_target_files_dir, 'META', + 'framework_file_contexts.bin')) + + vendor_fc_path = os.path.join(vendor_target_files_dir, 'META', + 'vendor_file_contexts.bin') + if not os.path.exists(vendor_fc_path): + vendor_fc_path = os.path.join(vendor_target_files_dir, 'META', + 'file_contexts.bin') + if not os.path.exists(vendor_fc_path): + raise ValueError('Missing vendor file_contexts.bin.') + shutil.copyfile( + vendor_fc_path, + os.path.join(output_target_files_dir, 'META', 'vendor_file_contexts.bin')) + + +def process_special_cases(framework_target_files_temp_dir, + vendor_target_files_temp_dir, + output_target_files_temp_dir, + 
framework_misc_info_keys, rebuild_recovery): """Perform special-case processing for certain target files items. Certain files in the output target files package require special-case processing. This function performs all that special-case processing. Args: - temp_dir: The name of a scratch directory that this function can use for - intermediate files generated during processing. - system_target_files_temp_dir: The name of a directory containing the special - items extracted from the system target files package. - other_target_files_temp_dir: The name of a directory containing the special - items extracted from the other target files package. + framework_target_files_temp_dir: The name of a directory containing the + special items extracted from the framework target files package. + vendor_target_files_temp_dir: The name of a directory containing the special + items extracted from the vendor target files package. output_target_files_temp_dir: The name of a directory that will be used to create the output target files package after all the special cases are processed. - system_misc_info_keys: A list of keys to obtain from the system instance of - META/misc_info.txt. The remaining keys from the other instance. + framework_misc_info_keys: A list of keys to obtain from the framework + instance of META/misc_info.txt. The remaining keys from the vendor + instance. rebuild_recovery: If true, rebuild the recovery patch used by non-A/B devices and write it to the system image. """ - if 'ab_update' in system_misc_info_keys: + if 'ab_update' in framework_misc_info_keys: process_ab_partitions_txt( - system_target_files_temp_dir=system_target_files_temp_dir, - other_target_files_temp_dir=other_target_files_temp_dir, + framework_target_files_temp_dir=framework_target_files_temp_dir, + vendor_target_files_temp_dir=vendor_target_files_temp_dir, output_target_files_temp_dir=output_target_files_temp_dir) if rebuild_recovery: append_recovery_to_filesystem_config( output_target_files_temp_dir=output_target_files_temp_dir) + copy_file_contexts( + framework_target_files_dir=framework_target_files_temp_dir, + vendor_target_files_dir=vendor_target_files_temp_dir, + output_target_files_dir=output_target_files_temp_dir) + process_misc_info_txt( - system_target_files_temp_dir=system_target_files_temp_dir, - other_target_files_temp_dir=other_target_files_temp_dir, + framework_target_files_temp_dir=framework_target_files_temp_dir, + vendor_target_files_temp_dir=vendor_target_files_temp_dir, output_target_files_temp_dir=output_target_files_temp_dir, - system_misc_info_keys=system_misc_info_keys) + framework_misc_info_keys=framework_misc_info_keys) + + process_dynamic_partitions_info_txt( + framework_target_files_dir=framework_target_files_temp_dir, + vendor_target_files_dir=vendor_target_files_temp_dir, + output_target_files_dir=output_target_files_temp_dir) - process_file_contexts_bin( - temp_dir=temp_dir, - output_target_files_temp_dir=output_target_files_temp_dir) + process_apex_keys_apk_certs_common( + framework_target_files_dir=framework_target_files_temp_dir, + vendor_target_files_dir=vendor_target_files_temp_dir, + output_target_files_dir=output_target_files_temp_dir, + file_name='apkcerts.txt') + process_apex_keys_apk_certs_common( + framework_target_files_dir=framework_target_files_temp_dir, + vendor_target_files_dir=vendor_target_files_temp_dir, + output_target_files_dir=output_target_files_temp_dir, + file_name='apexkeys.txt') -def merge_target_files(temp_dir, system_target_files, system_item_list, - 
system_misc_info_keys, other_target_files, - other_item_list, output_target_files, output_dir, + +def merge_target_files(temp_dir, framework_target_files, framework_item_list, + framework_misc_info_keys, vendor_target_files, + vendor_item_list, output_target_files, output_dir, output_item_list, output_ota, output_img, output_super_empty, rebuild_recovery): """Merge two target files packages together. - This function takes system and other target files packages as input, performs - various file extractions, special case processing, and finally creates a - merged zip archive as output. + This function takes framework and vendor target files packages as input, + performs various file extractions, special case processing, and finally + creates a merged zip archive as output. Args: temp_dir: The name of a directory we use when we extract items from the input target files packages, and also a scratch directory that we use for temporary files. - system_target_files: The name of the zip archive containing the system + framework_target_files: The name of the zip archive containing the framework partial target files package. - system_item_list: The list of items to extract from the partial system + framework_item_list: The list of items to extract from the partial framework target files package as is, meaning these items will land in the output - target files package exactly as they appear in the input partial system + target files package exactly as they appear in the input partial framework target files package. - system_misc_info_keys: The list of keys to obtain from the system instance - of META/misc_info.txt. The remaining keys from the other instance. - other_target_files: The name of the zip archive containing the other partial + framework_misc_info_keys: The list of keys to obtain from the framework + instance of META/misc_info.txt. The remaining keys from the vendor + instance. + vendor_target_files: The name of the zip archive containing the vendor + partial target files package. + vendor_item_list: The list of items to extract from the partial vendor + target files package as is, meaning these items will land in the output + target files package exactly as they appear in the input partial vendor target files package. - other_item_list: The list of items to extract from the partial other target - files package as is, meaning these items will land in the output target - files package exactly as they appear in the input partial other target - files package. output_target_files: The name of the output zip archive target files package - created by merging system and other. + created by merging framework and vendor. output_dir: The destination directory for saving merged files. output_item_list: The list of items to copy into the output_dir. output_ota: The name of the output zip archive ota package. @@ -603,51 +778,51 @@ def merge_target_files(temp_dir, system_target_files, system_item_list, devices and write it to the system image. """ - logger.info('starting: merge system %s and other %s into output %s', - system_target_files, other_target_files, output_target_files) + logger.info('starting: merge framework %s and vendor %s into output %s', + framework_target_files, vendor_target_files, output_target_files) - # Create directory names that we'll use when we extract files from system, - # and other, and for zipping the final output. + # Create directory names that we'll use when we extract files from framework, + # and vendor, and for zipping the final output. 
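process_apex_keys_apk_certs_common (earlier in this diff) merges the per-package records from both builds and refuses conflicting duplicates. A self-contained restatement of that merge rule, with hypothetical sample lines:

```python
def merge_keyed_lines(framework_lines, vendor_lines):
    """Merge 'name ...' records keyed by their first token, raising on
    conflicting duplicates, as the META/apkcerts.txt and META/apexkeys.txt
    merging does."""
    def to_dict(lines):
        return {line.split()[0]: line.strip()
                for line in lines if line.strip()}

    framework = to_dict(framework_lines)
    merged = to_dict(vendor_lines)
    for key, value in framework.items():
        if key in merged and merged[key] != value:
            raise ValueError('Conflicting entries for %s:\n %s and\n %s'
                             % (key, value, merged[key]))
        merged[key] = value
    return sorted(merged.values())

print(merge_keyed_lines(
    ['name="A.apk" certificate="f.pem"'],
    ['name="B.apk" certificate="v.pem"']))
```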
-  system_target_files_temp_dir = os.path.join(temp_dir, 'system')
-  other_target_files_temp_dir = os.path.join(temp_dir, 'other')
+  framework_target_files_temp_dir = os.path.join(temp_dir, 'framework')
+  vendor_target_files_temp_dir = os.path.join(temp_dir, 'vendor')
   output_target_files_temp_dir = os.path.join(temp_dir, 'output')
 
-  # Extract "as is" items from the input system partial target files package.
+  # Extract "as is" items from the input framework partial target files package.
   # We extract them directly into the output temporary directory since the
   # items do not need special case processing.
   extract_items(
-      target_files=system_target_files,
+      target_files=framework_target_files,
       target_files_temp_dir=output_target_files_temp_dir,
-      extract_item_list=system_item_list)
+      extract_item_list=framework_item_list)
 
-  # Extract "as is" items from the input other partial target files package. We
+  # Extract "as is" items from the input vendor partial target files package. We
   # extract them directly into the output temporary directory since the items
   # do not need special case processing.
   extract_items(
-      target_files=other_target_files,
+      target_files=vendor_target_files,
       target_files_temp_dir=output_target_files_temp_dir,
-      extract_item_list=other_item_list)
+      extract_item_list=vendor_item_list)
 
-  # Extract "special" items from the input system partial target files package.
-  # We extract these items to different directory since they require special
-  # processing before they will end up in the output directory.
+  # Extract "special" items from the input framework partial target files
+  # package. We extract these items to a different directory since they require
+  # special processing before they will end up in the output directory.
   extract_items(
-      target_files=system_target_files,
-      target_files_temp_dir=system_target_files_temp_dir,
-      extract_item_list=system_extract_special_item_list)
+      target_files=framework_target_files,
+      target_files_temp_dir=framework_target_files_temp_dir,
+      extract_item_list=FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST)
 
-  # Extract "special" items from the input other partial target files package.
-  # We extract these items to different directory since they require special
-  # processing before they will end up in the output directory.
+  # Extract "special" items from the input vendor partial target files package.
+  # We extract these items to a different directory since they require special
+  # processing before they will end up in the output directory.
   extract_items(
-      target_files=other_target_files,
-      target_files_temp_dir=other_target_files_temp_dir,
-      extract_item_list=other_extract_special_item_list)
+      target_files=vendor_target_files,
+      target_files_temp_dir=vendor_target_files_temp_dir,
+      extract_item_list=VENDOR_EXTRACT_SPECIAL_ITEM_LIST)
 
   # Now that the temporary directories contain all the extracted files, perform
   # special case processing on any items that need it. After this function
@@ -655,11 +830,10 @@ def merge_target_files(temp_dir, system_target_files, system_item_list,
   # files package are in place.
 
   process_special_cases(
-      temp_dir=temp_dir,
-      system_target_files_temp_dir=system_target_files_temp_dir,
-      other_target_files_temp_dir=other_target_files_temp_dir,
+      framework_target_files_temp_dir=framework_target_files_temp_dir,
+      vendor_target_files_temp_dir=vendor_target_files_temp_dir,
       output_target_files_temp_dir=output_target_files_temp_dir,
-      system_misc_info_keys=system_misc_info_keys,
+      framework_misc_info_keys=framework_misc_info_keys,
       rebuild_recovery=rebuild_recovery)
 
   # Regenerate IMAGES in the temporary directory.
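[Editor's note] The hunks above replace the system/other naming with
framework/vendor throughout merge_target_files.py. As a sketch of how the
renamed entry point is driven end to end (an illustration, not part of the
change itself; the three zip file names are hypothetical):

  # Python sketch; assumes tools/releasetools is on PYTHONPATH so that
  # common.py and merge_target_files.py are importable. call_func_with_temp_dir
  # normally manages the scratch directory; here it is done by hand.
  import common
  from merge_target_files import (DEFAULT_FRAMEWORK_ITEM_LIST,
                                  DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
                                  DEFAULT_VENDOR_ITEM_LIST,
                                  merge_target_files)

  temp_dir = common.MakeTempDir(prefix='merge_target_files_')
  try:
    merge_target_files(
        temp_dir=temp_dir,
        framework_target_files='framework-target_files.zip',  # hypothetical
        framework_item_list=DEFAULT_FRAMEWORK_ITEM_LIST,
        framework_misc_info_keys=DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
        vendor_target_files='vendor-target_files.zip',  # hypothetical
        vendor_item_list=DEFAULT_VENDOR_ITEM_LIST,
        output_target_files='merged-target_files.zip',  # hypothetical
        output_dir=None,
        output_item_list=None,
        output_ota=None,
        output_img=None,
        output_super_empty=None,
        rebuild_recovery=False)
  finally:
    common.Cleanup()  # removes the temp dirs registered by MakeTempDir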
@@ -728,7 +902,8 @@ def merge_target_files(temp_dir, system_target_files, system_item_list,
       output_target_files_meta_dir,
   ]
   find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
-  meta_content = common.RunAndCheckOutput(['sort'], stdin=find_process.stdout,
+  meta_content = common.RunAndCheckOutput(['sort'],
+                                          stdin=find_process.stdout,
                                           verbose=False)
 
   find_command = [
@@ -736,7 +911,8 @@ def merge_target_files(temp_dir, system_target_files, system_item_list,
       output_target_files_meta_dir, '-prune', '-o', '-print'
   ]
   find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
-  other_content = common.RunAndCheckOutput(['sort'], stdin=find_process.stdout,
+  other_content = common.RunAndCheckOutput(['sort'],
+                                           stdin=find_process.stdout,
                                            verbose=False)
 
   with open(output_target_files_list, 'wb') as f:
@@ -755,6 +931,7 @@ def merge_target_files(temp_dir, system_target_files, system_item_list,
   ]
   logger.info('creating %s', output_target_files)
   common.RunAndWait(command, verbose=True)
+  logger.info('finished creating %s', output_target_files)
 
   # Create the OTA package from the merged target files package.
 
@@ -766,7 +943,6 @@ def merge_target_files(temp_dir, system_target_files, system_item_list,
     ota_from_target_files.main(ota_from_target_files_args)
 
-
 def call_func_with_temp_dir(func, keep_tmp):
   """Manage the creation and cleanup of the temporary directory.
 
@@ -807,15 +983,35 @@ def main():
 
   def option_handler(o, a):
     if o == '--system-target-files':
-      OPTIONS.system_target_files = a
+      logger.warning(
+          '--system-target-files has been renamed to --framework-target-files')
+      OPTIONS.framework_target_files = a
+    elif o == '--framework-target-files':
+      OPTIONS.framework_target_files = a
     elif o == '--system-item-list':
-      OPTIONS.system_item_list = a
+      logger.warning(
+          '--system-item-list has been renamed to --framework-item-list')
+      OPTIONS.framework_item_list = a
+    elif o == '--framework-item-list':
+      OPTIONS.framework_item_list = a
     elif o == '--system-misc-info-keys':
-      OPTIONS.system_misc_info_keys = a
+      logger.warning(
+          '--system-misc-info-keys has been renamed to --framework-misc-info-keys'
+      )
+      OPTIONS.framework_misc_info_keys = a
+    elif o == '--framework-misc-info-keys':
+      OPTIONS.framework_misc_info_keys = a
     elif o == '--other-target-files':
-      OPTIONS.other_target_files = a
+      logger.warning(
+          '--other-target-files has been renamed to --vendor-target-files')
+      OPTIONS.vendor_target_files = a
+    elif o == '--vendor-target-files':
+      OPTIONS.vendor_target_files = a
    elif o == '--other-item-list':
-      OPTIONS.other_item_list = a
+      logger.warning('--other-item-list has been renamed to --vendor-item-list')
+      OPTIONS.vendor_item_list = a
+    elif o == '--vendor-item-list':
+      OPTIONS.vendor_item_list = a
     elif o == '--output-target-files':
       OPTIONS.output_target_files = a
     elif o == '--output-dir':
@@ -841,10 +1037,15 @@ def main():
       __doc__,
       extra_long_opts=[
           'system-target-files=',
+          'framework-target-files=',
           'system-item-list=',
+          'framework-item-list=',
           'system-misc-info-keys=',
+          'framework-misc-info-keys=',
           'other-target-files=',
+          'vendor-target-files=',
           'other-item-list=',
+          'vendor-item-list=',
           'output-target-files=',
           'output-dir=',
           'output-item-list=',
@@ -856,27 +1057,28 @@ def main():
       ],
       extra_option_handler=option_handler)
 
-  if (args or OPTIONS.system_target_files is None or
-      OPTIONS.other_target_files is None or
+  if (args or OPTIONS.framework_target_files is None or
+      OPTIONS.vendor_target_files is None or
       (OPTIONS.output_target_files is None and
        OPTIONS.output_dir is None) or
       (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
     common.Usage(__doc__)
     sys.exit(1)
 
-  if OPTIONS.system_item_list:
-    system_item_list = read_config_list(OPTIONS.system_item_list)
+  if OPTIONS.framework_item_list:
+    framework_item_list = read_config_list(OPTIONS.framework_item_list)
   else:
-    system_item_list = default_system_item_list
+    framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
 
-  if OPTIONS.system_misc_info_keys:
-    system_misc_info_keys = read_config_list(OPTIONS.system_misc_info_keys)
+  if OPTIONS.framework_misc_info_keys:
+    framework_misc_info_keys = read_config_list(
+        OPTIONS.framework_misc_info_keys)
   else:
-    system_misc_info_keys = default_system_misc_info_keys
+    framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
 
-  if OPTIONS.other_item_list:
-    other_item_list = read_config_list(OPTIONS.other_item_list)
+  if OPTIONS.vendor_item_list:
+    vendor_item_list = read_config_list(OPTIONS.vendor_item_list)
   else:
-    other_item_list = default_other_item_list
+    vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
 
   if OPTIONS.output_item_list:
     output_item_list = read_config_list(OPTIONS.output_item_list)
@@ -884,19 +1086,19 @@ def main():
     output_item_list = None
 
   if not validate_config_lists(
-      system_item_list=system_item_list,
-      system_misc_info_keys=system_misc_info_keys,
-      other_item_list=other_item_list):
+      framework_item_list=framework_item_list,
+      framework_misc_info_keys=framework_misc_info_keys,
+      vendor_item_list=vendor_item_list):
     sys.exit(1)
 
   call_func_with_temp_dir(
       lambda temp_dir: merge_target_files(
           temp_dir=temp_dir,
-          system_target_files=OPTIONS.system_target_files,
-          system_item_list=system_item_list,
-          system_misc_info_keys=system_misc_info_keys,
-          other_target_files=OPTIONS.other_target_files,
-          other_item_list=other_item_list,
+          framework_target_files=OPTIONS.framework_target_files,
+          framework_item_list=framework_item_list,
+          framework_misc_info_keys=framework_misc_info_keys,
+          vendor_target_files=OPTIONS.vendor_target_files,
+          vendor_item_list=vendor_item_list,
           output_target_files=OPTIONS.output_target_files,
           output_dir=OPTIONS.output_dir,
           output_item_list=output_item_list,
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
new file mode 100755
index 0000000000..1778615a64
--- /dev/null
+++ b/tools/releasetools/sign_apex.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Signs a standalone APEX file.
+
+Usage:  sign_apex [flags] input_apex_file output_apex_file
+
+  --container_key <key>
+      Mandatory flag that specifies the container signing key.
+
+  --payload_key <key>
+      Mandatory flag that specifies the payload signing key.
+
+  --payload_extra_args <args>
+      Optional flag that specifies any extra args to be passed to payload signer
+      (e.g. --payload_extra_args="--signing_helper_with_files /path/to/helper").
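+
+  Example (an editor-added illustration, not part of the original change; the
+  key file names are hypothetical, and the container key may be given with or
+  without its .x509.pem suffix, which the tool strips):
+
+      sign_apex --container_key testkey.x509.pem --payload_key payload.pem \
+          foo.apex foo.signed.apex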
+""" + +import logging +import shutil +import sys + +import apex_utils +import common + +logger = logging.getLogger(__name__) + + +def main(argv): + + options = {} + + def option_handler(o, a): + if o == '--container_key': + # Strip the suffix if any, as common.SignFile expects no suffix. + DEFAULT_CONTAINER_KEY_SUFFIX = '.x509.pem' + if a.endswith(DEFAULT_CONTAINER_KEY_SUFFIX): + a = a[:-len(DEFAULT_CONTAINER_KEY_SUFFIX)] + options['container_key'] = a + elif o == '--payload_key': + options['payload_key'] = a + elif o == '--payload_extra_args': + options['payload_extra_args'] = a + else: + return False + return True + + args = common.ParseOptions( + argv, __doc__, + extra_opts='', + extra_long_opts=[ + 'container_key=', + 'payload_extra_args=', + 'payload_key=', + ], + extra_option_handler=option_handler) + + if (len(args) != 2 or 'container_key' not in options or + 'payload_key' not in options): + common.Usage(__doc__) + sys.exit(1) + + common.InitLogging() + + input_zip = args[0] + output_zip = args[1] + with open(input_zip) as input_fp: + apex_data = input_fp.read() + + signed_apex = apex_utils.SignApex( + apex_data, + payload_key=options['payload_key'], + container_key=options['container_key'], + container_pw=None, + codename_to_api_level_map=None, + signing_args=options.get('payload_extra_args')) + + shutil.copyfile(signed_apex, output_zip) + logger.info("done.") + + +if __name__ == '__main__': + try: + main(sys.argv[1:]) + except common.ExternalError: + logger.exception("\n ERROR:\n") + sys.exit(1) + finally: + common.Cleanup() diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py index 7b5f000939..c174d2fedf 100755 --- a/tools/releasetools/sign_target_files_apks.py +++ b/tools/releasetools/sign_target_files_apks.py @@ -91,12 +91,15 @@ Usage: sign_target_files_apks [flags] input_target_files output_target_files Replace the veritykeyid in BOOT/cmdline of input_target_file_zip with keyid of the cert pointed by <path_to_X509_PEM_cert_file>. - --avb_{boot,system,system_other,vendor,dtbo,vbmeta}_algorithm <algorithm> - --avb_{boot,system,system_other,vendor,dtbo,vbmeta}_key <key> + --avb_{boot,system,system_other,vendor,dtbo,vbmeta,vbmeta_system, + vbmeta_vendor}_algorithm <algorithm> + --avb_{boot,system,system_other,vendor,dtbo,vbmeta,vbmeta_system, + vbmeta_vendor}_key <key> Use the specified algorithm (e.g. SHA256_RSA4096) and the key to AVB-sign the specified image. Otherwise it uses the existing values in info dict. - --avb_{apex,boot,system,system_other,vendor,dtbo,vbmeta}_extra_args <args> + --avb_{apex,boot,system,system_other,vendor,dtbo,vbmeta,vbmeta_system, + vbmeta_vendor}_extra_args <args> Specify any additional args that are needed to AVB-sign the image (e.g. "--signing_helper /path/to/helper"). The args will be appended to the existing ones in info dict. @@ -400,77 +403,6 @@ def SignApk(data, keyname, pw, platform_api_level, codename_to_api_level_map, return data -def SignApex(apex_data, payload_key, container_key, container_pw, - codename_to_api_level_map, signing_args=None): - """Signs the current APEX with the given payload/container keys. - - Args: - apex_data: Raw APEX data. - payload_key: The path to payload signing key (w/ extension). - container_key: The path to container signing key (w/o extension). - container_pw: The matching password of the container_key, or None. - codename_to_api_level_map: A dict that maps from codename to API level. - signing_args: Additional args to be passed to the payload signer. 
-
-  Returns:
-    The path to the signed APEX file.
-  """
-  apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex')
-  with open(apex_file, 'wb') as apex_fp:
-    apex_fp.write(apex_data)
-
-  APEX_PAYLOAD_IMAGE = 'apex_payload.img'
-  APEX_PUBKEY = 'apex_pubkey'
-
-  # 1a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
-  # payload_key.
-  payload_dir = common.MakeTempDir(prefix='apex-payload-')
-  with zipfile.ZipFile(apex_file) as apex_fd:
-    payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
-
-  payload_info = apex_utils.ParseApexPayloadInfo(payload_file)
-  apex_utils.SignApexPayload(
-      payload_file,
-      payload_key,
-      payload_info['apex.key'],
-      payload_info['Algorithm'],
-      payload_info['Salt'],
-      signing_args)
-
-  # 1b. Update the embedded payload public key.
-  payload_public_key = common.ExtractAvbPublicKey(payload_key)
-
-  common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
-  common.ZipDelete(apex_file, APEX_PUBKEY)
-  apex_zip = zipfile.ZipFile(apex_file, 'a')
-  common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
-  common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
-  common.ZipClose(apex_zip)
-
-  # 2. Align the files at page boundary (same as in apexer).
-  aligned_apex = common.MakeTempFile(
-      prefix='apex-container-', suffix='.apex')
-  common.RunAndCheckOutput(
-      ['zipalign', '-f', '4096', apex_file, aligned_apex])
-
-  # 3. Sign the APEX container with container_key.
-  signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')
-
-  # Specify the 4K alignment when calling SignApk.
-  extra_signapk_args = OPTIONS.extra_signapk_args[:]
-  extra_signapk_args.extend(['-a', '4096'])
-
-  common.SignFile(
-      aligned_apex,
-      signed_apex,
-      container_key,
-      container_pw,
-      codename_to_api_level_map=codename_to_api_level_map,
-      extra_signapk_args=extra_signapk_args)
-
-  return signed_apex
-
-
 def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
                        apk_keys, apex_keys, key_passwords,
                        platform_api_level, codename_to_api_level_map,
@@ -535,7 +467,7 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
           print("    : %-*s payload   (%s)" % (
               maxsize, name, payload_key))
 
-          signed_apex = SignApex(
+          signed_apex = apex_utils.SignApex(
              data,
              payload_key,
              container_key,
@@ -558,6 +490,15 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
 
     # System properties.
elif filename in ("SYSTEM/build.prop", "VENDOR/build.prop", + "SYSTEM/vendor/build.prop", + "ODM/build.prop", # legacy + "ODM/etc/build.prop", + "VENDOR/odm/build.prop", # legacy + "VENDOR/odm/etc/build.prop", + "PRODUCT/build.prop", + "SYSTEM/product/build.prop", + "PRODUCT_SERVICES/build.prop", + "SYSTEM/product_services/build.prop", "SYSTEM/etc/prop.default", "BOOT/RAMDISK/prop.default", "BOOT/RAMDISK/default.prop", # legacy @@ -752,8 +693,8 @@ def RewriteProps(data): original_line = line if line and line[0] != '#' and "=" in line: key, value = line.split("=", 1) - if key in ("ro.build.fingerprint", "ro.build.thumbprint", - "ro.vendor.build.fingerprint", "ro.vendor.build.thumbprint"): + if (key.startswith("ro.") and + key.endswith((".build.fingerprint", ".build.thumbprint"))): pieces = value.split("/") pieces[-1] = EditTags(pieces[-1]) value = "/".join(pieces) @@ -766,7 +707,7 @@ def RewriteProps(data): assert len(pieces) == 5 pieces[-1] = EditTags(pieces[-1]) value = " ".join(pieces) - elif key == "ro.build.tags": + elif key.startswith("ro.") and key.endswith(".build.tags"): value = EditTags(value) elif key == "ro.build.display.id": # change, eg, "JWR66N dev-keys" to "JWR66N" @@ -959,6 +900,8 @@ def ReplaceAvbSigningKeys(misc_info): 'system_other' : 'avb_system_other_add_hashtree_footer_args', 'vendor' : 'avb_vendor_add_hashtree_footer_args', 'vbmeta' : 'avb_vbmeta_args', + 'vbmeta_system' : 'avb_vbmeta_system_args', + 'vbmeta_vendor' : 'avb_vbmeta_vendor_args', } def ReplaceAvbPartitionSigningKey(partition): @@ -1186,6 +1129,18 @@ def main(argv): OPTIONS.avb_algorithms['vendor'] = a elif o == "--avb_vendor_extra_args": OPTIONS.avb_extra_args['vendor'] = a + elif o == "--avb_vbmeta_system_key": + OPTIONS.avb_keys['vbmeta_system'] = a + elif o == "--avb_vbmeta_system_algorithm": + OPTIONS.avb_algorithms['vbmeta_system'] = a + elif o == "--avb_vbmeta_system_extra_args": + OPTIONS.avb_extra_args['vbmeta_system'] = a + elif o == "--avb_vbmeta_vendor_key": + OPTIONS.avb_keys['vbmeta_vendor'] = a + elif o == "--avb_vbmeta_vendor_algorithm": + OPTIONS.avb_algorithms['vbmeta_vendor'] = a + elif o == "--avb_vbmeta_vendor_extra_args": + OPTIONS.avb_extra_args['vbmeta_vendor'] = a elif o == "--avb_apex_extra_args": OPTIONS.avb_extra_args['apex'] = a else: @@ -1225,6 +1180,12 @@ def main(argv): "avb_vendor_algorithm=", "avb_vendor_key=", "avb_vendor_extra_args=", + "avb_vbmeta_system_algorithm=", + "avb_vbmeta_system_key=", + "avb_vbmeta_system_extra_args=", + "avb_vbmeta_vendor_algorithm=", + "avb_vbmeta_vendor_key=", + "avb_vbmeta_vendor_extra_args=", ], extra_option_handler=option_handler) diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py index 422262cdea..08e01907c0 100644 --- a/tools/releasetools/test_add_img_to_target_files.py +++ b/tools/releasetools/test_add_img_to_target_files.py @@ -283,6 +283,35 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): self._verifyCareMap(expected, care_map_file) + @test_utils.SkipIfExternalToolsUnavailable() + def test_AddCareMapForAbOta_skipPartition(self): + image_paths = self._test_AddCareMapForAbOta() + + # Remove vendor_image_size to invalidate the care_map for vendor.img. 
+    del OPTIONS.info_dict['vendor_image_size']
+
+    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+
+    care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
+    expected = ['system', RangeSet("0-5 10-15").to_string_raw(),
+                "ro.system.build.fingerprint",
+                "google/sailfish/12345:user/dev-keys"]
+
+    self._verifyCareMap(expected, care_map_file)
+
+  @test_utils.SkipIfExternalToolsUnavailable()
+  def test_AddCareMapForAbOta_skipAllPartitions(self):
+    image_paths = self._test_AddCareMapForAbOta()
+
+    # Remove the image_size properties for all the partitions.
+    del OPTIONS.info_dict['system_image_size']
+    del OPTIONS.info_dict['vendor_image_size']
+
+    AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)
+
+    self.assertFalse(
+        os.path.exists(os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')))
+
   def test_AddCareMapForAbOta_verityNotEnabled(self):
     """No care_map.pb should be generated if verity not enabled."""
     image_paths = self._test_AddCareMapForAbOta()
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 7e18a3425d..0a636bb3ae 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -19,9 +19,11 @@ import os.path
 import common
 import test_utils
 from merge_target_files import (read_config_list, validate_config_lists,
-                                default_system_item_list,
-                                default_other_item_list,
-                                default_system_misc_info_keys, copy_items)
+                                DEFAULT_FRAMEWORK_ITEM_LIST,
+                                DEFAULT_VENDOR_ITEM_LIST,
+                                DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
+                                merge_dynamic_partition_info_dicts,
+                                process_apex_keys_apk_certs_common)
 
 
 class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
@@ -82,10 +84,10 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
         os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
 
   def test_read_config_list(self):
-    system_item_list_file = os.path.join(self.testdata_dir,
-                                         'merge_config_system_item_list')
-    system_item_list = read_config_list(system_item_list_file)
-    expected_system_item_list = [
+    framework_item_list_file = os.path.join(self.testdata_dir,
+                                            'merge_config_framework_item_list')
+    framework_item_list = read_config_list(framework_item_list_file)
+    expected_framework_item_list = [
         'META/apkcerts.txt',
         'META/filesystem_config.txt',
         'META/root_filesystem_config.txt',
@@ -96,35 +98,130 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
         'ROOT/*',
         'SYSTEM/*',
     ]
-    self.assertItemsEqual(system_item_list, expected_system_item_list)
+    self.assertItemsEqual(framework_item_list, expected_framework_item_list)
 
   def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
-    system_item_list = default_system_item_list[:]
-    system_item_list.remove('SYSTEM/*')
+    framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+    framework_item_list.remove('SYSTEM/*')
     self.assertFalse(
-        validate_config_lists(system_item_list, default_system_misc_info_keys,
-                              default_other_item_list))
+        validate_config_lists(framework_item_list,
+                              DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
+                              DEFAULT_VENDOR_ITEM_LIST))
 
   def test_validate_config_lists_ReturnsTrueIfDefaultItemInDifferentList(self):
-    system_item_list = default_system_item_list[:]
-    system_item_list.remove('ROOT/*')
-    other_item_list = default_other_item_list[:]
-    other_item_list.append('ROOT/*')
+    framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+    framework_item_list.remove('ROOT/*')
+    vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+    vendor_item_list.append('ROOT/*')
     self.assertTrue(
-        validate_config_lists(system_item_list, default_system_misc_info_keys,
-                              other_item_list))
+        validate_config_lists(framework_item_list,
+                              DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
+                              vendor_item_list))
 
   def test_validate_config_lists_ReturnsTrueIfExtraItem(self):
-    system_item_list = default_system_item_list[:]
-    system_item_list.append('MY_NEW_PARTITION/*')
+    framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+    framework_item_list.append('MY_NEW_PARTITION/*')
     self.assertTrue(
-        validate_config_lists(system_item_list, default_system_misc_info_keys,
-                              default_other_item_list))
+        validate_config_lists(framework_item_list,
+                              DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
+                              DEFAULT_VENDOR_ITEM_LIST))
+
+  def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartition(self):
+    vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+    vendor_item_list.append('SYSTEM/my_system_file')
+    self.assertFalse(
+        validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
+                              DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
+                              vendor_item_list))
 
   def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
     for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
-      system_misc_info_keys = default_system_misc_info_keys[:]
-      system_misc_info_keys.append(bad_key)
+      framework_misc_info_keys = list(DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
+      framework_misc_info_keys.append(bad_key)
       self.assertFalse(
-          validate_config_lists(default_system_item_list, system_misc_info_keys,
-                                default_other_item_list))
+          validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
+                                framework_misc_info_keys,
+                                DEFAULT_VENDOR_ITEM_LIST))
+
+  def test_merge_dynamic_partition_info_dicts_ReturnsMergedDict(self):
+    framework_dict = {
+        'super_partition_groups': 'group_a',
+        'dynamic_partition_list': 'system',
+        'super_group_a_list': 'system',
+    }
+    vendor_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'vendor product',
+        'super_group_a_list': 'vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    merged_dict = merge_dynamic_partition_info_dicts(
+        framework_dict=framework_dict,
+        vendor_dict=vendor_dict,
+        size_prefix='super_',
+        size_suffix='_size',
+        list_prefix='super_',
+        list_suffix='_list')
+    expected_merged_dict = {
+        'super_partition_groups': 'group_a group_b',
+        'dynamic_partition_list': 'system vendor product',
+        'super_group_a_list': 'system vendor',
+        'super_group_a_size': '1000',
+        'super_group_b_list': 'product',
+        'super_group_b_size': '2000',
+    }
+    self.assertEqual(merged_dict, expected_merged_dict)
+
+  def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
+    output_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(output_dir, 'META'))
+
+    framework_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(framework_dir, 'META'))
+    os.symlink(
+        os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
+        os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+
+    vendor_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(vendor_dir, 'META'))
+    os.symlink(
+        os.path.join(self.testdata_dir, 'apexkeys_vendor.txt'),
+        os.path.join(vendor_dir, 'META', 'apexkeys.txt'))
+
+    process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
+                                       'apexkeys.txt')
+
+    merged_entries = []
+    merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
+
+    with open(merged_path) as f:
+      merged_entries = f.read().split('\n')
+
+    output_entries = []
+    output_path = os.path.join(output_dir, 'META', 'apexkeys.txt')
+
+    with open(output_path) as f:
+      output_entries = f.read().split('\n')
+
+    return self.assertEqual(merged_entries, output_entries)
+
+  def test_process_apex_keys_apk_certs_ReturnsFalseIfConflictsPresent(self):
+    output_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(output_dir, 'META'))
+
+    framework_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(framework_dir, 'META'))
+    os.symlink(
+        os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
+        os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+
+    conflict_dir = common.MakeTempDir()
+    os.makedirs(os.path.join(conflict_dir, 'META'))
+    os.symlink(
+        os.path.join(self.testdata_dir, 'apexkeys_framework_conflict.txt'),
+        os.path.join(conflict_dir, 'META', 'apexkeys.txt'))
+
+    self.assertRaises(ValueError, process_apex_keys_apk_certs_common,
+                      framework_dir, conflict_dir, output_dir, 'apexkeys.txt')
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index a7bf9772c1..e142ebb06c 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -53,36 +53,60 @@ name="apex.apexd_test_different_app.apex" public_key="system/apex/apexd/apexd_te
 
   def test_RewriteProps(self):
     props = (
-        ('', '\n'),
+        ('', ''),
         ('ro.build.fingerprint=foo/bar/dev-keys',
-         'ro.build.fingerprint=foo/bar/release-keys\n'),
+         'ro.build.fingerprint=foo/bar/release-keys'),
         ('ro.build.thumbprint=foo/bar/dev-keys',
-         'ro.build.thumbprint=foo/bar/release-keys\n'),
+         'ro.build.thumbprint=foo/bar/release-keys'),
         ('ro.vendor.build.fingerprint=foo/bar/dev-keys',
-         'ro.vendor.build.fingerprint=foo/bar/release-keys\n'),
+         'ro.vendor.build.fingerprint=foo/bar/release-keys'),
         ('ro.vendor.build.thumbprint=foo/bar/dev-keys',
-         'ro.vendor.build.thumbprint=foo/bar/release-keys\n'),
-        ('# comment line 1', '# comment line 1\n'),
+         'ro.vendor.build.thumbprint=foo/bar/release-keys'),
+        ('ro.odm.build.fingerprint=foo/bar/test-keys',
+         'ro.odm.build.fingerprint=foo/bar/release-keys'),
+        ('ro.odm.build.thumbprint=foo/bar/test-keys',
+         'ro.odm.build.thumbprint=foo/bar/release-keys'),
+        ('ro.product.build.fingerprint=foo/bar/dev-keys',
+         'ro.product.build.fingerprint=foo/bar/release-keys'),
+        ('ro.product.build.thumbprint=foo/bar/dev-keys',
+         'ro.product.build.thumbprint=foo/bar/release-keys'),
+        ('ro.product_services.build.fingerprint=foo/bar/test-keys',
+         'ro.product_services.build.fingerprint=foo/bar/release-keys'),
+        ('ro.product_services.build.thumbprint=foo/bar/test-keys',
+         'ro.product_services.build.thumbprint=foo/bar/release-keys'),
+        ('# comment line 1', '# comment line 1'),
         ('ro.bootimage.build.fingerprint=foo/bar/dev-keys',
-         'ro.bootimage.build.fingerprint=foo/bar/release-keys\n'),
+         'ro.bootimage.build.fingerprint=foo/bar/release-keys'),
         ('ro.build.description='
         'sailfish-user 8.0.0 OPR6.170623.012 4283428 dev-keys',
         'ro.build.description='
-         'sailfish-user 8.0.0 OPR6.170623.012 4283428 release-keys\n'),
-        ('ro.build.tags=dev-keys', 'ro.build.tags=release-keys\n'),
-        ('# comment line 2', '# comment line 2\n'),
+         'sailfish-user 8.0.0 OPR6.170623.012 4283428 release-keys'),
+        ('ro.build.tags=dev-keys', 'ro.build.tags=release-keys'),
+        ('ro.build.tags=test-keys', 'ro.build.tags=release-keys'),
+        ('ro.system.build.tags=dev-keys',
+         'ro.system.build.tags=release-keys'),
+        ('ro.vendor.build.tags=dev-keys',
+         'ro.vendor.build.tags=release-keys'),
+        ('ro.odm.build.tags=dev-keys',
+         'ro.odm.build.tags=release-keys'),
+        ('ro.product.build.tags=dev-keys',
+         'ro.product.build.tags=release-keys'),
+        ('ro.product_services.build.tags=dev-keys',
+         'ro.product_services.build.tags=release-keys'),
+        ('# comment line 2', '# comment line 2'),
         ('ro.build.display.id=OPR6.170623.012 dev-keys',
-         'ro.build.display.id=OPR6.170623.012\n'),
-        ('# comment line 3', '# comment line 3\n'),
+         'ro.build.display.id=OPR6.170623.012'),
+        ('# comment line 3', '# comment line 3'),
     )
 
     # Assert the case for each individual line.
-    for prop, output in props:
-      self.assertEqual(RewriteProps(prop), output)
+    for prop, expected in props:
+      self.assertEqual(expected + '\n', RewriteProps(prop))
 
     # Concatenate all the input lines.
-    self.assertEqual(RewriteProps('\n'.join([prop[0] for prop in props])),
-                     ''.join([prop[1] for prop in props]))
+    self.assertEqual(
+        '\n'.join([prop[1] for prop in props]) + '\n',
+        RewriteProps('\n'.join([prop[0] for prop in props])))
 
   def test_ReplaceVerityKeyId(self):
     BOOT_CMDLINE1 = (
diff --git a/tools/releasetools/testdata/apexkeys_framework.txt b/tools/releasetools/testdata/apexkeys_framework.txt
new file mode 100644
index 0000000000..2346668708
--- /dev/null
+++ b/tools/releasetools/testdata/apexkeys_framework.txt
@@ -0,0 +1,2 @@
+name="com.android.runtime.debug.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.debug.x509.pem" container_private_key="art/build/apex/com.android.runtime.debug.pk8"
+name="com.android.conscrypt.apex" public_key="external/conscrypt/apex/com.android.conscrypt.avbpubkey" private_key="external/conscrypt/apex/com.android.conscrypt.pem" container_certificate="external/conscrypt/apex/com.android.conscrypt.x509.pem" container_private_key="external/conscrypt/apex/com.android.conscrypt.pk8"
diff --git a/tools/releasetools/testdata/apexkeys_framework_conflict.txt b/tools/releasetools/testdata/apexkeys_framework_conflict.txt
new file mode 100644
index 0000000000..caa21c2fab
--- /dev/null
+++ b/tools/releasetools/testdata/apexkeys_framework_conflict.txt
@@ -0,0 +1 @@
+name="com.android.runtime.debug.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.release.x509.pem" container_private_key="art/build/apex/com.android.runtime.debug.pk8"
diff --git a/tools/releasetools/testdata/apexkeys_merge.txt b/tools/releasetools/testdata/apexkeys_merge.txt
new file mode 100644
index 0000000000..48e789f43d
--- /dev/null
+++ b/tools/releasetools/testdata/apexkeys_merge.txt
@@ -0,0 +1,4 @@
+name="com.android.conscrypt.apex" public_key="external/conscrypt/apex/com.android.conscrypt.avbpubkey" private_key="external/conscrypt/apex/com.android.conscrypt.pem" container_certificate="external/conscrypt/apex/com.android.conscrypt.x509.pem" container_private_key="external/conscrypt/apex/com.android.conscrypt.pk8"
+name="com.android.runtime.debug.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.debug.x509.pem" container_private_key="art/build/apex/com.android.runtime.debug.pk8"
+name="com.android.runtime.release.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.release.x509.pem" container_private_key="art/build/apex/com.android.runtime.release.pk8"
+name="com.android.support.apexer.apex" public_key="system/apex/apexer/etc/com.android.support.apexer.avbpubkey" private_key="system/apex/apexer/etc/com.android.support.apexer.pem" container_certificate="build/target/product/security/testkey.x509.pem" container_private_key="build/target/product/security/testkey.pk8" diff --git a/tools/releasetools/testdata/apexkeys_vendor.txt b/tools/releasetools/testdata/apexkeys_vendor.txt new file mode 100644 index 0000000000..b751227aca --- /dev/null +++ b/tools/releasetools/testdata/apexkeys_vendor.txt @@ -0,0 +1,3 @@ +name="com.android.runtime.release.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.release.x509.pem" container_private_key="art/build/apex/com.android.runtime.release.pk8" +name="com.android.support.apexer.apex" public_key="system/apex/apexer/etc/com.android.support.apexer.avbpubkey" private_key="system/apex/apexer/etc/com.android.support.apexer.pem" container_certificate="build/target/product/security/testkey.x509.pem" container_private_key="build/target/product/security/testkey.pk8" +name="com.android.runtime.debug.apex" public_key="art/build/apex/com.android.runtime.avbpubkey" private_key="art/build/apex/com.android.runtime.pem" container_certificate="art/build/apex/com.android.runtime.debug.x509.pem" container_private_key="art/build/apex/com.android.runtime.debug.pk8" diff --git a/tools/releasetools/testdata/merge_config_system_item_list b/tools/releasetools/testdata/merge_config_framework_item_list index 36c26187d8..36c26187d8 100644 --- a/tools/releasetools/testdata/merge_config_system_item_list +++ b/tools/releasetools/testdata/merge_config_framework_item_list diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py index 1c856a8e14..37d5d27bf5 100755 --- a/tools/releasetools/validate_target_files.py +++ b/tools/releasetools/validate_target_files.py @@ -327,11 +327,14 @@ def ValidateVerifiedBootImages(input_tmp, info_dict, options): cmd = ['avbtool', 'verify_image', '--image', image, '--key', key] # Append the args for chained partitions if any. - for partition in common.AVB_PARTITIONS: + for partition in common.AVB_PARTITIONS + common.AVB_VBMETA_PARTITIONS: key_name = 'avb_' + partition + '_key_path' if info_dict.get(key_name) is not None: + # Use the key file from command line if specified; otherwise fall back + # to the one in info dict. 
+        key_file = options.get(key_name, info_dict[key_name])
         chained_partition_arg = common.GetAvbChainedPartitionArg(
-            partition, info_dict, options[key_name])
+            partition, info_dict, key_file)
         cmd.extend(["--expected_chain_partition", chained_partition_arg])
 
     proc = common.Run(cmd)
@@ -357,7 +360,7 @@ def main():
       help='the verity public key to verify the bootable images (Verified '
            'Boot 1.0), or the vbmeta image (Verified Boot 2.0, aka AVB), where '
            'applicable')
-  for partition in common.AVB_PARTITIONS:
+  for partition in common.AVB_PARTITIONS + common.AVB_VBMETA_PARTITIONS:
     parser.add_argument(
         '--avb_' + partition + '_key_path',
         help='the public or private key in PEM format to verify AVB chained '
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index 3a58755e47..3063800e98 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -52,7 +52,7 @@ def GetVerityTreeSize(image_size):
 
 
 def GetVerityMetadataSize(image_size):
-  cmd = ["build_verity_metadata.py", "size", str(image_size)]
+  cmd = ["build_verity_metadata", "size", str(image_size)]
   output = common.RunAndCheckOutput(cmd, verbose=False)
   return int(output)
 
@@ -97,7 +97,7 @@ def BuildVerityTree(sparse_image_path, verity_image_path):
 
 def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                         block_device, signer_path, key, signer_args,
                         verity_disable):
-  cmd = ["build_verity_metadata.py", "build", str(image_size),
+  cmd = ["build_verity_metadata", "build", str(image_size),
         verity_metadata_path, root_hash, salt, block_device, signer_path,
         key]
   if signer_args:
     cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
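[Editor's note] For readers skimming the sign_target_files_apks.py hunks above:
the fingerprint/thumbprint rewriting no longer matches four hard-coded property
names but any "ro.<partition>.build.fingerprint" or ".thumbprint" key. A
standalone re-statement of just that predicate (illustrative only; the real
RewriteProps also handles ro.build.description, ro.build.display.id and the
*.build.tags keys, as the updated test_RewriteProps exercises):

  def is_fingerprint_or_thumbprint_prop(key):
    # Matches e.g. ro.build.fingerprint, ro.odm.build.thumbprint and
    # ro.product_services.build.fingerprint, per the generalized check.
    return (key.startswith("ro.") and
            key.endswith((".build.fingerprint", ".build.thumbprint")))

  assert is_fingerprint_or_thumbprint_prop("ro.build.fingerprint")
  assert is_fingerprint_or_thumbprint_prop("ro.odm.build.thumbprint")
  assert not is_fingerprint_or_thumbprint_prop("ro.build.display.id")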