Diffstat (limited to 'tools/releasetools/common.py')
-rw-r--r--  tools/releasetools/common.py | 339
1 file changed, 212 insertions, 127 deletions
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index f678d08e25..418d8daa85 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -68,8 +68,12 @@ class Options(object):
     self.search_path = os.path.dirname(os.path.dirname(exec_path))
 
     self.signapk_path = "framework/signapk.jar"  # Relative to search_path
+    if not os.path.exists(os.path.join(self.search_path, self.signapk_path)):
+      if "ANDROID_HOST_OUT" in os.environ:
+        self.search_path = os.environ["ANDROID_HOST_OUT"]
     self.signapk_shared_library_path = "lib64"   # Relative to search_path
     self.extra_signapk_args = []
+    self.aapt2_path = "aapt2"
     self.java_path = "java"  # Use the one on the path by default.
     self.java_args = ["-Xmx2048m"]  # The default JVM args.
     self.android_jar_path = None
@@ -80,11 +84,6 @@ class Options(object):
     self.boot_signer_args = []
     self.verity_signer_path = None
     self.verity_signer_args = []
-    self.aftl_tool_path = None
-    self.aftl_server = None
-    self.aftl_key_path = None
-    self.aftl_manufacturer_key_path = None
-    self.aftl_signer_helper = None
     self.verbose = False
     self.tempfiles = []
     self.device_specific = None
@@ -113,9 +112,9 @@ SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
 # descriptor into vbmeta.img. When adding a new entry here, the
 # AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated
 # accordingly.
-AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
-                  'system', 'system_ext', 'vendor', 'vendor_boot',
-                  'vendor_dlkm', 'odm_dlkm')
+AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw', 'recovery',
+                  'system', 'system_ext', 'vendor', 'vendor_boot', 'vendor_kernel_boot',
+                  'vendor_dlkm', 'odm_dlkm', 'system_dlkm')
 
 # Chained VBMeta partitions.
 AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')
@@ -129,10 +128,11 @@ PARTITIONS_WITH_CARE_MAP = [
     'odm',
     'vendor_dlkm',
     'odm_dlkm',
+    'system_dlkm',
 ]
 
 # Partitions with a build.prop file
-PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']
+PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']
 
 # See sysprop.mk. If file is moved, add new search paths here; don't remove
 # existing search paths.
@@ -276,6 +276,9 @@ def Run(args, verbose=None, **kwargs):
     args = args[:]
     args[0] = FindHostToolPath(args[0])
 
+  if verbose is None:
+    verbose = OPTIONS.verbose
+
   # Don't log any if caller explicitly says so.
   if verbose:
     logger.info("  Running: \"%s\"", " ".join(args))
@@ -451,6 +454,13 @@ class BuildInfo(object):
     return vabc_enabled
 
   @property
+  def is_vabc_xor(self):
+    vendor_prop = self.info_dict.get("vendor.build.prop")
+    vabc_xor_enabled = vendor_prop and \
+        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
+    return vabc_xor_enabled
+
+  @property
   def vendor_suppressed_vabc(self):
     vendor_prop = self.info_dict.get("vendor.build.prop")
     vabc_suppressed = vendor_prop and \
@@ -484,8 +494,9 @@ class BuildInfo(object):
   def GetPartitionBuildProp(self, prop, partition):
     """Returns the inquired build property for the provided partition."""
 
-    # Boot image uses ro.[product.]bootimage instead of boot.
-    prop_partition = "bootimage" if partition == "boot" else partition
+    # Boot image and init_boot image uses ro.[product.]bootimage instead of boot.
+    # This comes from the generic ramdisk
+    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition
 
     # If provided a partition for this property, only look within that
     # partition's build.prop.
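Note: after the hunk above, both "boot" and "init_boot" resolve to the "bootimage" property namespace, because the properties in either image come from the generic ramdisk. A minimal sketch of that mapping follows; the helper name is illustrative and not part of this change.

# Illustration only: the partition -> property-prefix mapping that
# GetPartitionBuildProp applies after this change.
def prop_partition(partition):
  # boot and init_boot both read ro.[product.]bootimage.* properties.
  return "bootimage" if partition in ("boot", "init_boot") else partition

assert prop_partition("init_boot") == "bootimage"
assert prop_partition("vendor") == "vendor"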
@@ -788,7 +799,7 @@ def LoadInfoDict(input_file, repacking=False):
 
   # Redirect {partition}_base_fs_file for each of the named partitions.
   for part_name in ["system", "vendor", "system_ext", "product", "odm",
-                    "vendor_dlkm", "odm_dlkm"]:
+                    "vendor_dlkm", "odm_dlkm", "system_dlkm"]:
     key_name = part_name + "_base_fs_file"
     if key_name not in d:
       continue
@@ -923,9 +934,9 @@ class PartitionBuildProps(object):
   def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4):
     """Loads the build.prop file and builds the attributes."""
 
-    if name == "boot":
+    if name in ("boot", "init_boot"):
       data = PartitionBuildProps._ReadBootPropFile(
-          input_file, ramdisk_format=ramdisk_format)
+          input_file, name, ramdisk_format=ramdisk_format)
     else:
       data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)
@@ -934,15 +945,16 @@ class PartitionBuildProps(object):
     return props
 
   @staticmethod
-  def _ReadBootPropFile(input_file, ramdisk_format):
+  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
     """
     Read build.prop for boot image from input_file.
     Return empty string if not found.
     """
+    image_path = 'IMAGES/' + partition_name + '.img'
     try:
-      boot_img = ExtractFromInputFile(input_file, 'IMAGES/boot.img')
+      boot_img = ExtractFromInputFile(input_file, image_path)
     except KeyError:
-      logger.warning('Failed to read IMAGES/boot.img')
+      logger.warning('Failed to read %s', image_path)
       return ''
     prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
     if prop_file is None:
@@ -964,6 +976,8 @@ class PartitionBuildProps(object):
         break
       except KeyError:
         logger.warning('Failed to read %s', prop_file)
+    if data == '':
+      logger.warning("Failed to read build.prop for partition {}".format(name))
     return data
 
   @staticmethod
@@ -1008,7 +1022,8 @@ class PartitionBuildProps(object):
     import_path = tokens[1]
     if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
-      raise ValueError('Unrecognized import path {}'.format(line))
+      logger.warn('Unrecognized import path {}'.format(line))
+      return {}
 
     # We only recognize a subset of import statement that the init process
     # supports. And we can loose the restriction based on how the dynamic
@@ -1172,10 +1187,14 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
     return " ".join(sorted(combined))
 
   if (framework_dict.get("use_dynamic_partitions") !=
-          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+      "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
     raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
 
   merged_dict = {"use_dynamic_partitions": "true"}
+  # For keys-value pairs that are the same, copy to merged dict
+  for key in vendor_dict.keys():
+    if key in framework_dict and framework_dict[key] == vendor_dict[key]:
+      merged_dict[key] = vendor_dict[key]
 
   merged_dict["dynamic_partition_list"] = uniq_concat(
       framework_dict.get("dynamic_partition_list", ""),
@@ -1229,6 +1248,7 @@ def PartitionMapFromTargetFiles(target_files_dir):
           "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
+     "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
@@ -1380,74 +1400,51 @@ def GetAvbChainedPartitionArg(partition, info_dict, key=None):
   return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
 
 
-def ConstructAftlMakeImageCommands(output_image):
-  """Constructs the command to append the aftl image to vbmeta."""
-
-  # Ensure the other AFTL parameters are set as well.
-  assert OPTIONS.aftl_tool_path is not None, 'No aftl tool provided.'
-  assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
-  assert OPTIONS.aftl_manufacturer_key_path is not None, \
-      'No AFTL manufacturer key provided.'
-
-  vbmeta_image = MakeTempFile()
-  os.rename(output_image, vbmeta_image)
-  build_info = BuildInfo(OPTIONS.info_dict, use_legacy_id=True)
-  version_incremental = build_info.GetBuildProp("ro.build.version.incremental")
-  aftltool = OPTIONS.aftl_tool_path
-  server_argument_list = [OPTIONS.aftl_server, OPTIONS.aftl_key_path]
-  aftl_cmd = [aftltool, "make_icp_from_vbmeta",
-              "--vbmeta_image_path", vbmeta_image,
-              "--output", output_image,
-              "--version_incremental", version_incremental,
-              "--transparency_log_servers", ','.join(server_argument_list),
-              "--manufacturer_key", OPTIONS.aftl_manufacturer_key_path,
-              "--algorithm", "SHA256_RSA4096",
-              "--padding", "4096"]
-  if OPTIONS.aftl_signer_helper:
-    aftl_cmd.extend(shlex.split(OPTIONS.aftl_signer_helper))
-  return aftl_cmd
-
+def _HasGkiCertificationArgs():
+  return ("gki_signing_key_path" in OPTIONS.info_dict and
+          "gki_signing_algorithm" in OPTIONS.info_dict)
 
-def AddAftlInclusionProof(output_image):
-  """Appends the aftl inclusion proof to the vbmeta image."""
-
-  aftl_cmd = ConstructAftlMakeImageCommands(output_image)
-  RunAndCheckOutput(aftl_cmd)
-
-  verify_cmd = ['aftltool', 'verify_image_icp', '--vbmeta_image_path',
-                output_image, '--transparency_log_pub_keys',
-                OPTIONS.aftl_key_path]
-  RunAndCheckOutput(verify_cmd)
-
-
-def AppendGkiSigningArgs(cmd):
-  """Append GKI signing arguments for mkbootimg."""
-  # e.g., --gki_signing_key path/to/signing_key
-  #       --gki_signing_algorithm SHA256_RSA4096"
+def _GenerateGkiCertificate(image, image_name):
   key_path = OPTIONS.info_dict.get("gki_signing_key_path")
-  # It's fine that a non-GKI boot.img has no gki_signing_key_path.
-  if not key_path:
-    return
+  algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
 
   if not os.path.exists(key_path) and OPTIONS.search_path:
     new_key_path = os.path.join(OPTIONS.search_path, key_path)
     if os.path.exists(new_key_path):
       key_path = new_key_path
 
-  # Checks key_path exists, before appending --gki_signing_* args.
+  # Checks key_path exists, before processing --gki_signing_* args.
   if not os.path.exists(key_path):
     raise ExternalError(
         'gki_signing_key_path: "{}" not found'.format(key_path))
 
-  algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
-  if key_path and algorithm:
-    cmd.extend(["--gki_signing_key", key_path,
-                "--gki_signing_algorithm", algorithm])
+  output_certificate = tempfile.NamedTemporaryFile()
+  cmd = [
+      "generate_gki_certificate",
+      "--name", image_name,
+      "--algorithm", algorithm,
+      "--key", key_path,
+      "--output", output_certificate.name,
+      image,
+  ]
+
+  signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "")
+  signature_args = signature_args.strip()
+  if signature_args:
+    cmd.extend(["--additional_avb_args", signature_args])
+
+  args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
+  args = args.strip()
+  if args:
+    cmd.extend(["--additional_avb_args", args])
 
-  signature_args = OPTIONS.info_dict.get("gki_signing_signature_args")
-  if signature_args:
-    cmd.extend(["--gki_signing_signature_args", signature_args])
+  RunAndCheckOutput(cmd)
+
+  output_certificate.seek(os.SEEK_SET, 0)
+  data = output_certificate.read()
+  output_certificate.close()
+  return data
 
 
 def BuildVBMeta(image_path, partitions, name, needed_partitions):
@@ -1513,10 +1510,6 @@ def BuildVBMeta(image_path, partitions, name, needed_partitions):
 
   RunAndCheckOutput(cmd)
 
-  # Generate the AFTL inclusion proof.
-  if OPTIONS.aftl_server is not None:
-    AddAftlInclusionProof(image_path)
 
 
 def _MakeRamdisk(sourcedir, fs_config_file=None,
                  ramdisk_format=RamdiskFormat.GZ):
@@ -1569,12 +1562,16 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
       logger.info("Excluded kernel binary from recovery image.")
     else:
       kernel = "kernel"
+  elif partition_name == "init_boot":
+    pass
   else:
     kernel = image_name.replace("boot", "kernel")
     kernel = kernel.replace(".img", "")
     if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
       return None
 
+  kernel_path = os.path.join(sourcedir, kernel) if kernel else None
+
   if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
     return None
 
@@ -1589,8 +1586,8 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
 
   cmd = [mkbootimg]
-  if kernel:
-    cmd += ["--kernel", os.path.join(sourcedir, kernel)]
+  if kernel_path is not None:
+    cmd.extend(["--kernel", kernel_path])
 
   fn = os.path.join(sourcedir, "second")
   if os.access(fn, os.F_OK):
@@ -1623,6 +1620,8 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
       # Fall back to "mkbootimg_args" for recovery image
      # in case "recovery_mkbootimg_args" is not set.
       args = info_dict.get("mkbootimg_args")
+  elif partition_name == "init_boot":
+    args = info_dict.get("mkbootimg_init_args")
   else:
     args = info_dict.get("mkbootimg_args")
   if args and args.strip():
@@ -1635,8 +1634,6 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
   if has_ramdisk:
     cmd.extend(["--ramdisk", ramdisk_img.name])
 
-  AppendGkiSigningArgs(cmd)
-
   img_unsigned = None
   if info_dict.get("vboot"):
     img_unsigned = tempfile.NamedTemporaryFile()
@@ -1654,6 +1651,29 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
 
   RunAndCheckOutput(cmd)
 
+  if _HasGkiCertificationArgs():
+    if not os.path.exists(img.name):
+      raise ValueError("Cannot find GKI boot.img")
+    if kernel_path is None or not os.path.exists(kernel_path):
+      raise ValueError("Cannot find GKI kernel.img")
+
+    # Certify GKI images.
+    boot_signature_bytes = b''
+    boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
+    boot_signature_bytes += _GenerateGkiCertificate(
+        kernel_path, "generic_kernel")
+
+    BOOT_SIGNATURE_SIZE = 16 * 1024
+    if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
+      raise ValueError(
+          f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
+    boot_signature_bytes += (
+        b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
+    assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE
+
+    with open(img.name, 'ab') as f:
+      f.write(boot_signature_bytes)
+
   if (info_dict.get("boot_signer") == "true" and
       info_dict.get("verity_key")):
     # Hard-code the path as "/boot" for two-step special recovery image (which
@@ -1724,8 +1744,8 @@ def _SignBootableImage(image_path, prebuilt_name, partition_name,
   Args:
     image_path: The full path of the image, e.g., /path/to/boot.img.
     prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
-      boot-5.10.img, recovery.img.
-    partition_name: The partition name, e.g., 'boot' or 'recovery'.
+      boot-5.10.img, recovery.img or init_boot.img.
+    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
     info_dict: The information dict read from misc_info.txt.
   """
   if info_dict is None:
@@ -1749,6 +1769,38 @@ def _SignBootableImage(image_path, prebuilt_name, partition_name,
   RunAndCheckOutput(cmd)
 
 
+def HasRamdisk(partition_name, info_dict=None):
+  """Returns true/false to see if a bootable image should have a ramdisk.
+
+  Args:
+    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
+    info_dict: The information dict read from misc_info.txt.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  if partition_name != "boot":
+    return True  # init_boot.img or recovery.img has a ramdisk.
+
+  if info_dict.get("recovery_as_boot") == "true":
+    return True  # the recovery-as-boot boot.img has a RECOVERY ramdisk.
+
+  if info_dict.get("gki_boot_image_without_ramdisk") == "true":
+    return False  # A GKI boot.img has no ramdisk since Android-13.
+
+  if info_dict.get("system_root_image") == "true":
+    # The ramdisk content is merged into the system.img, so there is NO
+    # ramdisk in the boot.img or boot-<kernel version>.img.
+    return False
+
+  if info_dict.get("init_boot") == "true":
+    # The ramdisk is moved to the init_boot.img, so there is NO
+    # ramdisk in the boot.img or boot-<kernel version>.img.
+    return False
+
+  return True
+
+
 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                      info_dict=None, two_step_image=False):
   """Return a File object with the desired bootable image.
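Note: the ramdisk decision that GetBootableImage previously computed inline is now centralized in the new HasRamdisk() helper. A usage sketch against a hand-built info dict follows; the key values are hypothetical and chosen only to exercise the branches.

# Illustration only: exercising HasRamdisk() from this change with a
# minimal, hypothetical info dict.
info = {"init_boot": "true"}
assert HasRamdisk("init_boot", info)   # the ramdisk now lives in init_boot.img
assert not HasRamdisk("boot", info)    # so the matching boot.img carries none
assert HasRamdisk("recovery", {})      # recovery images always carry a ramdisk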
@@ -1770,23 +1822,18 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
     logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
     return File.FromLocalFile(name, prebuilt_path)
 
+  partition_name = tree_subdir.lower()
   prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
     logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
     signed_img = MakeTempFile()
     shutil.copy(prebuilt_path, signed_img)
-    partition_name = tree_subdir.lower()
     _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
     return File.FromLocalFile(name, signed_img)
 
   logger.info("building image from target_files %s...", tree_subdir)
 
-  # With system_root_image == "true", we don't pack ramdisk into the boot image.
-  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
-  # for recovery.
-  has_ramdisk = (info_dict.get("system_root_image") != "true" or
-                 prebuilt_name != "boot.img" or
-                 info_dict.get("recovery_as_boot") == "true")
+  has_ramdisk = HasRamdisk(partition_name, info_dict)
 
   fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
   data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
@@ -1797,7 +1844,7 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
   return None
 
 
-def _BuildVendorBootImage(sourcedir, info_dict=None):
+def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None):
   """Build a vendor boot image from the specified sourcedir.
 
   Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
@@ -1822,8 +1869,13 @@ def _BuildVendorBootImage(sourcedir, info_dict=None):
 
   fn = os.path.join(sourcedir, "dtb")
   if os.access(fn, os.F_OK):
-    cmd.append("--dtb")
-    cmd.append(fn)
+    has_vendor_kernel_boot = (info_dict.get("vendor_kernel_boot", "").lower() == "true")
+
+    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
+    # Otherwise pack dtb into vendor_boot.
+    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
+      cmd.append("--dtb")
+      cmd.append(fn)
 
   fn = os.path.join(sourcedir, "vendor_cmdline")
   if os.access(fn, os.F_OK):
@@ -1883,11 +1935,11 @@ def _BuildVendorBootImage(sourcedir, info_dict=None):
   # AVB: if enabled, calculate and add hash.
   if info_dict.get("avb_enable") == "true":
     avbtool = info_dict["avb_avbtool"]
-    part_size = info_dict["vendor_boot_size"]
+    part_size = info_dict[f'{partition_name}_size']
     cmd = [avbtool, "add_hash_footer", "--image", img.name,
-           "--partition_size", str(part_size), "--partition_name", "vendor_boot"]
-    AppendAVBSigningArgs(cmd, "vendor_boot")
-    args = info_dict.get("avb_vendor_boot_add_hash_footer_args")
+           "--partition_size", str(part_size), "--partition_name", partition_name]
+    AppendAVBSigningArgs(cmd, partition_name)
+    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
     if args and args.strip():
       cmd.extend(shlex.split(args))
     RunAndCheckOutput(cmd)
@@ -1921,7 +1973,31 @@ def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
     info_dict = OPTIONS.info_dict
 
   data = _BuildVendorBootImage(
-      os.path.join(unpack_dir, tree_subdir), info_dict)
+      os.path.join(unpack_dir, tree_subdir), "vendor_boot", info_dict)
+  if data:
+    return File(name, data)
+  return None
+
+
+def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
+                             info_dict=None):
+  """Return a File object with the desired vendor kernel boot image.
+
+  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
+  the source files in 'unpack_dir'/'tree_subdir'."""
+
+  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
+  if os.path.exists(prebuilt_path):
+    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
+    return File.FromLocalFile(name, prebuilt_path)
+
+  logger.info("building image from target_files %s...", tree_subdir)
+
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  data = _BuildVendorBootImage(
+      os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info_dict)
   if data:
     return File(name, data)
   return None
@@ -1960,14 +2036,14 @@ def UnzipToDir(filename, dirname, patterns=None):
   RunAndCheckOutput(cmd)
 
 
-def UnzipTemp(filename, pattern=None):
+def UnzipTemp(filename, patterns=None):
   """Unzips the given archive into a temporary directory and returns the name.
 
   Args:
     filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
      a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
 
-    pattern: Files to unzip from the archive. If omitted, will unzip the entire
+    patterns: Files to unzip from the archive. If omitted, will unzip the entire
      archvie.
 
   Returns:
@@ -1977,11 +2053,11 @@ def UnzipTemp(filename, pattern=None):
   tmp = MakeTempDir(prefix="targetfiles-")
   m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
   if m:
-    UnzipToDir(m.group(1), tmp, pattern)
-    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
+    UnzipToDir(m.group(1), tmp, patterns)
+    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
     filename = m.group(1)
   else:
-    UnzipToDir(filename, tmp, pattern)
+    UnzipToDir(filename, tmp, patterns)
 
   return tmp
 
@@ -2018,6 +2094,8 @@ def GetUserImage(which, tmpdir, input_zip,
     info_dict = LoadInfoDict(input_zip)
 
   is_sparse = info_dict.get("extfs_sparse_flag")
+  if info_dict.get(which + "_disable_sparse"):
+    is_sparse = False
 
   # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
   # shared blocks (i.e. some blocks will show up in multiple files' block
@@ -2138,9 +2216,11 @@ def GetKeyPasswords(keylist):
   need_passwords = []
   key_passwords = {}
   devnull = open("/dev/null", "w+b")
-  for k in sorted(keylist):
+
+  # sorted() can't compare strings to None, so convert Nones to strings
+  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
     # We don't need a password for things that aren't really keys.
-    if k in SPECIAL_CERT_STRINGS:
+    if k in SPECIAL_CERT_STRINGS or k is None:
      no_passwords.append(k)
      continue
@@ -2181,8 +2261,8 @@ def GetKeyPasswords(keylist):
 def GetMinSdkVersion(apk_name):
   """Gets the minSdkVersion declared in the APK.
 
-  It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
-  This can be both a decimal number (API Level) or a codename.
+  It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given
+  APK file. This can be both a decimal number (API Level) or a codename.
 
   Args:
    apk_name: The APK filename.
@@ -2194,13 +2274,13 @@ def GetMinSdkVersion(apk_name):
    ExternalError: On failing to obtain the min SDK version.
""" proc = Run( - ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE, + [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutdata, stderrdata = proc.communicate() if proc.returncode != 0: raise ExternalError( - "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format( - proc.returncode, stdoutdata, stderrdata)) + "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format( + apk_name, proc.returncode, stdoutdata, stderrdata)) for line in stdoutdata.split("\n"): # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'. @@ -2470,13 +2550,11 @@ def ParseOptions(argv, opts, args = getopt.getopt( argv, "hvp:s:x:" + extra_opts, ["help", "verbose", "path=", "signapk_path=", - "signapk_shared_library_path=", "extra_signapk_args=", + "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=", "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=", "private_key_suffix=", "boot_signer_path=", "boot_signer_args=", "verity_signer_path=", "verity_signer_args=", "device_specific=", - "extra=", "logfile=", "aftl_tool_path=", "aftl_server=", - "aftl_key_path=", "aftl_manufacturer_key_path=", - "aftl_signer_helper="] + list(extra_long_opts)) + "extra=", "logfile="] + list(extra_long_opts)) except getopt.GetoptError as err: Usage(docstring) print("**", str(err), "**") @@ -2496,6 +2574,8 @@ def ParseOptions(argv, OPTIONS.signapk_shared_library_path = a elif o in ("--extra_signapk_args",): OPTIONS.extra_signapk_args = shlex.split(a) + elif o in ("--aapt2_path",): + OPTIONS.aapt2_path = a elif o in ("--java_path",): OPTIONS.java_path = a elif o in ("--java_args",): @@ -2514,16 +2594,6 @@ def ParseOptions(argv, OPTIONS.verity_signer_path = a elif o in ("--verity_signer_args",): OPTIONS.verity_signer_args = shlex.split(a) - elif o in ("--aftl_tool_path",): - OPTIONS.aftl_tool_path = a - elif o in ("--aftl_server",): - OPTIONS.aftl_server = a - elif o in ("--aftl_key_path",): - OPTIONS.aftl_key_path = a - elif o in ("--aftl_manufacturer_key_path",): - OPTIONS.aftl_manufacturer_key_path = a - elif o in ("--aftl_signer_helper",): - OPTIONS.aftl_signer_helper = a elif o in ("-s", "--device_specific"): OPTIONS.device_specific = a elif o in ("-x", "--extra"): @@ -2783,6 +2853,9 @@ def ZipDelete(zip_filename, entries): """ if isinstance(entries, str): entries = [entries] + # If list is empty, nothing to do + if not entries: + return cmd = ["zip", "-d", zip_filename] + entries RunAndCheckOutput(cmd) @@ -2991,7 +3064,7 @@ class Difference(object): th.join() if p.returncode != 0: - logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err)) + logger.warning("Failure running %s:\n%s\n", cmd, "".join(err)) self.patch = None return None, None, None diff = ptemp.read() @@ -3898,12 +3971,17 @@ def GetCareMap(which, imgname): if not image_size: return None + disable_sparse = OPTIONS.info_dict.get(which + "_disable_sparse") + image_blocks = int(image_size) // 4096 - 1 - assert image_blocks > 0, "blocks for {} must be positive".format(which) + # It's OK for image_blocks to be 0, because care map ranges are inclusive. + # So 0-0 means "just block 0", which is valid. + assert image_blocks >= 0, "blocks for {} must be non-negative, image size: {}".format( + which, image_size) # For sparse images, we will only check the blocks that are listed in the care # map, i.e. the ones with meaningful data. 
- if "extfs_sparse_flag" in OPTIONS.info_dict: + if "extfs_sparse_flag" in OPTIONS.info_dict and not disable_sparse: simg = sparse_img.SparseImage(imgname) care_map_ranges = simg.care_map.intersect( rangelib.RangeSet("0-{}".format(image_blocks))) @@ -3996,3 +4074,10 @@ def AddCareMapForAbOta(output_file, ab_partitions, image_paths): OPTIONS.replace_updated_files_list.append(care_map_path) else: ZipWrite(output_file, temp_care_map, arcname=care_map_path) + + +def IsSparseImage(filepath): + with open(filepath, 'rb') as fp: + # Magic for android sparse image format + # https://source.android.com/devices/bootloader/images + return fp.read(4) == b'\x3A\xFF\x26\xED' |