| #! /usr/bin/env python3 |
| # |
| # Copyright 2020 The Android Open Source Project |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| |
| # Regenerate some ART test-related files. |
| |
| # This script handles only a subset of ART run-tests at the moment; additional |
| # cases will be added later. |
| |
| import argparse |
| import copy |
| import collections |
| import itertools |
| import json |
| import logging |
| import os |
| import re |
| import sys |
| import textwrap |
| import xml.dom.minidom |
| |
| logging.basicConfig(format='%(levelname)s: %(message)s') |
| |
| ME = os.path.basename(sys.argv[0]) |
| |
| # Common advisory placed at the top of all generated files. |
| ADVISORY = f"Generated by `{ME}`. Do not edit manually." |
| |
| # Default indentation unit. |
| INDENT = " " |
| |
| # Indentation unit for XML files. |
| XML_INDENT = " " |
| |
| def reindent(str, indent = ""): |
| """Reindent literal string while removing common leading spaces.""" |
| return textwrap.indent(textwrap.dedent(str), indent) |
| |
| def copyright_header_text(year): |
| """Return the copyright header text used in XML files.""" |
| return reindent(f"""\ |
| Copyright (C) {year} The Android Open Source Project |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| """, " ") |
| |
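| # For example, split_list([1, 2, 3, 4, 5, 6], 2) returns [[1, 2, 3], [4, 5, 6]]. |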
| def split_list(l, n): |
| """Return a list of `n` sublists of (contiguous) elements of list `l`.""" |
| assert n > 0 |
| (d, m) = divmod(len(l), n) |
| # If the length of `l` is evenly divisible by `n`, use that quotient (`d`) as the size of each |
| # sublist; otherwise, use the next integer value (`d + 1`). |
| s = d if m == 0 else d + 1 |
| result = [l[i:i + s] for i in range(0, len(l), s)] |
| assert len(result) == n |
| return result |
| |
| # The prefix used in the Soong module name of all ART run-tests. |
| ART_RUN_TEST_MODULE_NAME_PREFIX = "art-run-test-" |
| |
| # Number of shards used to declare ART run-tests in the sharded ART MTS test plan. |
| NUM_MTS_ART_RUN_TEST_SHARDS = 1 |
| |
| # Curated list of tests that have a custom `run` script, but that are |
| # known to work fine with the default test execution strategy (i.e. |
| # when ignoring their `run` script), even if not exactly as they would |
| # with the original ART run-test harness. |
| runnable_test_exceptions = frozenset([ |
| "055-enum-performance", |
| "059-finalizer-throw", |
| "080-oom-throw", |
| "1004-checker-volatile-ref-load", |
| "133-static-invoke-super", |
| "1338-gc-no-los", |
| "159-app-image-fields", |
| "160-read-barrier-stress", |
| "163-app-image-methods", |
| "165-lock-owner-proxy", |
| "168-vmstack-annotated", |
| "176-app-image-string", |
| "304-method-tracing", |
| "628-vdex", |
| "643-checker-bogus-ic", |
| "676-proxy-jit-at-first-use", |
| "677-fsi2", |
| "678-quickening", |
| "818-clinit-nterp", |
| "821-madvise-willneed", |
| ]) |
| |
| known_slow_tests = frozenset([ |
| "175-alloc-big-bignums", |
| ]) |
| |
| # Known failing ART run-tests. |
| # TODO(rpl): Investigate and address the causes of failures. |
| known_failing_tests = frozenset([ |
| "004-SignalTest", |
| "004-UnsafeTest", |
| "051-thread", |
| "086-null-super", |
| "087-gc-after-link", |
| # 1002-notify-startup: Dependency on `libarttest` + custom `check` script. |
| "1002-notify-startup", |
| "1337-gc-coverage", |
| "1339-dead-reference-safe", |
| "136-daemon-jni-shutdown", |
| "139-register-natives", |
| "148-multithread-gc-annotations", |
| "149-suspend-all-stress", |
| "150-loadlibrary", |
| "154-gc-loop", |
| "169-threadgroup-jni", |
| "177-visibly-initialized-deadlock", |
| "179-nonvirtual-jni", |
| "1945-proxy-method-arguments", |
| "2011-stack-walk-concurrent-instrument", |
| # 2040-huge-native-alloc: Fails with: |
| # |
| # Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0 |
| # at Main.main(Main.java:56) |
| # |
| "2040-huge-native-alloc", |
| "203-multi-checkpoint", |
| "2033-shutdown-mechanics", |
| "2036-jni-filechannel", |
| "2037-thread-name-inherit", |
| "2235-JdkUnsafeTest", |
| "305-other-fault-handler", |
| # 449-checker-bce: Dependency on `libarttest`. |
| "449-checker-bce", |
| "454-get-vreg", |
| "461-get-reference-vreg", |
| "466-get-live-vreg", |
| "497-inlining-and-class-loader", |
| "530-regression-lse", |
| "555-UnsafeGetLong-regression", |
| # 596-monitor-inflation: Dependency on `libarttest`. |
| "596-monitor-inflation", |
| "602-deoptimizeable", |
| "604-hot-static-interface", |
| "616-cha-native", |
| "616-cha-regression-proxy-method", |
| # 623-checker-loop-regressions: Dependency on `libarttest`. |
| "623-checker-loop-regressions", |
| "626-set-resolved-string", |
| "642-fp-callees", |
| "647-jni-get-field-id", |
| "655-jit-clinit", |
| "656-loop-deopt", |
| "664-aget-verifier", |
| # 680-checker-deopt-dex-pc-0: Dependency on `libarttest`. |
| "680-checker-deopt-dex-pc-0", |
| "685-deoptimizeable", |
| "687-deopt", |
| "693-vdex-inmem-loader-evict", |
| "708-jit-cache-churn", |
| # 716-jli-jit-samples: Dependency on `libarttest`. |
| "716-jli-jit-samples", |
| "717-integer-value-of", |
| "720-thread-priority", |
| # 730-cha-deopt: Fails with: |
| # |
| # Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0 |
| # at Main.main(Main.java:24) |
| # |
| "730-cha-deopt", |
| # 813-fp-args: Dependency on `libarttest`. |
| "813-fp-args", |
| # 821-many-args: Fails with: |
| # |
| # Test command execution failed with status FAILED: CommandResult: exit code=1, out=, err=Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: length=0; index=0 |
| # at Main.main(Main.java:20) |
| # |
| "821-many-args", |
| # 823-cha-inlining: Dependency on `libarttest`. |
| "823-cha-inlining", |
| # 826-infinite-loop: The test expects an argument passed to `Main.main` (the test library, |
| # usually `arttestd` or `arttest`), but the ART run-test TradeFed test runner |
| # (`com.android.tradefed.testtype.ArtRunTest`) does not implement this yet. |
| "826-infinite-loop", |
| # 832-cha-recursive: Dependency on `libarttest`. |
| "832-cha-recursive", |
| # 837-deopt: Dependency on `libarttest`. |
| "837-deopt", |
| # 844-exception: Dependency on `libarttest`. |
| "844-exception", |
| # 844-exception2: Dependency on `libarttest`. |
| "844-exception2", |
| # 966-default-conflict: Dependency on `libarttest`. |
| "966-default-conflict", |
| # These tests need native code. |
| "993-breakpoints-non-debuggable", |
| "2243-single-step-default", |
| "2262-miranda-methods", |
| "2262-default-conflict-methods", |
| ]) |
| |
| # These ART run-tests are new and have not had enough post-submit runs |
| # to meet pre-submit SLOs. Monitor their post-submit runs before |
| # removing them from this set (in order to promote them to |
| # presubmits). |
| postsubmit_only_tests = frozenset([ |
| "851-null-instanceof", |
| "853-checker-inlining", |
| "2266-checker-remove-empty-ifs", |
| "2268-checker-remove-dead-phis", |
| "2269-checker-constant-folding-instrinsics", |
| "2247-checker-write-barrier-elimination", |
| ]) |
| |
| known_failing_on_hwasan_tests = frozenset([ |
| "CtsJdwpTestCases", # times out |
| ]) |
| |
| # ART gtests that do not need root access to the device. |
| art_gtest_user_module_names = [ |
| "art_libnativebridge_cts_tests", |
| "art_standalone_artd_tests", |
| "art_standalone_cmdline_tests", |
| "art_standalone_compiler_tests", |
| "art_standalone_dex2oat_tests", |
| "art_standalone_dexdump_tests", |
| "art_standalone_dexlist_tests", |
| "art_standalone_dexopt_chroot_setup_tests", |
| "art_standalone_libartbase_tests", |
| "art_standalone_libartpalette_tests", |
| "art_standalone_libartservice_tests", |
| "art_standalone_libarttools_tests", |
| "art_standalone_libdexfile_support_tests", |
| "art_standalone_libdexfile_tests", |
| "art_standalone_libprofile_tests", |
| "art_standalone_oatdump_tests", |
| "art_standalone_odrefresh_tests", |
| "art_standalone_runtime_tests", |
| "art_standalone_sigchain_tests", |
| "libnativeloader_test", |
| ] |
| |
| # ART gtests that need root access to the device. |
| art_gtest_eng_only_module_names = [ |
| "art_standalone_dexoptanalyzer_tests", |
| "art_standalone_profman_tests", |
| "libnativeloader_e2e_tests", |
| ] |
| |
| # All supported ART gtests. |
| art_gtest_module_names = sorted(art_gtest_user_module_names + art_gtest_eng_only_module_names) |
| |
| # These ART gtests are new and have not had enough post-submit runs |
| # to meet pre-submit SLOs. Monitor their post-submit runs before |
| # removing them from this set (in order to promote them to |
| # presubmits). |
| art_gtest_postsubmit_only_module_names = [ |
| "art_standalone_dexopt_chroot_setup_tests", |
| ] |
| |
| # ART gtests supported in MTS that do not need root access to the device. |
| art_gtest_mts_user_module_names = copy.copy(art_gtest_user_module_names) |
| |
| # ART gtests supported in presubmits. |
| art_gtest_presubmit_module_names = [t for t in art_gtest_module_names |
| if t not in art_gtest_postsubmit_only_module_names] |
| |
| # ART gtests supported in Mainline presubmits. |
| art_gtest_mainline_presubmit_module_names = copy.copy(art_gtest_presubmit_module_names) |
| |
| # Sanity check: every ART gtest listed as postsubmit-only must be a known ART gtest module. |
| unknown_art_gtest_postsubmit_only_module_names = [t for t in art_gtest_postsubmit_only_module_names |
| if t not in art_gtest_module_names] |
| if unknown_art_gtest_postsubmit_only_module_names: |
| logging.error(textwrap.dedent("""\ |
| The following `art_gtest_postsubmit_only_module_names` elements are not part of |
| `art_gtest_module_names`: """) + str(unknown_art_gtest_postsubmit_only_module_names)) |
| sys.exit(1) |
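| # ART gtests supported in postsubmits. |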
| art_gtest_postsubmit_module_names = copy.copy(art_gtest_postsubmit_only_module_names) |
| |
| # Tests exhibiting flaky behavior, currently excluded from MTS for |
| # the sake of stability / confidence (b/209958457). |
| flaky_tests_excluded_from_mts = { |
| "CtsLibcoreFileIOTestCases": [ |
| ("android.cts.FileChannelInterProcessLockTest#" + m) for m in [ |
| "test_lockJJZ_Exclusive_asyncChannel", |
| "test_lockJJZ_Exclusive_syncChannel", |
| "test_lock_differentChannelTypes", |
| "test_lockJJZ_Shared_asyncChannel", |
| "test_lockJJZ_Shared_syncChannel", |
| ] |
| ], |
| "CtsLibcoreTestCases": [ |
| ("com.android.org.conscrypt.javax.net.ssl.SSLSocketVersionCompatibilityTest#" + m + c) |
| for (m, c) in itertools.product( |
| [ |
| "test_SSLSocket_interrupt_read_withoutAutoClose", |
| "test_SSLSocket_setSoWriteTimeout", |
| ], |
| [ |
| "[0: TLSv1.2 client, TLSv1.2 server]", |
| "[1: TLSv1.2 client, TLSv1.3 server]", |
| "[2: TLSv1.3 client, TLSv1.2 server]", |
| "[3: TLSv1.3 client, TLSv1.3 server]", |
| ] |
| ) |
| ] + [ |
| ("libcore.dalvik.system.DelegateLastClassLoaderTest#" + m) for m in [ |
| "testLookupOrderNodelegate_getResource", |
| "testLookupOrder_getResource", |
| ] |
| ] |
| } |
| |
| # Tests failing because of linking issues, currently excluded from MTS |
| # and Mainline presubmits to minimize noise in continuous runs while |
| # we investigate. |
| # |
| # TODO(b/247108425): Address the linking issues and re-enable these |
| # tests. |
| failing_tests_excluded_from_mts_and_mainline_presubmits = { |
| "art_standalone_compiler_tests": ["JniCompilerTest*"], |
| "art_standalone_libartpalette_tests": ["PaletteClientJniTest*"], |
| } |
| |
| # Is `run_test` a Checker test (i.e. a test containing Checker |
| # assertions)? |
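| # For example, `2266-checker-remove-empty-ifs` is a Checker test, while `051-thread` is not. |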
| def is_checker_test(run_test): |
| return re.match("^[0-9]+-checker-", run_test) |
| |
| |
| class Generator: |
| def __init__(self, top_dir): |
| """Generator of ART test files for an Android source tree anchored at `top_dir`.""" |
| # Path to the Android top source tree. |
| self.top_dir = top_dir |
| # Path to the ART directory. |
| self.art_dir = os.path.join(top_dir, "art") |
| # Path to the ART tests directory. |
| self.art_test_dir = os.path.join(self.art_dir, "test") |
| # Path to the MTS configuration directory. |
| self.mts_config_dir = os.path.join( |
| top_dir, "test", "mts", "tools", "mts-tradefed", "res", "config") |
| |
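| # Return the sorted list of ART run-tests, i.e. the entries of the ART tests directory whose |
| # names start with at least three digits followed by a dash (e.g. `055-enum-performance`). |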
| def enumerate_run_tests(self): |
| return sorted([run_test |
| for run_test in os.listdir(self.art_test_dir) |
| if re.match("^[0-9]{3,}-", run_test)]) |
| |
| # Return the metadata of a test, if any. |
| def get_test_metadata(self, run_test): |
| run_test_path = os.path.join(self.art_test_dir, run_test) |
| metadata_file = os.path.join(run_test_path, "test-metadata.json") |
| metadata = {} |
| if os.path.exists(metadata_file): |
| with open(metadata_file, "r") as f: |
| try: |
| metadata = json.load(f) |
| except json.decoder.JSONDecodeError: |
| logging.error(f"Unable to parse test metadata file `{metadata_file}`") |
| raise |
| return metadata |
| |
| # Can the build script of `run_test` be safely ignored? |
| def can_ignore_build_script(self, run_test): |
| # Check whether there are test metadata with build parameters |
| # enabling us to safely ignore the build script. |
| metadata = self.get_test_metadata(run_test) |
| build_param = metadata.get("build-param", {}) |
| # Ignore build scripts that merely prevent building for the JVM |
| # and/or request VarHandle support: Soong builds JARs with |
| # VarHandle support by default (i.e. by using an API level greater |
| # than or equal to 28), so build scripts that only request this |
| # feature can be safely ignored. |
| experimental_var_handles = {"experimental": "var-handles"} |
| jvm_supported_false = {"jvm-supported": "false"} |
| if (build_param == experimental_var_handles or |
| build_param == jvm_supported_false or |
| build_param == experimental_var_handles | jvm_supported_false): |
| return True |
| return False |
| |
| # Is building `run_test` supported? |
| # TODO(b/147814778): Add build support for more tests. |
| def is_buildable(self, run_test): |
| run_test_path = os.path.join(self.art_test_dir, run_test) |
| |
| # Skip tests with non-default build rules, unless these build |
| # rules can be safely ignored. |
| if (os.path.isfile(os.path.join(run_test_path, "generate-sources")) or |
| os.path.isfile(os.path.join(run_test_path, "javac_post.sh"))): |
| return False |
| if os.path.isfile(os.path.join(run_test_path, "build.py")): |
| if not self.can_ignore_build_script(run_test): |
| return False |
| # Skip tests with sources outside the `src` directory. |
| for subdir in ["jasmin", |
| "jasmin-multidex", |
| "smali", |
| "smali-ex", |
| "smali-multidex", |
| "src-aotex", |
| "src-bcpex", |
| "src-ex", |
| "src-ex2", |
| "src-multidex"]: |
| if os.path.isdir(os.path.join(run_test_path, subdir)): |
| return False |
| # Skip tests that have both an `src` directory and an `src-art` directory. |
| if os.path.isdir(os.path.join(run_test_path, "src")) and \ |
| os.path.isdir(os.path.join(run_test_path, "src-art")): |
| return False |
| # Skip tests that have neither an `src` directory nor an `src-art` directory. |
| if not os.path.isdir(os.path.join(run_test_path, "src")) and \ |
| not os.path.isdir(os.path.join(run_test_path, "src-art")): |
| return False |
| # Skip tests with a copy of `sun.misc.Unsafe`. |
| if os.path.isfile(os.path.join(run_test_path, "src", "sun", "misc", "Unsafe.java")): |
| return False |
| # Skip tests with Hidden API specs. |
| if os.path.isfile(os.path.join(run_test_path, "hiddenapi-flags.csv")): |
| return False |
| # All other tests are considered buildable. |
| return True |
| |
| # Can the run script of `run_test` be safely ignored? |
| def can_ignore_run_script(self, run_test): |
| # Unconditionally consider some identified tests that have a |
| # (not-yet-handled) custom `run` script as runnable. |
| # |
| # TODO(rpl): Get rid of this exception mechanism by supporting |
| # these tests' `run` scripts properly. |
| if run_test in runnable_test_exceptions: |
| return True |
| # Check whether there are test metadata with run parameters |
| # enabling us to safely ignore the run script. |
| metadata = self.get_test_metadata(run_test) |
| run_param = metadata.get("run-param", {}) |
| if run_param.get("default-run", ""): |
| return True |
| return False |
| |
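| # Generate a Soong list property of the given `library_type` (`libs` or `static_libs`) |
| # referencing `libraries`, or return an empty string if `libraries` is empty. |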
| def gen_libs_list_impl(self, library_type, libraries): |
| if len(libraries) == 0: |
| return "" |
| libraries_joined = """, |
| """.join(libraries) |
| return f""" |
| {library_type}: [ |
| {libraries_joined}, |
| ],""" |
| |
| def gen_libs_list(self, libraries): |
| return self.gen_libs_list_impl("libs", libraries); |
| |
| def gen_static_libs_list(self, libraries): |
| return self.gen_libs_list_impl("static_libs", libraries); |
| |
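| # Generate a `java_library` build rule named `name` for the sources in `src_dir`, depending on |
| # the given libraries. |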
| def gen_java_library_rule(self, name, src_dir, libraries): |
| return f"""\ |
| |
| |
| // Library with {src_dir}/ sources for the test. |
| java_library {{ |
| name: "{name}", |
| defaults: ["art-run-test-defaults"],{self.gen_libs_list(libraries)} |
| srcs: ["{src_dir}/**/*.java"], |
| }}""" |
| |
| # Is (successfully) running `run_test` supported? |
| # TODO(b/147812905): Add run-time support for more tests. |
| def is_runnable(self, run_test): |
| run_test_path = os.path.join(self.art_test_dir, run_test) |
| |
| # Skip tests with non-default run rules, unless these run rules |
| # can be safely ignored. |
| if os.path.isfile(os.path.join(run_test_path, "run.py")): |
| if not self.can_ignore_run_script(run_test): |
| return False |
| # Skip tests known to fail. |
| if run_test in known_failing_tests: |
| return False |
| # All other tests are considered runnable. |
| return True |
| |
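| # Is `run_test` in the list of known slow tests? |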
| def is_slow(self, run_test): |
| return run_test in known_slow_tests |
| |
| def regen_bp_files(self, run_tests, buildable_tests): |
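| """Regenerate the Blueprint files of ART run-tests, removing any stale ones first.""" |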
| for run_test in run_tests: |
| # Remove any previously generated file. |
| bp_file = os.path.join(self.art_test_dir, run_test, "Android.bp") |
| if os.path.exists(bp_file): |
| logging.debug(f"Removing `{bp_file}`.") |
| os.remove(bp_file) |
| |
| for run_test in buildable_tests: |
| self.regen_bp_file(run_test) |
| |
| def regen_bp_file(self, run_test): |
| """Regenerate Blueprint file for an ART run-test.""" |
| |
| run_test_path = os.path.join(self.art_test_dir, run_test) |
| bp_file = os.path.join(run_test_path, "Android.bp") |
| |
| # Optional test metadata (JSON file). |
| metadata = self.get_test_metadata(run_test) |
| |
| run_test_module_name = ART_RUN_TEST_MODULE_NAME_PREFIX + run_test |
| |
| # Set the test configuration template. |
| if self.is_runnable(run_test): |
| if "cts" in metadata.get("test_suites", []): |
| test_config_template = "art-run-test-target-cts-template" |
| elif self.is_slow(run_test): |
| test_config_template = "art-run-test-target-slow-template" |
| else: |
| test_config_template = "art-run-test-target-template" |
| else: |
| test_config_template = "art-run-test-target-no-test-suite-tag-template" |
| |
| # Define the `test_suites` property, if test suites are present in |
| # the test's metadata. |
| test_suites_list = metadata.get("test_suites") |
| test_suites_prop = "" |
| if test_suites_list: |
| test_suites_joined = """, |
| """.join([f"\"{s}\"" for s in test_suites_list]) |
| test_suites_prop = f"""\ |
| |
| test_suites: [ |
| {test_suites_joined}, |
| ],""" |
| |
| include_srcs_prop = "" |
| if is_checker_test(run_test): |
| include_srcs_prop = """\ |
| |
| // Include the Java source files in the test's artifacts, to make Checker assertions |
| // available to the TradeFed test runner. |
| include_srcs: true,""" |
| |
| # The default source directory is `src`, unless `src-art` exists. |
| if os.path.isdir(os.path.join(run_test_path, "src-art")): |
| source_dir = "src-art" |
| else: |
| source_dir = "src" |
| |
| src_library_rules = [] |
| test_libraries = [] |
| if os.path.isdir(os.path.join(run_test_path, "src2")): |
| test_library = f"{run_test_module_name}-{source_dir}" |
| src_library_rules.append(self.gen_java_library_rule(test_library, source_dir, test_libraries)) |
| test_libraries.append(f"\"{test_library}\"") |
| source_dir = "src2" |
| |
| with open(bp_file, "w") as f: |
| logging.debug(f"Writing `{bp_file}`.") |
| f.write(textwrap.dedent(f"""\ |
| // {ADVISORY} |
| |
| // Build rules for ART run-test `{run_test}`. |
| |
| package {{ |
| // See: http://go/android-license-faq |
| // A large-scale-change added 'default_applicable_licenses' to import |
| // all of the 'license_kinds' from "art_license" |
| // to get the below license kinds: |
| // SPDX-license-identifier-Apache-2.0 |
| default_applicable_licenses: ["art_license"], |
| }}{''.join(src_library_rules)} |
| |
| // Test's Dex code. |
| java_test {{ |
| name: "{run_test_module_name}", |
| defaults: ["art-run-test-defaults"], |
| test_config_template: ":{test_config_template}", |
| srcs: ["{source_dir}/**/*.java"],{self.gen_static_libs_list(test_libraries)} |
| data: [ |
| ":{run_test_module_name}-expected-stdout", |
| ":{run_test_module_name}-expected-stderr", |
| ],{test_suites_prop}{include_srcs_prop} |
| }} |
| """)) |
| |
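| # Append to the Blueprint file a genrule exposing the test's expected standard output or |
| # standard error file (`expected-stdout.txt` / `expected-stderr.txt`), which is listed in the |
| # `data` property of the `java_test` rule above. |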
| def add_expected_output_genrule(type_str): |
| type_str_long = "standard output" if type_str == "stdout" else "standard error" |
| in_file = os.path.join(run_test_path, f"expected-{type_str}.txt") |
| if os.path.islink(in_file): |
| # Genrules are sandboxed, so if we just added the symlink to the srcs list, it would |
| # be a dangling symlink in the sandbox. If we see a symlink, depend on the genrule |
| # of the test that the symlink points to, rather than on the symlink itself. |
| link_target = os.readlink(in_file) |
| basename = os.path.basename(in_file) |
| match = re.fullmatch(r'\.\./([a-zA-Z0-9_-]+)/' + re.escape(basename), link_target) |
| if not match: |
| sys.exit(f"Error: expected symlink to be '../something/{basename}', got {link_target}") |
| f.write(textwrap.dedent(f"""\ |
| |
| // Test's expected {type_str_long}. |
| genrule {{ |
| name: "{run_test_module_name}-expected-{type_str}", |
| out: ["{run_test_module_name}-expected-{type_str}.txt"], |
| srcs: [":{ART_RUN_TEST_MODULE_NAME_PREFIX}{match.group(1)}-expected-{type_str}"], |
| cmd: "cp -f $(in) $(out)", |
| }} |
| """)) |
| else: |
| f.write(textwrap.dedent(f"""\ |
| |
| // Test's expected {type_str_long}. |
| genrule {{ |
| name: "{run_test_module_name}-expected-{type_str}", |
| out: ["{run_test_module_name}-expected-{type_str}.txt"], |
| srcs: ["expected-{type_str}.txt"], |
| cmd: "cp -f $(in) $(out)", |
| }} |
| """)) |
| |
| add_expected_output_genrule("stdout") |
| add_expected_output_genrule("stderr") |
| |
| |
| def regen_test_mapping_file(self, art_run_tests): |
| """Regenerate ART's `TEST_MAPPING`.""" |
| |
| # See go/test-mapping#attributes and |
| # https://source.android.com/docs/core/tests/development/test-mapping |
| # for more information about Test Mapping test groups. |
| |
| # ART run-tests used in `*presubmit` test groups, used both in pre- and post-submit runs. |
| presubmit_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t |
| for t in art_run_tests |
| if t not in postsubmit_only_tests] |
| # ART run-tests used in the `postsubmit` test group, used in post-submit runs only. |
| postsubmit_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t |
| for t in art_run_tests |
| if t in postsubmit_only_tests] |
| |
| # Mainline presubmits. |
| mainline_presubmit_apex_suffix = "[com.google.android.art.apex]" |
| mainline_other_presubmit_tests = [] |
| mainline_presubmit_tests = (mainline_other_presubmit_tests + presubmit_run_test_module_names + |
| art_gtest_mainline_presubmit_module_names) |
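| # Each entry has the form `{"name": "<test>[com.google.android.art.apex]"}`; tests with known |
| # failing cases (see `failing_tests_excluded_from_mts_and_mainline_presubmits`) also get |
| # `exclude-filter` options for those cases. |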
| mainline_presubmit_tests_dict = [ |
| ({"name": t + mainline_presubmit_apex_suffix, |
| "options": [ |
| {"exclude-filter": e} |
| for e in failing_tests_excluded_from_mts_and_mainline_presubmits[t] |
| ]} |
| if t in failing_tests_excluded_from_mts_and_mainline_presubmits |
| else {"name": t + mainline_presubmit_apex_suffix}) |
| for t in mainline_presubmit_tests |
| ] |
| |
| # Android Virtualization Framework presubmits |
| avf_presubmit_tests = ["ComposHostTestCases"] |
| avf_presubmit_tests_dict = [{"name": t} for t in avf_presubmit_tests] |
| |
| # Presubmits. |
| other_presubmit_tests = [ |
| "ArtServiceTests", |
| "BootImageProfileTest", |
| "CtsJdwpTestCases", |
| "art-apex-update-rollback", |
| "art_standalone_dexpreopt_tests", |
| ] |
| presubmit_tests = (other_presubmit_tests + presubmit_run_test_module_names + |
| art_gtest_presubmit_module_names) |
| presubmit_tests_dict = [{"name": t} for t in presubmit_tests] |
| hwasan_presubmit_tests_dict = [{"name": t} for t in presubmit_tests |
| if t not in known_failing_on_hwasan_tests] |
| |
| # Postsubmits. |
| postsubmit_tests = postsubmit_run_test_module_names + art_gtest_postsubmit_module_names |
| postsubmit_tests_dict = [{"name": t} for t in postsubmit_tests] |
| |
| # Use an `OrderedDict` container to preserve the order in which items are inserted. |
| # Do not produce an entry for a test group if it is empty. |
| test_mapping_dict = collections.OrderedDict([ |
| (test_group_name, test_group_dict) |
| for (test_group_name, test_group_dict) |
| in [ |
| ("mainline-presubmit", mainline_presubmit_tests_dict), |
| ("presubmit", presubmit_tests_dict), |
| ("hwasan-presubmit", hwasan_presubmit_tests_dict), |
| ("avf-presubmit", avf_presubmit_tests_dict), |
| ("postsubmit", postsubmit_tests_dict), |
| ] |
| if test_group_dict |
| ]) |
| test_mapping_contents = json.dumps(test_mapping_dict, indent = INDENT) |
| |
| test_mapping_file = os.path.join(self.art_dir, "TEST_MAPPING") |
| with open(test_mapping_file, "w") as f: |
| logging.debug(f"Writing `{test_mapping_file}`.") |
| f.write(f"// {ADVISORY}\n") |
| f.write(test_mapping_contents) |
| f.write("\n") |
| |
| def create_mts_test_shard(self, description, tests, shard_num, copyright_year, comments = []): |
| """Factory method instantiating an `MtsTestShard`.""" |
| return self.MtsTestShard(self.mts_config_dir, |
| description, tests, shard_num, copyright_year, comments) |
| |
| class MtsTestShard: |
| """Class encapsulating data and generation logic for an ART MTS test shard.""" |
| |
| def __init__(self, mts_config_dir, description, tests, shard_num, copyright_year, comments): |
| self.mts_config_dir = mts_config_dir |
| self.description = description |
| self.tests = tests |
| self.shard_num = shard_num |
| self.copyright_year = copyright_year |
| self.comments = comments |
| |
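| # Zero-padded, two-digit shard identifier (e.g. "00"), used in test plan and test list names. |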
| def shard_id(self): |
| return f"{self.shard_num:02}" |
| |
| def test_plan_name(self): |
| return "mts-art-shard-" + self.shard_id() |
| |
| def test_list_name(self): |
| return "mts-art-tests-list-user-shard-" + self.shard_id() |
| |
| def regen_test_plan_file(self): |
| """Regenerate ART MTS test plan file shard (`mts-art-shard-<shard_num>.xml`).""" |
| root = xml.dom.minidom.Document() |
| |
| advisory_header = root.createComment(f" {ADVISORY} ") |
| root.appendChild(advisory_header) |
| copyright_header = root.createComment(copyright_header_text(self.copyright_year)) |
| root.appendChild(copyright_header) |
| |
| configuration = root.createElement("configuration") |
| root.appendChild(configuration) |
| configuration.setAttribute( |
| "description", |
| f"Run mts-art-shard-{self.shard_id()} from a preexisting MTS installation.") |
| |
| # Included XML files. |
| included_xml_files = ["mts", self.test_list_name()] |
| for xml_file in included_xml_files: |
| include = root.createElement("include") |
| include.setAttribute("name", xml_file) |
| configuration.appendChild(include) |
| |
| # Test plan name. |
| option = root.createElement("option") |
| option.setAttribute("name", "plan") |
| option.setAttribute("value", self.test_plan_name()) |
| configuration.appendChild(option) |
| |
| xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8") |
| |
| test_plan_file = os.path.join(self.mts_config_dir, self.test_plan_name() + ".xml") |
| with open(test_plan_file, "wb") as f: |
| logging.debug(f"Writing `{test_plan_file}`.") |
| f.write(xml_str) |
| |
| def regen_test_list_file(self): |
| """Regenerate ART MTS test list file (`mts-art-tests-list-user-shard-<shard_num>.xml`).""" |
| root = xml.dom.minidom.Document() |
| |
| advisory_header = root.createComment(f" {ADVISORY} ") |
| root.appendChild(advisory_header) |
| copyright_header = root.createComment(copyright_header_text(self.copyright_year)) |
| root.appendChild(copyright_header) |
| |
| configuration = root.createElement("configuration") |
| root.appendChild(configuration) |
| configuration.setAttribute( |
| "description", |
| f"List of ART MTS tests that do not need root access (shard {self.shard_id()})" |
| ) |
| |
| # Test declarations. |
| # ------------------ |
| |
| def append_test_declaration(test): |
| option = root.createElement("option") |
| option.setAttribute("name", "compatibility:include-filter") |
| option.setAttribute("value", test) |
| configuration.appendChild(option) |
| |
| test_declarations_comments = [self.description + "."] |
| test_declarations_comments.extend(self.comments) |
| for c in test_declarations_comments: |
| xml_comment = root.createComment(f" {c} ") |
| configuration.appendChild(xml_comment) |
| for t in self.tests: |
| append_test_declaration(t) |
| |
| # `MainlineTestModuleController` configurations. |
| # ---------------------------------------------- |
| |
| def append_module_controller_configuration(test): |
| option = root.createElement("option") |
| option.setAttribute("name", "compatibility:module-arg") |
| option.setAttribute("value", f"{test}:enable:true") |
| configuration.appendChild(option) |
| |
| module_controller_configuration_comments = [ |
| f"Enable MainlineTestModuleController for {self.description}."] |
| module_controller_configuration_comments.extend(self.comments) |
| for c in module_controller_configuration_comments: |
| xml_comment = root.createComment(f" {c} ") |
| configuration.appendChild(xml_comment) |
| for t in self.tests: |
| append_module_controller_configuration(t) |
| for t in self.tests: |
| if t in ["CtsLibcoreTestCases", "CtsLibcoreOjTestCases"]: |
| xml_comment = root.createComment( |
| " core-test-mode=mts tells ExpectationBasedFilter to exclude @NonMts Tests ") |
| configuration.appendChild(xml_comment) |
| option = root.createElement("option") |
| option.setAttribute("name", "compatibility:module-arg") |
| option.setAttribute("value", f"{t}:instrumentation-arg:core-test-mode:=mts") |
| configuration.appendChild(option) |
| |
| xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8") |
| |
| test_list_file = os.path.join(self.mts_config_dir, self.test_list_name() + ".xml") |
| with open(test_list_file, "wb") as f: |
| logging.debug(f"Writing `{test_list_file}`.") |
| f.write(xml_str) |
| |
| def regen_mts_art_tests_list_user_file(self, num_mts_art_run_test_shards): |
| """Regenerate ART MTS test list file (`mts-art-tests-list-user.xml`).""" |
| root = xml.dom.minidom.Document() |
| |
| advisory_header = root.createComment(f" {ADVISORY} ") |
| root.appendChild(advisory_header) |
| copyright_header = root.createComment(copyright_header_text(2020)) |
| root.appendChild(copyright_header) |
| |
| configuration = root.createElement("configuration") |
| root.appendChild(configuration) |
| configuration.setAttribute("description", "List of ART MTS tests that do not need root access.") |
| |
| # Included XML files. |
| for s in range(num_mts_art_run_test_shards): |
| include = root.createElement("include") |
| include.setAttribute("name", f"mts-art-tests-list-user-shard-{s:02}") |
| configuration.appendChild(include) |
| |
| def append_test_exclusion(test): |
| option = root.createElement("option") |
| option.setAttribute("name", "compatibility:exclude-filter") |
| option.setAttribute("value", test) |
| configuration.appendChild(option) |
| |
| # Excluded flaky tests. |
| xml_comment = root.createComment(" Excluded flaky tests (b/209958457). ") |
| configuration.appendChild(xml_comment) |
| for module in flaky_tests_excluded_from_mts: |
| for testcase in flaky_tests_excluded_from_mts[module]: |
| append_test_exclusion(f"{module} {testcase}") |
| |
| # Excluded failing tests. |
| xml_comment = root.createComment(" Excluded failing tests (b/247108425). ") |
| configuration.appendChild(xml_comment) |
| for module in failing_tests_excluded_from_mts_and_mainline_presubmits: |
| for testcase in failing_tests_excluded_from_mts_and_mainline_presubmits[module]: |
| append_test_exclusion(f"{module} {testcase}") |
| |
| xml_str = root.toprettyxml(indent = XML_INDENT, encoding = "utf-8") |
| |
| mts_art_tests_list_user_file = os.path.join(self.mts_config_dir, "mts-art-tests-list-user.xml") |
| with open(mts_art_tests_list_user_file, "wb") as f: |
| logging.debug(f"Writing `{mts_art_tests_list_user_file}`.") |
| f.write(xml_str) |
| |
| def regen_art_mts_files(self, art_run_tests): |
| """Regenerate ART MTS definition files.""" |
| |
| # Remove any previously generated ART MTS test plan shard (`mts-art-shard-[0-9]+.xml`) |
| # and any test list shard (`mts-art-tests-list-user-shard-[0-9]+.xml`). |
| old_test_plan_shards = sorted([ |
| test_plan_shard |
| for test_plan_shard in os.listdir(self.mts_config_dir) |
| if re.match(r"^mts-art-(tests-list-user-)?shard-[0-9]+\.xml$", test_plan_shard)]) |
| for shard in old_test_plan_shards: |
| shard_path = os.path.join(self.mts_config_dir, shard) |
| if os.path.exists(shard_path): |
| logging.debug(f"Removing `{shard_path}`.") |
| os.remove(shard_path) |
| |
| mts_test_shards = [] |
| |
| # ART run-tests shard(s). |
| art_run_test_module_names = [ART_RUN_TEST_MODULE_NAME_PREFIX + t for t in art_run_tests] |
| art_run_test_shards = split_list(art_run_test_module_names, NUM_MTS_ART_RUN_TEST_SHARDS) |
| for i in range(len(art_run_test_shards)): |
| art_tests_shard_i_tests = art_run_test_shards[i] |
| art_tests_shard_i = self.create_mts_test_shard( |
| "ART run-tests", art_tests_shard_i_tests, i, 2020, |
| ["TODO(rpl): Find a way to express this list in a more concise fashion."]) |
| mts_test_shards.append(art_tests_shard_i) |
| |
| # CTS Libcore non-OJ tests (`CtsLibcoreTestCases`) shard. |
| cts_libcore_tests_shard_num = len(mts_test_shards) |
| cts_libcore_tests_shard = self.create_mts_test_shard( |
| "CTS Libcore non-OJ tests", ["CtsLibcoreTestCases"], cts_libcore_tests_shard_num, 2020) |
| mts_test_shards.append(cts_libcore_tests_shard) |
| |
| # Other CTS Libcore tests shard. |
| other_cts_libcore_tests_shard_num = len(mts_test_shards) |
| other_cts_libcore_tests_shard_tests = [ |
| "CtsLibcoreApiEvolutionTestCases", |
| "CtsLibcoreFileIOTestCases", |
| "CtsLibcoreJsr166TestCases", |
| "CtsLibcoreLegacy22TestCases", |
| "CtsLibcoreOjTestCases", |
| "CtsLibcoreWycheproofBCTestCases", |
| "MtsLibcoreOkHttpTestCases", |
| "MtsLibcoreBouncyCastleTestCases", |
| ] |
| other_cts_libcore_tests_shard = self.create_mts_test_shard( |
| "CTS Libcore OJ tests", other_cts_libcore_tests_shard_tests, |
| other_cts_libcore_tests_shard_num, 2021) |
| mts_test_shards.append(other_cts_libcore_tests_shard) |
| |
| # ART gtests shard. |
| # TODO: Also handle the case of gtests requiring root access to the device |
| # (`art_gtest_eng_only_module_names`). |
| art_gtests_shard_num = len(mts_test_shards) |
| art_gtests_shard_tests = art_gtest_mts_user_module_names |
| art_gtests_shard = self.create_mts_test_shard( |
| "ART gtests", art_gtests_shard_tests, art_gtests_shard_num, 2022) |
| mts_test_shards.append(art_gtests_shard) |
| |
| for s in mts_test_shards: |
| s.regen_test_plan_file() |
| s.regen_test_list_file() |
| |
| self.regen_mts_art_tests_list_user_file(len(mts_test_shards)) |
| |
| def regen_test_files(self, regen_art_mts): |
| """Regenerate ART test files. |
| |
| Args: |
| regen_art_mts: If true, also regenerate the ART MTS definition. |
| """ |
| run_tests = self.enumerate_run_tests() |
| |
| # Create a list of the tests that can currently be built, and for |
| # which a Blueprint file is to be generated. |
| buildable_tests = list(filter(self.is_buildable, run_tests)) |
| |
| # Create a list of the tests that can be built and run |
| # (successfully). These tests are to be added to ART's |
| # `TEST_MAPPING` file and also tagged as part of TradeFed's |
| # `art-target-run-test` test suite via the `test-suite-tag` option |
| # in their configuration file. |
| expected_succeeding_tests = list(filter(self.is_runnable, buildable_tests)) |
| |
| # Regenerate Blueprint files. |
| # --------------------------- |
| |
| self.regen_bp_files(run_tests, buildable_tests) |
| |
| buildable_tests_percentage = int(len(buildable_tests) * 100 / len(run_tests)) |
| |
| print(f"Generated Blueprint files for {len(buildable_tests)} ART run-tests out of" |
| f" {len(run_tests)} ({buildable_tests_percentage}%).") |
| |
| # Regenerate `TEST_MAPPING` file. |
| # ------------------------------- |
| |
| # Note: We only include ART run-tests expected to succeed for now. |
| num_expected_succeeding_tests = len(expected_succeeding_tests) |
| |
| presubmit_run_tests = set(expected_succeeding_tests).difference(postsubmit_only_tests) |
| num_presubmit_run_tests = len(presubmit_run_tests) |
| presubmit_run_tests_percentage = int( |
| num_presubmit_run_tests * 100 / num_expected_succeeding_tests) |
| |
| num_mainline_presubmit_run_tests = num_presubmit_run_tests |
| mainline_presubmit_run_tests_percentage = presubmit_run_tests_percentage |
| |
| postsubmit_run_tests = set(expected_succeeding_tests).intersection(postsubmit_only_tests) |
| num_postsubmit_run_tests = len(postsubmit_run_tests) |
| postsubmit_run_tests_percentage = int( |
| num_postsubmit_run_tests * 100 / num_expected_succeeding_tests) |
| |
| self.regen_test_mapping_file(expected_succeeding_tests) |
| |
| expected_succeeding_tests_percentage = int( |
| num_expected_succeeding_tests * 100 / len(run_tests)) |
| |
| num_gtests = len(art_gtest_module_names) |
| |
| num_presubmit_gtests = len(art_gtest_presubmit_module_names) |
| presubmit_gtests_percentage = int(num_presubmit_gtests * 100 / num_gtests) |
| |
| num_mainline_presubmit_gtests = len(art_gtest_mainline_presubmit_module_names) |
| mainline_presubmit_gtests_percentage = int(num_mainline_presubmit_gtests * 100 / num_gtests) |
| |
| num_postsubmit_gtests = len(art_gtest_postsubmit_module_names) |
| postsubmit_gtests_percentage = int(num_postsubmit_gtests * 100 / num_gtests) |
| |
| print(f"Generated TEST_MAPPING entries for {num_expected_succeeding_tests} ART run-tests out" |
| f" of {len(run_tests)} ({expected_succeeding_tests_percentage}%):") |
| for (num_tests, test_kind, tests_percentage, test_group_name) in [ |
| (num_mainline_presubmit_run_tests, "ART run-tests", mainline_presubmit_run_tests_percentage, |
| "mainline-presubmit"), |
| (num_presubmit_run_tests, "ART run-tests", presubmit_run_tests_percentage, "presubmit"), |
| (num_postsubmit_run_tests, "ART run-tests", postsubmit_run_tests_percentage, "postsubmit"), |
| (num_mainline_presubmit_gtests, "ART gtests", mainline_presubmit_gtests_percentage, |
| "mainline-presubmit"), |
| (num_presubmit_gtests, "ART gtests", presubmit_gtests_percentage, "presubmit"), |
| (num_postsubmit_gtests, "ART gtests", postsubmit_gtests_percentage, "postsubmit"), |
| ]: |
| print( |
| f" {num_tests:3d} {test_kind} ({tests_percentage}%) in `{test_group_name}` test group.") |
| print(""" Note: Tests in `*presubmit` test groups are executed in pre- and |
| post-submit test runs. Tests in the `postsubmit` test group |
| are only executed in post-submit test runs.""") |
| |
| # Regenerate ART MTS definition (optional). |
| # ----------------------------------------- |
| |
| if regen_art_mts: |
| self.regen_art_mts_files(expected_succeeding_tests) |
| print(f"Generated ART MTS entries for {num_expected_succeeding_tests} ART run-tests out" |
| f" of {len(run_tests)} ({expected_succeeding_tests_percentage}%).") |
| |
| def main(): |
| if "ANDROID_BUILD_TOP" not in os.environ: |
| logging.error("ANDROID_BUILD_TOP environment variable is not set; did you forget to run `lunch`?") |
| sys.exit(1) |
| |
| parser = argparse.ArgumentParser( |
| formatter_class=argparse.RawDescriptionHelpFormatter, |
| description=textwrap.dedent("Regenerate some ART test-related files."), |
| epilog=textwrap.dedent("""\ |
| Regenerate ART run-tests Blueprint files, ART's `TEST_MAPPING` file, and |
| optionally the ART MTS (Mainline Test Suite) definition. |
| """)) |
| parser.add_argument("-m", "--regen-art-mts", help="regenerate the ART MTS definition as well", |
| action="store_true") |
| parser.add_argument("-v", "--verbose", help="enable verbose output", action="store_true") |
| args = parser.parse_args() |
| |
| if args.verbose: |
| logging.getLogger().setLevel(logging.DEBUG) |
| |
| generator = Generator(os.environ["ANDROID_BUILD_TOP"]) |
| generator.regen_test_files(args.regen_art_mts) |
| |
| |
| if __name__ == "__main__": |
| main() |