136 files changed, 4114 insertions, 1178 deletions
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index cf359141c2..b6d6600358 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -476,7 +476,6 @@ android::base::unique_fd AdbConnectionState::ReadFdFromAdb() {
   int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
   if (rc <= 0) {
-    PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << control_sock_ << ")";
     return android::base::unique_fd(-1);
   } else {
     VLOG(jdwp) << "Fds have been received from ADB!";
@@ -624,7 +623,6 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
       android::base::unique_fd new_fd(ReadFdFromAdb());
       if (new_fd == -1) {
         // Something went wrong. We need to retry getting the control socket.
-        PLOG(ERROR) << "Something went wrong getting fds from adb. Retry!";
         control_sock_.reset();
         break;
       } else if (adb_connection_socket_ != -1) {
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 03e68ae93c..c73b988b54 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -73,8 +73,22 @@ TARGET_CORE_IMG_OUTS :=
 HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
 TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
 
-# Jar files for core.art.
-TEST_CORE_JARS := core-oj core-libart core-simple conscrypt okhttp bouncycastle
+# Modules to compile for core.art.
+# TODO: Move conscrypt from CORE_IMG_JARS to TEST_CORE_JARS and adjust scripts to fix Golem.
+CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt
+HOST_CORE_IMG_JARS := $(addsuffix -hostdex,$(CORE_IMG_JARS))
+TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
+HOST_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_IMG_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar)
+endif
+HOST_CORE_IMG_DEX_FILES := $(foreach jar,$(HOST_CORE_IMG_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
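# (Note, not in the patch: TEST_CORE_JARS is currently identical to
# CORE_IMG_JARS; per the TODO above, conscrypt is expected to move from
# CORE_IMG_JARS into this test-only list later.)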
+TEST_CORE_JARS := $(CORE_IMG_JARS)
 HOST_TEST_CORE_JARS := $(addsuffix -hostdex,$(TEST_CORE_JARS))
 TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
 HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_TEST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -83,7 +97,6 @@ TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
 else
 TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
 endif
-
 HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_TEST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index d8014bd55f..be1791b372 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -133,6 +133,7 @@ define build-art-test-dex
     LOCAL_MODULE_PATH := $(3)
     LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_MIN_SDK_VERSION := 19
       LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
     endif
     include $(BUILD_JAVA_LIBRARY)
@@ -148,6 +149,7 @@ define build-art-test-dex
     LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS)
     LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_MIN_SDK_VERSION := 19
      LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
     endif
     include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6885946c40..a926d9a686 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -452,12 +452,7 @@ define define-art-gtest-rule-target
     $$(gtest_exe) \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+    $$(foreach jar,$$(TARGET_TEST_CORE_JARS),$$(TARGET_OUT_JAVA_LIBRARIES)/$$(jar).jar)
 
 ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
 
@@ -515,7 +510,8 @@ define define-art-gtest-rule-host
     $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
     $$(gtest_exe) \
     $$(ART_GTEST_$(1)_HOST_DEPS) \
-    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
+    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
+    $(HOST_OUT_EXECUTABLES)/timeout_dumper
 
 ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
 
@@ -528,7 +524,9 @@
 $$(gtest_output): .KATI_IMPLICIT_OUTPUTS := $$(gtest_output)-nocache
 $$(gtest_output): NAME := $$(gtest_rule)
 ifeq (,$(SANITIZE_HOST))
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
-	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && $$< --gtest_output=xml:$$@ && \
+	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
+	  timeout --foreground -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+	    $$< --gtest_output=xml:$$@ && \
 	  $$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
 else
 # Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
 # under ASAN.
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
-	  ASAN_OPTIONS=detect_leaks=1 $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+	  ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s -s SIGRTMIN+2 3600s \
+	    $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+	    $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
 	  { $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
 	  ( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
 	    { echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index e2adac1660..2ad11432d7 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,8 +39,6 @@ endif
 # Use dex2oat debug version for better error reporting
 # $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
 # $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
-# run-test --no-image
 define create-core-oat-host-rules
   core_compile_options :=
   core_image_name :=
@@ -80,13 +78,15 @@ define create-core-oat-host-rules
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
	@echo "host dex2oat: $$@"
	@mkdir -p $$(dir $$@)
	$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
	  $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
@@ -169,13 +169,15 @@ define create-core-oat-target-rules
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
	@echo "target dex2oat: $$@"
	@mkdir -p $$(dir $$@)
	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
	  --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
	  --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index f2e12f6a77..0ec0a15e9e 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -19,6 +19,11 @@ art_runtime_base_native_shared_libs = [
     "libopenjdkjvmti",
     "libadbconnection",
 ]
+bionic_native_shared_libs = [
+    "libc",
+    "libm",
+    "libdl",
+]
 // - Fake library that avoids namespace issues and gives some warnings for nosy apps.
 art_runtime_fake_native_shared_libs = [
     // FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -102,7 +107,8 @@ apex {
     compile_multilib: "both",
     manifest: "manifest.json",
     native_shared_libs: art_runtime_base_native_shared_libs
-        + art_runtime_fake_native_shared_libs,
+        + art_runtime_fake_native_shared_libs
+        + bionic_native_shared_libs,
     multilib: {
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
@@ -130,7 +136,8 @@ apex {
     manifest: "manifest.json",
     native_shared_libs: art_runtime_base_native_shared_libs
         + art_runtime_fake_native_shared_libs
-        + art_runtime_debug_native_shared_libs,
+        + art_runtime_debug_native_shared_libs
+        + bionic_native_shared_libs,
     multilib: {
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index c19c7bd94c..b5e8d8b7eb 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -33,6 +33,7 @@ can be installed with:
 
   sudo apt-get install libguestfs-tools
 "
+
 [[ -n "$ANDROID_PRODUCT_OUT" ]] \
   || die "You need to source and lunch before you can use this script."
 
@@ -41,6 +42,7 @@ set -e
 
 build_apex_p=true
 list_image_files_p=false
+print_image_tree_p=false
 
 function usage {
   cat <<EOF
@@ -48,7 +50,8 @@ Usage: $0 [OPTION]
 Build (optional) and run tests on Android Runtime APEX package (on host).
 
   -s, --skip-build    skip the build step
-  -l, --list-files    list the contents of the ext4 image
+  -l, --list-files    list the contents of the ext4 image using 'find'
+  -t, --print-tree    list the contents of the ext4 image using 'tree'
  -h, --help          display this help and exit
 EOF
@@ -59,6 +62,7 @@ while [[ $# -gt 0 ]]; do
   case "$1" in
     (-s|--skip-build) build_apex_p=false;;
     (-l|--list-files) list_image_files_p=true;;
+    (-t|--print-tree) print_image_tree_p=true;;
     (-h|--help) usage;;
     (*) die "Unknown option: '$1'
@@ -66,8 +70,42 @@ Try '$0 --help' for more information.";;
   shift
 done
 
-work_dir=$(mktemp -d)
-mount_point="$work_dir/image"
+if $print_image_tree_p; then
+  which tree >/dev/null || die "This script requires the 'tree' tool.
+On Debian-based systems, this can be installed with:
+
+   sudo apt-get install tree
+"
+fi
+
+
+# build_apex APEX_MODULE
+# ----------------------
+# Build APEX package APEX_MODULE.
+function build_apex {
+  if $build_apex_p; then
+    local apex_module=$1
+    say "Building package $apex_module" && make "$apex_module" || die "Cannot build $apex_module"
+  fi
+}
+
+# maybe_list_apex_contents MOUNT_POINT
+# ------------------------------------
+# If any listing/printing option was used, honor them and display the contents
+# of the APEX payload at MOUNT_POINT.
+function maybe_list_apex_contents {
+  local mount_point=$1
+
+  # List the contents of the mounted image using `find` (optional).
+  if $list_image_files_p; then
+    say "Listing image files" && find "$mount_point"
+  fi
+
+  # List the contents of the mounted image using `tree` (optional).
+  if $print_image_tree_p; then
+    say "Printing image tree" && ls -ld "$mount_point" && tree -aph --du "$mount_point"
+  fi
+}
 
 function check_binary {
   [[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
@@ -91,36 +129,22 @@ function check_library {
     || die "Cannot find library '$1' in mounted image"
 }
 
-function build_apex {
-  if $build_apex_p; then
-    say "Building package $1" && make "$1" || die "Cannot build $1"
-  fi
-}
-
-function check_contents {
-
+# Check contents of APEX payload located in `$mount_point`.
+function check_release_contents {
   # Check that the mounted image contains a manifest.
   [[ -f "$mount_point/apex_manifest.json" ]] || die "no manifest"
 
   # Check that the mounted image contains ART base binaries.
   check_multilib_binary dalvikvm
-  # TODO: Does not work yet.
+  # TODO: Does not work yet (b/119942078).
   : check_binary_symlink dalvikvm
   check_binary dex2oat
   check_binary dexoptanalyzer
   check_binary profman
 
-  # Check that the mounted image contains ART tools binaries.
-  check_binary dexdiag
-  check_binary dexdump
-  check_binary dexlist
   # oatdump is only in device apex's due to build rules
-  # check_binary oatdump
-
-  # Check that the mounted image contains ART debug binaries.
-  check_binary dex2oatd
-  check_binary dexoptanalyzerd
-  check_binary profmand
+  # TODO: Check for it when it is also built for host.
+  : check_binary oatdump
 
   # Check that the mounted image contains ART libraries.
   check_library libart-compiler.so
@@ -135,20 +159,6 @@ function check_contents {
   check_library libdexfile.so
   check_library libprofile.so
 
-  # Check that the mounted image contains ART debug libraries.
-  check_library libartd-compiler.so
-  check_library libartd.so
-  check_library libopenjdkd.so
-  check_library libopenjdkjvmd.so
-  check_library libopenjdkjvmtid.so
-  check_library libadbconnectiond.so
-  # TODO: Should we check for these libraries too, even if they are not explicitly
-  # listed as dependencies in the Android Runtime APEX module rule?
-  check_library libdexfiled.so
-  check_library libartbased.so
-  check_library libartd-dexlayout.so
-  check_library libprofiled.so
-
   # TODO: Should we check for other libraries, such as:
   #
   #   libbacktrace.so
@@ -164,105 +174,210 @@
   # ?
 }
 
+# Check debug contents of APEX payload located in `$mount_point`.
+function check_debug_contents {
+  # Check that the mounted image contains ART tools binaries.
+  check_binary dexdiag
+  check_binary dexdump
+  check_binary dexlist
 
-# *****************************************
-# * Testing for com.android.runtime.debug *
-# *****************************************
+  # Check that the mounted image contains ART debug binaries.
+  check_binary dex2oatd
+  check_binary dexoptanalyzerd
+  check_binary profmand
+
+  # Check that the mounted image contains ART debug libraries.
+  check_library libartd-compiler.so
+  check_library libartd.so
+  check_library libopenjdkd.so
+  check_library libopenjdkjvmd.so
+  check_library libopenjdkjvmtid.so
+  check_library libadbconnectiond.so
+  # TODO: Should we check for these libraries too, even if they are not explicitly
+  # listed as dependencies in the Android Runtime APEX module rule?
+  check_library libdexfiled.so
+  check_library libartbased.so
+  check_library libartd-dexlayout.so
+  check_library libprofiled.so
+}
+
+# Testing target (device) APEX packages.
+# ======================================
+
+# Clean-up.
+function cleanup_target {
+  guestunmount "$mount_point"
+  rm -rf "$work_dir"
+}
 
 # Garbage collection.
-function finish_device_debug {
+function finish_target {
   # Don't fail early during cleanup.
   set +e
 
-  guestunmount "$mount_point"
-  rm -rf "$work_dir"
+  cleanup_target
 }
 
-trap finish_device_debug EXIT
+# setup_target_apex APEX_MODULE MOUNT_POINT
+# -----------------------------------------
+# Extract image from target APEX_MODULE and mount it in MOUNT_POINT.
+function setup_target_apex {
+  local apex_module=$1
+  local mount_point=$2
+  local system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
+  local apex_package="$system_apexdir/$apex_module.apex"
+
+  say "Extracting and mounting image"
+
+  # Extract the payload from the Android Runtime APEX.
+  local image_filename="apex_payload.img"
+  unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+  mkdir "$mount_point"
+  local image_file="$work_dir/$image_filename"
+
+  # Check filesystems in the image.
+  local image_filesystems="$work_dir/image_filesystems"
+  virt-filesystems -a "$image_file" >"$image_filesystems"
+  # We expect a single partition (/dev/sda) in the image.
+  local partition="/dev/sda"
+  echo "$partition" | cmp "$image_filesystems" -
+
+  # Mount the image from the Android Runtime APEX.
+  guestmount -a "$image_file" -m "$partition" "$mount_point"
+}
 
-# TODO: Also exercise the Release Runtime APEX (`com.android.runtime.release`).
-apex_module="com.android.runtime.debug"
+# Testing release APEX package (com.android.runtime.release).
+# -----------------------------------------------------------
 
-# Build the Android Runtime APEX package (optional).
-build_apex $apex_module
+apex_module="com.android.runtime.release"
 
-system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
-apex_package="$system_apexdir/$apex_module.apex"
+say "Processing APEX package $apex_module"
 
-say "Extracting and mounting image"
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
 
-# Extract the payload from the Android Runtime APEX.
-image_filename="apex_payload.img"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
+trap finish_target EXIT
 
-# Check filesystems in the image.
-image_filesystems="$work_dir/image_filesystems"
-virt-filesystems -a "$image_file" >"$image_filesystems"
-# We expect a single partition (/dev/sda) in the image.
-partition="/dev/sda"
-echo "$partition" | cmp "$image_filesystems" -
+# Build the APEX package (optional).
+build_apex "$apex_module"
 
-# Mount the image from the Android Runtime APEX.
-guestmount -a "$image_file" -m "$partition" "$mount_point"
+# Set up APEX package.
+setup_target_apex "$apex_module" "$mount_point"
 
-# List the contents of the mounted image (optional).
-$list_image_files_p && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
 
-say "Running tests"
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
 
-check_contents
+# Clean up.
+trap - EXIT
+cleanup_target
 
-# Check for files pulled in from device-only oatdump.
+say "$apex_module tests passed"
+echo
+
+# Testing debug APEX package (com.android.runtime.debug).
+# -------------------------------------------------------
+
+apex_module="com.android.runtime.debug"
+
+say "Processing APEX package $apex_module"
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+
+# Set up APEX package.
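# (Note, not in the patch: setup_target_apex, defined above, unzips
# apex_payload.img out of the .apex package and guest-mounts its single ext4
# partition at "$mount_point".)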
+setup_target_apex "$apex_module" "$mount_point"
+
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
+# Check for files pulled in from debug target-only oatdump.
 check_binary oatdump
 check_library libart-disassembler.so
 
-# Cleanup
+# Clean up.
 trap - EXIT
-guestunmount "$mount_point"
-rm -rf "$work_dir"
+cleanup_target
+
+say "$apex_module tests passed"
+echo
 
-say "$apex_module Tests passed"
 
-# ****************************************
-# * Testing for com.android.runtime.host *
-# ****************************************
+# Testing host APEX package (com.android.runtime.host).
+# =====================================================
+
+# Clean-up.
+function cleanup_host {
+  rm -rf "$work_dir"
+}
 
 # Garbage collection.
 function finish_host {
   # Don't fail early during cleanup.
   set +e
 
-  rm -rf "$work_dir"
+  cleanup_host
 }
 
+# setup_host_apex APEX_MODULE MOUNT_POINT
+# ---------------------------------------
+# Extract Zip file from host APEX_MODULE and extract it in MOUNT_POINT.
+function setup_host_apex {
+  local apex_module=$1
+  local mount_point=$2
+  local system_apexdir="$ANDROID_HOST_OUT/apex"
+  local apex_package="$system_apexdir/$apex_module.zipapex"
+
+  say "Extracting payload"
+
+  # Extract the payload from the Android Runtime APEX.
+  local image_filename="apex_payload.zip"
+  unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+  mkdir "$mount_point"
+  local image_file="$work_dir/$image_filename"
+
+  # Unzipping the payload
+  unzip -q "$image_file" -d "$mount_point"
+}
+
+apex_module="com.android.runtime.host"
+
+say "Processing APEX package $apex_module"
+
 work_dir=$(mktemp -d)
 mount_point="$work_dir/zip"
 
 trap finish_host EXIT
 
-apex_module="com.android.runtime.host"
-
-# Build the Android Runtime APEX package (optional).
-build_apex $apex_module
-
-system_apexdir="$ANDROID_HOST_OUT/apex"
-apex_package="$system_apexdir/$apex_module.zipapex"
+# Build the APEX package (optional).
+build_apex "$apex_module"
 
-say "Extracting payload"
+# Set up APEX package.
+setup_host_apex "$apex_module" "$mount_point"
 
-# Extract the payload from the Android Runtime APEX.
-image_filename="apex_payload.zip"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
 
-# Unzipping the payload
-unzip -q "$image_file" -d "$mount_point"
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
 
-say "Running tests"
+# Clean up.
+trap - EXIT
+cleanup_host
 
-check_contents
+say "$apex_module tests passed"
 
-say "$apex_module Tests passed"
-say "Tests passed"
+say "All Android Runtime APEX tests passed"
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index 82c04e70f5..952be44b0e 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -206,7 +206,7 @@ struct CmdlineParser {
       };
       load_value_ = []() -> TArg& {
         assert(false && "Should not be appending values to ignored arguments");
-        return *reinterpret_cast<TArg*>(0);  // Blow up.
+        __builtin_trap();  // Blow up.
       };
 
       save_value_specified_ = true;
@@ -270,7 +270,7 @@ struct CmdlineParser {
       load_value_ = []() -> TArg& {
         assert(false && "No load value function defined");
-        return *reinterpret_cast<TArg*>(0);  // Blow up.
+        __builtin_trap();  // Blow up.
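A note on the `__builtin_trap()` substitution just above: the lambdas must return a `TArg&`, and the old code manufactured that reference by dereferencing a null pointer, which is undefined behavior that a compiler may optimize away along with the intended crash. `__builtin_trap()` is a GCC/Clang builtin that emits a trapping instruction and is treated as noreturn, so the lambda type-checks without returning anything. A minimal sketch, independent of the patch:

// Sketch, not from the patch: why __builtin_trap() beats the old expression.
int& NeverCallBad() {
  return *reinterpret_cast<int*>(0);  // UB; the optimizer may assume this is unreachable.
}
int& NeverCallGood() {
  __builtin_trap();  // Compiles to a trapping instruction and is noreturn,
                     // so no bogus reference has to be manufactured.
}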
       };
   }
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 28f108423e..4a27178146 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -107,7 +107,9 @@ void WriteFDE(bool is64bit,
   } else {
     DCHECK(format == DW_DEBUG_FRAME_FORMAT);
     // Relocate code_address if it has absolute value.
-    patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    if (patch_locations != nullptr) {
+      patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    }
   }
   if (is64bit) {
     writer.PushUint64(code_address);
@@ -122,6 +124,30 @@ void WriteFDE(bool is64bit,
   writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
 }
 
+// Read single FDE entry from 'data' (which is advanced).
+template<typename Addr>
+bool ReadFDE(const uint8_t** data, Addr* addr, Addr* size, ArrayRef<const uint8_t>* opcodes) {
+  struct Header {
+    uint32_t length;
+    int32_t cie_pointer;
+    Addr addr;
+    Addr size;
+    uint8_t augmentation;
+    uint8_t opcodes[];
+  } PACKED(1);
+  const Header* header = reinterpret_cast<const Header*>(*data);
+  const size_t length = 4 + header->length;
+  *data += length;
+  if (header->cie_pointer == -1) {
+    return false;  // Not an FDE entry.
+  }
+  DCHECK_EQ(header->cie_pointer, 0);  // Expects single CIE. Assumes DW_DEBUG_FRAME_FORMAT.
+  *addr = header->addr;
+  *size = header->size;
+  *opcodes = ArrayRef<const uint8_t>(header->opcodes, length - offsetof(Header, opcodes));
+  return true;
+}
+
 // Write compilation unit (CU) to .debug_info section.
 template<typename Vector>
 void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 27b70c8caa..e0116c6f41 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -182,7 +182,7 @@ void WriteCFISection(linker::ElfBuilder<ElfTypes>* builder,
   std::vector<const MethodDebugInfo*> sorted_method_infos;
   sorted_method_infos.reserve(method_infos.size());
   for (size_t i = 0; i < method_infos.size(); i++) {
-    if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) {
+    if (!method_infos[i].deduped) {
       sorted_method_infos.push_back(&method_infos[i]);
     }
   }
@@ -222,7 +222,6 @@ void WriteCFISection(linker::ElfBuilder<ElfTypes>* builder,
   buffer.clear();
   for (const MethodDebugInfo* mi : sorted_method_infos) {
     DCHECK(!mi->deduped);
-    DCHECK(!mi->cfi.empty());
     const Elf_Addr code_address = mi->code_address +
         (mi->is_code_address_text_relative ? builder->GetText()->GetAddress() : 0);
     if (format == dwarf::DW_EH_FRAME_FORMAT) {
diff --git a/compiler/debug/elf_debug_reader.h b/compiler/debug/elf_debug_reader.h
new file mode 100644
index 0000000000..91b1b3ea81
--- /dev/null
+++ b/compiler/debug/elf_debug_reader.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+
+#include "base/array_ref.h"
+#include "debug/dwarf/headers.h"
+#include "elf.h"
+#include "xz_utils.h"
+
+namespace art {
+namespace debug {
+
+// Trivial ELF file reader.
+//
+// It is the bare minimum needed to read mini-debug-info symbols for unwinding.
+// We use it to merge JIT mini-debug-infos together or to prune them after GC.
+// The consumed ELF file comes from ART JIT.
+template <typename ElfTypes, typename VisitSym, typename VisitFde>
+static void ReadElfSymbols(const uint8_t* elf, VisitSym visit_sym, VisitFde visit_fde) {
+  // Note that the input buffer might be misaligned.
+  typedef typename ElfTypes::Ehdr ALIGNED(1) Elf_Ehdr;
+  typedef typename ElfTypes::Shdr ALIGNED(1) Elf_Shdr;
+  typedef typename ElfTypes::Sym ALIGNED(1) Elf_Sym;
+  typedef typename ElfTypes::Addr ALIGNED(1) Elf_Addr;
+
+  // Read and check the elf header.
+  const Elf_Ehdr* header = reinterpret_cast<const Elf_Ehdr*>(elf);
+  CHECK(header->checkMagic());
+
+  // Find sections that we are interested in.
+  const Elf_Shdr* sections = reinterpret_cast<const Elf_Shdr*>(elf + header->e_shoff);
+  const Elf_Shdr* strtab = nullptr;
+  const Elf_Shdr* symtab = nullptr;
+  const Elf_Shdr* debug_frame = nullptr;
+  const Elf_Shdr* gnu_debugdata = nullptr;
+  for (size_t i = 1 /* skip null section */; i < header->e_shnum; i++) {
+    const Elf_Shdr* section = sections + i;
+    const char* name = reinterpret_cast<const char*>(
+        elf + sections[header->e_shstrndx].sh_offset + section->sh_name);
+    if (strcmp(name, ".strtab") == 0) {
+      strtab = section;
+    } else if (strcmp(name, ".symtab") == 0) {
+      symtab = section;
+    } else if (strcmp(name, ".debug_frame") == 0) {
+      debug_frame = section;
+    } else if (strcmp(name, ".gnu_debugdata") == 0) {
+      gnu_debugdata = section;
+    }
+  }
+
+  // Visit symbols.
+  if (symtab != nullptr && strtab != nullptr) {
+    const Elf_Sym* symbols = reinterpret_cast<const Elf_Sym*>(elf + symtab->sh_offset);
+    DCHECK_EQ(symtab->sh_entsize, sizeof(Elf_Sym));
+    size_t count = symtab->sh_size / sizeof(Elf_Sym);
+    for (size_t i = 1 /* skip null symbol */; i < count; i++) {
+      Elf_Sym symbol = symbols[i];
+      if (symbol.getBinding() != STB_LOCAL) {  // Ignore local symbols (e.g. "$t").
+        const uint8_t* name = elf + strtab->sh_offset + symbol.st_name;
+        visit_sym(symbol, reinterpret_cast<const char*>(name));
+      }
+    }
+  }
+
+  // Visit CFI (unwind) data.
+  if (debug_frame != nullptr) {
+    const uint8_t* data = elf + debug_frame->sh_offset;
+    const uint8_t* end = data + debug_frame->sh_size;
+    while (data < end) {
+      Elf_Addr addr, size;
+      ArrayRef<const uint8_t> opcodes;
+      if (dwarf::ReadFDE<Elf_Addr>(&data, &addr, &size, &opcodes)) {
+        visit_fde(addr, size, opcodes);
+      }
+    }
+  }
+
+  // Process embedded compressed ELF file.
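  // (Note, not in the patch: .gnu_debugdata is the standard "MiniDebugInfo"
  // layout; the section body is an entire XZ-compressed ELF file, hence the
  // decompress-and-recurse below.)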
+  if (gnu_debugdata != nullptr) {
+    ArrayRef<const uint8_t> compressed(elf + gnu_debugdata->sh_offset, gnu_debugdata->sh_size);
+    std::vector<uint8_t> decompressed;
+    XzDecompress(compressed, &decompressed);
+    ReadElfSymbols<ElfTypes>(decompressed.data(), visit_sym, visit_fde);
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 1ecb1d8ed9..56d773f508 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -21,12 +21,14 @@
 #include <vector>
 
 #include "base/array_ref.h"
+#include "base/stl_util.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "debug/elf_compilation_unit.h"
 #include "debug/elf_debug_frame_writer.h"
 #include "debug/elf_debug_info_writer.h"
 #include "debug/elf_debug_line_writer.h"
 #include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_debug_reader.h"
 #include "debug/elf_symtab_writer.h"
 #include "debug/method_debug_info.h"
 #include "debug/xz_utils.h"
@@ -203,9 +205,147 @@ std::vector<uint8_t> MakeElfFileForJIT(
   }
   builder->End();
   CHECK(builder->Good());
+  // Verify the ELF file by reading it back using the trivial reader.
+  if (kIsDebugBuild) {
+    using Elf_Sym = typename ElfTypes::Sym;
+    using Elf_Addr = typename ElfTypes::Addr;
+    size_t num_syms = 0;
+    size_t num_cfis = 0;
+    ReadElfSymbols<ElfTypes>(
+        buffer.data(),
+        [&](Elf_Sym sym, const char*) {
+          DCHECK_EQ(sym.st_value, method_info.code_address + CompiledMethod::CodeDelta(isa));
+          DCHECK_EQ(sym.st_size, method_info.code_size);
+          num_syms++;
+        },
+        [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+          DCHECK_EQ(addr, method_info.code_address);
+          DCHECK_EQ(size, method_info.code_size);
+          DCHECK_GE(opcodes.size(), method_info.cfi.size());
+          DCHECK_EQ(memcmp(opcodes.data(), method_info.cfi.data(), method_info.cfi.size()), 0);
+          num_cfis++;
+        });
+    DCHECK_EQ(num_syms, 1u);
+    DCHECK_EQ(num_cfis, 1u);
+  }
   return buffer;
 }
 
+// Combine several mini-debug-info ELF files into one, while filtering some symbols.
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols) {
+  using ElfTypes = ElfRuntimeTypes;
+  using Elf_Addr = typename ElfTypes::Addr;
+  using Elf_Sym = typename ElfTypes::Sym;
+  CHECK_EQ(sizeof(Elf_Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  const bool is64bit = Is64BitInstructionSet(isa);
+  auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) {
+    const void* code_ptr = reinterpret_cast<const void*>(addr);
+    return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr);
+  };
+  uint64_t min_address = std::numeric_limits<uint64_t>::max();
+  uint64_t max_address = 0;
+
+  // Produce the inner ELF file.
+  // It will contain the symbols (.symtab) and unwind information (.debug_frame).
+  std::vector<uint8_t> inner_elf_file;
+  {
+    inner_elf_file.reserve(1 * KB);  // Approximate size of ELF file with a single symbol.
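    // (Note, not in the patch: the std::binary_search above requires
    // `removed_symbols` to be sorted, e.g. via
    //   std::sort(removed_symbols.begin(), removed_symbols.end());
    // in the caller before PackElfFileForJIT is invoked.)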
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &inner_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    auto* text = builder->GetText();
+    auto* strtab = builder->GetStrTab();
+    auto* symtab = builder->GetSymTab();
+    auto* debug_frame = builder->GetDebugFrame();
+    std::deque<Elf_Sym> symbols;
+    std::vector<uint8_t> debug_frame_buffer;
+    WriteCIE(isa, dwarf::DW_DEBUG_FRAME_FORMAT, &debug_frame_buffer);
+
+    // Write symbol names. All other data is buffered.
+    strtab->Start();
+    strtab->Write("");  // strtab should start with empty string.
+    for (const uint8_t* added_elf_file : added_elf_files) {
+      ReadElfSymbols<ElfTypes>(
+          added_elf_file,
+          [&](Elf_Sym sym, const char* name) {
+            if (is_removed_symbol(sym.st_value)) {
+              return;
+            }
+            sym.st_name = strtab->Write(name);
+            symbols.push_back(sym);
+            min_address = std::min<uint64_t>(min_address, sym.st_value);
+            max_address = std::max<uint64_t>(max_address, sym.st_value + sym.st_size);
+          },
+          [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+            if (is_removed_symbol(addr)) {
+              return;
+            }
+            WriteFDE(is64bit,
+                     /*section_address=*/ 0,
+                     /*cie_address=*/ 0,
+                     addr,
+                     size,
+                     opcodes,
+                     dwarf::DW_DEBUG_FRAME_FORMAT,
+                     debug_frame_buffer.size(),
+                     &debug_frame_buffer,
+                     /*patch_locations=*/ nullptr);
+          });
+    }
+    strtab->End();
+
+    // Create .text covering the code range. Needed for gdb to find the symbols.
+    if (max_address > min_address) {
+      text->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+
+    // Add the symbols.
+    *num_symbols = symbols.size();
+    for (; !symbols.empty(); symbols.pop_front()) {
+      symtab->Add(symbols.front(), text);
+    }
+    symtab->WriteCachedSection();
+
+    // Add the CFI/unwind section.
+    debug_frame->Start();
+    debug_frame->WriteFully(debug_frame_buffer.data(), debug_frame_buffer.size());
+    debug_frame->End();
+
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  // Produce the outer ELF file.
+  // It contains only the inner ELF file compressed as .gnu_debugdata section.
+  // This extra wrapping is not necessary but the compression saves space.
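  // (Note, not in the patch: this mirrors the read side; ReadElfSymbols
  // recovers the inner file from .gnu_debugdata via the same XzDecompress path.)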
+  std::vector<uint8_t> outer_elf_file;
+  {
+    std::vector<uint8_t> gnu_debugdata;
+    gnu_debugdata.reserve(inner_elf_file.size() / 4);
+    XzCompress(ArrayRef<const uint8_t>(inner_elf_file), &gnu_debugdata);
+
+    outer_elf_file.reserve(KB + gnu_debugdata.size());
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &outer_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    if (max_address > min_address) {
+      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+    builder->WriteSection(".gnu_debugdata", &gnu_debugdata);
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  return outer_elf_file;
+}
+
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 8ad0c4219a..85ab356b0c 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -56,6 +56,13 @@ std::vector<uint8_t> MakeElfFileForJIT(
     bool mini_debug_info,
     const MethodDebugInfo& method_info);
 
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols);
+
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 18f7105769..f52c566727 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1231,8 +1231,15 @@ class ClinitImageUpdate {
   bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     std::string temp;
     StringPiece name(klass->GetDescriptor(&temp));
-    if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
-      data_->image_classes_.push_back(hs_.NewHandle(klass));
+    auto it = data_->image_class_descriptors_->find(name);
+    if (it != data_->image_class_descriptors_->end()) {
+      if (LIKELY(klass->IsResolved())) {
+        data_->image_classes_.push_back(hs_.NewHandle(klass));
+      } else {
+        DCHECK(klass->IsErroneousUnresolved());
+        VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+        data_->image_class_descriptors_->erase(it);
+      }
     } else {
       // Check whether it is initialized and has a clinit. They must be kept, too.
       if (klass->IsInitialized() && klass->FindClassInitializer(
@@ -1742,6 +1749,9 @@ static void LoadAndUpdateStatus(const ClassAccessor& accessor,
     if (&cls->GetDexFile() == &accessor.GetDexFile()) {
       ObjectLock<mirror::Class> lock(self, cls);
       mirror::Class::SetStatus(cls, status, self);
+      if (status >= ClassStatus::kVerified) {
+        cls->SetVerificationAttempted();
+      }
     }
   } else {
     DCHECK(self->IsExceptionPending());
   }
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 9b8bb3e90e..93575d7c75 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -126,11 +126,11 @@ extern "C" void jit_unload(void* handle) {
 }
 
 extern "C" bool jit_compile_method(
-    void* handle, ArtMethod* method, Thread* self, bool osr)
+    void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method, osr);
+  return jit_compiler->CompileMethod(self, method, baseline, osr);
 }
 
 extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -146,7 +146,10 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
     // (which would have been otherwise used as identifier to remove it later).
     AddNativeDebugInfoForJit(Thread::Current(),
                              /*code_ptr=*/ nullptr,
-                             elf_file);
+                             elf_file,
+                             debug::PackElfFileForJIT,
+                             compiler_options.GetInstructionSet(),
+                             compiler_options.GetInstructionSetFeatures());
   }
 }
 
@@ -181,7 +184,7 @@ JitCompiler::~JitCompiler() {
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
   SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
 
   DCHECK(!method->IsProxyMethod());
@@ -198,7 +201,7 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
     success = compiler_driver_->GetCompiler()->JitCompile(
-        self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
+        self, code_cache, method, baseline, osr, jit_logger_.get());
   }
 
   // Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d201611d79..29d2761348 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@ class JitCompiler {
   virtual ~JitCompiler();
 
   // Compilation entrypoint. Returns whether the compilation succeeded.
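  // (Note, not in the patch: `baseline` selects the JIT's unoptimized baseline
  // compiler tier; `osr` requests on-stack replacement of a method that is
  // currently executing.)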
-  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+  bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const CompilerOptions& GetCompilerOptions() const {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543c27..bd4304c7ff 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -1300,15 +1300,15 @@ jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2,
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
-  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
-  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+  EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+  EXPECT_EQ(0x7FEDCBA987654321LL, val2);
   return 42;
 }
 
 void JniCompilerTest::GetTextImpl() {
   SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
                CURRENT_JNI_WRAPPER(my_gettext));
-  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
                                           INT64_C(0x7FEDCBA987654321), jobj_);
   EXPECT_EQ(result, 42);
 }
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 5bd1122698..50b13c842b 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -243,7 +243,8 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNU
 // compilation.
 #define UNREACHABLE_INTRINSIC(Arch, Name)                                \
 void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
-  if (!codegen_->GetCompilerOptions().IsBaseline()) {                    \
+  if (Runtime::Current()->IsAotCompiler() &&                             \
+      !codegen_->GetCompilerOptions().IsBaseline()) {                    \
     LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()    \
                << " should have been converted to HIR";                  \
   }                                                                      \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1688ea7811..0b17c9d27e 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2927,7 +2927,7 @@ void IntrinsicLocationsBuilderARM64::VisitCRC32Update(HInvoke* invoke) {
 
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }
 
 // Lower the invoke of CRC32.update(int crc, int b).
@@ -2945,9 +2945,13 @@ void IntrinsicCodeGeneratorARM64::VisitCRC32Update(HInvoke* invoke) {
   //   result = crc32_for_byte(crc, b)
   //   crc = ~result
   // It is directly lowered to three instructions.
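  // (Note, not in the patch: the two inversions implement zlib's pre- and
  // post-conditioning of the CRC register around the raw ARMv8 CRC32B step,
  // matching java.util.zip.CRC32 semantics.)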
-  __ Mvn(out, crc);
-  __ Crc32b(out, out, val);
-  __ Mvn(out, out);
+
+  UseScratchRegisterScope temps(masm);
+  Register tmp = temps.AcquireSameSizeAs(out);
+
+  __ Mvn(tmp, crc);
+  __ Crc32b(tmp, tmp, val);
+  __ Mvn(out, tmp);
 }
 
 // The threshold for sizes of arrays to use the library provided implementation
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c9b4d36bc4..4936a6d3c0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1456,8 +1456,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(
-    ArtMethod* method, const debug::MethodDebugInfo& info) {
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                                              const debug::MethodDebugInfo& info) {
   const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
   DCHECK(compiler_options.GenerateAnyDebugInfo());
@@ -1472,12 +1472,10 @@ void OptimizingCompiler::GenerateJitDebugInfo(
       info);
   AddNativeDebugInfoForJit(Thread::Current(),
                            reinterpret_cast<const void*>(info.code_address),
-                           elf_file);
-
-  VLOG(jit)
-      << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
-      << " size=" << PrettySize(elf_file.size())
-      << " total_size=" << PrettySize(GetJitMiniDebugInfoMemUsage());
+                           elf_file,
+                           debug::PackElfFileForJIT,
+                           compiler_options.GetInstructionSet(),
+                           compiler_options.GetInstructionSetFeatures());
 }
 
 }  // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a5bba9bb97..0b2c0b6749 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -601,8 +601,7 @@ class WatchDog {
       Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
                          timeout_in_milliseconds_/1000));
     } else if (rc != 0) {
-      std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
-                                       strerror(errno)));
+      std::string message(StringPrintf("pthread_cond_timedwait failed: %s", strerror(rc)));
       Fatal(message.c_str());
     }
   }
@@ -624,7 +623,6 @@ class Dex2Oat final {
   explicit Dex2Oat(TimingLogger* timings)
       : compiler_kind_(Compiler::kOptimizing),
         // Take the default set of instruction features from the build.
-        boot_image_checksum_(0),
         key_value_store_(nullptr),
         verification_results_(nullptr),
         runtime_(nullptr),
@@ -1437,17 +1435,22 @@ class Dex2Oat final {
     if (!IsBootImage()) {
       // When compiling an app, create the runtime early to retrieve
-      // the image location key needed for the oat header.
+      // the boot image checksums needed for the oat header.
       if (!CreateRuntime(std::move(runtime_options))) {
         return dex2oat::ReturnCode::kCreateRuntime;
       }
 
       if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
-        std::vector<ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-        boot_image_checksum_ = image_spaces[0]->GetImageHeader().GetImageChecksum();
-      } else {
-        boot_image_checksum_ = 0u;
+        Runtime* runtime = Runtime::Current();
+        key_value_store_->Put(OatHeader::kBootClassPathKey,
+                              android::base::Join(runtime->GetBootClassPathLocations(), ':'));
+        std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+        const std::vector<const DexFile*>& bcp_dex_files =
+            runtime->GetClassLinker()->GetBootClassPath();
+        key_value_store_->Put(
+            OatHeader::kBootClassPathChecksumsKey,
+            gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
       }
 
       // Open dex files for class path.
@@ -2015,7 +2018,7 @@ class Dex2Oat final {
       elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
     }
 
-    if (!oat_writer->WriteHeader(elf_writer->GetStream(), boot_image_checksum_)) {
+    if (!oat_writer->WriteHeader(elf_writer->GetStream())) {
       LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
       return false;
     }
@@ -2646,7 +2649,6 @@ class Dex2Oat final {
   std::unique_ptr<CompilerOptions> compiler_options_;
   Compiler::Kind compiler_kind_;
 
-  uint32_t boot_image_checksum_;
   std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
 
   std::unique_ptr<VerificationResults> verification_results_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index bd8cf5ad56..fa0a3d4ac9 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -326,8 +326,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
         elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
       }
 
-      bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
-                                               /*boot_image_checksum=*/ 0u);
+      bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream());
       ASSERT_TRUE(header_ok);
 
       writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 61d105f5cd..e4e4b13458 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -32,6 +32,7 @@
 #include "base/enums.h"
 #include "base/globals.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
@@ -153,6 +154,26 @@ ObjPtr<mirror::ClassLoader> ImageWriter::GetAppClassLoader() const
       : nullptr;
 }
 
+bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
+  // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+  if (compiler_options_.IsBootImage()) {
+    return true;
+  }
+  // Objects already in the boot image do not belong to the image being written.
+  if (IsInBootImage(obj.Ptr())) {
+    return false;
+  }
+  // DexCaches for the boot class path components that are not a part of the boot image
+  // cannot be garbage collected in PrepareImageAddressSpace() but we do not want to
+  // include them in the app image. So make sure we include only the app DexCaches.
+  if (obj->IsDexCache() &&
+      !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
+                       obj->AsDexCache()->GetDexFile())) {
+    return false;
+  }
+  return true;
+}
+
 // Return true if an object is already in an image space.
 bool ImageWriter::IsInBootImage(const void* obj) const {
   gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -437,7 +458,7 @@ std::vector<ImageWriter::HeapReferencePointerInfo> ImageWriter::CollectStringRef
    */
   heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsInBootImage(object.Ptr())) {
+    if (IsImageObject(object)) {
       visitor.SetObject(object);
 
       if (object->IsDexCache()) {
@@ -680,7 +701,7 @@ bool ImageWriter::Write(int image_fd,
   ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
   std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
-    if (IsInBootImage(dex_cache.Ptr())) {
+    if (!IsImageObject(dex_cache)) {
       continue;  // Boot image DexCache is not written to the app image.
     }
     PreloadDexCache(dex_cache, class_loader);
@@ -989,7 +1010,7 @@ void ImageWriter::PrepareDexCacheArraySlots() {
   for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
     ObjPtr<mirror::DexCache> dex_cache =
         ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-    if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
+    if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1758,7 +1779,8 @@ void ImageWriter::PruneNonImageClasses() {
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
     // Pass the class loader associated with the DexCache. This can either be
    // the app's `class_loader` or `nullptr` if boot class loader.
-    PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : GetAppClassLoader());
+    bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
+    PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
   }
 
   // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1856,7 +1878,7 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread
       continue;
     }
    const DexFile* dex_file = dex_cache->GetDexFile();
-    if (!IsInBootImage(dex_cache.Ptr())) {
+    if (IsImageObject(dex_cache)) {
       dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
     }
   }
@@ -1875,7 +1897,7 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
-    if (!IsInBootImage(dex_cache.Ptr())) {
+    if (IsImageObject(dex_cache)) {
       non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
     }
   }
@@ -1889,7 +1911,7 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
-    if (!IsInBootImage(dex_cache.Ptr()) &&
+    if (IsImageObject(dex_cache) &&
         image_dex_files.find(dex_file) != image_dex_files.end()) {
       dex_caches->Set<false>(i, dex_cache.Ptr());
       ++i;
@@ -1942,7 +1964,7 @@ ObjPtr<ObjectArray<Object>> ImageWriter::CreateImageRoots(
 mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
                                               mirror::Object* obj,
                                               size_t oat_index) {
-  if (obj == nullptr || IsInBootImage(obj)) {
+  if (obj == nullptr || !IsImageObject(obj)) {
     // Object is null or already in the image, there is no work to do.
     return obj;
   }
@@ -2373,7 +2395,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
   {
     auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+      if (IsImageObject(obj)) {
         CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
       }
     };
@@ -2444,7 +2466,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
   {
     auto unbin_objects_into_offset = [&](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!IsInBootImage(obj)) {
+      if (IsImageObject(obj)) {
         UnbinObjectsIntoOffset(obj);
       }
     };
@@ -2909,7 +2931,7 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst,
 }
 
 void ImageWriter::CopyAndFixupObject(Object* obj) {
-  if (IsInBootImage(obj)) {
+  if (!IsImageObject(obj)) {
     return;
   }
   size_t offset = GetImageOffset(obj);
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 33bacf8c1b..b680265ff1 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -674,7 +674,12 @@ class ImageWriter final {
   template <typename T>
   T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true of obj is inside of the boot image space. This may only return true if we are
+  // Return true if `obj` belongs to the image we're writing.
+  // For a boot image, this is true for all objects.
+  // For an app image, boot image objects and boot class path dex caches are excluded.
+  bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Return true if `obj` is inside of the boot image space. This may only return true if we are
   // compiling an app image.
   bool IsInBootImage(const void* obj) const;
 
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index d045698d07..e2a9ac2a8f 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -2808,11 +2808,9 @@ bool OatWriter::CheckOatSize(OutputStream* out, size_t file_offset, size_t relat
   return true;
 }
 
-bool OatWriter::WriteHeader(OutputStream* out, uint32_t boot_image_checksum) {
+bool OatWriter::WriteHeader(OutputStream* out) {
   CHECK(write_state_ == WriteState::kWriteHeader);
 
-  oat_header_->SetBootImageChecksum(boot_image_checksum);
-
   // Update checksum with header data.
   DCHECK_EQ(oat_header_->GetChecksum(), 0u);  // For checksum calculation.
   const uint8_t* header_begin = reinterpret_cast<const uint8_t*>(oat_header_.get());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9cd2fd04ea..cc0e83a0e5 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -198,7 +198,7 @@ class OatWriter {
   // Check the size of the written oat file.
   bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
   // Write the oat header. This finalizes the oat file.
-  bool WriteHeader(OutputStream* out, uint32_t boot_image_checksum);
+  bool WriteHeader(OutputStream* out);
 
   // Returns whether the oat file has an associated image.
   bool HasImage() const {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 5de1540839..ecf9db8867 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -240,7 +240,7 @@ class OatTest : public CommonCompilerTest {
       elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
     }
 
-    if (!oat_writer.WriteHeader(elf_writer->GetStream(), /*boot_image_checksum=*/ 42u)) {
+    if (!oat_writer.WriteHeader(elf_writer->GetStream())) {
       return false;
     }
 
@@ -396,6 +396,7 @@ TEST_F(OatTest, WriteRead) {
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
+  key_value_store.Put(OatHeader::kBootClassPathChecksumsKey, "testkey");
   bool success = WriteElf(tmp_vdex.GetFile(),
                           tmp_oat.GetFile(),
                           class_linker->GetBootClassPath(),
@@ -418,7 +419,8 @@ TEST_F(OatTest, WriteRead) {
   const OatHeader& oat_header = oat_file->GetOatHeader();
   ASSERT_TRUE(oat_header.IsValid());
   ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount());  // core
-  ASSERT_EQ(42u, oat_header.GetBootImageChecksum());
+  ASSERT_TRUE(oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey) != nullptr);
+  ASSERT_STREQ("testkey", oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey));
 
   ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   const DexFile& dex_file = *java_lang_dex_file_;
@@ -464,7 +466,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(68U, sizeof(OatHeader));
+  EXPECT_EQ(64U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
   EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
   EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index d15bbda4f4..434cb3553f 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -49,6 +49,9 @@ art_cc_binary {
         darwin: {
             enabled: false,
         },
+        windows: {
+            enabled: true,
+        },
     },
 }
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 58d12a1e04..0fcd6a5d25 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -35,7 +35,6 @@ cc_defaults {
         "base/memory_region.cc",
         "base/mem_map.cc",
         // "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
-        "base/mem_map_unix.cc",
         "base/os_linux.cc",
         "base/runtime_debug.cc",
         "base/safe_copy.cc",
@@ -50,20 +49,38 @@ cc_defaults {
     ],
     target: {
         android: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
             static_libs: [
                // ZipArchive support, the order matters here to get all symbols.
                "libziparchive",
                "libz",
            ],
+            shared_libs: [
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+                "libbase",
+            ],
            // Exclude the version script from Darwin host since it's not
            // supported by the linker there. That means ASan checks on Darwin
            // might trigger ODR violations.
            version_script: "libartbase.map",
        },
-        host: {
+        not_windows: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
            shared_libs: [
                "libziparchive",
                "libz",
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+                "libbase",
            ],
        },
        linux_glibc: {
        },
        windows: {
            version_script: "libartbase.map",
+            static_libs: [
+                "libziparchive",
+                "libz",
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+ "libbase", + ], + cflags: ["-Wno-thread-safety"], }, }, generated_sources: ["art_libartbase_operator_srcs"], cflags: ["-DBUILDING_LIBART=1"], - shared_libs: [ - "liblog", - // For ashmem. - "libcutils", - // For common macros. - "libbase", - ], // Utilities used by various ART libs and tools are linked in statically // here to avoid shared lib dependencies outside the ART APEX. No target @@ -147,6 +167,14 @@ art_cc_library { "libziparchive", ], export_shared_lib_headers: ["libbase"], + target: { + windows: { + enabled: true, + shared: { + enabled: false, + }, + }, + }, } art_cc_library { @@ -160,6 +188,14 @@ art_cc_library { "libziparchive", ], export_shared_lib_headers: ["libbase"], + target: { + windows: { + enabled: true, + shared: { + enabled: false, + }, + }, + }, } art_cc_library { @@ -177,9 +213,6 @@ art_cc_library { header_libs: [ "libnativehelper_header_only", ], - include_dirs: [ - "external/icu/icu4c/source/common", - ], } art_cc_test { diff --git a/libartbase/base/arena_allocator.cc b/libartbase/base/arena_allocator.cc index df3deba178..0e7f6cceb3 100644 --- a/libartbase/base/arena_allocator.cc +++ b/libartbase/base/arena_allocator.cc @@ -16,7 +16,6 @@ #include "arena_allocator-inl.h" -#include <sys/mman.h> #include <algorithm> #include <cstddef> @@ -25,6 +24,8 @@ #include <android-base/logging.h> +#include "mman.h" + namespace art { constexpr size_t kMemoryToolRedZoneBytes = 8; diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc index 278203da23..53afea234b 100644 --- a/libartbase/base/common_art_test.cc +++ b/libartbase/base/common_art_test.cc @@ -26,7 +26,6 @@ #include "android-base/stringprintf.h" #include "android-base/strings.h" #include "android-base/unique_fd.h" -#include <unicode/uvernum.h> #include "art_field-inl.h" #include "base/file_utils.h" @@ -329,15 +328,19 @@ static std::string GetDexFileName(const std::string& jar_prefix, bool host) { } std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() { - // Note: This must match the TEST_CORE_JARS in Android.common_path.mk + // Note: This must start with the CORE_IMG_JARS in Android.common_path.mk // because that's what we use for compiling the core.art image. + // It may contain additional modules from TEST_CORE_JARS. static const char* const kLibcoreModules[] = { + // CORE_IMG_JARS modules. "core-oj", "core-libart", "core-simple", - "conscrypt", "okhttp", "bouncycastle", + "apache-xml", + // Additional modules. + "conscrypt", }; std::vector<std::string> result; diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc index f8d6016315..9490798552 100644 --- a/libartbase/base/file_utils.cc +++ b/libartbase/base/file_utils.cc @@ -19,11 +19,13 @@ #include <inttypes.h> #include <sys/stat.h> #include <sys/types.h> +#ifndef _WIN32 #include <sys/wait.h> +#endif #include <unistd.h> // We need dladdr. -#ifndef __APPLE__ +#if !defined(__APPLE__) && !defined(_WIN32) #ifndef _GNU_SOURCE #define _GNU_SOURCE #define DEFINED_GNU_SOURCE @@ -84,6 +86,10 @@ bool ReadFileToString(const std::string& file_name, std::string* result) { } std::string GetAndroidRootSafe(std::string* error_msg) { +#ifdef _WIN32 + *error_msg = "GetAndroidRootSafe unsupported for Windows."; + return ""; +#else // Prefer ANDROID_ROOT if it's set. 
const char* android_dir = getenv("ANDROID_ROOT"); if (android_dir != nullptr) { @@ -118,6 +124,7 @@ std::string GetAndroidRootSafe(std::string* error_msg) { return ""; } return "/system"; +#endif } std::string GetAndroidRoot() { @@ -179,6 +186,15 @@ std::string GetDefaultBootImageLocation(std::string* error_msg) { void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache, bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) { +#ifdef _WIN32 + UNUSED(subdir); + UNUSED(create_if_absent); + UNUSED(dalvik_cache); + UNUSED(have_android_data); + UNUSED(dalvik_cache_exists); + UNUSED(is_global_cache); + LOG(FATAL) << "GetDalvikCache unsupported on Windows."; +#else CHECK(subdir != nullptr); std::string error_msg; const char* android_data = GetAndroidDataSafe(&error_msg); @@ -199,6 +215,7 @@ void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) && (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST)); } +#endif } std::string GetDalvikCache(const char* subdir) { @@ -262,9 +279,15 @@ std::string ReplaceFileExtension(const std::string& filename, const std::string& } bool LocationIsOnSystem(const char* path) { +#ifdef _WIN32 + UNUSED(path); + LOG(FATAL) << "LocationIsOnSystem is unsupported on Windows."; + return false; +#else UniqueCPtr<const char[]> full_path(realpath(path, nullptr)); return full_path != nullptr && android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str()); +#endif } bool LocationIsOnSystemFramework(const char* full_path) { diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc index 02e29f1d21..4de34b5707 100644 --- a/libartbase/base/malloc_arena_pool.cc +++ b/libartbase/base/malloc_arena_pool.cc @@ -16,7 +16,6 @@ #include "malloc_arena_pool.h" -#include <sys/mman.h> #include <algorithm> #include <cstddef> @@ -25,6 +24,7 @@ #include <android-base/logging.h> #include "arena_allocator-inl.h" +#include "mman.h" namespace art { diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc index 532ca28b50..28337507d8 100644 --- a/libartbase/base/mem_map.cc +++ b/libartbase/base/mem_map.cc @@ -18,8 +18,7 @@ #include <inttypes.h> #include <stdlib.h> -#include <sys/mman.h> // For the PROT_* and MAP_* constants. -#if !defined(ANDROID_OS) && !defined(__Fuchsia__) +#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32) #include <sys/resource.h> #endif @@ -39,6 +38,7 @@ #include "globals.h" #include "logging.h" // For VLOG_IS_ON. #include "memory_tool.h" +#include "mman.h" // For the PROT_* and MAP_* constants. #include "utils.h" #ifndef MAP_ANONYMOUS @@ -811,19 +811,30 @@ void MemMap::MadviseDontNeedAndZero() { if (!kMadviseZeroes) { memset(base_begin_, 0, base_size_); } +#ifdef _WIN32 + // It is benign not to madvise away the pages here. + PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows."; +#else int result = madvise(base_begin_, base_size_, MADV_DONTNEED); if (result == -1) { PLOG(WARNING) << "madvise failed"; } +#endif } } bool MemMap::Sync() { +#ifdef _WIN32 + // TODO: add FlushViewOfFile support. 
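+  // A sketch of what FlushViewOfFile-based support could look like
+  // (hypothetical; it assumes access to the backing file HANDLE, which
+  // MemMap does not retain today):
+  //   if (::FlushViewOfFile(BaseBegin(), BaseSize()) &&
+  //       ::FlushFileBuffers(backing_file_handle)) {
+  //     return true;
+  //   }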
+ PLOG(ERROR) << "MemMap::Sync unsupported on Windows."; + return false; +#else // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess // protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind // only accepts page-aligned base address, and excludes the higher-end noaccess protection // from the msync range. b/27552451. return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0; +#endif } bool MemMap::Protect(int prot) { @@ -832,10 +843,12 @@ bool MemMap::Protect(int prot) { return true; } +#ifndef _WIN32 if (mprotect(base_begin_, base_size_, prot) == 0) { prot_ = prot; return true; } +#endif PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", " << prot << ") failed"; @@ -1206,7 +1219,11 @@ void ZeroAndReleasePages(void* address, size_t length) { DCHECK_LE(page_begin, page_end); DCHECK_LE(page_end, mem_end); std::fill(mem_begin, page_begin, 0); +#ifdef _WIN32 + LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows."; +#else CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed"; +#endif std::fill(page_end, mem_end, 0); } } diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc index d1c92ce4d6..6b0e06c268 100644 --- a/libartbase/base/mem_map_fuchsia.cc +++ b/libartbase/base/mem_map_fuchsia.cc @@ -15,8 +15,8 @@ */ #include "mem_map.h" -#include <sys/mman.h> #include "logging.h" +#include "mman.h" #include <zircon/process.h> #include <zircon/syscalls.h> diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc index 074d4c2890..bf39fd1562 100644 --- a/libartbase/base/mem_map_test.cc +++ b/libartbase/base/mem_map_test.cc @@ -16,8 +16,6 @@ #include "mem_map.h" -#include <sys/mman.h> - #include <memory> #include <random> @@ -25,6 +23,7 @@ #include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS #include "logging.h" #include "memory_tool.h" +#include "mman.h" #include "unix_file/fd_file.h" namespace art { diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc index 601b049525..ac854dfd52 100644 --- a/libartbase/base/mem_map_unix.cc +++ b/libartbase/base/mem_map_unix.cc @@ -16,7 +16,7 @@ #include "mem_map.h" -#include <sys/mman.h> +#include "mman.h" namespace art { diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc index 4c86b6b0f6..abb36bcaa9 100644 --- a/libartbase/base/membarrier.cc +++ b/libartbase/base/membarrier.cc @@ -18,8 +18,10 @@ #include <errno.h> +#if !defined(_WIN32) #include <sys/syscall.h> #include <unistd.h> +#endif #include "macros.h" #if defined(__BIONIC__) diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc index 7c2040147e..780be328af 100644 --- a/libartbase/base/memfd.cc +++ b/libartbase/base/memfd.cc @@ -18,9 +18,11 @@ #include <errno.h> #include <stdio.h> +#if !defined(_WIN32) #include <sys/syscall.h> #include <sys/utsname.h> #include <unistd.h> +#endif #include "macros.h" diff --git a/libartbase/base/mman.h b/libartbase/base/mman.h new file mode 100644 index 0000000000..bd63f6506f --- /dev/null +++ b/libartbase/base/mman.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_LIBARTBASE_BASE_MMAN_H_ +#define ART_LIBARTBASE_BASE_MMAN_H_ + +#ifdef _WIN32 + +// There is no sys/mman.h in mingw. +// As these are just placeholders for the APIs, all values are stubbed out. + +#define PROT_READ 0 // 0x1 +#define PROT_WRITE 0 // 0x2 +#define PROT_EXEC 0 // 0x4 +#define PROT_NONE 0 // 0x0 + +#define MAP_SHARED 0 // 0x01 +#define MAP_PRIVATE 0 // 0x02 + +#define MAP_FAILED nullptr // ((void*) -1) +#define MAP_FIXED 0 // 0x10 +#define MAP_ANONYMOUS 0 // 0x20 + +#else + +#include <sys/mman.h> + +#endif + + +#endif // ART_LIBARTBASE_BASE_MMAN_H_ diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc index f8b31cf0d8..a00779eb48 100644 --- a/libartbase/base/os_linux.cc +++ b/libartbase/base/os_linux.cc @@ -50,7 +50,12 @@ File* OS::CreateEmptyFile(const char* name) { } File* OS::CreateEmptyFileWriteOnly(const char* name) { - return art::CreateEmptyFile(name, O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC); +#ifdef _WIN32 + int flags = O_WRONLY | O_TRUNC; +#else + int flags = O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC; +#endif + return art::CreateEmptyFile(name, flags); } File* OS::OpenFileWithFlags(const char* name, int flags, bool auto_flush) { diff --git a/libartbase/base/safe_copy.cc b/libartbase/base/safe_copy.cc index b46b921307..ad75aa7b28 100644 --- a/libartbase/base/safe_copy.cc +++ b/libartbase/base/safe_copy.cc @@ -16,8 +16,10 @@ #include "safe_copy.h" +#ifdef __linux__ #include <sys/uio.h> #include <sys/user.h> +#endif #include <unistd.h> #include <algorithm> diff --git a/libartbase/base/safe_copy_test.cc b/libartbase/base/safe_copy_test.cc index c23651f7a7..9f7d40964b 100644 --- a/libartbase/base/safe_copy_test.cc +++ b/libartbase/base/safe_copy_test.cc @@ -18,12 +18,12 @@ #include <errno.h> #include <string.h> -#include <sys/mman.h> #include <sys/user.h> #include "android-base/logging.h" #include "globals.h" #include "gtest/gtest.h" +#include "mman.h" namespace art { diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc index 2f16fb2820..b16a45aaec 100644 --- a/libartbase/base/scoped_flock.cc +++ b/libartbase/base/scoped_flock.cc @@ -35,6 +35,14 @@ using android::base::StringPrintf; /* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block, std::string* error_msg) { +#ifdef _WIN32 + // TODO: implement file locking for Windows. + UNUSED(filename); + UNUSED(flags); + UNUSED(block); + *error_msg = "flock is unsupported on Windows"; + return nullptr; +#else while (true) { // NOTE: We don't check usage here because the ScopedFlock should *never* be // responsible for flushing its underlying FD. Its only purpose should be @@ -89,10 +97,19 @@ using android::base::StringPrintf; return ScopedFlock(new LockedFile(std::move((*file.get())))); } +#endif } ScopedFlock LockedFile::DupOf(const int fd, const std::string& path, const bool read_only_mode, std::string* error_msg) { +#ifdef _WIN32 + // TODO: implement file locking for Windows. 
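+  // One possible direction (sketch only): take a Win32 advisory lock on the
+  // underlying HANDLE via LockFileEx, e.g.
+  //   OVERLAPPED overlapped = {};
+  //   ::LockFileEx(reinterpret_cast<HANDLE>(_get_osfhandle(fd)),
+  //                read_only_mode ? 0 : LOCKFILE_EXCLUSIVE_LOCK,
+  //                /*dwReserved=*/0, MAXDWORD, MAXDWORD, &overlapped);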
+ UNUSED(fd); + UNUSED(path); + UNUSED(read_only_mode); + *error_msg = "flock is unsupported on Windows."; + return nullptr; +#else // NOTE: We don't check usage here because the ScopedFlock should *never* be // responsible for flushing its underlying FD. Its only purpose should be // to acquire a lock, and the unlock / close in the corresponding @@ -112,9 +129,11 @@ ScopedFlock LockedFile::DupOf(const int fd, const std::string& path, } return locked_file; +#endif } void LockedFile::ReleaseLock() { +#ifndef _WIN32 if (this->Fd() != -1) { int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN)); if (flock_result != 0) { @@ -126,6 +145,7 @@ void LockedFile::ReleaseLock() { PLOG(WARNING) << "Unable to unlock file " << this->GetPath(); } } +#endif } } // namespace art diff --git a/libartbase/base/socket_peer_is_trusted.cc b/libartbase/base/socket_peer_is_trusted.cc index 440054eac6..3996d90b8b 100644 --- a/libartbase/base/socket_peer_is_trusted.cc +++ b/libartbase/base/socket_peer_is_trusted.cc @@ -16,8 +16,10 @@ #include "socket_peer_is_trusted.h" +#if !defined(_WIN32) #include <pwd.h> #include <sys/socket.h> +#endif #include <android-base/logging.h> diff --git a/libartbase/base/time_utils.cc b/libartbase/base/time_utils.cc index 89a1109a7e..aa6c987669 100644 --- a/libartbase/base/time_utils.cc +++ b/libartbase/base/time_utils.cc @@ -14,12 +14,14 @@ * limitations under the License. */ +#include "time_utils.h" + #include <inttypes.h> +#include <stdio.h> + #include <limits> #include <sstream> -#include "time_utils.h" - #include "android-base/stringprintf.h" #include "logging.h" @@ -30,6 +32,20 @@ namespace art { +namespace { + +#if !defined(__linux__) +int GetTimeOfDay(struct timeval* tv, struct timezone* tz) { +#ifdef _WIN32 + return mingw_gettimeofday(tv, tz); +#else + return gettimeofday(tv, tz); +#endif +} +#endif + +} // namespace + using android::base::StringPrintf; std::string PrettyDuration(uint64_t nano_duration, size_t max_fraction_digits) { @@ -117,7 +133,12 @@ std::string FormatDuration(uint64_t nano_duration, TimeUnit time_unit, std::string GetIsoDate() { time_t now = time(nullptr); tm tmbuf; +#ifdef _WIN32 + localtime_s(&tmbuf, &now); + tm* ptm = &tmbuf; +#else tm* ptm = localtime_r(&now, &tmbuf); +#endif return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d", ptm->tm_year + 1900, ptm->tm_mon+1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec); @@ -128,9 +149,9 @@ uint64_t MilliTime() { timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000); -#else // __APPLE__ +#else timeval now; - gettimeofday(&now, nullptr); + GetTimeOfDay(&now, nullptr); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000); #endif } @@ -140,9 +161,9 @@ uint64_t MicroTime() { timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000); -#else // __APPLE__ +#else timeval now; - gettimeofday(&now, nullptr); + GetTimeOfDay(&now, nullptr); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec; #endif } @@ -152,9 +173,9 @@ uint64_t NanoTime() { timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec; -#else // __APPLE__ +#else timeval now; - gettimeofday(&now, nullptr); + GetTimeOfDay(&now, nullptr); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000); #endif } @@ 
-164,7 +185,7 @@ uint64_t ThreadCpuNanoTime() { timespec now; clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec; -#else // __APPLE__ +#else UNIMPLEMENTED(WARNING); return -1; #endif @@ -176,8 +197,13 @@ uint64_t ProcessCpuNanoTime() { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now); return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec; #else - UNIMPLEMENTED(WARNING); - return -1; + // We cannot use clock_gettime() here. Return the process wall clock time + // (using art::NanoTime, which relies on gettimeofday()) as approximation of + // the process CPU time instead. + // + // Note: clock_gettime() is available from macOS 10.12 (Darwin 16), but we try + // to keep things simple here. + return NanoTime(); #endif } @@ -190,12 +216,12 @@ void NanoSleep(uint64_t ns) { void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) { if (absolute) { -#if !defined(__APPLE__) +#if defined(__linux__) clock_gettime(clock, ts); #else UNUSED(clock); timeval tv; - gettimeofday(&tv, nullptr); + GetTimeOfDay(&tv, nullptr); ts->tv_sec = tv.tv_sec; ts->tv_nsec = tv.tv_usec * 1000; #endif diff --git a/libartbase/base/time_utils.h b/libartbase/base/time_utils.h index 431d3e19ac..15805f3f63 100644 --- a/libartbase/base/time_utils.h +++ b/libartbase/base/time_utils.h @@ -18,6 +18,7 @@ #define ART_LIBARTBASE_BASE_TIME_UTILS_H_ #include <stdint.h> +#include <stdio.h> // Needed for correct _WIN32 build. #include <time.h> #include <string> diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc index 76894c6a8a..8831b9c6b7 100644 --- a/libartbase/base/unix_file/fd_file.cc +++ b/libartbase/base/unix_file/fd_file.cc @@ -25,8 +25,13 @@ #include <android/fdsan.h> #endif +#if defined(_WIN32) +#include <windows.h> +#endif + #include <limits> +#include <android-base/file.h> #include <android-base/logging.h> // Includes needed for FdFile::Copy(). @@ -40,6 +45,96 @@ namespace unix_file { +#if defined(_WIN32) +// RAII wrapper for an event object to allow asynchronous I/O to correctly signal completion. +class ScopedEvent { + public: + ScopedEvent() { + handle_ = CreateEventA(/*lpEventAttributes*/ nullptr, + /*bManualReset*/ true, + /*bInitialState*/ false, + /*lpName*/ nullptr); + } + + ~ScopedEvent() { CloseHandle(handle_); } + + HANDLE handle() { return handle_; } + + private: + HANDLE handle_; + DISALLOW_COPY_AND_ASSIGN(ScopedEvent); +}; + +// Windows implementation of pread/pwrite. Note that these DO move the file descriptor's read/write +// position, but do so atomically. +static ssize_t pread(int fd, void* data, size_t byte_count, off64_t offset) { + ScopedEvent event; + if (event.handle() == INVALID_HANDLE_VALUE) { + PLOG(ERROR) << "Could not create event handle."; + errno = EIO; + return static_cast<ssize_t>(-1); + } + + auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd)); + DWORD bytes_read = 0; + OVERLAPPED overlapped = {}; + overlapped.Offset = static_cast<DWORD>(offset); + overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32); + overlapped.hEvent = event.handle(); + if (!ReadFile(handle, data, static_cast<DWORD>(byte_count), &bytes_read, &overlapped)) { + // If the read failed with other than ERROR_IO_PENDING, return an error. + // ERROR_IO_PENDING signals the write was begun asynchronously. + // Block until the asynchronous operation has finished or fails, and return + // result accordingly. 
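+    // (GetOverlappedResult is called with bWait=TRUE below, so it blocks
+    // until the pending I/O completes or fails.)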
+ if (::GetLastError() != ERROR_IO_PENDING || + !::GetOverlappedResult(handle, &overlapped, &bytes_read, TRUE)) { + // In case someone tries to read errno (since this is masquerading as a POSIX call). + errno = EIO; + return static_cast<ssize_t>(-1); + } + } + return static_cast<ssize_t>(bytes_read); +} + +static ssize_t pwrite(int fd, const void* buf, size_t count, off64_t offset) { + ScopedEvent event; + if (event.handle() == INVALID_HANDLE_VALUE) { + PLOG(ERROR) << "Could not create event handle."; + errno = EIO; + return static_cast<ssize_t>(-1); + } + + auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd)); + DWORD bytes_written = 0; + OVERLAPPED overlapped = {}; + overlapped.Offset = static_cast<DWORD>(offset); + overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32); + overlapped.hEvent = event.handle(); + if (!::WriteFile(handle, buf, count, &bytes_written, &overlapped)) { + // If the write failed with other than ERROR_IO_PENDING, return an error. + // ERROR_IO_PENDING signals the write was begun asynchronously. + // Block until the asynchronous operation has finished or fails, and return + // result accordingly. + if (::GetLastError() != ERROR_IO_PENDING || + !::GetOverlappedResult(handle, &overlapped, &bytes_written, TRUE)) { + // In case someone tries to read errno (since this is masquerading as a POSIX call). + errno = EIO; + return static_cast<ssize_t>(-1); + } + } + return static_cast<ssize_t>(bytes_written); +} + +static int fsync(int fd) { + auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd)); + if (handle != INVALID_HANDLE_VALUE && ::FlushFileBuffers(handle)) { + return 0; + } + errno = EINVAL; + return -1; +} +#endif + #if defined(__BIONIC__) static uint64_t GetFdFileOwnerTag(FdFile* fd_file) { return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE, diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc index 0f172fdcfb..58d8575ea8 100644 --- a/libartbase/base/utils.cc +++ b/libartbase/base/utils.cc @@ -19,9 +19,7 @@ #include <inttypes.h> #include <pthread.h> #include <sys/stat.h> -#include <sys/syscall.h> #include <sys/types.h> -#include <sys/wait.h> #include <unistd.h> #include <fstream> @@ -47,6 +45,16 @@ #if defined(__linux__) #include <linux/unistd.h> +#include <sys/syscall.h> +#endif + +#if defined(_WIN32) +#include <windows.h> +// This include needs to be here due to our coding conventions. Unfortunately +// it drags in the definition of the dread ERROR macro. +#ifdef ERROR +#undef ERROR +#endif #endif namespace art { @@ -61,6 +69,8 @@ pid_t GetTid() { return owner; #elif defined(__BIONIC__) return gettid(); +#elif defined(_WIN32) + return static_cast<pid_t>(::GetCurrentThreadId()); #else return syscall(__NR_gettid); #endif @@ -68,12 +78,17 @@ pid_t GetTid() { std::string GetThreadName(pid_t tid) { std::string result; +#ifdef _WIN32 + UNUSED(tid); + result = "<unknown>"; +#else // TODO: make this less Linux-specific. if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) { result.resize(result.size() - 1); // Lose the trailing '\n'. } else { result = "<unknown>"; } +#endif return result; } @@ -137,7 +152,7 @@ void SetThreadName(const char* thread_name) { } else { s = thread_name + len - 15; } -#if defined(__linux__) +#if defined(__linux__) || defined(_WIN32) // pthread_setname_np fails rather than truncating long strings. char buf[16]; // MAX_TASK_COMM_LEN=16 is hard-coded in the kernel. 
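  // Copy at most sizeof(buf)-1 characters so buf keeps room for the
  // terminating NUL.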
strncpy(buf, s, sizeof(buf)-1); @@ -153,6 +168,11 @@ void SetThreadName(const char* thread_name) { void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) { *utime = *stime = *task_cpu = 0; +#ifdef _WIN32 + // TODO: implement this. + UNUSED(tid); + *state = 'S'; +#else std::string stats; // TODO: make this less Linux-specific. if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) { @@ -167,6 +187,7 @@ void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) *utime = strtoull(fields[11].c_str(), nullptr, 10); *stime = strtoull(fields[12].c_str(), nullptr, 10); *task_cpu = strtoull(fields[36].c_str(), nullptr, 10); +#endif } static void ParseStringAfterChar(const std::string& s, diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc index a7f4b28c84..5056edc607 100644 --- a/libartbase/base/zip_archive.cc +++ b/libartbase/base/zip_archive.cc @@ -18,7 +18,6 @@ #include <fcntl.h> #include <stdio.h> -#include <sys/mman.h> // For the PROT_* and MAP_* constants. #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> @@ -27,6 +26,7 @@ #include "android-base/stringprintf.h" #include "ziparchive/zip_archive.h" +#include "base/mman.h" #include "bit_utils.h" #include "unix_file/fd_file.h" @@ -203,6 +203,11 @@ MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename, } static void SetCloseOnExec(int fd) { +#ifdef _WIN32 + // Exec is not supported on Windows. + UNUSED(fd); + PLOG(ERROR) << "SetCloseOnExec is not supported on Windows."; +#else // This dance is more portable than Linux's O_CLOEXEC open(2) flag. int flags = fcntl(fd, F_GETFD); if (flags == -1) { @@ -214,6 +219,7 @@ static void SetCloseOnExec(int fd) { PLOG(WARNING) << "fcntl(" << fd << ", F_SETFD, " << flags << ") failed"; return; } +#endif } ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) { diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp index 7f25f02dea..a4f7e25128 100644 --- a/libdexfile/Android.bp +++ b/libdexfile/Android.bp @@ -43,30 +43,58 @@ cc_defaults { "libziparchive", "libz", ], + shared_libs: [ + // For MemMap. + "libartbase", + "liblog", + // For atrace. + "libcutils", + // For common macros. + "libbase", + ], + export_shared_lib_headers: [ + "libartbase", + "libbase", + ], }, - host: { + not_windows: { shared_libs: [ "libziparchive", "libz", + // For MemMap. + "libartbase", + "liblog", + // For atrace. + "libcutils", + // For common macros. + "libbase", + ], + export_shared_lib_headers: [ + "libartbase", + "libbase", + ], + }, + windows: { + static_libs: [ + "libziparchive", + "libz", + // For MemMap. + "libartbase", + "liblog", + // For atrace. + "libcutils", + // For common macros. + "libbase", ], + export_static_lib_headers: [ + "libartbase", + "libbase", + ], + cflags: ["-Wno-thread-safety"], }, }, generated_sources: ["dexfile_operator_srcs"], - shared_libs: [ - // For MemMap. - "libartbase", - "liblog", - // For atrace. - "libcutils", - // For common macros. 
- "libbase", - "libz", - ], export_include_dirs: ["."], - export_shared_lib_headers: [ - "libartbase", - "libbase", - ], } cc_defaults { @@ -121,6 +149,14 @@ art_cc_library { strip: { keep_symbols: true, }, + target: { + windows: { + enabled: true, + shared: { + enabled: false, + }, + }, + }, } art_cc_library { @@ -129,6 +165,79 @@ art_cc_library { "art_debug_defaults", "libdexfile_defaults", ], + target: { + windows: { + enabled: true, + shared: { + enabled: false, + }, + }, + }, +} + +cc_library_headers { + name: "libdexfile_external_headers", + host_supported: true, + header_libs: ["libbase_headers"], + export_header_lib_headers: ["libbase_headers"], + export_include_dirs: ["external/include"], + + target: { + windows: { + enabled: true, + }, + }, +} + +cc_library { + name: "libdexfile_external", + host_supported: true, + srcs: [ + "external/dex_file_ext.cc", + ], + header_libs: ["libdexfile_external_headers"], + shared_libs: [ + "libbase", + "libdexfile", + ], + + // TODO(b/120670568): Enable this when linking bug is fixed. + // stubs: { + // symbol_file: "external/libdexfile_external.map.txt", + // versions: ["1"], + // }, + + // Hide symbols using version scripts for targets that support it, i.e. all + // but Darwin. + // TODO(b/120670568): Clean this up when stubs above is enabled. + target: { + android: { + version_script: "external/libdexfile_external.map.txt", + }, + linux_bionic: { + version_script: "external/libdexfile_external.map.txt", + }, + linux_glibc: { + version_script: "external/libdexfile_external.map.txt", + }, + windows: { + version_script: "external/libdexfile_external.map.txt", + }, + }, +} + +// Support library with a C++ API for accessing the libdexfile API for external +// (non-ART) users. They should link to their own instance of this (either +// statically or through linker namespaces). +cc_library { + name: "libdexfile_support", + host_supported: true, + srcs: [ + "external/dex_file_supp.cc", + ], + header_libs: ["libdexfile_external_headers"], + shared_libs: ["libdexfile_external"], + export_header_lib_headers: ["libdexfile_external_headers"], } art_cc_test { diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc index 20a519bf99..57e838fe5d 100644 --- a/libdexfile/dex/art_dex_file_loader.cc +++ b/libdexfile/dex/art_dex_file_loader.cc @@ -16,7 +16,6 @@ #include "art_dex_file_loader.h" -#include <sys/mman.h> // For the PROT_* and MAP_* constants. #include <sys/stat.h> #include "android-base/stringprintf.h" @@ -24,6 +23,7 @@ #include "base/file_magic.h" #include "base/file_utils.h" #include "base/mem_map.h" +#include "base/mman.h" // For the PROT_* and MAP_* constants. 
#include "base/stl_util.h" #include "base/systrace.h" #include "base/unix_file/fd_file.h" @@ -156,14 +156,16 @@ bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename, return false; } -std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg) const { +std::unique_ptr<const DexFile> ArtDexFileLoader::Open( + const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg, + std::unique_ptr<DexFileContainer> container) const { ScopedTrace trace(std::string("Open dex file from RAM ") + location); return OpenCommon(base, size, @@ -175,7 +177,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base, verify, verify_checksum, error_msg, - /*container=*/ nullptr, + std::move(container), /*verify_result=*/ nullptr); } diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h index 40d4673625..d41eac5329 100644 --- a/libdexfile/dex/art_dex_file_loader.h +++ b/libdexfile/dex/art_dex_file_loader.h @@ -54,14 +54,16 @@ class ArtDexFileLoader : public DexFileLoader { bool* only_contains_uncompressed_dex = nullptr) const override; // Opens .dex file, backed by existing memory - std::unique_ptr<const DexFile> Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg) const override; + std::unique_ptr<const DexFile> Open( + const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg, + std::unique_ptr<DexFileContainer> container = nullptr) const override; // Opens .dex file that has been memory-mapped by the caller. std::unique_ptr<const DexFile> Open(const std::string& location, diff --git a/libdexfile/dex/dex_file_layout.cc b/libdexfile/dex/dex_file_layout.cc index 75a31112bb..929025a253 100644 --- a/libdexfile/dex/dex_file_layout.cc +++ b/libdexfile/dex/dex_file_layout.cc @@ -16,9 +16,9 @@ #include "dex_file_layout.h" -#include <sys/mman.h> #include "base/bit_utils.h" +#include "base/mman.h" #include "dex_file.h" namespace art { @@ -26,6 +26,12 @@ namespace art { int DexLayoutSection::MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice) { +#ifdef _WIN32 + UNUSED(begin); + UNUSED(end); + UNUSED(advice); + PLOG(WARNING) << "madvise is unsupported on Windows."; +#else DCHECK_LE(begin, end); begin = AlignUp(begin, kPageSize); end = AlignDown(end, kPageSize); @@ -37,6 +43,7 @@ int DexLayoutSection::MadviseLargestPageAlignedRegion(const uint8_t* begin, } return result; } +#endif return 0; } @@ -50,6 +57,11 @@ void DexLayoutSection::Subsection::Madvise(const DexFile* dex_file, int advice) } void DexLayoutSections::Madvise(const DexFile* dex_file, MadviseState state) const { +#ifdef _WIN32 + UNUSED(dex_file); + UNUSED(state); + PLOG(WARNING) << "madvise is unsupported on Windows."; +#else // The dex file is already defaulted to random access everywhere. 
for (const DexLayoutSection& section : sections_) { switch (state) { @@ -79,6 +91,7 @@ void DexLayoutSections::Madvise(const DexFile* dex_file, MadviseState state) con } } } +#endif } std::ostream& operator<<(std::ostream& os, const DexLayoutSection& section) { diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc index 3667c8c289..a719d4176b 100644 --- a/libdexfile/dex/dex_file_loader.cc +++ b/libdexfile/dex/dex_file_loader.cc @@ -187,12 +187,18 @@ std::string DexFileLoader::GetDexCanonicalLocation(const char* dex_location) { std::string base_location = GetBaseLocation(dex_location); const char* suffix = dex_location + base_location.size(); DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator); +#ifdef _WIN32 + // Warning: No symbolic link processing here. + PLOG(WARNING) << "realpath is unsupported on Windows."; +#else // Warning: Bionic implementation of realpath() allocates > 12KB on the stack. // Do not run this code on a small stack, e.g. in signal handler. UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr)); if (path != nullptr && path.get() != base_location) { return std::string(path.get()) + suffix; - } else if (suffix[0] == 0) { + } +#endif + if (suffix[0] == 0) { return base_location; } else { return dex_location; @@ -212,14 +218,16 @@ bool DexFileLoader::GetMultiDexChecksums( return false; } -std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg) const { +std::unique_ptr<const DexFile> DexFileLoader::Open( + const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg, + std::unique_ptr<DexFileContainer> container) const { return OpenCommon(base, size, /*data_base=*/ nullptr, @@ -230,7 +238,7 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base, verify, verify_checksum, error_msg, - /*container=*/ nullptr, + std::move(container), /*verify_result=*/ nullptr); } diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h index 8fc836e0f5..49e177fce6 100644 --- a/libdexfile/dex/dex_file_loader.h +++ b/libdexfile/dex/dex_file_loader.h @@ -121,14 +121,16 @@ class DexFileLoader { bool* zip_file_only_contains_uncompress_dex = nullptr) const; // Opens .dex file, backed by existing memory - virtual std::unique_ptr<const DexFile> Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg) const; + virtual std::unique_ptr<const DexFile> Open( + const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg, + std::unique_ptr<DexFileContainer> container = nullptr) const; // Open a dex file with a separate data section. 
virtual std::unique_ptr<const DexFile> OpenWithDataSection( diff --git a/libdexfile/external/dex_file_ext.cc b/libdexfile/external/dex_file_ext.cc new file mode 100644 index 0000000000..5c353b5ad8 --- /dev/null +++ b/libdexfile/external/dex_file_ext.cc @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <inttypes.h> +#include <stdint.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> + +#include <cerrno> +#include <cstring> +#include <map> +#include <memory> +#include <string> +#include <utility> +#include <vector> + +#include <android-base/logging.h> +#include <android-base/macros.h> +#include <android-base/mapped_file.h> +#include <android-base/stringprintf.h> + +#include <dex/class_accessor-inl.h> +#include <dex/code_item_accessors-inl.h> +#include <dex/dex_file-inl.h> +#include <dex/dex_file_loader.h> + +#include "art_api/ext_dex_file.h" + +extern "C" class ExtDexFileString { + public: + const std::string str_; +}; + +namespace art { +namespace { + +const ExtDexFileString empty_string{""}; + +struct MethodCacheEntry { + int32_t offset; // Offset relative to the start of the dex file header. + int32_t len; + int32_t index; // Method index. +}; + +class MappedFileContainer : public DexFileContainer { + public: + explicit MappedFileContainer(std::unique_ptr<android::base::MappedFile>&& map) + : map_(std::move(map)) {} + ~MappedFileContainer() override {} + int GetPermissions() override { return 0; } + bool IsReadOnly() override { return true; } + bool EnableWrite() override { return false; } + bool DisableWrite() override { return false; } + + private: + std::unique_ptr<android::base::MappedFile> map_; + DISALLOW_COPY_AND_ASSIGN(MappedFileContainer); +}; + +} // namespace +} // namespace art + +extern "C" { + +const ExtDexFileString* ExtDexFileMakeString(const char* str) { + if (str[0] == '\0') { + return &art::empty_string; + } + return new ExtDexFileString{str}; +} + +const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size) { + DCHECK(ext_string != nullptr); + *size = ext_string->str_.size(); + return ext_string->str_.data(); +} + +void ExtDexFileFreeString(const ExtDexFileString* ext_string) { + DCHECK(ext_string != nullptr); + if (ext_string != &art::empty_string) { + delete (ext_string); + } +} + +// Wraps DexFile to add the caching needed by the external interface. This is +// what gets passed over as ExtDexFile*. +class ExtDexFile { + // Method cache for GetMethodInfoForOffset. This is populated as we iterate + // sequentially through the class defs. MethodCacheEntry.name is only set for + // methods returned by GetMethodInfoForOffset. + std::map<int32_t, art::MethodCacheEntry> method_cache_; + + // Index of first class def for which method_cache_ isn't complete. 
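+  // Class defs below this index already have all of their code items in
+  // method_cache_, so a cache miss only needs to resume scanning from here.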
+ uint32_t class_def_index_ = 0; + + public: + std::unique_ptr<const art::DexFile> dex_file_; + explicit ExtDexFile(std::unique_ptr<const art::DexFile>&& dex_file) + : dex_file_(std::move(dex_file)) {} + + art::MethodCacheEntry* GetMethodCacheEntryForOffset(int64_t dex_offset) { + // First look in the method cache. + auto it = method_cache_.upper_bound(dex_offset); + if (it != method_cache_.end() && dex_offset >= it->second.offset) { + return &it->second; + } + + for (; class_def_index_ < dex_file_->NumClassDefs(); class_def_index_++) { + art::ClassAccessor accessor(*dex_file_, class_def_index_); + + for (const art::ClassAccessor::Method& method : accessor.GetMethods()) { + art::CodeItemInstructionAccessor code = method.GetInstructions(); + if (!code.HasCodeItem()) { + continue; + } + + int32_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file_->Begin(); + int32_t len = code.InsnsSizeInBytes(); + int32_t index = method.GetIndex(); + auto res = method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index}); + if (offset <= dex_offset && dex_offset < offset + len) { + return &res.first->second; + } + } + } + + return nullptr; + } +}; + +int ExtDexFileOpenFromMemory(const void* addr, + /*inout*/ size_t* size, + const char* location, + /*out*/ const ExtDexFileString** ext_error_msg, + /*out*/ ExtDexFile** ext_dex_file) { + if (*size < sizeof(art::DexFile::Header)) { + *size = sizeof(art::DexFile::Header); + *ext_error_msg = nullptr; + return false; + } + + const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(addr); + uint32_t file_size = header->file_size_; + if (art::CompactDexFile::IsMagicValid(header->magic_)) { + // Compact dex files store the data section separately so that it can be shared. + // Therefore we need to extend the read memory range to include it. + // TODO: This might be wasteful as we might read data in between as well. + // In practice, this should be fine, as such sharing only happens on disk. 
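+    // Use an overflow-checked addition: a corrupt header could otherwise
+    // make data_off_ + data_size_ wrap around and defeat the size check.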
+ uint32_t computed_file_size; + if (__builtin_add_overflow(header->data_off_, header->data_size_, &computed_file_size)) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("Corrupt CompactDexFile header in '%s'", location)}; + return false; + } + if (computed_file_size > file_size) { + file_size = computed_file_size; + } + } else if (!art::StandardDexFile::IsMagicValid(header->magic_)) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("Unrecognized dex file header in '%s'", location)}; + return false; + } + + if (*size < file_size) { + *size = file_size; + *ext_error_msg = nullptr; + return false; + } + + std::string loc_str(location); + art::DexFileLoader loader; + std::string error_msg; + std::unique_ptr<const art::DexFile> dex_file = loader.Open(static_cast<const uint8_t*>(addr), + *size, + loc_str, + header->checksum_, + /*oat_dex_file=*/nullptr, + /*verify=*/false, + /*verify_checksum=*/false, + &error_msg); + if (dex_file == nullptr) { + *ext_error_msg = new ExtDexFileString{std::move(error_msg)}; + return false; + } + + *ext_dex_file = new ExtDexFile(std::move(dex_file)); + return true; +} + +int ExtDexFileOpenFromFd(int fd, + off_t offset, + const char* location, + /*out*/ const ExtDexFileString** ext_error_msg, + /*out*/ ExtDexFile** ext_dex_file) { + size_t length; + { + struct stat sbuf; + std::memset(&sbuf, 0, sizeof(sbuf)); + if (fstat(fd, &sbuf) == -1) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("fstat '%s' failed: %s", location, std::strerror(errno))}; + return false; + } + if (S_ISDIR(sbuf.st_mode)) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("Attempt to mmap directory '%s'", location)}; + return false; + } + length = sbuf.st_size; + } + + if (length < offset + sizeof(art::DexFile::Header)) { + *ext_error_msg = new ExtDexFileString{android::base::StringPrintf( + "Offset %" PRId64 " too large for '%s' of size %zu", int64_t{offset}, location, length)}; + return false; + } + + // Cannot use MemMap in libartbase here, because it pulls in dlopen which we + // can't have when being compiled statically. 
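+  // android::base::MappedFile wraps the raw mmap and unmaps it in its
+  // destructor; MappedFileContainer below ties that lifetime to the
+  // returned dex file.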
+ std::unique_ptr<android::base::MappedFile> map = + android::base::MappedFile::FromFd(fd, offset, length, PROT_READ); + if (map == nullptr) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("mmap '%s' failed: %s", location, std::strerror(errno))}; + return false; + } + + const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(map->data()); + uint32_t file_size; + if (__builtin_add_overflow(offset, header->file_size_, &file_size)) { + *ext_error_msg = + new ExtDexFileString{android::base::StringPrintf("Corrupt header in '%s'", location)}; + return false; + } + if (length < file_size) { + *ext_error_msg = new ExtDexFileString{ + android::base::StringPrintf("Dex file '%s' too short: expected %" PRIu32 ", got %" PRIu64, + location, + file_size, + uint64_t{length})}; + return false; + } + + void* addr = map->data(); + size_t size = map->size(); + auto container = std::make_unique<art::MappedFileContainer>(std::move(map)); + + std::string loc_str(location); + std::string error_msg; + art::DexFileLoader loader; + std::unique_ptr<const art::DexFile> dex_file = loader.Open(reinterpret_cast<const uint8_t*>(addr), + size, + loc_str, + header->checksum_, + /*oat_dex_file=*/nullptr, + /*verify=*/false, + /*verify_checksum=*/false, + &error_msg, + std::move(container)); + if (dex_file == nullptr) { + *ext_error_msg = new ExtDexFileString{std::move(error_msg)}; + return false; + } + *ext_dex_file = new ExtDexFile(std::move(dex_file)); + return true; +} + +int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file, + int64_t dex_offset, + /*out*/ ExtDexFileMethodInfo* method_info) { + if (!ext_dex_file->dex_file_->IsInDataSection(ext_dex_file->dex_file_->Begin() + dex_offset)) { + return false; // The DEX offset is not within the bytecode of this dex file. + } + + if (ext_dex_file->dex_file_->IsCompactDexFile()) { + // The data section of compact dex files might be shared. + // Check the subrange unique to this compact dex. + const art::CompactDexFile::Header& cdex_header = + ext_dex_file->dex_file_->AsCompactDexFile()->GetHeader(); + uint32_t begin = cdex_header.data_off_ + cdex_header.OwnedDataBegin(); + uint32_t end = cdex_header.data_off_ + cdex_header.OwnedDataEnd(); + if (dex_offset < begin || dex_offset >= end) { + return false; // The DEX offset is not within the bytecode of this dex file. 
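+      // (OwnedDataBegin/OwnedDataEnd bound the bytes that belong to this
+      // particular dex file within the potentially shared data section.)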
+ } + } + + art::MethodCacheEntry* entry = ext_dex_file->GetMethodCacheEntryForOffset(dex_offset); + if (entry != nullptr) { + method_info->offset = entry->offset; + method_info->len = entry->len; + method_info->name = + new ExtDexFileString{ext_dex_file->dex_file_->PrettyMethod(entry->index, false)}; + return true; + } + + return false; +} + +void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file, + int with_signature, + ExtDexFileMethodInfoCallback* method_info_cb, + void* user_data) { + for (art::ClassAccessor accessor : ext_dex_file->dex_file_->GetClasses()) { + for (const art::ClassAccessor::Method& method : accessor.GetMethods()) { + art::CodeItemInstructionAccessor code = method.GetInstructions(); + if (!code.HasCodeItem()) { + continue; + } + + ExtDexFileMethodInfo method_info; + method_info.offset = static_cast<int32_t>(reinterpret_cast<const uint8_t*>(code.Insns()) - + ext_dex_file->dex_file_->Begin()); + method_info.len = code.InsnsSizeInBytes(); + method_info.name = new ExtDexFileString{ + ext_dex_file->dex_file_->PrettyMethod(method.GetIndex(), with_signature)}; + method_info_cb(&method_info, user_data); + } + } +} + +void ExtDexFileFree(ExtDexFile* ext_dex_file) { delete (ext_dex_file); } + +} // extern "C" diff --git a/libdexfile/external/dex_file_supp.cc b/libdexfile/external/dex_file_supp.cc new file mode 100644 index 0000000000..6514c8abf3 --- /dev/null +++ b/libdexfile/external/dex_file_supp.cc @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "art_api/ext_dex_file.h" + +namespace art_api { +namespace dex { + +DexFile::~DexFile() { ExtDexFileFree(ext_dex_file_); } + +MethodInfo DexFile::AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info) { + return {ext_method_info.offset, ext_method_info.len, DexString(ext_method_info.name)}; +} + +void DexFile::AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* ctx) { + auto vect = static_cast<MethodInfoVector*>(ctx); + vect->emplace_back(AbsorbMethodInfo(*ext_method_info)); +} + +} // namespace dex +} // namespace art_api diff --git a/libdexfile/external/include/art_api/ext_dex_file.h b/libdexfile/external/include/art_api/ext_dex_file.h new file mode 100644 index 0000000000..5f64ab1f9e --- /dev/null +++ b/libdexfile/external/include/art_api/ext_dex_file.h @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_ +#define ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_ + +// Dex file external API + +#include <sys/types.h> + +#include <cstring> +#include <memory> +#include <string> +#include <string_view> +#include <vector> + +#include <android-base/macros.h> + +extern "C" { + +// This is the stable C ABI that backs art_api::dex below. Structs and functions +// may only be added here. +// TODO(b/120978655): Move this to a separate pure C header. +// +// Clients should use the C++ wrappers in art_api::dex instead. + +// Opaque wrapper for an std::string allocated in libdexfile which must be freed +// using ExtDexFileFreeString. +class ExtDexFileString; + +// Returns an ExtDexFileString initialized to the given string. +const ExtDexFileString* ExtDexFileMakeString(const char* str); + +// Returns a pointer to the underlying null-terminated character array and its +// size for the given ExtDexFileString. +const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size); + +// Frees an ExtDexFileString. +void ExtDexFileFreeString(const ExtDexFileString* ext_string); + +struct ExtDexFileMethodInfo { + int32_t offset; + int32_t len; + const ExtDexFileString* name; +}; + +class ExtDexFile; + +// See art_api::dex::DexFile::OpenFromMemory. Returns true on success. +int ExtDexFileOpenFromMemory(const void* addr, + /*inout*/ size_t* size, + const char* location, + /*out*/ const ExtDexFileString** error_msg, + /*out*/ ExtDexFile** ext_dex_file); + +// See art_api::dex::DexFile::OpenFromFd. Returns true on success. +int ExtDexFileOpenFromFd(int fd, + off_t offset, + const char* location, + /*out*/ const ExtDexFileString** error_msg, + /*out*/ ExtDexFile** ext_dex_file); + +// See art_api::dex::DexFile::GetMethodInfoForOffset. Returns true on success. +int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file, + int64_t dex_offset, + /*out*/ ExtDexFileMethodInfo* method_info); + +typedef void ExtDexFileMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, + void* user_data); + +// See art_api::dex::DexFile::GetAllMethodInfos. +void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file, + int with_signature, + ExtDexFileMethodInfoCallback* method_info_cb, + void* user_data); + +// Frees an ExtDexFile. +void ExtDexFileFree(ExtDexFile* ext_dex_file); + +} // extern "C" + +namespace art_api { +namespace dex { + +// Minimal std::string look-alike for a string returned from libdexfile. 
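+// Example use (illustrative sketch; assumes `dex_file` is a valid
+// std::unique_ptr<DexFile> obtained from OpenFromFd or OpenFromMemory):
+//   MethodInfo info = dex_file->GetMethodInfoForOffset(dex_offset);
+//   std::string name(std::string_view(info.name));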
+class DexString final { + public: + DexString(DexString&& dex_str) { ReplaceExtString(std::move(dex_str)); } + explicit DexString(const char* str = "") : ext_string_(ExtDexFileMakeString(str)) {} + ~DexString() { ExtDexFileFreeString(ext_string_); } + + DexString& operator=(DexString&& dex_str) { + ReplaceExtString(std::move(dex_str)); + return *this; + } + + const char* data() const { + size_t ignored; + return ExtDexFileGetString(ext_string_, &ignored); + } + const char* c_str() const { return data(); } + + size_t size() const { + size_t len; + (void)ExtDexFileGetString(ext_string_, &len); + return len; + } + size_t length() const { return size(); } + + operator std::string_view() const { + size_t len; + const char* chars = ExtDexFileGetString(ext_string_, &len); + return std::string_view(chars, len); + } + + private: + friend class DexFile; + friend bool operator==(const DexString&, const DexString&); + explicit DexString(const ExtDexFileString* ext_string) : ext_string_(ext_string) {} + const ExtDexFileString* ext_string_; // Owned instance. Never nullptr. + + void ReplaceExtString(DexString&& dex_str) { + ext_string_ = dex_str.ext_string_; + dex_str.ext_string_ = ExtDexFileMakeString(""); + } + + DISALLOW_COPY_AND_ASSIGN(DexString); +}; + +inline bool operator==(const DexString& s1, const DexString& s2) { + size_t l1, l2; + const char* str1 = ExtDexFileGetString(s1.ext_string_, &l1); + const char* str2 = ExtDexFileGetString(s2.ext_string_, &l2); + // Use memcmp to avoid assumption about absence of null characters in the strings. + return l1 == l2 && !std::memcmp(str1, str2, l1); +} + +struct MethodInfo { + int32_t offset; // Code offset relative to the start of the dex file header + int32_t len; // Code length + DexString name; +}; + +inline bool operator==(const MethodInfo& s1, const MethodInfo& s2) { + return s1.offset == s2.offset && s1.len == s2.len && s1.name == s2.name; +} + +// External stable API to access ordinary dex files and CompactDex. This wraps +// the stable C ABI and handles instance ownership. Thread-compatible but not +// thread-safe. +class DexFile { + public: + DexFile(DexFile&& dex_file) { + ext_dex_file_ = dex_file.ext_dex_file_; + dex_file.ext_dex_file_ = nullptr; + } + virtual ~DexFile(); + + // Interprets a chunk of memory as a dex file. As long as *size is too small, + // returns nullptr, sets *size to a new size to try again with, and sets + // *error_msg to "". That might happen repeatedly. Also returns nullptr + // on error in which case *error_msg is set to a nonempty string. + // + // location is a string that describes the dex file, and is preferably its + // path. It is mostly used to make error messages better, and may be "". + // + // The caller must retain the memory. + static std::unique_ptr<DexFile> OpenFromMemory(const void* addr, + size_t* size, + const std::string& location, + /*out*/ std::string* error_msg) { + ExtDexFile* ext_dex_file; + const ExtDexFileString* ext_error_msg = nullptr; + if (ExtDexFileOpenFromMemory(addr, size, location.c_str(), &ext_error_msg, &ext_dex_file)) { + return std::unique_ptr<DexFile>(new DexFile(ext_dex_file)); + } + *error_msg = (ext_error_msg == nullptr) ? "" : std::string(DexString(ext_error_msg)); + return nullptr; + } + + // mmaps the given file offset in the open fd and reads a dexfile from there. + // Returns nullptr on error in which case *error_msg is set. + // + // location is a string that describes the dex file, and is preferably its + // path. 
It is mostly used to make error messages better, and may be "". + static std::unique_ptr<DexFile> OpenFromFd(int fd, + off_t offset, + const std::string& location, + /*out*/ std::string* error_msg) { + ExtDexFile* ext_dex_file; + const ExtDexFileString* ext_error_msg = nullptr; + if (ExtDexFileOpenFromFd(fd, offset, location.c_str(), &ext_error_msg, &ext_dex_file)) { + return std::unique_ptr<DexFile>(new DexFile(ext_dex_file)); + } + *error_msg = std::string(DexString(ext_error_msg)); + return nullptr; + } + + // Given an offset relative to the start of the dex file header, if there is a + // method whose instruction range includes that offset then returns info about + // it, otherwise returns a struct with offset == 0. + MethodInfo GetMethodInfoForOffset(int64_t dex_offset) { + ExtDexFileMethodInfo ext_method_info; + if (ExtDexFileGetMethodInfoForOffset(ext_dex_file_, dex_offset, &ext_method_info)) { + return AbsorbMethodInfo(ext_method_info); + } + return {/*offset=*/0, /*len=*/0, /*name=*/DexString()}; + } + + // Returns info structs about all methods in the dex file. MethodInfo.name + // receives the full function signature if with_signature is set, otherwise it + // gets the class and method name only. + std::vector<MethodInfo> GetAllMethodInfos(bool with_signature = true) { + MethodInfoVector res; + ExtDexFileGetAllMethodInfos( + ext_dex_file_, with_signature, AddMethodInfoCallback, static_cast<void*>(&res)); + return res; + } + + private: + explicit DexFile(ExtDexFile* ext_dex_file) : ext_dex_file_(ext_dex_file) {} + ExtDexFile* ext_dex_file_; // Owned instance. nullptr only in moved-from zombies. + + typedef std::vector<MethodInfo> MethodInfoVector; + + static MethodInfo AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info); + static void AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* user_data); + + DISALLOW_COPY_AND_ASSIGN(DexFile); +}; + +} // namespace dex +} // namespace art_api + +#endif // ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_ diff --git a/libdexfile/external/libdexfile_external.map.txt b/libdexfile/external/libdexfile_external.map.txt new file mode 100644 index 0000000000..450b633d32 --- /dev/null +++ b/libdexfile/external/libdexfile_external.map.txt @@ -0,0 +1,13 @@ +LIBDEXFILE_EXTERNAL_1 { + global: + ExtDexFileFree; + ExtDexFileFreeString; + ExtDexFileGetAllMethodInfos; + ExtDexFileGetMethodInfoForOffset; + ExtDexFileGetString; + ExtDexFileMakeString; + ExtDexFileOpenFromFd; + ExtDexFileOpenFromMemory; + local: + *; +}; diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 1c74a923e6..d2a5bb87f8 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -473,9 +473,6 @@ class OatDumper { GetQuickToInterpreterBridgeOffset); #undef DUMP_OAT_HEADER_OFFSET - os << "BOOT IMAGE CHECKSUM:\n"; - os << StringPrintf("0x%08x\n\n", oat_header.GetBootImageChecksum()); - // Print the key-value store. { os << "KEY VALUE STORE:\n"; diff --git a/runtime/Android.bp b/runtime/Android.bp index b03ef601a4..71c5b74363 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -345,6 +345,11 @@ libart_cc_defaults { static_libs: [ "libz", // For adler32. 
], + cflags: [ + // ART is allowed to link to libicuuc directly + // since they are in the same module + "-DANDROID_LINK_SHARED_ICU4C", + ], }, android_arm: { ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS, @@ -377,12 +382,12 @@ libart_cc_defaults { export_generated_headers: ["cpp-define-generator-asm-support"], include_dirs: [ "art/sigchainlib", - "external/icu/icu4c/source/common", "external/zlib", ], header_libs: [ "art_cmdlineparser_headers", "cpp-define-generator-definitions", + "libicuuc_headers", "libnativehelper_header_only", "jni_platform_headers", ], @@ -420,6 +425,8 @@ libart_static_cc_defaults { "libbacktrace", "libbase", "libcutils", + "libdexfile_external", // libunwindstack dependency + "libdexfile_support", // libunwindstack dependency "liblog", "libnativebridge", "libnativeloader", @@ -553,9 +560,6 @@ art_cc_library { header_libs: [ "libnativehelper_header_only", ], - include_dirs: [ - "external/icu/icu4c/source/common", - ], } art_cc_test { diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc index cfc9f1d469..a7922a213c 100644 --- a/runtime/base/locks.cc +++ b/runtime/base/locks.cc @@ -61,6 +61,7 @@ Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr; Mutex* Locks::reference_queue_soft_references_lock_ = nullptr; Mutex* Locks::reference_queue_weak_references_lock_ = nullptr; Mutex* Locks::runtime_shutdown_lock_ = nullptr; +Mutex* Locks::runtime_thread_pool_lock_ = nullptr; Mutex* Locks::cha_lock_ = nullptr; Mutex* Locks::subtype_check_lock_ = nullptr; Mutex* Locks::thread_list_lock_ = nullptr; @@ -154,6 +155,7 @@ void Locks::Init() { DCHECK(user_code_suspension_lock_ != nullptr); DCHECK(dex_lock_ != nullptr); DCHECK(native_debug_interface_lock_ != nullptr); + DCHECK(runtime_thread_pool_lock_ != nullptr); } else { // Create global locks in level order from highest lock level to lowest. LockLevel current_lock_level = kInstrumentEntrypointsLock; @@ -189,6 +191,10 @@ void Locks::Init() { DCHECK(runtime_shutdown_lock_ == nullptr); runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock); + DCHECK(runtime_thread_pool_lock_ == nullptr); + runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock); DCHECK(profiler_lock_ == nullptr); profiler_lock_ = new Mutex("profiler lock", current_lock_level); diff --git a/runtime/base/locks.h b/runtime/base/locks.h index 8cbe372c59..57719f1f6f 100644 --- a/runtime/base/locks.h +++ b/runtime/base/locks.h @@ -117,6 +117,7 @@ enum LockLevel : uint8_t { kJdwpEventListLock, kJdwpAttachLock, kJdwpStartLock, + kRuntimeThreadPoolLock, kRuntimeShutdownLock, kTraceLock, kHeapBitmapLock, @@ -224,8 +225,11 @@ class Locks { // Guards shutdown of the runtime. static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_); + // Runtime thread pool lock. + static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_); + // Guards background profiler global state. - static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_); + static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_); // Guards trace (ie traceview) requests. 
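The new kRuntimeThreadPoolLock level is deliberately slotted between kRuntimeShutdownLock and kProfilerLock, keeping lock acquisition in a single total order: runtime_shutdown_lock_ may be held while taking the thread pool lock, and the thread pool lock may be held while taking profiler_lock_, never the reverse. Below is a minimal, self-contained sketch of how such leveled-lock checking can be enforced at runtime; LeveledMutex and the level constants are illustrative only (not ART's Mutex), and the sketch assumes strictly nested critical sections.

#include <cassert>
#include <mutex>

// Illustrative lock levels mirroring the ordering above: higher levels must
// be acquired before lower ones.
enum Level : int {
  kProfilerLockLevel = 1,
  kRuntimeThreadPoolLockLevel = 2,
  kRuntimeShutdownLockLevel = 3,
};

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}
  void lock() {
    // A thread may only acquire a mutex at a strictly lower level than the
    // last one it acquired.
    assert(current_level_ > level_ && "lock order violation");
    mu_.lock();
    previous_ = current_level_;
    current_level_ = level_;
  }
  void unlock() {
    current_level_ = previous_;  // Assumes strictly nested lock/unlock pairs.
    mu_.unlock();
  }

 private:
  const int level_;
  int previous_ = 0;
  std::mutex mu_;
  static thread_local int current_level_;
};

thread_local int LeveledMutex::current_level_ = 1000;  // Sentinel: nothing held yet.

int main() {
  LeveledMutex shutdown(kRuntimeShutdownLockLevel);
  LeveledMutex pool(kRuntimeThreadPoolLockLevel);
  shutdown.lock();
  pool.lock();  // OK: 2 < 3. Locking 'shutdown' while holding 'pool' would assert.
  pool.unlock();
  shutdown.unlock();
}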
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index d33541c4d3..3b92e2c658 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -212,6 +212,22 @@ static void HandleEarlierVerifyError(Thread* self, self->AssertPendingException(); } +// Ensures that methods have the kAccSkipAccessChecks bit set. We use the +// kAccVerificationAttempted bit on the class access flags to determine whether this has been done +// before. +template <bool kNeedsVerified = false> +static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size) + REQUIRES_SHARED(Locks::mutator_lock_) { + if (kNeedsVerified) { + // To not fail access-flags access checks, push a minimal state. + mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current()); + } + if (!klass->WasVerificationAttempted()) { + klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size); + klass->SetVerificationAttempted(); + } +} + void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def) { // The class failed to initialize on a previous attempt, so we want to throw // a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we @@ -1037,20 +1053,15 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { runtime->SetSentinel(heap->AllocNonMovableObject<true>( self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor())); - const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath(); - if (boot_class_path.size() != spaces.size()) { - *error_msg = StringPrintf("Boot class path has %zu components but there are %zu image spaces.", - boot_class_path.size(), - spaces.size()); - return false; - } + const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations(); + CHECK_LE(spaces.size(), boot_class_path_locations.size()); for (size_t i = 0u, size = spaces.size(); i != size; ++i) { // Boot class loader, use a null handle. 
std::vector<std::unique_ptr<const DexFile>> dex_files; if (!AddImageSpace(spaces[i], ScopedNullHandle<mirror::ClassLoader>(), /*dex_elements=*/ nullptr, - /*dex_location=*/ boot_class_path[i].c_str(), + /*dex_location=*/ boot_class_path_locations[i].c_str(), /*out*/&dex_files, error_msg)) { return false; @@ -1069,6 +1080,15 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { return true; } +void ClassLinker::AddExtraBootDexFiles( + Thread* self, + std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) { + for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) { + AppendToBootClassPath(self, *dex_file); + boot_dex_files_.push_back(std::move(dex_file)); + } +} + bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, ObjPtr<mirror::ClassLoader> class_loader) { return class_loader == nullptr || @@ -3946,6 +3966,7 @@ ObjPtr<mirror::Class> ClassLinker::CreatePrimitiveClass(Thread* self, Primitive: h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract); h_class->SetPrimitiveType(type); h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable()); + EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_); mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self); const char* descriptor = Primitive::Descriptor(type); ObjPtr<mirror::Class> existing = InsertClass(descriptor, @@ -4093,6 +4114,7 @@ ObjPtr<mirror::Class> ClassLinker::CreateArrayClass(Thread* self, new_class->PopulateEmbeddedVTable(image_pointer_size_); ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_); new_class->SetImt(object_imt, image_pointer_size_); + EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_); mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self); // don't need to set new_class->SetObjectSize(..) // because Object::SizeOf delegates to Array::SizeOf @@ -4123,6 +4145,8 @@ ObjPtr<mirror::Class> ClassLinker::CreateArrayClass(Thread* self, // and remove "interface". access_flags |= kAccAbstract | kAccFinal; access_flags &= ~kAccInterface; + // Arrays are access-checks-clean and preverified. + access_flags |= kAccVerificationAttempted; new_class->SetAccessFlags(access_flags); @@ -4357,17 +4381,6 @@ bool ClassLinker::AttemptSupertypeVerification(Thread* self, return false; } -// Ensures that methods have the kAccSkipAccessChecks bit set. We use the -// kAccVerificationAttempted bit on the class access flags to determine whether this has been done -// before. -static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size) - REQUIRES_SHARED(Locks::mutator_lock_) { - if (!klass->WasVerificationAttempted()) { - klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size); - klass->SetVerificationAttempted(); - } -} - verifier::FailureKind ClassLinker::VerifyClass( Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) { { @@ -4844,6 +4857,7 @@ ObjPtr<mirror::Class> ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRun { // Lock on klass is released. Lock new class object. 
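Note the ordering inside AddExtraBootDexFiles() above: each dex file is first registered with the boot class path via AppendToBootClassPath(self, *dex_file), and only then is the owning unique_ptr moved into boot_dex_files_. Moving a unique_ptr transfers ownership without relocating the pointee, so the reference handed out first remains valid. A self-contained sketch of that pattern, with simplified, hypothetical types:

#include <memory>
#include <string>
#include <vector>

// Sketch of the ownership transfer in ClassLinker::AddExtraBootDexFiles:
// register a borrowed reference first, then take ownership of the pointer.
struct DexFile { std::string location; };

class Linker {
 public:
  void AddExtra(std::vector<std::unique_ptr<const DexFile>>&& extra) {
    for (std::unique_ptr<const DexFile>& dex : extra) {
      boot_class_path_.push_back(dex.get());   // Borrowed pointer first...
      owned_.push_back(std::move(dex));        // ...then take ownership.
    }
  }

 private:
  std::vector<const DexFile*> boot_class_path_;            // Non-owning view.
  std::vector<std::unique_ptr<const DexFile>> owned_;      // Keeps pointees alive.
};

int main() {
  std::vector<std::unique_ptr<const DexFile>> files;
  files.push_back(std::make_unique<const DexFile>(DexFile{"/apex/extra.dex"}));
  Linker linker;
  linker.AddExtra(std::move(files));  // 'files' is left with moved-from slots.
}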
ObjectLock<mirror::Class> initialization_lock(self, klass); + EnsureSkipAccessChecksMethods(klass, image_pointer_size_); mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self); } @@ -5594,8 +5608,7 @@ bool ClassLinker::EnsureInitialized(Thread* self, DCHECK(c != nullptr); if (c->IsInitialized()) { - EnsureSkipAccessChecksMethods(c, image_pointer_size_); - self->AssertNoPendingException(); + DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader(); return true; } // SubtypeCheckInfo::Initialized must happen-before any new-instance for that type. diff --git a/runtime/class_linker.h b/runtime/class_linker.h index d3eab7cb1d..d0a7c9b846 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -127,6 +127,12 @@ class ClassLinker { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); + // Add boot class path dex files that were not included in the boot image. + // ClassLinker takes ownership of these dex files. + void AddExtraBootDexFiles(Thread* self, + std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) + REQUIRES_SHARED(Locks::mutator_lock_); + // Add an image space to the class linker, may fix up classloader fields and dex cache fields. // The dex files that were newly opened for the space are placed in the out argument // out_dex_files. Returns true if the operation succeeded. diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index fe45b9e1f0..061c788a41 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -114,7 +114,8 @@ class ClassLinkerTest : public CommonRuntimeTest { EXPECT_EQ(0, primitive->GetIfTableCount()); EXPECT_TRUE(primitive->GetIfTable() != nullptr); EXPECT_EQ(primitive->GetIfTable()->Count(), 0u); - EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags()); + EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted, + primitive->GetAccessFlags()); } void AssertObjectClass(ObjPtr<mirror::Class> JavaLangObject) diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index a101976a87..a20baa0dc4 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -24,7 +24,6 @@ #include "nativehelper/scoped_local_ref.h" #include "android-base/stringprintf.h" -#include <unicode/uvernum.h> #include "art_field-inl.h" #include "base/file_utils.h" diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc index b46c93383e..7f697d1eb8 100644 --- a/runtime/dexopt_test.cc +++ b/runtime/dexopt_test.cc @@ -116,19 +116,19 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location, ASSERT_TRUE(odex_file.get() != nullptr) << error_msg; EXPECT_EQ(filter, odex_file->GetCompilerFilter()); - std::unique_ptr<ImageHeader> image_header( - gc::space::ImageSpace::ReadImageHeader(image_location.c_str(), - kRuntimeISA, - &error_msg)); - ASSERT_TRUE(image_header != nullptr) << error_msg; + std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums( + Runtime::Current()->GetBootClassPath(), image_location, kRuntimeISA, &error_msg); + ASSERT_FALSE(boot_image_checksums.empty()) << error_msg; + const OatHeader& oat_header = odex_file->GetOatHeader(); - uint32_t boot_image_checksum = image_header->GetImageChecksum(); if (CompilerFilter::DependsOnImageChecksum(filter)) { + const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey); + ASSERT_TRUE(checksums != nullptr); if (with_alternate_image) { - EXPECT_NE(boot_image_checksum, 
oat_header.GetBootImageChecksum()); + EXPECT_NE(boot_image_checksums, checksums); } else { - EXPECT_EQ(boot_image_checksum, oat_header.GetBootImageChecksum()); + EXPECT_EQ(boot_image_checksums, checksums); } } } diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc index e508d5fddf..11ad8a84bf 100644 --- a/runtime/gc/allocator/dlmalloc.cc +++ b/runtime/gc/allocator/dlmalloc.cc @@ -38,6 +38,7 @@ static void art_heap_usage_error(const char* function, void* p); #pragma GCC diagnostic ignored "-Wempty-body" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic" +#pragma GCC diagnostic ignored "-Wexpansion-to-defined" #include "../../../external/dlmalloc/malloc.c" // Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity // of libbase, so undefine it now. diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h index 3160422b32..1014c0e15c 100644 --- a/runtime/gc/collector/concurrent_copying-inl.h +++ b/runtime/gc/collector/concurrent_copying-inl.h @@ -37,14 +37,15 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion( mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) { if (kEnableGenerationalConcurrentCopyingCollection - && young_gen_ && !done_scanning_.load(std::memory_order_acquire)) { - // Everything in the unevac space should be marked for generational CC except for large objects. - DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " " + // Everything in the unevac space should be marked for young generation CC, + // except for large objects. + DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) + << ref << " " << ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass(); - // Since the mark bitmap is still filled in from last GC, we can not use that or else the - // mutator may see references to the from space. Instead, use the baker pointer itself as - // the mark bit. + // Since the mark bitmap is still filled in from last GC (or from marking phase of 2-phase CC, + // we can not use that or else the mutator may see references to the from space. Instead, use + // the baker pointer itself as the mark bit. if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) { // TODO: We don't actually need to scan this object later, we just need to clear the gray // bit. @@ -244,7 +245,7 @@ inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_re DCHECK(region_space_->IsInUnevacFromSpace(from_ref)); if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) { return true; - } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_) + } else if (!kEnableGenerationalConcurrentCopyingCollection || done_scanning_.load(std::memory_order_acquire)) { // If the card table scanning is not finished yet, then only read-barrier // state should be checked. 
Checking the mark bitmap is unreliable as there diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 7736568620..3b57b0741d 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -95,6 +95,8 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, weak_ref_access_enabled_(true), copied_live_bytes_ratio_sum_(0.f), gc_count_(0), + region_space_inter_region_bitmap_(nullptr), + non_moving_space_inter_region_bitmap_(nullptr), reclaimed_bytes_ratio_sum_(0.f), young_gen_(young_gen), skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), @@ -188,6 +190,11 @@ void ConcurrentCopying::RunPhases() { { ReaderMutexLock mu(self, *Locks::mutator_lock_); InitializePhase(); + // In case of forced evacuation, all regions are evacuated and hence no + // need to compute live_bytes. + if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) { + MarkingPhase(); + } } if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) { // Switch to read barrier mark entrypoints before we gray the objects. This is required in case @@ -201,7 +208,7 @@ void ConcurrentCopying::RunPhases() { FlipThreadRoots(); { ReaderMutexLock mu(self, *Locks::mutator_lock_); - MarkingPhase(); + CopyingPhase(); } // Verify no from space refs. This causes a pause. if (kEnableNoFromSpaceRefsVerification) { @@ -280,6 +287,29 @@ void ConcurrentCopying::ActivateReadBarrierEntrypoints() { gc_barrier_->Increment(self, barrier_count); } +void ConcurrentCopying::CreateInterRegionRefBitmaps() { + DCHECK(kEnableGenerationalConcurrentCopyingCollection); + DCHECK(region_space_inter_region_bitmap_ == nullptr); + DCHECK(non_moving_space_inter_region_bitmap_ == nullptr); + DCHECK(region_space_ != nullptr); + DCHECK(heap_->non_moving_space_ != nullptr); + // Region-space + region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create( + "region-space inter region ref bitmap", + reinterpret_cast<uint8_t*>(region_space_->Begin()), + region_space_->Limit() - region_space_->Begin())); + CHECK(region_space_inter_region_bitmap_ != nullptr) + << "Couldn't allocate region-space inter region ref bitmap"; + + // non-moving-space + non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create( + "non-moving-space inter region ref bitmap", + reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()), + heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin())); + CHECK(non_moving_space_inter_region_bitmap_ != nullptr) + << "Couldn't allocate non-moving-space inter region ref bitmap"; +} + void ConcurrentCopying::BindBitmaps() { Thread* self = Thread::Current(); WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); @@ -292,6 +322,7 @@ void ConcurrentCopying::BindBitmaps() { } else { CHECK(!space->IsZygoteSpace()); CHECK(!space->IsImageSpace()); + CHECK(space == region_space_ || space == heap_->non_moving_space_); if (kEnableGenerationalConcurrentCopyingCollection) { if (space == region_space_) { region_space_bitmap_ = region_space_->GetMarkBitmap(); @@ -299,12 +330,22 @@ void ConcurrentCopying::BindBitmaps() { DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect); space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } - // Age all of the cards for the region space so that we know which evac regions to scan. 
- Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic( - space->Begin(), - space->End(), - AgeCardVisitor(), - VoidFunctor()); + if (young_gen_) { + // Age all of the cards for the region space so that we know which evac regions to scan. + heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(), + space->End(), + AgeCardVisitor(), + VoidFunctor()); + } else { + // In a full-heap GC cycle, the card-table corresponding to region-space and + // non-moving space can be cleared, because this cycle only needs to + // capture writes during the marking phase of this cycle to catch + // objects that skipped marking due to heap mutation. Furthermore, + // if the next GC is a young-gen cycle, then it only needs writes to + // be captured after the thread-flip of this GC cycle, as that is when + // the young-gen for the next GC cycle starts getting populated. + heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit()); + } } else { if (space == region_space_) { // It is OK to clear the bitmap with mutators running since the only place it is read is @@ -381,6 +422,7 @@ void ConcurrentCopying::InitializePhase() { if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) { region_space_bitmap_->Clear(); } + mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed); // Mark all of the zygote large objects without graying them. MarkZygoteLargeObjects(); } @@ -471,7 +513,7 @@ class ConcurrentCopying::FlipCallback : public Closure { TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings()); // Note: self is not necessarily equal to thread since thread may be suspended. Thread* self = Thread::Current(); - if (kVerifyNoMissingCardMarks) { + if (kVerifyNoMissingCardMarks && cc->young_gen_) { cc->VerifyNoMissingCardMarks(); } CHECK_EQ(thread, self); @@ -485,9 +527,11 @@ class ConcurrentCopying::FlipCallback : public Closure { } { TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings()); - // Only change live bytes for full CC. + // Only change live bytes for 1-phase full heap CC. cc->region_space_->SetFromSpace( - cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_); + cc->rb_table_, + evac_mode, + /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection); } cc->SwapStacks(); if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { @@ -496,9 +540,7 @@ class ConcurrentCopying::FlipCallback : public Closure { cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); } cc->is_marking_ = true; - cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, - std::memory_order_relaxed); - if (kIsDebugBuild && !cc->young_gen_) { + if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) { cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared(); } if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) { @@ -684,7 +726,7 @@ void ConcurrentCopying::VerifyNoMissingCardMarks() { // Switch threads that from from-space to to-space refs. Forward/mark the thread roots. 
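Stepping back to the BindBitmaps() change above: a young-generation cycle "ages" dirty cards so the later scan can still find old-to-young references, whereas a full-heap cycle clears the card range outright, since it only needs to capture writes performed after this point. A small illustrative sketch of that distinction (not ART's CardTable API; card values and names here are assumptions):

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical card states, loosely modeled on kCardClean/kCardAged/kCardDirty.
constexpr uint8_t kCardClean = 0;
constexpr uint8_t kCardAged = 1;
constexpr uint8_t kCardDirty = 2;

void PrepareCards(std::vector<uint8_t>& cards, bool young_gen) {
  if (young_gen) {
    // Remember which cards were dirty, but stop treating them as new writes.
    for (uint8_t& card : cards) {
      if (card == kCardDirty) card = kCardAged;
    }
  } else {
    // Full-heap cycle: only writes from here on matter, so drop everything.
    std::fill(cards.begin(), cards.end(), kCardClean);
  }
}

int main() {
  std::vector<uint8_t> cards = {kCardDirty, kCardClean, kCardDirty};
  PrepareCards(cards, /*young_gen=*/true);  // -> {aged, clean, aged}
}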
void ConcurrentCopying::FlipThreadRoots() { TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings()); - if (kVerboseMode) { + if (kVerboseMode || heap_->dump_region_info_before_gc_) { LOG(INFO) << "time=" << region_space_->Time(); region_space_->DumpNonFreeRegions(LOG_STREAM(INFO)); } @@ -860,13 +902,484 @@ class ConcurrentCopying::ImmuneSpaceScanObjVisitor { ConcurrentCopying* const collector_; }; -// Concurrently mark roots that are guarded by read barriers and process the mark stack. +template <bool kAtomicTestAndSet> +class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor { + public: + explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self) + : collector_(cc), self_(self) {} + + void VisitRoots(mirror::Object*** roots, + size_t count, + const RootInfo& info ATTRIBUTE_UNUSED) override + REQUIRES_SHARED(Locks::mutator_lock_) { + for (size_t i = 0; i < count; ++i) { + mirror::Object** root = roots[i]; + mirror::Object* ref = *root; + if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) { + collector_->PushOntoMarkStack(self_, ref); + } + } + } + + void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, + size_t count, + const RootInfo& info ATTRIBUTE_UNUSED) override + REQUIRES_SHARED(Locks::mutator_lock_) { + for (size_t i = 0; i < count; ++i) { + mirror::CompressedReference<mirror::Object>* const root = roots[i]; + if (!root->IsNull()) { + mirror::Object* ref = root->AsMirrorPtr(); + if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) { + collector_->PushOntoMarkStack(self_, ref); + } + } + } + } + + private: + ConcurrentCopying* const collector_; + Thread* const self_; +}; + +class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure { + public: + RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying, + bool disable_weak_ref_access) + : concurrent_copying_(concurrent_copying), + disable_weak_ref_access_(disable_weak_ref_access) { + } + + void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS { + // Note: self is not necessarily equal to thread since thread may be suspended. + Thread* const self = Thread::Current(); + CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) + << thread->GetState() << " thread " << thread << " self " << self; + // Revoke thread local mark stacks. + accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); + if (tl_mark_stack != nullptr) { + MutexLock mu(self, concurrent_copying_->mark_stack_lock_); + concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack); + thread->SetThreadLocalMarkStack(nullptr); + } + // Disable weak ref access. + if (disable_weak_ref_access_) { + thread->SetWeakRefAccessEnabled(false); + } + // If thread is a running mutator, then act on behalf of the garbage collector. + // See the code in ThreadList::RunCheckpoint. 
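The Pass() call that follows is each checkpointed thread's half of a rendezvous; CaptureThreadRootsForMarking() further below performs the matching Increment() wait sized by the checkpoint's barrier_count. A stripped-down sketch of that handshake, using a hypothetical CheckpointBarrier that is far simpler than art::Barrier:

#include <condition_variable>
#include <mutex>
#include <thread>

// Each checkpointed thread calls Pass() once its closure has run; the GC
// thread blocks in Wait() until 'count' passes have arrived.
class CheckpointBarrier {
 public:
  void Pass() {
    std::lock_guard<std::mutex> lg(mu_);
    ++passed_;
    cv_.notify_all();
  }
  void Wait(int count) {
    std::unique_lock<std::mutex> ul(mu_);
    cv_.wait(ul, [&] { return passed_ >= count; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int passed_ = 0;
};

int main() {
  CheckpointBarrier barrier;
  std::thread mutator([&] { barrier.Pass(); });  // Checkpointed thread.
  barrier.Wait(1);                               // GC side waits for one pass.
  mutator.join();
}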
+ concurrent_copying_->GetBarrier().Pass(self); + } + + protected: + ConcurrentCopying* const concurrent_copying_; + + private: + const bool disable_weak_ref_access_; +}; + +class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint : + public RevokeThreadLocalMarkStackCheckpoint { + public: + explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) : + RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {} + + void Run(Thread* thread) override + REQUIRES_SHARED(Locks::mutator_lock_) { + Thread* const self = Thread::Current(); + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); + // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots + // only. + CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self); + thread->VisitRoots(&visitor, kVisitRootFlagAllRoots); + // Barrier handling is done in the base class' Run() below. + RevokeThreadLocalMarkStackCheckpoint::Run(thread); + } +}; + +void ConcurrentCopying::CaptureThreadRootsForMarking() { + TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings()); + if (kVerboseMode) { + LOG(INFO) << "time=" << region_space_->Time(); + region_space_->DumpNonFreeRegions(LOG_STREAM(INFO)); + } + Thread* const self = Thread::Current(); + CaptureThreadRootsForMarkingAndCheckpoint check_point(this); + ThreadList* thread_list = Runtime::Current()->GetThreadList(); + gc_barrier_->Init(self, 0); + size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr); + // If there are no threads to wait for, then all the checkpoint functions have already finished, + // so there is no need to release the mutator lock. + if (barrier_count == 0) { + return; + } + Locks::mutator_lock_->SharedUnlock(self); + { + ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); + gc_barrier_->Increment(self, barrier_count); + } + Locks::mutator_lock_->SharedLock(self); + if (kVerboseMode) { + LOG(INFO) << "time=" << region_space_->Time(); + region_space_->DumpNonFreeRegions(LOG_STREAM(INFO)); + LOG(INFO) << "GC end of CaptureThreadRootsForMarking"; + } +} + +// Used to scan ref fields of an object. +template <bool kHandleInterRegionRefs> +class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor { + public: + explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector, + size_t obj_region_idx) + : collector_(collector), + obj_region_idx_(obj_region_idx), + contains_inter_region_idx_(false) {} + + void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const + ALWAYS_INLINE + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES_SHARED(Locks::heap_bitmap_lock_) { + DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_); + DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj)); + CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset)); + } + + void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const + REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE { + DCHECK(klass->IsTypeOfReferenceClass()); + // If the referent is not null, then we must re-visit the object during the + // copying phase to enqueue it for delayed processing and set its + // read-barrier state to gray, to ensure that a call to GetReferent() triggers + // the read-barrier. We use the same data structure that is used to remember + // objects with inter-region refs for this purpose too. 
+ if (kHandleInterRegionRefs + && !contains_inter_region_idx_ + && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) { + contains_inter_region_idx_ = true; + } + } + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + ALWAYS_INLINE + REQUIRES_SHARED(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + ALWAYS_INLINE + REQUIRES_SHARED(Locks::mutator_lock_) { + CheckReference(root->AsMirrorPtr()); + } + + bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) { + return contains_inter_region_idx_; + } + + private: + void CheckReference(mirror::Object* ref) const + REQUIRES_SHARED(Locks::mutator_lock_) { + if (ref == nullptr) { + // Nothing to do. + return; + } + if (!collector_->TestAndSetMarkBitForRef(ref)) { + collector_->PushOntoLocalMarkStack(ref); + } + if (kHandleInterRegionRefs && !contains_inter_region_idx_) { + size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref); + // If a region-space object refers to an outside object, we will have a + // mismatch of region idx, but the object need not be re-visited in + // copying phase. + if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) { + contains_inter_region_idx_ = true; + } + } + } + + ConcurrentCopying* const collector_; + const size_t obj_region_idx_; + mutable bool contains_inter_region_idx_; +}; + +void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) { + DCHECK(ref != nullptr); + DCHECK(!immune_spaces_.ContainsObject(ref)); + DCHECK(TestMarkBitmapForRef(ref)); + size_t obj_region_idx = static_cast<size_t>(-1); + if (LIKELY(region_space_->HasAddress(ref))) { + obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref); + // Add live bytes to the corresponding region + if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) { + // Newly Allocated regions are always chosen for evacuation. So no need + // to update live_bytes_. + size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>(); + size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); + region_space_->AddLiveBytes(ref, alloc_size); + } + } + ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true> + visitor(this, obj_region_idx); + ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>( + visitor, visitor); + // Mark the corresponding card dirty if the object contains any + // inter-region reference. + if (visitor.ContainsInterRegionRefs()) { + if (obj_region_idx == static_cast<size_t>(-1)) { + // If an inter-region ref has been found in a non-region-space, then it + // must be non-moving-space. This is because this function cannot be + // called on a immune-space object, and a large-object-space object has + // only class object reference, which is either in some immune-space, or + // in non-moving-space. 
+ DCHECK(heap_->non_moving_space_->HasAddress(ref)); + non_moving_space_inter_region_bitmap_->Set(ref); + } else { + region_space_inter_region_bitmap_->Set(ref); + } + } +} + +template <bool kAtomic> +bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) { + accounting::ContinuousSpaceBitmap* bitmap = nullptr; + accounting::LargeObjectBitmap* los_bitmap = nullptr; + if (LIKELY(region_space_->HasAddress(ref))) { + bitmap = region_space_bitmap_; + } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) { + bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap(); + } else if (immune_spaces_.ContainsObject(ref)) { + // References to immune space objects are always live. + DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref)); + return true; + } else { + // Should be a large object. Must be page aligned and the LOS must exist. + if (kIsDebugBuild + && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) { + // It must be heap corruption. Remove memory protection and dump data. + region_space_->Unprotect(); + heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr, + MemberOffset(0), + ref, + /* fatal */ true); + } + los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap(); + } + if (kAtomic) { + return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref); + } else { + return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref); + } +} + +bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) { + if (LIKELY(region_space_->HasAddress(ref))) { + return region_space_bitmap_->Test(ref); + } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) { + return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref); + } else if (immune_spaces_.ContainsObject(ref)) { + // References to immune space objects are always live. + DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref)); + return true; + } else { + // Should be a large object. Must be page aligned and the LOS must exist. + if (kIsDebugBuild + && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) { + // It must be heap corruption. Remove memory protection and dump data. 
+ region_space_->Unprotect(); + heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr, + MemberOffset(0), + ref, + /* fatal */ true); + } + return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref); + } +} + +void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) { + if (kIsDebugBuild) { + Thread *self = Thread::Current(); + DCHECK_EQ(thread_running_gc_, self); + DCHECK(self->GetThreadLocalMarkStack() == nullptr); + } + DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal); + gc_mark_stack_->PushBack(ref); +} + +void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() { + // Process thread-local mark stack containing thread roots + ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false, + /* checkpoint_callback */ nullptr, + [this] (mirror::Object* ref) + REQUIRES_SHARED(Locks::mutator_lock_) { + AddLiveBytesAndScanRef(ref); + }); + + while (!gc_mark_stack_->IsEmpty()) { + mirror::Object* ref = gc_mark_stack_->PopBack(); + AddLiveBytesAndScanRef(ref); + } +} + +class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor { + public: + explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {} + + ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) { + ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false> + visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1)); + obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( + visitor, visitor); + } + + static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) { + reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj); + } + + private: + ConcurrentCopying* const collector_; +}; + +/* Invariants for two-phase CC + * =========================== + * A) Definitions + * --------------- + * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack + * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged + * 3) Black-dirty: marked in bitmap, and corresponding card is dirty + * 4) Gray: marked in bitmap, and exists in mark stack + * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is + * dirty, and exists in mark stack + * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack + * + * B) Before marking phase + * ----------------------- + * 1) All objects are white + * 2) Cards are either clean or aged (cannot be asserted without a STW pause) + * 3) Mark bitmap is cleared + * 4) Mark stack is empty + * + * C) During marking phase + * ------------------------ + * 1) If a black object holds an inter-region or white reference, then its + * corresponding card is dirty. 
In other words, it changes from being + * black-clean to black-dirty + * 2) No black-clean object points to a white object + * + * D) After marking phase + * ----------------------- + * 1) There are no gray objects + * 2) All newly allocated objects are in from space + * 3) No white object can be reachable, directly or otherwise, from a + * black-clean object + * + * E) During copying phase + * ------------------------ + * 1) Mutators cannot observe white and black-dirty objects + * 2) New allocations are in to-space (newly allocated regions are part of to-space) + * 3) An object in mark stack must have its rb_state = Gray + * + * F) During card table scan + * -------------------------- + * 1) Referents corresponding to root references are gray or in to-space + * 2) Every path from an object that is read or written by a mutator during + * this period to a dirty black object goes through some gray object. + * Mutators preserve this by graying black objects as needed during this + * period. Ensures that a mutator never encounters a black dirty object. + * + * G) After card table scan + * ------------------------ + * 1) There are no black-dirty objects + * 2) Referents corresponding to root references are gray, black-clean or in + * to-space + * + * H) After copying phase + * ----------------------- + * 1) Mark stack is empty + * 2) No references into evacuated from-space + * 3) No reference to an object which is unmarked and is also not in newly + * allocated region. In other words, no reference to white objects. +*/ + void ConcurrentCopying::MarkingPhase() { TimingLogger::ScopedTiming split("MarkingPhase", GetTimings()); if (kVerboseMode) { LOG(INFO) << "GC MarkingPhase"; } + accounting::CardTable* const card_table = heap_->GetCardTable(); + Thread* const self = Thread::Current(); + // Clear live_bytes_ of every non-free region, except the ones that are newly + // allocated. + region_space_->SetAllRegionLiveBytesZero(); + if (kIsDebugBuild) { + region_space_->AssertAllRegionLiveBytesZeroOrCleared(); + } + // Scan immune spaces + { + TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings()); + for (auto& space : immune_spaces_.GetSpaces()) { + DCHECK(space->IsImageSpace() || space->IsZygoteSpace()); + accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); + accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); + ImmuneSpaceCaptureRefsVisitor visitor(this); + if (table != nullptr) { + table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor); + } else { + WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_); + card_table->Scan<false>( + live_bitmap, + space->Begin(), + space->Limit(), + visitor, + accounting::CardTable::kCardDirty - 1); + } + } + } + // Scan runtime roots + { + TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings()); + CaptureRootsForMarkingVisitor visitor(this, self); + Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots); + } + { + // TODO: don't visit the transaction roots if it's not active. 
+ TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings()); + CaptureRootsForMarkingVisitor visitor(this, self); + Runtime::Current()->VisitNonThreadRoots(&visitor); + } + // Capture thread roots + CaptureThreadRootsForMarking(); + // Process mark stack + ProcessMarkStackForMarkingAndComputeLiveBytes(); + + if (kVerboseMode) { + LOG(INFO) << "GC end of MarkingPhase"; + } +} + +template <bool kNoUnEvac> +void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) { + Scan<kNoUnEvac>(obj); + // Set the read-barrier state of a reference-type object to gray if its + // referent is not marked yet. This is to ensure that if GetReferent() is + // called, it triggers the read-barrier to process the referent before use. + if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) { + mirror::Object* referent = + obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>(); + if (referent != nullptr && !IsInToSpace(referent)) { + obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState()); + } + } +} + +// Concurrently mark roots that are guarded by read barriers and process the mark stack. +void ConcurrentCopying::CopyingPhase() { + TimingLogger::ScopedTiming split("CopyingPhase", GetTimings()); + if (kVerboseMode) { + LOG(INFO) << "GC CopyingPhase"; + } Thread* self = Thread::Current(); + accounting::CardTable* const card_table = heap_->GetCardTable(); if (kIsDebugBuild) { MutexLock mu(self, *Locks::thread_list_lock_); CHECK(weak_ref_access_enabled_); @@ -879,7 +1392,7 @@ void ConcurrentCopying::MarkingPhase() { if (kUseBakerReadBarrier) { gc_grays_immune_objects_ = false; } - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (kEnableGenerationalConcurrentCopyingCollection) { if (kVerboseMode) { LOG(INFO) << "GC ScanCardsForSpace"; } @@ -897,39 +1410,76 @@ void ConcurrentCopying::MarkingPhase() { continue; } // Scan all of the objects on dirty cards in unevac from space, and non moving space. These - // are from previous GCs and may reference things in the from space. + // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things + // in the from space. // // Note that we do not need to process the large-object space (the only discontinuous space) // as it contains only large string objects and large primitive array objects, that have no // reference to other objects, except their class. There is no need to scan these large // objects, as the String class and the primitive array classes are expected to never move - // during a minor (young-generation) collection: + // during a collection: // - In the case where we run with a boot image, these classes are part of the image space, // which is an immune space. // - In the case where we run without a boot image, these classes are allocated in the // non-moving space (see art::ClassLinker::InitWithoutImage). - Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>( + card_table->Scan<false>( space->GetMarkBitmap(), space->Begin(), space->End(), [this, space](mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) { - // Don't push or gray unevac refs. - if (kIsDebugBuild && space == region_space_) { - // We may get unevac large objects. 
- if (!region_space_->IsInUnevacFromSpace(obj)) { - CHECK(region_space_bitmap_->Test(obj)); - region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj); - LOG(FATAL) << "Scanning " << obj << " not in unevac space"; + // TODO: This code may be refactored to avoid scanning object while + // done_scanning_ is false by setting rb_state to gray, and pushing the + // object on mark stack. However, it will also require clearing the + // corresponding mark-bit and, for region space objects, + // decrementing the object's size from the corresponding region's + // live_bytes. + if (young_gen_) { + // Don't push or gray unevac refs. + if (kIsDebugBuild && space == region_space_) { + // We may get unevac large objects. + if (!region_space_->IsInUnevacFromSpace(obj)) { + CHECK(region_space_bitmap_->Test(obj)); + region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj); + LOG(FATAL) << "Scanning " << obj << " not in unevac space"; + } } + ScanDirtyObject</*kNoUnEvac*/ true>(obj); + } else if (space != region_space_) { + DCHECK(space == heap_->non_moving_space_); + // We need to process un-evac references as they may be unprocessed, + // if they skipped the marking phase due to heap mutation. + ScanDirtyObject</*kNoUnEvac*/ false>(obj); + non_moving_space_inter_region_bitmap_->Clear(obj); + } else if (region_space_->IsInUnevacFromSpace(obj)) { + ScanDirtyObject</*kNoUnEvac*/ false>(obj); + region_space_inter_region_bitmap_->Clear(obj); } - Scan<true>(obj); }, - accounting::CardTable::kCardDirty - 1); + accounting::CardTable::kCardAged); + + if (!young_gen_) { + auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { + // We don't need to process un-evac references as any unprocessed + // ones will be taken care of in the card-table scan above. + ScanDirtyObject</*kNoUnEvac*/ true>(obj); + }; + if (space == region_space_) { + region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor); + } else { + DCHECK(space == heap_->non_moving_space_); + non_moving_space_inter_region_bitmap_->VisitMarkedRange( + reinterpret_cast<uintptr_t>(space->Begin()), + reinterpret_cast<uintptr_t>(space->End()), + visitor); + } + } } // Done scanning unevac space. done_scanning_.store(true, std::memory_order_release); + // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed. + // Currently we do it in ReclaimPhase(). if (kVerboseMode) { LOG(INFO) << "GC end of ScanCardsForSpace"; } @@ -947,10 +1497,13 @@ void ConcurrentCopying::MarkingPhase() { if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) { table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor); } else { - // TODO: Scan only the aged cards. 
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), - reinterpret_cast<uintptr_t>(space->Limit()), - visitor); + WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_); + card_table->Scan<false>( + live_bitmap, + space->Begin(), + space->Limit(), + visitor, + accounting::CardTable::kCardDirty - 1); } } } @@ -1059,7 +1612,7 @@ void ConcurrentCopying::MarkingPhase() { CHECK(weak_ref_access_enabled_); } if (kVerboseMode) { - LOG(INFO) << "GC end of MarkingPhase"; + LOG(INFO) << "GC end of CopyingPhase"; } } @@ -1419,40 +1972,6 @@ class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor { ConcurrentCopying* const collector_; }; -class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure { - public: - RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying, - bool disable_weak_ref_access) - : concurrent_copying_(concurrent_copying), - disable_weak_ref_access_(disable_weak_ref_access) { - } - - void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS { - // Note: self is not necessarily equal to thread since thread may be suspended. - Thread* self = Thread::Current(); - CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) - << thread->GetState() << " thread " << thread << " self " << self; - // Revoke thread local mark stacks. - accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); - if (tl_mark_stack != nullptr) { - MutexLock mu(self, concurrent_copying_->mark_stack_lock_); - concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack); - thread->SetThreadLocalMarkStack(nullptr); - } - // Disable weak ref access. - if (disable_weak_ref_access_) { - thread->SetWeakRefAccessEnabled(false); - } - // If thread is a running mutator, then act on behalf of the garbage collector. - // See the code in ThreadList::RunCheckpoint. - concurrent_copying_->GetBarrier().Pass(self); - } - - private: - ConcurrentCopying* const concurrent_copying_; - const bool disable_weak_ref_access_; -}; - void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback) { Thread* self = Thread::Current(); @@ -1510,7 +2029,11 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { if (mark_stack_mode == kMarkStackModeThreadLocal) { // Process the thread-local mark stacks and the GC mark stack. count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false, - /* checkpoint_callback= */ nullptr); + /* checkpoint_callback= */ nullptr, + [this] (mirror::Object* ref) + REQUIRES_SHARED(Locks::mutator_lock_) { + ProcessMarkStackRef(ref); + }); while (!gc_mark_stack_->IsEmpty()) { mirror::Object* to_ref = gc_mark_stack_->PopBack(); ProcessMarkStackRef(to_ref); @@ -1566,8 +2089,10 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { return count == 0; } +template <typename Processor> size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, - Closure* checkpoint_callback) { + Closure* checkpoint_callback, + const Processor& processor) { // Run a checkpoint to collect all thread local mark stacks and iterate over them all. 
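With this change, ProcessThreadLocalMarkStacks() is generic over the per-reference action: the marking phase can pass a lambda that calls AddLiveBytesAndScanRef, while the copying phase passes one that calls ProcessMarkStackRef, sharing a single drain loop. A self-contained sketch of the shape of that refactoring, with simplified types:

#include <cstdio>
#include <vector>

// Draining a mark stack, parameterized over the action applied to each
// reference, in the spirit of the templated ProcessThreadLocalMarkStacks.
struct Object { int id; };

template <typename Processor>
size_t DrainMarkStack(std::vector<Object*>& stack, const Processor& processor) {
  size_t count = 0;
  while (!stack.empty()) {
    Object* ref = stack.back();
    stack.pop_back();
    processor(ref);  // e.g. AddLiveBytesAndScanRef or ProcessMarkStackRef.
    ++count;
  }
  return count;
}

int main() {
  Object a{1}, b{2};
  std::vector<Object*> stack{&a, &b};
  DrainMarkStack(stack, [](Object* ref) { std::printf("visit %d\n", ref->id); });
}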
RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback); size_t count = 0; @@ -1581,7 +2106,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) { for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) { mirror::Object* to_ref = p->AsMirrorPtr(); - ProcessMarkStackRef(to_ref); + processor(to_ref); ++count; } { @@ -1632,6 +2157,12 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { perform_scan = true; // Only add to the live bytes if the object was not already marked and we are not the young // GC. + // Why add live bytes even after 2-phase GC? + // We need to ensure that if there is a unevac region with any live + // objects, then its live_bytes must be non-zero. Otherwise, + // ClearFromSpace() will clear the region. Considering, that we may skip + // live objects during marking phase of 2-phase GC, we have to take care + // of such objects here. add_to_live_bytes = true; } break; @@ -1773,7 +2304,12 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() { DisableWeakRefAccessCallback dwrac(this); // Process the thread local mark stacks one last time after switching to the shared mark stack // mode and disable weak ref accesses. - ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac); + ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, + &dwrac, + [this] (mirror::Object* ref) + REQUIRES_SHARED(Locks::mutator_lock_) { + ProcessMarkStackRef(ref); + }); if (kVerboseMode) { LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; } @@ -2039,7 +2575,7 @@ void ConcurrentCopying::ReclaimPhase() { uint64_t cleared_objects; { TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); - region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects); + region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_); // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since // RegionSpace::ClearFromSpace may clear empty unevac regions. CHECK_GE(cleared_bytes, from_bytes); @@ -2083,6 +2619,11 @@ void ConcurrentCopying::ReclaimPhase() { CheckEmptyMarkStack(); + if (heap_->dump_region_info_after_gc_) { + LOG(INFO) << "time=" << region_space_->Time(); + region_space_->DumpNonFreeRegions(LOG_STREAM(INFO)); + } + if (kVerboseMode) { LOG(INFO) << "GC end of ReclaimPhase"; } @@ -2348,7 +2889,7 @@ bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) { DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref; if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) { return true; - } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_) + } else if (!kEnableGenerationalConcurrentCopyingCollection || done_scanning_.load(std::memory_order_acquire)) { // Read the comment in IsMarkedInUnevacFromSpace() accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap(); @@ -2939,7 +3480,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self, los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap(); DCHECK(los_bitmap->HasAddress(ref)); } - if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { + if (kEnableGenerationalConcurrentCopyingCollection) { // The sticky-bit CC collector is only compatible with Baker-style read barriers. 
DCHECK(kUseBakerReadBarrier); // Not done scanning, use AtomicSetReadBarrierPointer. @@ -3012,6 +3553,9 @@ void ConcurrentCopying::FinishPhase() { TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings()); // We do not currently use the region space cards at all, madvise them away to save ram. heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit()); + } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) { + region_space_inter_region_bitmap_->Clear(); + non_moving_space_inter_region_bitmap_->Clear(); } { MutexLock mu(self, skipped_blocks_lock_); diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index 237e070d1a..a2d48376a5 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -25,7 +25,7 @@ #include "mirror/object_reference.h" #include "offsets.h" -#include <unordered_map> +#include <memory> #include <vector> namespace art { @@ -79,6 +79,8 @@ class ConcurrentCopying : public GarbageCollector { void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_); void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_); void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); void FinishPhase() REQUIRES(!mark_stack_lock_, @@ -96,6 +98,9 @@ class ConcurrentCopying : public GarbageCollector { return kCollectorTypeCC; } void RevokeAllThreadLocalBuffers() override; + // Creates inter-region ref bitmaps for region-space and non-moving-space. + // Gets called in Heap construction after the two spaces are created. + void CreateInterRegionRefBitmaps(); void SetRegionSpace(space::RegionSpace* region_space) { DCHECK(region_space != nullptr); region_space_ = region_space; @@ -161,6 +166,13 @@ class ConcurrentCopying : public GarbageCollector { template <bool kNoUnEvac> void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + // Scan the reference fields of object 'obj' in the dirty cards during + // card-table scan. In addition to visiting the references, it also sets the + // read-barrier state to gray for Reference-type objects to ensure that + // GetReferent() called on these objects calls the read-barrier on the referent. + template <bool kNoUnEvac> + void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); // Process a field. template <bool kNoUnEvac> void Process(mirror::Object* obj, MemberOffset offset) @@ -198,7 +210,10 @@ class ConcurrentCopying : public GarbageCollector { void VerifyNoMissingCardMarks() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); - size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback) + template <typename Processor> + size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, + Closure* checkpoint_callback, + const Processor& processor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback) REQUIRES_SHARED(Locks::mutator_lock_); @@ -295,6 +310,15 @@ class ConcurrentCopying : public GarbageCollector { // Set the read barrier mark entrypoints to non-null. 
void ActivateReadBarrierEntrypoints(); + void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_); + void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_); + bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_); + template <bool kAtomic = false> + bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_); + void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_); + void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + space::RegionSpace* region_space_; // The underlying region space. std::unique_ptr<Barrier> gc_barrier_; std::unique_ptr<accounting::ObjectStack> gc_mark_stack_; @@ -368,6 +392,10 @@ class ConcurrentCopying : public GarbageCollector { // possible for minor GC if all allocated objects are in non-moving // space.) size_t gc_count_; + // Bit is set if the corresponding object has inter-region references that + // were found during the marking phase of a two-phase full-heap GC cycle. + std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_; + std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_; // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle float reclaimed_bytes_ratio_sum_; @@ -375,7 +403,7 @@ // Generational "sticky", only trace through dirty objects in region space. const bool young_gen_; // If true, the GC thread is done scanning marked objects on dirty and aged - // card (see ConcurrentCopying::MarkingPhase). + // card (see ConcurrentCopying::CopyingPhase). Atomic<bool> done_scanning_; // The skipped blocks are memory blocks/chunks that were copies of @@ -441,6 +469,10 @@ class VerifyNoFromSpaceRefsFieldVisitor; class VerifyNoFromSpaceRefsVisitor; class VerifyNoMissingCardMarkVisitor; + class ImmuneSpaceCaptureRefsVisitor; + template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor; + class CaptureThreadRootsForMarkingAndCheckpoint; + template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor; DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying); }; diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc index 46ff7dc820..2ef3d92b57 100644 --- a/runtime/gc/collector/garbage_collector.cc +++ b/runtime/gc/collector/garbage_collector.cc @@ -90,13 +90,14 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) { Thread* self = Thread::Current(); uint64_t start_time = NanoTime(); uint64_t thread_cpu_start_time = ThreadCpuNanoTime(); - GetHeap()->CalculateWeightedAllocatedBytes(); + GetHeap()->CalculatePreGcWeightedAllocatedBytes(); Iteration* current_iteration = GetCurrentIteration(); current_iteration->Reset(gc_cause, clear_soft_references); // Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't // change in the middle of a GC. is_transaction_active_ = Runtime::Current()->IsActiveTransaction(); RunPhases(); // Run all the GC phases. + GetHeap()->CalculatePostGcWeightedAllocatedBytes(); // Add the current timings to the cumulative timings. cumulative_timings_.AddLogger(*GetTimings()); // Update cumulative statistics with how many bytes the GC iteration freed. 
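The split of CalculateWeightedAllocatedBytes() into pre- and post-GC variants means the metric is now sampled at both ends of a collection, against separate CPU-time baselines (matching the pre_gc_/post_gc_ field pairs introduced in heap.cc below). The exact weighting function is not visible in this hunk; the sketch below assumes a simple bytes-times-CPU-interval accumulation purely for illustration, and WeightedBytesTracker is a hypothetical name:

#include <cstdint>

// Two independent samples of a time-weighted allocated-bytes metric, one
// taken before RunPhases() and one after.
class WeightedBytesTracker {
 public:
  explicit WeightedBytesTracker(uint64_t start_cpu_ns)
      : pre_gc_last_ns_(start_cpu_ns), post_gc_last_ns_(start_cpu_ns) {}

  void PreGcSample(uint64_t now_ns, uint64_t bytes_allocated) {
    pre_gc_weighted_bytes_ +=
        static_cast<double>(bytes_allocated) * (now_ns - pre_gc_last_ns_);
    pre_gc_last_ns_ = now_ns;
  }
  void PostGcSample(uint64_t now_ns, uint64_t bytes_allocated) {
    post_gc_weighted_bytes_ +=
        static_cast<double>(bytes_allocated) * (now_ns - post_gc_last_ns_);
    post_gc_last_ns_ = now_ns;
  }

 private:
  uint64_t pre_gc_last_ns_;
  uint64_t post_gc_last_ns_;
  double pre_gc_weighted_bytes_ = 0.0;
  double post_gc_weighted_bytes_ = 0.0;
};

int main() {
  WeightedBytesTracker tracker(/*start_cpu_ns=*/0);
  tracker.PreGcSample(/*now_ns=*/1000, /*bytes_allocated=*/1 << 20);
  tracker.PostGcSample(/*now_ns=*/1500, /*bytes_allocated=*/1 << 19);
}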
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 9e1ba35a23..1c09b5c9bf 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -214,7 +214,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) { // New_num_bytes_allocated is zero if we didn't update num_bytes_allocated_. // That's fine. - CheckConcurrentGC(self, new_num_bytes_allocated, &obj); + CheckConcurrentGCForJava(self, new_num_bytes_allocated, &obj); } VerifyObject(obj); self->VerifyStack(); @@ -254,8 +254,8 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - if (allocator_type != kAllocatorTypeTLAB && - allocator_type != kAllocatorTypeRegionTLAB && + if (allocator_type != kAllocatorTypeRegionTLAB && + allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRosAlloc && UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) { return nullptr; @@ -396,30 +396,46 @@ inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_co inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size, bool grow) { - size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size; - if (UNLIKELY(new_footprint > max_allowed_footprint_)) { - if (UNLIKELY(new_footprint > growth_limit_)) { + size_t old_target = target_footprint_.load(std::memory_order_relaxed); + while (true) { + size_t old_allocated = num_bytes_allocated_.load(std::memory_order_relaxed); + size_t new_footprint = old_allocated + alloc_size; + // Tests against heap limits are inherently approximate, since multiple allocations may + // race, and this is not atomic with the allocation. + if (UNLIKELY(new_footprint <= old_target)) { + return false; + } else if (UNLIKELY(new_footprint > growth_limit_)) { return true; } - if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) { - if (!grow) { + // We are between target_footprint_ and growth_limit_. + if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) { + return false; + } else { + if (grow) { + if (target_footprint_.compare_exchange_weak(/*inout ref*/old_target, new_footprint, + std::memory_order_relaxed)) { + VlogHeapGrowth(old_target, new_footprint, alloc_size); + return false; + } // else try again. + } else { return true; } - // TODO: Grow for allocation is racy, fix it. - VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size); - max_allowed_footprint_ = new_footprint; } } - return false; } -// Request a GC if new_num_bytes_allocated is sufficiently large. -// A call with new_num_bytes_allocated == 0 is a fast no-op. -inline void Heap::CheckConcurrentGC(Thread* self, +inline bool Heap::ShouldConcurrentGCForJava(size_t new_num_bytes_allocated) { + // For a Java allocation, we only check whether the number of Java allocated bytes exceeds a + // threshold. By not considering native allocation here, we (a) ensure that Java heap bounds are + // maintained, and (b) reduce the cost of the check here.
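The rewritten IsOutOfMemoryOnAllocation above replaces the old racy footprint bump with a compare-exchange loop. A compilable sketch of the same control flow, assuming plain std::atomic globals in place of ART's Atomic<> members:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> target_footprint{64 * 1024 * 1024};  // soft target
    std::atomic<size_t> num_bytes_allocated{0};
    constexpr size_t growth_limit = 256 * 1024 * 1024;       // hard limit

    // Returns true if the allocation should be treated as OOM.
    bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool concurrent_gc, bool grow) {
      size_t old_target = target_footprint.load(std::memory_order_relaxed);
      while (true) {
        size_t new_footprint =
            num_bytes_allocated.load(std::memory_order_relaxed) + alloc_size;
        if (new_footprint <= old_target) {
          return false;  // Under the soft target.
        } else if (new_footprint > growth_limit) {
          return true;   // Over the hard limit: genuine OOM.
        }
        // Between the target and the limit.
        if (concurrent_gc) {
          return false;  // The concurrent collector is expected to catch up.
        } else if (!grow) {
          return true;
        }
        // Publish the grown target; on failure old_target is refreshed, retry.
        if (target_footprint.compare_exchange_weak(old_target, new_footprint,
                                                   std::memory_order_relaxed)) {
          return false;
        }
      }
    }

The weak compare-exchange may fail spuriously, which is harmless here: the loop simply re-evaluates against the freshly loaded target.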
+ return new_num_bytes_allocated >= concurrent_start_bytes_; +} + +inline void Heap::CheckConcurrentGCForJava(Thread* self, size_t new_num_bytes_allocated, ObjPtr<mirror::Object>* obj) { - if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) { - RequestConcurrentGCAndSaveObject(self, false, obj); + if (UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) { + RequestConcurrentGCAndSaveObject(self, false /* force_full */, obj); } } diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index bfb1019dd8..8f9967fdb3 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -17,6 +17,9 @@ #include "heap.h" #include <limits> +#if defined(__BIONIC__) || defined(__GLIBC__) +#include <malloc.h> // For mallinfo() +#endif #include <memory> #include <vector> @@ -187,7 +190,7 @@ Heap::Heap(size_t initial_size, bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold, - bool ignore_max_footprint, + bool ignore_target_footprint, bool use_tlab, bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, @@ -198,7 +201,9 @@ Heap::Heap(size_t initial_size, bool gc_stress_mode, bool measure_gc_performance, bool use_homogeneous_space_compaction_for_oom, - uint64_t min_interval_homogeneous_space_compaction_by_oom) + uint64_t min_interval_homogeneous_space_compaction_by_oom, + bool dump_region_info_before_gc, + bool dump_region_info_after_gc) : non_moving_space_(nullptr), rosalloc_space_(nullptr), dlmalloc_space_(nullptr), @@ -214,9 +219,11 @@ Heap::Heap(size_t initial_size, long_pause_log_threshold_(long_pause_log_threshold), long_gc_log_threshold_(long_gc_log_threshold), process_cpu_start_time_ns_(ProcessCpuNanoTime()), - last_process_cpu_time_ns_(process_cpu_start_time_ns_), - weighted_allocated_bytes_(0.0), - ignore_max_footprint_(ignore_max_footprint), + pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_), + post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_), + pre_gc_weighted_allocated_bytes_(0.0), + post_gc_weighted_allocated_bytes_(0.0), + ignore_target_footprint_(ignore_target_footprint), zygote_creation_lock_("zygote creation lock", kZygoteCreationLock), zygote_space_(nullptr), large_object_threshold_(large_object_threshold), @@ -229,13 +236,14 @@ Heap::Heap(size_t initial_size, next_gc_type_(collector::kGcTypePartial), capacity_(capacity), growth_limit_(growth_limit), - max_allowed_footprint_(initial_size), + target_footprint_(initial_size), concurrent_start_bytes_(std::numeric_limits<size_t>::max()), total_bytes_freed_ever_(0), total_objects_freed_ever_(0), num_bytes_allocated_(0), - new_native_bytes_allocated_(0), + native_bytes_registered_(0), old_native_bytes_allocated_(0), + native_objects_notified_(0), num_bytes_freed_revoke_(0), verify_missing_card_marks_(false), verify_system_weaks_(false), @@ -294,7 +302,9 @@ Heap::Heap(size_t initial_size, backtrace_lock_(nullptr), seen_backtrace_count_(0u), unique_backtrace_count_(0u), - gc_disabled_for_shutdown_(false) { + gc_disabled_for_shutdown_(false), + dump_region_info_before_gc_(dump_region_info_before_gc), + dump_region_info_after_gc_(dump_region_info_after_gc) { if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "Heap() entering"; } @@ -614,11 +624,11 @@ Heap::Heap(size_t initial_size, task_processor_.reset(new TaskProcessor()); reference_processor_.reset(new ReferenceProcessor()); pending_task_lock_ = new Mutex("Pending task lock"); - if (ignore_max_footprint_) { + if (ignore_target_footprint_) { SetIdealFootprint(std::numeric_limits<size_t>::max()); 
concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); } - CHECK_NE(max_allowed_footprint_, 0U); + CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U); // Create our garbage collectors. for (size_t i = 0; i < 2; ++i) { const bool concurrent = i != 0; @@ -656,6 +666,9 @@ Heap::Heap(size_t initial_size, concurrent_copying_collector_->SetRegionSpace(region_space_); if (kEnableGenerationalConcurrentCopyingCollection) { young_concurrent_copying_collector_->SetRegionSpace(region_space_); + // At this point, non-moving space should be created. + DCHECK(non_moving_space_ != nullptr); + concurrent_copying_collector_->CreateInterRegionRefBitmaps(); } garbage_collectors_.push_back(concurrent_copying_collector_); if (kEnableGenerationalConcurrentCopyingCollection) { @@ -1070,12 +1083,25 @@ void Heap::RemoveSpace(space::Space* space) { } } -void Heap::CalculateWeightedAllocatedBytes() { - uint64_t current_process_cpu_time = ProcessCpuNanoTime(); +double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns, + uint64_t current_process_cpu_time) const { uint64_t bytes_allocated = GetBytesAllocated(); - double weight = current_process_cpu_time - last_process_cpu_time_ns_; - weighted_allocated_bytes_ += weight * bytes_allocated; - last_process_cpu_time_ns_ = current_process_cpu_time; + double weight = current_process_cpu_time - gc_last_process_cpu_time_ns; + return weight * bytes_allocated; +} + +void Heap::CalculatePreGcWeightedAllocatedBytes() { + uint64_t current_process_cpu_time = ProcessCpuNanoTime(); + pre_gc_weighted_allocated_bytes_ += + CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time); + pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time; +} + +void Heap::CalculatePostGcWeightedAllocatedBytes() { + uint64_t current_process_cpu_time = ProcessCpuNanoTime(); + post_gc_weighted_allocated_bytes_ += + CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time); + post_gc_last_process_cpu_time_ns_ = current_process_cpu_time; } uint64_t Heap::GetTotalGcCpuTime() { @@ -1143,10 +1169,11 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { rosalloc_space_->DumpStats(os); } - os << "Registered native bytes allocated: " - << (old_native_bytes_allocated_.load(std::memory_order_relaxed) + - new_native_bytes_allocated_.load(std::memory_order_relaxed)) - << "\n"; + os << "Native bytes total: " << GetNativeBytes() + << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n"; + + os << "Total native bytes at last GC: " + << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n"; BaseMutex::DumpAll(os); } @@ -1157,8 +1184,12 @@ void Heap::ResetGcPerformanceInfo() { } process_cpu_start_time_ns_ = ProcessCpuNanoTime(); - last_process_cpu_time_ns_ = process_cpu_start_time_ns_; - weighted_allocated_bytes_ = 0u; + + pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_; + pre_gc_weighted_allocated_bytes_ = 0u; + + post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_; + post_gc_weighted_allocated_bytes_ = 0u; total_bytes_freed_ever_ = 0; total_objects_freed_ever_ = 0; @@ -1318,7 +1349,8 @@ void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType size_t total_bytes_free = GetFreeMemory(); oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM," - << " max allowed footprint " << 
max_allowed_footprint_ << ", growth limit " + << " target footprint " << target_footprint_.load(std::memory_order_relaxed) + << ", growth limit " << growth_limit_; // If the allocation failed due to fragmentation, print out the largest continuous allocation. if (total_bytes_free >= byte_count) { @@ -1384,6 +1416,11 @@ void Heap::Trim(Thread* self) { TrimSpaces(self); // Trim arenas that may have been used by JIT or verifier. runtime->GetArenaPool()->TrimMaps(); + { + // TODO: Move this to a callback called when startup is finished (b/120671223). + ScopedTrace trace2("Delete thread pool"); + runtime->DeleteThreadPool(); + } } class TrimIndirectReferenceTableClosure : public Closure { @@ -1853,7 +1890,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, } void Heap::SetTargetHeapUtilization(float target) { - DCHECK_GT(target, 0.0f); // asserted in Java code + DCHECK_GT(target, 0.1f); // asserted in Java code DCHECK_LT(target, 1.0f); target_utilization_ = target; } @@ -2267,8 +2304,8 @@ void Heap::ChangeCollector(CollectorType collector_type) { } if (IsGcConcurrent()) { concurrent_start_bytes_ = - std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - - kMinConcurrentRemainingBytes; + UnsignedDifference(target_footprint_.load(std::memory_order_relaxed), + kMinConcurrentRemainingBytes); } else { concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); } @@ -2597,6 +2634,39 @@ void Heap::TraceHeapSize(size_t heap_size) { ATRACE_INT("Heap size (KB)", heap_size / KB); } +size_t Heap::GetNativeBytes() { + size_t malloc_bytes; +#if defined(__BIONIC__) || defined(__GLIBC__) + size_t mmapped_bytes; + struct mallinfo mi = mallinfo(); + // In spite of the documentation, the jemalloc version of this call seems to do what we want, + // and it is thread-safe. + if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) { + // Shouldn't happen, but glibc declares uordblks as int. + // Avoiding sign extension gets us correct behavior for another 2 GB. + malloc_bytes = (unsigned int)mi.uordblks; + mmapped_bytes = (unsigned int)mi.hblkhd; + } else { + malloc_bytes = mi.uordblks; + mmapped_bytes = mi.hblkhd; + } + // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes + // dramatically different. (b/119580449) If so, fudge it. + if (mmapped_bytes > malloc_bytes) { + malloc_bytes = mmapped_bytes; + } +#else + // We should hit this case only in contexts in which GC triggering is not critical. Effectively + // disable GC triggering based on malloc(). + malloc_bytes = 1000; +#endif + return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed); + // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no + // more expensive, and it would allow us to count memory allocated by means other than malloc. + // However it would change as pages are unmapped and remapped due to memory pressure, among + // other things. It seems risky to trigger GCs as a result of such changes. 
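A reduced sketch of the mallinfo()-based estimate above, leaving out the registered-bytes term. The casts through unsigned int mirror the sign-extension workaround described in the comments, since glibc declares the mallinfo fields as int:

    #include <cstddef>
    #if defined(__BIONIC__) || defined(__GLIBC__)
    #include <malloc.h>  // For mallinfo()
    #endif

    size_t EstimateMallocedBytes() {
    #if defined(__BIONIC__) || defined(__GLIBC__)
      struct mallinfo mi = mallinfo();
      size_t malloc_bytes = static_cast<unsigned int>(mi.uordblks);  // in-use chunks
      size_t mmapped_bytes = static_cast<unsigned int>(mi.hblkhd);   // mmap()ed space
      // uordblks should already cover hblkhd; take the max in case it does not.
      return malloc_bytes >= mmapped_bytes ? malloc_bytes : mmapped_bytes;
    #else
      return 1000;  // Token value: effectively disables malloc-based triggering.
    #endif
    }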
+} + collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause, bool clear_soft_references) { @@ -2647,16 +2717,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, ++runtime->GetStats()->gc_for_alloc_count; ++self->GetStats()->gc_for_alloc_count; } - const uint64_t bytes_allocated_before_gc = GetBytesAllocated(); - - if (gc_type == NonStickyGcType()) { - // Move all bytes from new_native_bytes_allocated_ to - // old_native_bytes_allocated_ now that GC has been triggered, resetting - // new_native_bytes_allocated_ to zero in the process. - old_native_bytes_allocated_.fetch_add( - new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed), - std::memory_order_relaxed); - } + const size_t bytes_allocated_before_gc = GetBytesAllocated(); DCHECK_LT(gc_type, collector::kGcTypeMax); DCHECK_NE(gc_type, collector::kGcTypeNone); @@ -2683,7 +2744,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, // active_concurrent_copying_collector_. So there should be no concurrency here. active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ? young_concurrent_copying_collector_ : concurrent_copying_collector_; - active_concurrent_copying_collector_->SetRegionSpace(region_space_); + DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_); } collector = active_concurrent_copying_collector_; break; @@ -2728,6 +2789,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, FinishGC(self, gc_type); // Inform DDMS that a GC completed. Dbg::GcDidFinish(); + + old_native_bytes_allocated_.store(GetNativeBytes()); + // Unload native libraries for class unloading. We do this after calling FinishGC to prevent // deadlocks in case the JNI_OnUnload function does allocations. { @@ -3502,16 +3566,17 @@ void Heap::DumpForSigQuit(std::ostream& os) { } size_t Heap::GetPercentFree() { - return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_); + return static_cast<size_t>(100.0f * static_cast<float>( + GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed)); } -void Heap::SetIdealFootprint(size_t max_allowed_footprint) { - if (max_allowed_footprint > GetMaxMemory()) { - VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to " +void Heap::SetIdealFootprint(size_t target_footprint) { + if (target_footprint > GetMaxMemory()) { + VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to " << PrettySize(GetMaxMemory()); - max_allowed_footprint = GetMaxMemory(); + target_footprint = GetMaxMemory(); } - max_allowed_footprint_ = max_allowed_footprint; + target_footprint_.store(target_footprint, std::memory_order_relaxed); } bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const { @@ -3544,10 +3609,10 @@ double Heap::HeapGrowthMultiplier() const { } void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, - uint64_t bytes_allocated_before_gc) { + size_t bytes_allocated_before_gc) { // We know what our utilization is at this moment. // This doesn't actually resize any memory. It just lets the heap grow more when necessary. - const uint64_t bytes_allocated = GetBytesAllocated(); + const size_t bytes_allocated = GetBytesAllocated(); // Trace the new heap size after the GC is finished.
TraceHeapSize(bytes_allocated); uint64_t target_size; @@ -3555,16 +3620,18 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, // Use the multiplier to grow more for foreground. const double multiplier = HeapGrowthMultiplier(); - const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier); - const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier); + const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier); + const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier); if (gc_type != collector::kGcTypeSticky) { // Grow the heap for non sticky GC. - ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated; - CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated - << " target_utilization_=" << target_utilization_; + uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0); + DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated + << " target_utilization_=" << target_utilization_; target_size = bytes_allocated + delta * multiplier; - target_size = std::min(target_size, bytes_allocated + adjusted_max_free); - target_size = std::max(target_size, bytes_allocated + adjusted_min_free); + target_size = std::min(target_size, + static_cast<uint64_t>(bytes_allocated + adjusted_max_free)); + target_size = std::max(target_size, + static_cast<uint64_t>(bytes_allocated + adjusted_min_free)); next_gc_type_ = collector::kGcTypeSticky; } else { collector::GcType non_sticky_gc_type = NonStickyGcType(); @@ -3581,22 +3648,24 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, // We also check that the bytes allocated aren't over the footprint limit in order to prevent a // pathological case where dead objects which aren't reclaimed by sticky could get accumulated // if the sticky GC throughput always remained >= the full/partial throughput. + size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >= non_sticky_collector->GetEstimatedMeanThroughput() && non_sticky_collector->NumberOfIterations() > 0 && - bytes_allocated <= max_allowed_footprint_) { + bytes_allocated <= target_footprint) { next_gc_type_ = collector::kGcTypeSticky; } else { next_gc_type_ = non_sticky_gc_type; } // If we have freed enough memory, shrink the heap back down. - if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) { + if (bytes_allocated + adjusted_max_free < target_footprint) { target_size = bytes_allocated + adjusted_max_free; } else { - target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_)); + target_size = std::max(bytes_allocated, target_footprint); } } - if (!ignore_max_footprint_) { + CHECK_LE(target_size, std::numeric_limits<size_t>::max()); + if (!ignore_target_footprint_) { SetIdealFootprint(target_size); if (IsGcConcurrent()) { const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() + @@ -3605,26 +3674,25 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out // how many bytes were allocated during the GC we need to add freed_bytes back on.
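For the non-sticky path above, the target works out to bytes_allocated / utilization, clamped by the adjusted free bounds. A worked sketch with made-up inputs, omitting the foreground multiplier:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // target = allocated + allocated * (1/utilization - 1), clamped to
    // [allocated + min_free, allocated + max_free].
    uint64_t TargetSize(uint64_t allocated, double utilization,
                        uint64_t min_free, uint64_t max_free) {
      uint64_t delta = static_cast<uint64_t>(allocated * (1.0 / utilization - 1.0));
      uint64_t target = allocated + delta;
      target = std::min(target, allocated + max_free);
      target = std::max(target, allocated + min_free);
      return target;
    }

    int main() {
      // 60 MiB live at 0.75 utilization would give an 80 MiB target; the
      // 8 MiB max_free clamp pulls it down to 68 MiB.
      std::printf("%llu\n", static_cast<unsigned long long>(
          TargetSize(60ull << 20, 0.75, 512ull << 10, 8ull << 20)));
    }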
CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc); - const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes - + const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes - bytes_allocated_before_gc; // Calculate when to perform the next ConcurrentGC. // Estimate how many remaining bytes we will have when we need to start the next GC. size_t remaining_bytes = bytes_allocated_during_gc; remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes); remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes); - if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) { + size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); + if (UNLIKELY(remaining_bytes > target_footprint)) { // A situation that should never happen: the estimated allocation rate implies that we would // exceed the application's entire footprint. Schedule another GC nearly straight away. - remaining_bytes = kMinConcurrentRemainingBytes; + remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint); } - DCHECK_LE(remaining_bytes, max_allowed_footprint_); - DCHECK_LE(max_allowed_footprint_, GetMaxMemory()); + DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory()); // Start a concurrent GC when we get close to the estimated remaining bytes. When the // allocation rate is very high, remaining_bytes could tell us that we should start a GC // right away. - concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, - static_cast<size_t>(bytes_allocated)); + concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated); } } } @@ -3652,11 +3720,11 @@ void Heap::ClampGrowthLimit() { } void Heap::ClearGrowthLimit() { - if (max_allowed_footprint_ == growth_limit_ && growth_limit_ < capacity_) { - max_allowed_footprint_ = capacity_; + if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_ + && growth_limit_ < capacity_) { + target_footprint_.store(capacity_, std::memory_order_relaxed); concurrent_start_bytes_ = - std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - - kMinConcurrentRemainingBytes; + UnsignedDifference(capacity_, kMinConcurrentRemainingBytes); } growth_limit_ = capacity_; ScopedObjectAccess soa(Thread::Current()); @@ -3896,40 +3964,101 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) { static_cast<jlong>(timeout)); } -void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { - size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed); +// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as +// different fractions of Java allocations. +// For now, we essentially do not count old native allocations at all, so that we can preserve the +// existing behavior of not limiting native heap size. If we seriously considered it, we would +// have to adjust collection thresholds when we encounter large amounts of old native memory, +// and handle native out-of-memory situations. + +static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now. +static constexpr size_t kNewNativeDiscountFactor = 2; + +// If weighted java + native memory use exceeds our target by kStopForNativeFactor, and +// newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid +// running out of memory.
+static constexpr float kStopForNativeFactor = 2.0; +static constexpr size_t kHugeNativeAllocs = 200*1024*1024; + +// Return the ratio of the weighted native + java allocated bytes to its target value. +// A return value > 1.0 means we should collect. Significantly larger values mean we're falling +// behind. +inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) { + // Collection check for native allocation. Does not enforce Java heap bounds. + // With adj_start_bytes defined below, effectively checks + // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes, + // where c1 and c2 are currently 1 divided by the corresponding discount factors defined above. + size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed); + if (old_native_bytes > current_native_bytes) { + // Net decrease; skip the check, but update old value. + // It's OK to lose an update if two stores race. + old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed); + return 0.0; + } else { + size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes); + size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor + + old_native_bytes / kOldNativeDiscountFactor; + size_t adj_start_bytes = concurrent_start_bytes_ + + NativeAllocationGcWatermark() / kNewNativeDiscountFactor; + return static_cast<float>(GetBytesAllocated() + weighted_native_bytes) + / static_cast<float>(adj_start_bytes); + } +} - if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() && - !IsGCRequestPending()) { - // Trigger another GC because there have been enough native bytes - // allocated since the last GC. +inline void Heap::CheckConcurrentGCForNative(Thread* self) { + size_t current_native_bytes = GetNativeBytes(); + float gc_urgency = NativeMemoryOverTarget(current_native_bytes); + if (UNLIKELY(gc_urgency >= 1.0)) { if (IsGcConcurrent()) { - RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true); + RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true); + if (gc_urgency > kStopForNativeFactor + && current_native_bytes > kHugeNativeAllocs) { + // We're in danger of running out of memory due to rampant native allocation. + if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { + LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency; + } + WaitForGcToComplete(kGcCauseForAlloc, self); + } } else { CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false); } } } +// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect. +void Heap::NotifyNativeAllocations(JNIEnv* env) { + native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed); + CheckConcurrentGCForNative(ThreadForEnv(env)); +} + +// Register a native allocation with an explicit size. +// This should only be done for large allocations of non-malloc memory, which we wouldn't +// otherwise see. +void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { + native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed); + uint32_t objects_notified = + native_objects_notified_.fetch_add(1, std::memory_order_relaxed); + if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1 + || bytes > kCheckImmediatelyThreshold) { + CheckConcurrentGCForNative(ThreadForEnv(env)); + } +} + void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) { - // Take the bytes freed out of new_native_bytes_allocated_ first.
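Plugging made-up numbers into NativeMemoryOverTarget above shows how lightly old native memory weighs in. With 48 MiB of Java allocations, native use growing from 64 MiB to 96 MiB, and an assumed 64 MiB concurrent-start threshold plus 32 MiB watermark:

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t kOldFactor = 65536, kNewFactor = 2;
      size_t java_bytes = 48u << 20;
      size_t old_native = 64u << 20;       // native bytes seen at the last GC
      size_t current_native = 96u << 20;   // native bytes now
      size_t new_native = current_native - old_native;
      size_t weighted = new_native / kNewFactor + old_native / kOldFactor;
      // adj_start_bytes = concurrent_start_bytes + watermark / kNewFactor.
      size_t adj_start = (64u << 20) + (32u << 20) / kNewFactor;
      // (48 MiB + 17 MiB) / 80 MiB: prints "urgency = 0.81", so no GC yet.
      std::printf("urgency = %.2f\n",
                  static_cast<double>(java_bytes + weighted) /
                      static_cast<double>(adj_start));
    }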
If - // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed - // out of old_native_bytes_allocated_ to ensure all freed bytes are - // accounted for. size_t allocated; size_t new_freed_bytes; do { - allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed); + allocated = native_bytes_registered_.load(std::memory_order_relaxed); new_freed_bytes = std::min(allocated, bytes); - } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated, - allocated - new_freed_bytes)); - if (new_freed_bytes < bytes) { - old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed); - } + // We should not be registering more free than allocated bytes. + // But correctly keep going in non-debug builds. + DCHECK_EQ(new_freed_bytes, bytes); + } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated, + allocated - new_freed_bytes)); } size_t Heap::GetTotalMemory() const { - return std::max(max_allowed_footprint_, GetBytesAllocated()); + return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated()); } void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { @@ -4231,8 +4360,8 @@ const Verification* Heap::GetVerification() const { return verification_.get(); } -void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) { - VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to " +void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) { + VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to " << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation"; } @@ -4243,20 +4372,21 @@ class Heap::TriggerPostForkCCGcTask : public HeapTask { gc::Heap* heap = Runtime::Current()->GetHeap(); // Trigger a GC, if not already done. The first GC after fork, whenever it // takes place, will adjust the thresholds to normal levels. - if (heap->max_allowed_footprint_ == heap->growth_limit_) { + if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) { heap->RequestConcurrentGC(self, kGcCauseBackground, false); } } }; void Heap::PostForkChildAction(Thread* self) { - // Temporarily increase max_allowed_footprint_ and concurrent_start_bytes_ to + // Temporarily increase target_footprint_ and concurrent_start_bytes_ to // max values to avoid GC during app launch. if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) { - // Set max_allowed_footprint_ to the largest allowed value. + // Set target_footprint_ to the largest allowed value. SetIdealFootprint(growth_limit_); // Set concurrent_start_bytes_ to half of the heap size. - concurrent_start_bytes_ = std::max(max_allowed_footprint_ / 2, GetBytesAllocated()); + size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); + concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated()); GetTaskProcessor()->AddTask( self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS))); diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 57c7376be6..aa09cbed5c 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -126,7 +126,6 @@ static constexpr bool kUseThreadLocalAllocationStack = true; class Heap { public: - // If true, measure the total allocation time. 
static constexpr size_t kDefaultStartingSize = kPageSize; static constexpr size_t kDefaultInitialSize = 2 * MB; static constexpr size_t kDefaultMaximumSize = 256 * MB; @@ -155,6 +154,16 @@ class Heap { // Used so that we don't overflow the allocation time atomic integer. static constexpr size_t kTimeAdjust = 1024; + // Client should call NotifyNativeAllocations every kNotifyNativeInterval allocations. + // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order + // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec. + static constexpr uint32_t kNotifyNativeInterval = 32; + + // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the + // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to + // make it safe to allocate that many bytes between checks. + static constexpr size_t kCheckImmediatelyThreshold = 300000; + // How often we allow heap trimming to happen (nanoseconds). static constexpr uint64_t kHeapTrimWait = MsToNs(5000); // How long we wait after a transition request to perform a collector transition (nanoseconds). @@ -187,7 +196,7 @@ class Heap { bool low_memory_mode, size_t long_pause_threshold, size_t long_gc_threshold, - bool ignore_max_footprint, + bool ignore_target_footprint, bool use_tlab, bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, @@ -198,7 +207,9 @@ class Heap { bool gc_stress_mode, bool measure_gc_performance, bool use_homogeneous_space_compaction, - uint64_t min_interval_homogeneous_space_compaction_by_oom); + uint64_t min_interval_homogeneous_space_compaction_by_oom, + bool dump_region_info_before_gc, + bool dump_region_info_after_gc); ~Heap(); @@ -269,10 +280,22 @@ class Heap { void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) REQUIRES_SHARED(Locks::mutator_lock_); + // Inform the garbage collector of non-malloc-allocated native memory that might become + // reclaimable in the future as a result of Java garbage collection. void RegisterNativeAllocation(JNIEnv* env, size_t bytes) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void RegisterNativeFree(JNIEnv* env, size_t bytes); + // Notify the garbage collector of malloc allocations that might be reclaimable + // as a result of Java garbage collection. Each such call represents approximately + // kNotifyNativeInterval such allocations. + void NotifyNativeAllocations(JNIEnv* env) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); + + uint32_t GetNotifyNativeInterval() { + return kNotifyNativeInterval; + } + // Change the allocator, updates entrypoints. void ChangeAllocator(AllocatorType allocator) REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_); @@ -397,11 +420,16 @@ class Heap { REQUIRES(!Locks::heap_bitmap_lock_) REQUIRES(Locks::mutator_lock_); - double GetWeightedAllocatedBytes() const { - return weighted_allocated_bytes_; + double GetPreGcWeightedAllocatedBytes() const { + return pre_gc_weighted_allocated_bytes_; } - void CalculateWeightedAllocatedBytes(); + double GetPostGcWeightedAllocatedBytes() const { + return post_gc_weighted_allocated_bytes_; + } + + void CalculatePreGcWeightedAllocatedBytes(); + void CalculatePostGcWeightedAllocatedBytes(); uint64_t GetTotalGcCpuTime(); uint64_t GetProcessCpuStartTime() const { @@ -531,21 +559,20 @@ class Heap { // Returns approximately how much free memory we have until the next GC happens.
size_t GetFreeMemoryUntilGC() const { - return max_allowed_footprint_ - GetBytesAllocated(); + return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed), + GetBytesAllocated()); } // Returns approximately how much free memory we have until the next OOME happens. size_t GetFreeMemoryUntilOOME() const { - return growth_limit_ - GetBytesAllocated(); + return UnsignedDifference(growth_limit_, GetBytesAllocated()); } // Returns how much free memory we have until we need to grow the heap to perform an allocation. // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory. size_t GetFreeMemory() const { - size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_relaxed); - size_t total_memory = GetTotalMemory(); - // Make sure we don't get a negative number. - return total_memory - std::min(total_memory, byte_allocated); + return UnsignedDifference(GetTotalMemory(), + num_bytes_allocated_.load(std::memory_order_relaxed)); } // Get the space that corresponds to an object's address. Current implementation searches all @@ -858,6 +885,9 @@ class Heap { REQUIRES(!*gc_complete_lock_); void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_); + double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns, + uint64_t current_process_cpu_time) const; + // Create a mem map with a preferred base address. static MemMap MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, @@ -869,12 +899,16 @@ class Heap { return main_space_backup_ != nullptr; } + static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) { + return x > y ? x - y : 0; + } + static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) { return + allocator_type != kAllocatorTypeRegionTLAB && allocator_type != kAllocatorTypeBumpPointer && allocator_type != kAllocatorTypeTLAB && - allocator_type != kAllocatorTypeRegion && - allocator_type != kAllocatorTypeRegionTLAB; + allocator_type != kAllocatorTypeRegion; } static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) { if (kUseReadBarrier) { @@ -882,24 +916,30 @@ class Heap { return true; } return - allocator_type != kAllocatorTypeBumpPointer && - allocator_type != kAllocatorTypeTLAB; + allocator_type != kAllocatorTypeTLAB && + allocator_type != kAllocatorTypeBumpPointer; } static bool IsMovingGc(CollectorType collector_type) { return + collector_type == kCollectorTypeCC || collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS || - collector_type == kCollectorTypeCC || collector_type == kCollectorTypeCCBackground || collector_type == kCollectorTypeHomogeneousSpaceCompact; } bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const REQUIRES_SHARED(Locks::mutator_lock_); - ALWAYS_INLINE void CheckConcurrentGC(Thread* self, - size_t new_num_bytes_allocated, - ObjPtr<mirror::Object>* obj) + + // Checks whether we should garbage collect: + ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated); + float NativeMemoryOverTarget(size_t current_native_bytes); + ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self, + size_t new_num_bytes_allocated, + ObjPtr<mirror::Object>* obj) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); + void CheckConcurrentGCForNative(Thread* self) + REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); accounting::ObjectStack* GetMarkStack() { return mark_stack_.get(); @@ -960,6 +1000,11 @@ class Heap { void 
ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) REQUIRES_SHARED(Locks::mutator_lock_); + // Are we out of memory, and thus should force a GC or fail? + // For concurrent collectors, out of memory is defined by growth_limit_. + // For nonconcurrent collectors it is defined by target_footprint_ unless grow is + // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_ + // to accommodate the allocation. ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size, bool grow); @@ -1023,7 +1068,7 @@ class Heap { // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which // the GC was run. void GrowForUtilization(collector::GarbageCollector* collector_ran, - uint64_t bytes_allocated_before_gc = 0); + size_t bytes_allocated_before_gc = 0); size_t GetPercentFree(); @@ -1057,8 +1102,8 @@ class Heap { // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark // sweep GC, false for other GC types. bool IsGcConcurrent() const ALWAYS_INLINE { - return collector_type_ == kCollectorTypeCMS || - collector_type_ == kCollectorTypeCC || + return collector_type_ == kCollectorTypeCC || + collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCCBackground; } @@ -1087,15 +1132,19 @@ class Heap { return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull; } - // How large new_native_bytes_allocated_ can grow before we trigger a new - // GC. + // Return the amount of space we allow for native memory when deciding whether to + // collect. We collect when a weighted sum of Java memory plus native memory exceeds + // the similarly weighted sum of the Java heap size target and this value. ALWAYS_INLINE size_t NativeAllocationGcWatermark() const { - // Reuse max_free_ for the native allocation gc watermark, so that the - // native heap is treated in the same way as the Java heap in the case - // where the gc watermark update would exceed max_free_. Using max_free_ - // instead of the target utilization means the watermark doesn't depend on - // the current number of registered native allocations. - return max_free_; + // It probably makes most sense to use a constant multiple of target_footprint_. + // This is a good indication of the live data size, together with the + // intended space-time trade-off, as expressed by SetTargetHeapUtilization. + // For a fixed target utilization, the amount of GC effort per native + // allocated byte remains roughly constant as the Java heap size changes. + // But we previously triggered on max_free_ native allocation which is often much + // smaller. To avoid unexpected growth, we partially keep that limit in place for now. + // TODO: Consider HeapGrowthMultiplier(). Maybe. + return std::min(target_footprint_.load(std::memory_order_relaxed), 2 * max_free_); } ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke); @@ -1105,6 +1154,11 @@ class Heap { // Keeps the VLOG code out of heap-inl.h, which is transitively included in half the world. static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size); + // Return our best approximation of the number of bytes of native memory that + // are currently in use, and could possibly be reclaimed as an indirect result + // of a garbage collection. + size_t GetNativeBytes(); + // All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_); @@ -1175,15 +1229,18 @@ class Heap { // Starting time of the new process; meant to be used for measuring total process CPU time. uint64_t process_cpu_start_time_ns_; - // Last time GC started; meant to be used to measure the duration between two GCs. - uint64_t last_process_cpu_time_ns_; + // Last time (before and after) GC started; meant to be used to measure the + // duration between two GCs. + uint64_t pre_gc_last_process_cpu_time_ns_; + uint64_t post_gc_last_process_cpu_time_ns_; - // allocated_bytes * (current_process_cpu_time - last_process_cpu_time) - double weighted_allocated_bytes_; + // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time) + double pre_gc_weighted_allocated_bytes_; + double post_gc_weighted_allocated_bytes_; - // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is - // useful for benchmarking since it reduces time spent in GC to a low %. - const bool ignore_max_footprint_; + // If we ignore the target footprint it lets the heap grow until it hits the heap capacity, this + // is useful for benchmarking since it reduces time spent in GC to a low %. + const bool ignore_target_footprint_; // Lock which guards zygote space creation. Mutex zygote_creation_lock_; @@ -1232,14 +1289,18 @@ class Heap { // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap // programs it is "cleared" making it the same as capacity. + // Only weakly enforced for simultaneous allocations. size_t growth_limit_; - // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating - // a GC should be triggered. - size_t max_allowed_footprint_; + // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for + // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the + // concurrent GC case. + Atomic<size_t> target_footprint_; // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that // it completes ahead of an allocation failing. + // A multiple of this is also used to determine when to trigger a GC in response to native + // allocation. size_t concurrent_start_bytes_; // Since the heap was created, how many bytes have been freed. @@ -1252,19 +1313,18 @@ class Heap { // TLABS in their entirety, even if they have not yet been parceled out. Atomic<size_t> num_bytes_allocated_; - // Number of registered native bytes allocated since the last time GC was - // triggered. Adjusted after each RegisterNativeAllocation and - // RegisterNativeFree. Used to determine when to trigger GC for native - // allocations. - // See the REDESIGN section of go/understanding-register-native-allocation. - Atomic<size_t> new_native_bytes_allocated_; - - // Number of registered native bytes allocated prior to the last time GC was - // triggered, for debugging purposes. The current number of registered - // native bytes is determined by taking the sum of - // old_native_bytes_allocated_ and new_native_bytes_allocated_. + // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and + // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should + // not include bytes allocated through the system malloc, since those are implicitly included. 
+ Atomic<size_t> native_bytes_registered_; + + // Approximately the smallest value of GetNativeBytes() we've seen since the last GC. Atomic<size_t> old_native_bytes_allocated_; + // Total number of native objects of which we were notified since the beginning of time, mod 2^32. + // Allows us to check for GC only roughly every kNotifyNativeInterval allocations. + Atomic<uint32_t> native_objects_notified_; + // Number of bytes freed by thread local buffer revokes. This will // cancel out the ahead-of-time bulk counting of bytes allocated in // rosalloc thread-local buffers. It is temporarily accumulated @@ -1349,10 +1409,10 @@ class Heap { // Minimum free guarantees that you always have at least min_free_ free bytes after growing for // utilization, regardless of target utilization ratio. - size_t min_free_; + const size_t min_free_; // The ideal maximum free size, when we grow the heap for utilization. - size_t max_free_; + const size_t max_free_; // Target ideal heap utilization ratio. double target_utilization_; @@ -1448,6 +1508,11 @@ class Heap { // allocating. bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_); + // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to + // emit region info before and after each GC cycle. + bool dump_region_info_before_gc_; + bool dump_region_info_after_gc_; + // Boot image spaces. std::vector<space::ImageSpace*> boot_image_spaces_; diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 4c2074dee2..4f9b3f9955 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -21,13 +21,13 @@ #include <unistd.h> #include <random> -#include <thread> #include "android-base/stringprintf.h" #include "android-base/strings.h" #include "art_field-inl.h" #include "art_method-inl.h" +#include "base/array_ref.h" #include "base/bit_memory_region.h" #include "base/callee_save_type.h" #include "base/enums.h" @@ -44,6 +44,7 @@ #include "dex/dex_file_loader.h" #include "exec_utils.h" #include "gc/accounting/space_bitmap-inl.h" +#include "gc/task_processor.h" #include "image-inl.h" #include "image_space_fs.h" #include "intern_table-inl.h" @@ -59,6 +60,7 @@ namespace art { namespace gc { namespace space { +using android::base::StringAppendF; using android::base::StringPrintf; Atomic<uint32_t> ImageSpace::bitmap_index_(0); @@ -654,6 +656,22 @@ class ImageSpace::PatchArtMethodVisitor final : public ArtMethodVisitor { const CodeVisitor code_visitor_; }; +template <typename ReferenceVisitor> +class ImageSpace::ClassTableVisitor final { + public: + explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor) + : reference_visitor_(reference_visitor) {} + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(root->AsMirrorPtr() != nullptr); + root->Assign(reference_visitor_(root->AsMirrorPtr())); + } + + private: + ReferenceVisitor reference_visitor_; +}; + // Helper class encapsulating loading, so we can access private ImageSpace members (this is a // nested class), but not declare functions in the header. 
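The image_space.cc hunks that follow make RelocateInPlace a template over the pointer size, with one runtime dispatch at the call site, so the hot fixup loops see a compile-time constant. A minimal sketch of the pattern with illustrative names:

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    template <PointerSize kPointerSize>
    bool RelocateSketch(uint8_t* base, size_t num_pointers) {
      constexpr size_t kStep = static_cast<size_t>(kPointerSize);
      for (size_t i = 0; i != num_pointers; ++i) {
        uint8_t* slot = base + i * kStep;  // ...patch the pointer at 'slot'...
        (void)slot;
      }
      return true;
    }

    // One branch outside the loops instead of a pointer-size check inside them.
    bool Relocate(PointerSize pointer_size, uint8_t* base, size_t num_pointers) {
      return pointer_size == PointerSize::k64
                 ? RelocateSketch<PointerSize::k64>(base, num_pointers)
                 : RelocateSketch<PointerSize::k32>(base, num_pointers);
    }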
class ImageSpace::Loader { @@ -666,31 +684,12 @@ class ImageSpace::Loader { REQUIRES_SHARED(Locks::mutator_lock_) { TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image)); - const bool create_thread_pool = true; - std::unique_ptr<ThreadPool> thread_pool; - if (create_thread_pool) { - TimingLogger::ScopedTiming timing("CreateThreadPool", &logger); - ScopedThreadStateChange stsc(Thread::Current(), kNative); - constexpr size_t kStackSize = 64 * KB; - constexpr size_t kMaxRuntimeWorkers = 4u; - const size_t num_workers = - std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers); - thread_pool.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize)); - thread_pool->StartWorkers(Thread::Current()); - } - std::unique_ptr<ImageSpace> space = Init(image_filename, image_location, oat_file, &logger, - thread_pool.get(), image_reservation, error_msg); - if (thread_pool != nullptr) { - TimingLogger::ScopedTiming timing("CreateThreadPool", &logger); - ScopedThreadStateChange stsc(Thread::Current(), kNative); - thread_pool.reset(); - } if (space != nullptr) { uint32_t expected_reservation_size = RoundUp(space->GetImageHeader().GetImageSize(), kPageSize); @@ -701,11 +700,22 @@ class ImageSpace::Loader { TimingLogger::ScopedTiming timing("RelocateImage", &logger); ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin()); - if (!RelocateInPlace(*image_header, - space->GetMemMap()->Begin(), - space->GetLiveBitmap(), - oat_file, - error_msg)) { + const PointerSize pointer_size = image_header->GetPointerSize(); + bool result; + if (pointer_size == PointerSize::k64) { + result = RelocateInPlace<PointerSize::k64>(*image_header, + space->GetMemMap()->Begin(), + space->GetLiveBitmap(), + oat_file, + error_msg); + } else { + result = RelocateInPlace<PointerSize::k32>(*image_header, + space->GetMemMap()->Begin(), + space->GetLiveBitmap(), + oat_file, + error_msg); + } + if (!result) { return nullptr; } Runtime* runtime = Runtime::Current(); @@ -740,7 +750,6 @@ class ImageSpace::Loader { const char* image_location, const OatFile* oat_file, TimingLogger* logger, - ThreadPool* thread_pool, /*inout*/MemMap* image_reservation, /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) { @@ -829,7 +838,6 @@ class ImageSpace::Loader { *image_header, file->Fd(), logger, - thread_pool, image_reservation, error_msg); if (!map.IsValid()) { @@ -920,7 +928,6 @@ class ImageSpace::Loader { const ImageHeader& image_header, int fd, TimingLogger* logger, - ThreadPool* pool, /*inout*/MemMap* image_reservation, /*out*/std::string* error_msg) { TimingLogger::ScopedTiming timing("MapImageFile", logger); @@ -964,9 +971,11 @@ class ImageSpace::Loader { } memcpy(map.Begin(), &image_header, sizeof(ImageHeader)); + Runtime::ScopedThreadPoolUsage stpu; + ThreadPool* const pool = stpu.GetThreadPool(); const uint64_t start = NanoTime(); Thread* const self = Thread::Current(); - const size_t kMinBlocks = 2; + static constexpr size_t kMinBlocks = 2u; const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks; for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) { auto function = [&](Thread*) { @@ -1089,11 +1098,8 @@ class ImageSpace::Loader { class FixupObjectVisitor : public FixupVisitor { public: template<typename... Args> - explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited, - const PointerSize pointer_size, - Args... 
args) + explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited, Args... args) : FixupVisitor(args...), - pointer_size_(pointer_size), visited_(visited) {} // Fix up separately since we also need to fix up method entrypoints. @@ -1105,39 +1111,14 @@ class ImageSpace::Loader { ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, - bool is_static ATTRIBUTE_UNUSED) const NO_THREAD_SAFETY_ANALYSIS { - // There could be overlap between ranges, we must avoid visiting the same reference twice. - // Avoid the class field since we already fixed it up in FixupClassVisitor. - if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) { - // Space is not yet added to the heap, don't do a read barrier. - mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>( - offset); - // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the - // image. - obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref)); - } - } - - // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the - // boot image. Uses the bitmap to ensure the same array is not visited multiple times. - template <typename Visitor> - void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const - NO_THREAD_SAFETY_ANALYSIS { - DCHECK(array != nullptr); - DCHECK(visitor.IsInAppImage(array)); - // The bit for the array contents is different than the bit for the array. Since we may have - // already visited the array as a long / int array from walking the bitmap without knowing it - // was a pointer array. - static_assert(kObjectAlignment == 8u, "array bit may be in another object"); - mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>( - reinterpret_cast<uintptr_t>(array) + kObjectAlignment); - // If the bit is not set then the contents have not yet been updated. - if (!visited_->Test(contents_bit)) { - array->Fixup<kVerifyNone>(array, pointer_size_, visitor); - visited_->Set(contents_bit); - } + // Space is not yet added to the heap, don't do a read barrier. + mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>( + offset); + // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the + // image. + obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref)); } // java.lang.ref.Reference visitor. @@ -1152,81 +1133,16 @@ class ImageSpace::Loader { void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { - if (visited_->Test(obj)) { - // Already visited. - return; - } - visited_->Set(obj); - - // Handle class specially first since we need it to be updated to properly visit the rest of - // the instance fields. - { - mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>(); - DCHECK(klass != nullptr) << "Null class in image"; - // No AsClass since our fields aren't quite fixed up yet. - mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass)); - if (klass != new_klass) { - obj->SetClass<kVerifyNone>(new_klass); - } - if (new_klass != klass && IsInAppImage(new_klass)) { - // Make sure the klass contents are fixed up since we depend on it to walk the fields. 
- operator()(new_klass); - } - } - - if (obj->IsClass()) { - mirror::Class* klass = obj->AsClass<kVerifyNone>(); - // Fixup super class before visiting instance fields which require - // information from their super class to calculate offsets. - mirror::Class* super_class = - klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr(); - if (super_class != nullptr) { - mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class)); - if (new_super_class != super_class && IsInAppImage(new_super_class)) { - // Recursively fix all dependencies. - operator()(new_super_class); - } - } - } - - obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>( - *this, - *this); - // Note that this code relies on no circular dependencies. - // We want to use our own class loader and not the one in the image. - if (obj->IsClass<kVerifyNone>()) { - mirror::Class* as_klass = obj->AsClass<kVerifyNone>(); - FixupObjectAdapter visitor(boot_image_, app_image_, app_oat_); - as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor); - // Deal with the pointer arrays. Use the helper function since multiple classes can reference - // the same arrays. - mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>(); - if (vtable != nullptr && IsInAppImage(vtable)) { - operator()(vtable); - UpdatePointerArrayContents(vtable, visitor); - } - mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>(); - // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid - // contents. - if (IsInAppImage(iftable)) { - operator()(iftable); - for (int32_t i = 0, count = iftable->Count(); i < count; ++i) { - if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) { - mirror::PointerArray* methods = - iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i); - if (visitor.IsInAppImage(methods)) { - operator()(methods); - DCHECK(methods != nullptr); - UpdatePointerArrayContents(methods, visitor); - } - } - } - } + if (!visited_->Set(obj)) { + // Not already visited. + obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>( + *this, + *this); + CHECK(!obj->IsClass()); } } private: - const PointerSize pointer_size_; gc::accounting::ContinuousSpaceBitmap* const visited_; }; @@ -1306,6 +1222,7 @@ class ImageSpace::Loader { // Relocate an image space mapped at target_base which possibly used to be at a different base // address. In place means modifying a single ImageSpace in place rather than relocating from // one ImageSpace to another. + template <PointerSize kPointerSize> static bool RelocateInPlace(ImageHeader& image_header, uint8_t* target_base, accounting::ContinuousSpaceBitmap* bitmap, @@ -1317,7 +1234,6 @@ class ImageSpace::Loader { uint32_t boot_image_end = 0; uint32_t boot_oat_begin = 0; uint32_t boot_oat_end = 0; - const PointerSize pointer_size = image_header.GetPointerSize(); gc::Heap* const heap = Runtime::Current()->GetHeap(); heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end); if (boot_image_begin == boot_image_end) { @@ -1359,11 +1275,8 @@ class ImageSpace::Loader { return true; } ScopedDebugDisallowReadBarriers sddrb(Thread::Current()); - // Need to update the image to be at the target base. 
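The simplified visitor above leans on the visited bitmap's Set() returning the previous state, so "if (!visited_->Set(obj))" both tests and claims an object in one step. A self-contained sketch of that idiom, with indices standing in for object addresses:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class VisitedBitmap {
     public:
      explicit VisitedBitmap(size_t num_bits) : words_((num_bits + 63) / 64) {}

      // Atomically sets the bit; returns true if it was already set.
      bool Set(size_t index) {
        const uint64_t mask = uint64_t{1} << (index % 64);
        const uint64_t old =
            words_[index / 64].fetch_or(mask, std::memory_order_relaxed);
        return (old & mask) != 0;
      }

     private:
      std::vector<std::atomic<uint64_t>> words_;
    };

    // Visit-once traversal: only the first caller to claim the bit recurses.
    //   if (!bitmap.Set(ObjectIndex(obj))) { VisitReferences(obj); }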
- const ImageSection& objects_section = image_header.GetObjectsSection();
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
 FixupObjectAdapter fixup_adapter(boot_image, app_image, app_oat);
+ PatchObjectVisitor<kPointerSize, FixupObjectAdapter> patch_object_visitor(fixup_adapter);
 if (fixup_image) {
 // Two pass approach, fix up all classes first, then fix up non class-objects.
 // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1371,16 +1284,64 @@
 gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
 target_base,
 image_header.GetImageSize()));
- FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
- pointer_size,
- boot_image,
- app_image,
- app_oat);
- TimingLogger::ScopedTiming timing("Fixup classes", &logger);
- // Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
- // its probably not required.
+ FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(), boot_image, app_image, app_oat);
+ {
+ TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+ const auto& class_table_section = image_header.GetClassTableSection();
+ if (class_table_section.Size() > 0u) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassTableVisitor class_table_visitor(fixup_adapter);
+ size_t read_count = 0u;
+ const uint8_t* data = target_base + class_table_section.Offset();
+ // We avoid making a copy of the data since we want modifications to be propagated to the
+ // memory map.
+ ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+ for (ClassTable::TableSlot& slot : temp_set) {
+ slot.VisitRoot(class_table_visitor);
+ mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+ if (!fixup_adapter.IsInAppImage(klass)) {
+ continue;
+ }
+ const bool already_marked = visited_bitmap->Set(klass);
+ CHECK(!already_marked) << "App image class already visited";
+ patch_object_visitor.VisitClass(klass);
+ // Then patch the non-embedded vtable and iftable.
+ mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr &&
+ fixup_object_visitor.IsInAppImage(vtable) &&
+ !visited_bitmap->Set(vtable)) {
+ patch_object_visitor.VisitPointerArray(vtable);
+ }
+ auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ if (iftable != nullptr && fixup_object_visitor.IsInAppImage(iftable)) {
+ // Avoid processing the fields of iftable since we will process them later anyway
+ // below.
+ int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+ for (int32_t i = 0; i != ifcount; ++i) {
+ mirror::PointerArray* unpatched_ifarray =
+ iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+ if (unpatched_ifarray != nullptr) {
+ // The iftable has not been patched, so we need to explicitly adjust the pointer.
+ mirror::PointerArray* ifarray = fixup_adapter(unpatched_ifarray);
+ if (fixup_object_visitor.IsInAppImage(ifarray) &&
+ !visited_bitmap->Set(ifarray)) {
+ patch_object_visitor.VisitPointerArray(ifarray);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Fixup objects may read fields in the boot image; use the mutator lock here for sanity.
+ // Though it's probably not required.
+ TimingLogger::ScopedTiming timing("Fixup cobjects", &logger); ScopedObjectAccess soa(Thread::Current()); - timing.NewTiming("Fixup objects"); + // Need to update the image to be at the target base. + const ImageSection& objects_section = image_header.GetObjectsSection(); + uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset()); + uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End()); bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor); // Fixup image roots. CHECK(app_image.InSource(reinterpret_cast<uintptr_t>( @@ -1392,96 +1353,19 @@ class ImageSpace::Loader { AsObjectArray<mirror::DexCache, kVerifyNone>(); for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) { mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i); - // Fix up dex cache pointers. - mirror::StringDexCacheType* strings = dex_cache->GetStrings(); - if (strings != nullptr) { - mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings); - if (strings != new_strings) { - dex_cache->SetStrings(new_strings); - } - dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter); - } - mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes(); - if (types != nullptr) { - mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types); - if (types != new_types) { - dex_cache->SetResolvedTypes(new_types); - } - dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter); - } - mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods(); - if (methods != nullptr) { - mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods); - if (methods != new_methods) { - dex_cache->SetResolvedMethods(new_methods); - } - for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) { - auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size); - ArtMethod* orig = pair.object; - ArtMethod* copy = fixup_adapter.ForwardObject(orig); - if (orig != copy) { - pair.object = copy; - mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size); - } - } - } - mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields(); - if (fields != nullptr) { - mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields); - if (fields != new_fields) { - dex_cache->SetResolvedFields(new_fields); - } - for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) { - mirror::FieldDexCachePair orig = - mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size); - mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index); - if (orig.object != copy.object) { - mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size); - } - } - } - - mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes(); - if (method_types != nullptr) { - mirror::MethodTypeDexCacheType* new_method_types = - fixup_adapter.ForwardObject(method_types); - if (method_types != new_method_types) { - dex_cache->SetResolvedMethodTypes(new_method_types); - } - dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter); - } - GcRoot<mirror::CallSite>* call_sites = dex_cache->GetResolvedCallSites(); - if (call_sites != nullptr) { - GcRoot<mirror::CallSite>* new_call_sites = fixup_adapter.ForwardObject(call_sites); - if (call_sites != new_call_sites) { - dex_cache->SetResolvedCallSites(new_call_sites); - 
} - dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter); - } - - GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings(); - if (preresolved_strings != nullptr) { - GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings); - if (preresolved_strings != new_array) { - dex_cache->SetPreResolvedStrings(new_array); - } - const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings(); - for (size_t j = 0; j < num_preresolved_strings; ++j) { - new_array[j] = GcRoot<mirror::String>( - fixup_adapter(new_array[j].Read<kWithoutReadBarrier>())); - } - } + CHECK(dex_cache != nullptr); + patch_object_visitor.VisitDexCacheArrays(dex_cache); } } { // Only touches objects in the app image, no need for mutator lock. TimingLogger::ScopedTiming timing("Fixup methods", &logger); FixupArtMethodVisitor method_visitor(fixup_image, - pointer_size, + kPointerSize, boot_image, app_image, app_oat); - image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size); + image_header.VisitPackedArtMethods(&method_visitor, target_base, kPointerSize); } if (fixup_image) { { @@ -1492,26 +1376,14 @@ class ImageSpace::Loader { } { TimingLogger::ScopedTiming timing("Fixup imt", &logger); - image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size); + image_header.VisitPackedImTables(fixup_adapter, target_base, kPointerSize); } { TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger); - image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size); + image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, kPointerSize); } // In the app image case, the image methods are actually in the boot image. image_header.RelocateImageMethods(boot_image.Delta()); - const auto& class_table_section = image_header.GetClassTableSection(); - if (class_table_section.Size() > 0u) { - // Note that we require that ReadFromMemory does not make an internal copy of the elements. - // This also relies on visit roots not doing any verification which could fail after we update - // the roots to be the image addresses. - ScopedObjectAccess soa(Thread::Current()); - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - ClassTable temp_table; - temp_table.ReadFromMemory(target_base + class_table_section.Offset()); - FixupRootVisitor root_visitor(boot_image, app_image, app_oat); - temp_table.VisitRoots(root_visitor); - } // Fix up the intern table. 
const auto& intern_table_section = image_header.GetInternedStringsSection(); if (intern_table_section.Size() > 0u) { @@ -1654,8 +1526,10 @@ class ImageSpace::BootImageLoader { *error_msg = StringPrintf("Cannot read header of %s", filename.c_str()); return false; } - if (system_hdr.GetComponentCount() != boot_class_path_.size()) { - *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %zu", + if (system_hdr.GetComponentCount() == 0u || + system_hdr.GetComponentCount() > boot_class_path_.size()) { + *error_msg = StringPrintf("Unexpected component count in %s, received %u, " + "expected non-zero and <= %zu", filename.c_str(), system_hdr.GetComponentCount(), boot_class_path_.size()); @@ -1672,10 +1546,12 @@ class ImageSpace::BootImageLoader { return false; } + ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(), + system_hdr.GetComponentCount()); std::vector<std::string> locations = - ExpandMultiImageLocations(boot_class_path_locations_, image_location_); + ExpandMultiImageLocations(provided_locations, image_location_); std::vector<std::string> filenames = - ExpandMultiImageLocations(boot_class_path_locations_, filename); + ExpandMultiImageLocations(provided_locations, filename); DCHECK_EQ(locations.size(), filenames.size()); std::vector<std::unique_ptr<ImageSpace>> spaces; spaces.reserve(locations.size()); @@ -1694,7 +1570,7 @@ class ImageSpace::BootImageLoader { } for (size_t i = 0u, size = spaces.size(); i != size; ++i) { std::string expected_boot_class_path = - (i == 0u) ? android::base::Join(boot_class_path_locations_, ':') : std::string(); + (i == 0u) ? android::base::Join(provided_locations, ':') : std::string(); if (!OpenOatFile(spaces[i].get(), boot_class_path_[i], expected_boot_class_path, @@ -1766,22 +1642,6 @@ class ImageSpace::BootImageLoader { BitMemoryRegion visited_objects_; }; - template <typename ReferenceVisitor> - class ClassTableVisitor final { - public: - explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor) - : reference_visitor_(reference_visitor) {} - - void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const - REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK(root->AsMirrorPtr() != nullptr); - root->Assign(reference_visitor_(root->AsMirrorPtr())); - } - - private: - ReferenceVisitor reference_visitor_; - }; - template <PointerSize kPointerSize> static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces, uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) { @@ -2014,7 +1874,6 @@ class ImageSpace::BootImageLoader { image_location.c_str(), /*oat_file=*/ nullptr, logger, - /*thread_pool=*/ nullptr, image_reservation, error_msg); } @@ -2444,9 +2303,113 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg return true; } +std::string ImageSpace::GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path, + const std::string& image_location, + InstructionSet image_isa, + /*out*/std::string* error_msg) { + std::string system_filename; + bool has_system = false; + std::string cache_filename; + bool has_cache = false; + bool dalvik_cache_exists = false; + bool is_global_cache = false; + if (!FindImageFilename(image_location.c_str(), + image_isa, + &system_filename, + &has_system, + &cache_filename, + &dalvik_cache_exists, + &has_cache, + &is_global_cache)) { + *error_msg = StringPrintf("Unable to find image file for %s and %s", + image_location.c_str(), + GetInstructionSetString(image_isa)); + return std::string(); + } + 
+ DCHECK(has_system || has_cache); + const std::string& filename = has_system ? system_filename : cache_filename; + std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg); + if (header == nullptr) { + return std::string(); + } + if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) { + *error_msg = StringPrintf("Unexpected component count in %s, received %u, " + "expected non-zero and <= %zu", + filename.c_str(), + header->GetComponentCount(), + boot_class_path.size()); + return std::string(); + } + + std::string boot_image_checksum = + StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum()); + ArrayRef<const std::string> boot_class_path_tail = + ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount()); + for (const std::string& bcp_filename : boot_class_path_tail) { + std::vector<std::unique_ptr<const DexFile>> dex_files; + const ArtDexFileLoader dex_file_loader; + if (!dex_file_loader.Open(bcp_filename.c_str(), + bcp_filename, // The location does not matter here. + /*verify=*/ false, + /*verify_checksum=*/ false, + error_msg, + &dex_files)) { + return std::string(); + } + DCHECK(!dex_files.empty()); + StringAppendF(&boot_image_checksum, ":d"); + for (const std::unique_ptr<const DexFile>& dex_file : dex_files) { + StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum()); + } + } + return boot_image_checksum; +} + +std::string ImageSpace::GetBootClassPathChecksums( + const std::vector<ImageSpace*>& image_spaces, + const std::vector<const DexFile*>& boot_class_path) { + DCHECK(!image_spaces.empty()); + const ImageHeader& primary_header = image_spaces.front()->GetImageHeader(); + uint32_t component_count = primary_header.GetComponentCount(); + DCHECK_EQ(component_count, image_spaces.size()); + std::string boot_image_checksum = + StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum()); + size_t pos = 0u; + for (const ImageSpace* space : image_spaces) { + size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size(); + if (kIsDebugBuild) { + CHECK_NE(num_dex_files, 0u); + CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos); + for (size_t i = 0; i != num_dex_files; ++i) { + CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(), + boot_class_path[pos + i]->GetLocation()); + } + } + pos += num_dex_files; + } + ArrayRef<const DexFile* const> boot_class_path_tail = + ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos); + DCHECK(boot_class_path_tail.empty() || + !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str())); + for (const DexFile* dex_file : boot_class_path_tail) { + if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) { + StringAppendF(&boot_image_checksum, ":d"); + } + StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum()); + } + return boot_image_checksum; +} + std::vector<std::string> ImageSpace::ExpandMultiImageLocations( const std::vector<std::string>& dex_locations, const std::string& image_location) { + return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location); +} + +std::vector<std::string> ImageSpace::ExpandMultiImageLocations( + ArrayRef<const std::string> dex_locations, + const std::string& image_location) { DCHECK(!dex_locations.empty()); // Find the path. 
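Note on the checksum strings built by the two GetBootClassPathChecksums() overloads above: the format is an image part "i;<component count>/<image checksum>", followed by one ":d" per remaining boot class path location and one "/<checksum>" per dex file in it (a multidex location appends several "/<checksum>" after its single ":d"). A minimal sketch of the format, using plain C++ and hypothetical checksum values rather than ART's StringPrintf/StringAppendF helpers:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::string s;
  char buf[16];
  // Image part: "i;<component count>/<image checksum>" (values hypothetical).
  std::snprintf(buf, sizeof(buf), "i;%d/%08x", 2, 0x12345678u);
  s += buf;
  // One ":d" per extra boot class path location; each dex file in that
  // location (several for multidex) appends "/<location checksum>".
  std::vector<std::vector<unsigned>> extra = {{0xdeadbeefu}, {0x0badf00du, 0x0cafe123u}};
  for (const auto& location : extra) {
    s += ":d";
    for (unsigned checksum : location) {
      std::snprintf(buf, sizeof(buf), "/%08x", checksum);
      s += buf;
    }
  }
  std::printf("%s\n", s.c_str());  // i;2/12345678:d/deadbeef:d/0badf00d/0cafe123
}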
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index dbc12d103e..14e364a44c 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h
@@ -24,6 +24,8 @@
 namespace art {
+template <typename T> class ArrayRef;
+class DexFile;
 class OatFile;
 namespace gc {
@@ -124,6 +126,19 @@ class ImageSpace : public MemMapSpace {
 bool* has_data,
 bool *is_global_cache);
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot class path, image location and ISA (may differ from the ISA of an
+ // initialized Runtime). The boot image and dex files do not need to be loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+ const std::string& image_location,
+ InstructionSet image_isa,
+ /*out*/std::string* error_msg);
+
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot image and boot class path dex files loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
+ const std::vector<const DexFile*>& boot_class_path);
+
 // Expand a single image location to multi-image locations based on the dex locations.
 static std::vector<std::string> ExpandMultiImageLocations(
 const std::vector<std::string>& dex_locations,
@@ -188,7 +203,14 @@ class ImageSpace : public MemMapSpace {
 friend class Space;
 private:
+ // Internal overload that takes ArrayRef<> instead of vector<>.
+ static std::vector<std::string> ExpandMultiImageLocations(
+ ArrayRef<const std::string> dex_locations,
+ const std::string& image_location);
+
 class BootImageLoader;
+ template <typename ReferenceVisitor>
+ class ClassTableVisitor;
 class Loader;
 template <typename PatchObjectVisitor> class PatchArtFieldVisitor;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index 5ff1270b56..9f5c1179e5 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h
@@ -193,6 +193,40 @@ inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
 return bytes;
 }
+template <typename Visitor>
+inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) {
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
+ // Instead of a region-wise scan, find contiguous blocks of un-evac regions and then
+ // visit them. Everything before visit_block_begin has been processed, while
+ // [visit_block_begin, visit_block_end) still needs to be visited.
+ uint8_t* visit_block_begin = nullptr;
+ uint8_t* visit_block_end = nullptr;
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsInUnevacFromSpace()) {
+ // visit_block_begin set to nullptr means a new visit block needs to be started.
+ if (visit_block_begin == nullptr) {
+ visit_block_begin = r->Begin();
+ }
+ visit_block_end = r->End();
+ } else if (visit_block_begin != nullptr) {
+ // Visit the block range as r is not adjacent to current visit block.
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ visit_block_begin = nullptr;
+ }
+ }
+ // Visit last block, if not processed yet.
+ if (visit_block_begin != nullptr) {
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ }
+}
+
 template<bool kToSpaceOnly, typename Visitor>
 inline void RegionSpace::WalkInternal(Visitor&& visitor) {
 // TODO: MutexLock on region_lock_ won't work due to lock order
@@ -205,9 +239,10 @@ inline void RegionSpace::WalkInternal(Visitor&& visitor) {
 continue;
 }
 if (r->IsLarge()) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ // We may visit a large object with live_bytes = 0 here. However, it is
+ // safe as it cannot contain dangling pointers because corresponding regions
+ // (and regions corresponding to dead referents) cannot be allocated for new
+ // allocations without first clearing regions' live_bytes and state.
 mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
 DCHECK(obj->GetClass() != nullptr);
 visitor(obj);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 21cae9371f..07783bacfe 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc
@@ -319,6 +319,7 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
 state == RegionState::kRegionStateLarge) &&
 type == RegionType::kRegionTypeToSpace);
 bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+ bool is_newly_allocated = r->IsNewlyAllocated();
 if (should_evacuate) {
 r->SetAsFromSpace();
 DCHECK(r->IsInFromSpace());
@@ -329,6 +330,17 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
 if (UNLIKELY(state == RegionState::kRegionStateLarge &&
 type == RegionType::kRegionTypeToSpace)) {
 prev_large_evacuated = should_evacuate;
+ // In 2-phase full heap GC, this function is called after marking is
+ // done. So, it is possible that some newly allocated large object is
+ // marked but its live_bytes is still -1. We need to clear the
+ // mark-bit; otherwise, the live_bytes will not be updated in
+ // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
+ // logic.
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && !should_evacuate
+ && is_newly_allocated) {
+ GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+ }
 num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
 DCHECK_GT(num_expected_large_tails, 0U);
 }
@@ -367,7 +379,8 @@ static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
 }
 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
- /* out */ uint64_t* cleared_objects) {
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap) {
 DCHECK(cleared_bytes != nullptr);
 DCHECK(cleared_objects != nullptr);
 *cleared_bytes = 0;
@@ -395,13 +408,18 @@ void RegionSpace::ClearFromSpace(
 // (see b/62194020).
 uint8_t* clear_block_begin = nullptr;
 uint8_t* clear_block_end = nullptr;
- auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+ auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
 r->Clear(/*zero_and_release_pages=*/false);
 if (clear_block_end != r->Begin()) {
 // Region `r` is not adjacent to the current clear block; zero and release
 // pages within the current block and restart a new clear block at the
 // beginning of region `r`.
 ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
 clear_block_begin = r->Begin();
 }
 // Add region `r` to the clear block.
@@ -426,20 +444,23 @@ void RegionSpace::ClearFromSpace(
 // It is also better to clear these regions now instead of at the end of the next GC to
 // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
 // live percent evacuation logic.
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ clear_region(r);
 size_t free_regions = 1;
 // Also release RAM for large tails.
 while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
- DCHECK(r->IsLarge());
 clear_region(&regions_[i + free_regions]);
 ++free_regions;
 }
- *cleared_bytes += r->BytesAllocated();
- *cleared_objects += r->ObjectsAllocated();
 num_non_free_regions_ -= free_regions;
- clear_region(r);
- GetLiveBitmap()->ClearRange(
- reinterpret_cast<mirror::Object*>(r->Begin()),
- reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ // When clear_bitmap is true, this clearing of the bitmap is taken care of in
+ // clear_region().
+ if (!clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ }
 continue;
 }
 r->SetUnevacFromSpaceAsToSpace();
@@ -519,6 +540,11 @@ void RegionSpace::ClearFromSpace(
 }
 // Clear pages for the last block since clearing happens when a new block opens.
 ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
 // Update non_free_region_index_limit_.
 SetNonFreeRegionLimit(new_non_free_region_index_limit);
 evac_region_ = nullptr;
@@ -809,8 +835,14 @@ void RegionSpace::Region::Dump(std::ostream& os) const {
 << " type=" << type_
 << " objects_allocated=" << objects_allocated_
 << " alloc_time=" << alloc_time_
- << " live_bytes=" << live_bytes_
- << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
+ << " live_bytes=" << live_bytes_;
+
+ if (live_bytes_ != static_cast<size_t>(-1)) {
+ os << " ratio over allocated bytes="
+ << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
+ }
+
+ os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
 << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
 << " thread=" << thread_ << '\n';
 }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index 8810f8ce58..75c99ec964 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h
@@ -209,6 +209,15 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
 template <typename Visitor>
 ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
 REQUIRES(Locks::mutator_lock_);
+ // Scans regions and calls the visitor for objects in unevac-space corresponding
+ // to the bits set in 'bitmap'.
+ // Cannot acquire region_lock_ as the visitor may need to acquire it for allocation.
+ // Should not be called concurrently with functions (like SetFromSpace()) which
+ // change regions' type.
+ template <typename Visitor>
+ ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
 accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
 return nullptr;
 }
@@ -228,6 +237,11 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
 return false;
 }
+ bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_LT(idx, num_regions_);
+ return regions_[idx].IsNewlyAllocated();
+ }
+
 bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
 if (HasAddress(ref)) {
 Region* r = RefToRegionUnlocked(ref);
@@ -291,7 +305,9 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
 size_t FromSpaceSize() REQUIRES(!region_lock_);
 size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
 size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+ void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap)
 REQUIRES(!region_lock_);
 void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -310,6 +326,40 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
 }
 }
+ void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ // Newly allocated regions don't need up-to-date live_bytes_ for deciding
+ // whether to be evacuated or not. See Region::ShouldBeEvacuated().
+ if (!r->IsFree() && !r->IsNewlyAllocated()) {
+ r->ZeroLiveBytes();
+ }
+ }
+ }
+
+ size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(HasAddress(ref));
+ uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+ size_t reg_idx = offset / kRegionSize;
+ DCHECK_LT(reg_idx, num_regions_);
+ Region* reg = &regions_[reg_idx];
+ DCHECK_EQ(reg->Idx(), reg_idx);
+ DCHECK(reg->Contains(ref));
+ return reg_idx;
+ }
+ // Return -1 as region index for references outside this region space.
+ size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ if (HasAddress(ref)) {
+ return RegionIdxForRefUnchecked(ref);
+ } else {
+ return static_cast<size_t>(-1);
+ }
+ }
+
 void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
 bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
@@ -515,11 +565,10 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
 ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
 void AddLiveBytes(size_t live_bytes) {
- DCHECK(IsInUnevacFromSpace());
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
 DCHECK(!IsLargeTail());
 DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
- // For large allocations, we always consider all bytes in the
- // regions live.
+ // For large allocations, we always consider all bytes in the regions live.
 live_bytes_ += IsLarge() ?
Top() - begin_ : live_bytes; DCHECK_LE(live_bytes_, BytesAllocated()); } diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index d004d642d0..067c1fa1e4 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -253,13 +253,6 @@ static inline JValue Execute( DCHECK(!shadow_frame.GetMethod()->IsAbstract()); DCHECK(!shadow_frame.GetMethod()->IsNative()); - // Check that we are using the right interpreter. - if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) { - // The flag might be currently being updated on all threads. Retry with lock. - MutexLock tll_mu(self, *Locks::thread_list_lock_); - DCHECK_EQ(self->UseMterp(), CanUseMterp()); - } - if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization. if (kIsDebugBuild) { CHECK_EQ(shadow_frame.GetDexPC(), 0u); @@ -366,6 +359,13 @@ static inline JValue Execute( } } else { // Enter the "with access check" interpreter. + + // The boot classpath should really not have to run access checks. + DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr + || Runtime::Current()->IsVerificationSoftFail() + || Runtime::Current()->IsAotCompiler()) + << method->PrettyMethod(); + if (kInterpreterImplKind == kMterpImplKind) { // No access check variants for Mterp. Just use the switch version. if (transaction_active) { diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index a633a63873..62f5d911bc 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -140,8 +140,10 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self, uint16_t inst_data, JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK_EQ(self->UseMterp(), CanUseMterp()); // Make sure to check for async exceptions before anything else. - if (is_mterp && self->UseMterp()) { + if (is_mterp) { + DCHECK(self->UseMterp()); DCHECK(!self->ObserveAsyncException()); } else if (UNLIKELY(self->ObserveAsyncException())) { return false; @@ -219,7 +221,7 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self, // If the bit is not set, we explicitly recheck all the conditions. // If any of the conditions get falsified, it is important to clear the bit. bool use_fast_path = false; - if (is_mterp && self->UseMterp()) { + if (is_mterp) { use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke(); if (!use_fast_path) { use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method); diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index 912c44463f..fd1430aa70 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -546,12 +546,7 @@ DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode); extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr) REQUIRES_SHARED(Locks::mutator_lock_) { - // Check that we are using the right interpreter. - if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) { - // The flag might be currently being updated on all threads. Retry with lock. 
- MutexLock tll_mu(self, *Locks::thread_list_lock_); - DCHECK_EQ(self->UseMterp(), CanUseMterp()); - } + DCHECK(self->UseMterp()); DCHECK(!Runtime::Current()->IsActiveTransaction()); const Instruction* inst = Instruction::At(dex_pc_ptr); uint16_t inst_data = inst->Fetch16(0); diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h index 82ea47609b..db43b243df 100644 --- a/runtime/intrinsics_list.h +++ b/runtime/intrinsics_list.h @@ -219,7 +219,7 @@ V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \ V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \ V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \ - V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \ + V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \ V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \ SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V) diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc index 853c0cab96..7aa6ddfb00 100644 --- a/runtime/jit/debugger_interface.cc +++ b/runtime/jit/debugger_interface.cc @@ -19,14 +19,17 @@ #include <android-base/logging.h> #include "base/array_ref.h" +#include "base/logging.h" #include "base/mutex.h" #include "base/time_utils.h" +#include "base/utils.h" #include "dex/dex_file.h" #include "thread-current-inl.h" #include "thread.h" #include <atomic> #include <cstddef> +#include <deque> #include <map> // @@ -77,6 +80,10 @@ // namespace art { + +static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock); +static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock); + extern "C" { enum JITAction { JIT_NOACTION = 0, @@ -127,14 +134,14 @@ extern "C" { void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code; // The root data structure describing of all JITed methods. - JITDescriptor __jit_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {}; + JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {}; // The following globals mirror the ones above, but are used to register dex files. void __attribute__((noinline)) __dex_debug_register_code() { __asm__(""); } void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code; - JITDescriptor __dex_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {}; + JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {}; } // Mark the descriptor as "locked", so native tools know the data is being modified. @@ -157,8 +164,7 @@ static JITCodeEntry* CreateJITCodeEntryInternal( JITDescriptor& descriptor, void (*register_code_ptr)(), ArrayRef<const uint8_t> symfile, - bool copy_symfile) - REQUIRES(Locks::native_debug_interface_lock_) { + bool copy_symfile) { // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry. 
 if (copy_symfile) {
 uint8_t* copy = new uint8_t[symfile.size()];
@@ -199,8 +205,7 @@ static void DeleteJITCodeEntryInternal(
 JITDescriptor& descriptor,
 void (*register_code_ptr)(),
 JITCodeEntry* entry,
- bool free_symfile)
- REQUIRES(Locks::native_debug_interface_lock_) {
+ bool free_symfile) {
 CHECK(entry != nullptr);
 const uint8_t* symfile = entry->symfile_addr_;
@@ -238,11 +243,10 @@ static void DeleteJITCodeEntryInternal(
 }
 }
-static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries
- GUARDED_BY(*Locks::native_debug_interface_lock_);
+static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries GUARDED_BY(g_dex_debug_lock);
 void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
- MutexLock mu(self, *Locks::native_debug_interface_lock_);
+ MutexLock mu(self, g_dex_debug_lock);
 DCHECK(dexfile != nullptr);
 // This is just defensive check. The class linker should not register the dex file twice.
 if (g_dex_debug_entries.count(dexfile) == 0) {
@@ -256,7 +260,7 @@ void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
 }
 void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
- MutexLock mu(self, *Locks::native_debug_interface_lock_);
+ MutexLock mu(self, g_dex_debug_lock);
 auto it = g_dex_debug_entries.find(dexfile);
 // We register dex files in the class linker and free them in DexFile_closeDexFile, but
 // there might be cases where we load the dex file without using it in the class linker.
@@ -270,46 +274,134 @@ void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
 }
 // Mapping from handle to entry. Used to manage life-time of the entries.
-static std::map<const void*, JITCodeEntry*> g_jit_debug_entries
- GUARDED_BY(*Locks::native_debug_interface_lock_);
+static std::map<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
+
+// Number of entries added since last packing. Used to pack entries in bulk.
+static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;
+
+// We postpone removal so that it is done in bulk.
+static std::deque<const void*> g_jit_removed_entries GUARDED_BY(g_jit_debug_lock);
+
+// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry per group.
+// The start address of a method's code determines which group it belongs to. The end is irrelevant.
+// As a consequence, newly added mini debug infos will be merged and old ones (GCed) will be pruned.
+static void MaybePackJitMiniDebugInfo(PackElfFileForJITFunction pack,
+ InstructionSet isa,
+ const InstructionSetFeatures* features)
+ REQUIRES(g_jit_debug_lock) {
+ // Size of memory range covered by each JITCodeEntry.
+ // The number of methods per entry is variable (depending on how many fit in that range).
+ constexpr uint32_t kGroupSize = 64 * KB;
+ // Even if there are no removed entries, we want to pack new entries on a regular basis.
+ constexpr uint32_t kPackFrequency = 64;
+
+ std::deque<const void*>& removed_entries = g_jit_removed_entries;
+ std::sort(removed_entries.begin(), removed_entries.end());
+ if (removed_entries.empty() && g_jit_num_unpacked_entries < kPackFrequency) {
+ return; // Nothing to do.
+ }
+
+ std::vector<const uint8_t*> added_elf_files;
+ std::vector<const void*> removed_symbols;
+ auto added_it = g_jit_debug_entries.begin();
+ auto removed_it = removed_entries.begin();
+ while (added_it != g_jit_debug_entries.end()) {
+ // Collect all entries that have been added or removed within our memory range.
+ const void* group_ptr = AlignDown(added_it->first, kGroupSize);
+ added_elf_files.clear();
+ auto added_begin = added_it;
+ while (added_it != g_jit_debug_entries.end() &&
+ AlignDown(added_it->first, kGroupSize) == group_ptr) {
+ added_elf_files.push_back((added_it++)->second->symfile_addr_);
+ }
+ removed_symbols.clear();
+ while (removed_it != removed_entries.end() &&
+ AlignDown(*removed_it, kGroupSize) == group_ptr) {
+ removed_symbols.push_back(*(removed_it++));
+ }
+
+ // Create a new single JITCodeEntry that covers this memory range.
+ if (added_elf_files.size() == 1 && removed_symbols.size() == 0) {
+ continue; // Nothing changed in this memory range.
+ }
+ uint64_t start_time = MilliTime();
+ size_t symbols;
+ std::vector<uint8_t> packed = pack(isa, features, added_elf_files, removed_symbols, &symbols);
+ VLOG(jit)
+ << "JIT mini-debug-info packed"
+ << " for " << group_ptr
+ << " in " << MilliTime() - start_time << "ms"
+ << " files=" << added_elf_files.size()
+ << " removed=" << removed_symbols.size()
+ << " symbols=" << symbols
+ << " size=" << PrettySize(packed.size());
+
+ // Replace the old entries with the new one (with their lifetime temporally overlapping).
+ JITCodeEntry* packed_entry = CreateJITCodeEntryInternal(
+ __jit_debug_descriptor,
+ __jit_debug_register_code_ptr,
+ ArrayRef<const uint8_t>(packed),
+ /*copy_symfile=*/ true);
+ for (auto it = added_begin; it != added_it; ++it) {
+ DeleteJITCodeEntryInternal(__jit_debug_descriptor,
+ __jit_debug_register_code_ptr,
+ /*entry=*/ it->second,
+ /*free_symfile=*/ true);
+ }
+ g_jit_debug_entries.erase(added_begin, added_it);
+ g_jit_debug_entries.emplace(group_ptr, packed_entry);
+ }
+ CHECK(added_it == g_jit_debug_entries.end());
+ CHECK(removed_it == removed_entries.end());
+ removed_entries.clear();
+ g_jit_num_unpacked_entries = 0;
+}
 void AddNativeDebugInfoForJit(Thread* self,
 const void* code_ptr,
- const std::vector<uint8_t>& symfile) {
- MutexLock mu(self, *Locks::native_debug_interface_lock_);
+ const std::vector<uint8_t>& symfile,
+ PackElfFileForJITFunction pack,
+ InstructionSet isa,
+ const InstructionSetFeatures* features) {
+ MutexLock mu(self, g_jit_debug_lock);
 DCHECK_NE(symfile.size(), 0u);
+ MaybePackJitMiniDebugInfo(pack, isa, features);
+
 JITCodeEntry* entry = CreateJITCodeEntryInternal(
 __jit_debug_descriptor,
 __jit_debug_register_code_ptr,
 ArrayRef<const uint8_t>(symfile),
 /*copy_symfile=*/ true);
+
+ VLOG(jit)
+ << "JIT mini-debug-info added"
+ << " for " << code_ptr
+ << " size=" << PrettySize(symfile.size());
+
 // We don't provide code_ptr for type debug info, which means we cannot free it later.
 // (this only happens when --generate-debug-info flag is enabled for the purpose
 // of being debugged with gdb; it does not happen for debuggable apps by default).
 if (code_ptr != nullptr) {
 bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
 DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
+ // Count how many entries we have added since the last mini-debug-info packing.
+ // We avoid g_jit_debug_entries.size() here because it can shrink during packing.
+ g_jit_num_unpacked_entries++; } } void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) { - MutexLock mu(self, *Locks::native_debug_interface_lock_); - auto it = g_jit_debug_entries.find(code_ptr); + MutexLock mu(self, g_jit_debug_lock); // We generate JIT native debug info only if the right runtime flags are enabled, // but we try to remove it unconditionally whenever code is freed from JIT cache. - if (it != g_jit_debug_entries.end()) { - DeleteJITCodeEntryInternal(__jit_debug_descriptor, - __jit_debug_register_code_ptr, - it->second, - /*free_symfile=*/ true); - g_jit_debug_entries.erase(it); + if (!g_jit_debug_entries.empty()) { + g_jit_removed_entries.push_back(code_ptr); } } size_t GetJitMiniDebugInfoMemUsage() { - MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_); + MutexLock mu(Thread::Current(), g_jit_debug_lock); size_t size = 0; for (auto entry : g_jit_debug_entries) { size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*); diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h index 4b0d011295..17beb4baca 100644 --- a/runtime/jit/debugger_interface.h +++ b/runtime/jit/debugger_interface.h @@ -20,6 +20,7 @@ #include <inttypes.h> #include <vector> +#include "arch/instruction_set_features.h" #include "base/locks.h" namespace art { @@ -27,28 +28,35 @@ namespace art { class DexFile; class Thread; +// This method is declared in the compiler library. +// We need to pass it by pointer to be able to call it from runtime. +typedef std::vector<uint8_t> PackElfFileForJITFunction( + InstructionSet isa, + const InstructionSetFeatures* features, + std::vector<const uint8_t*>& added_elf_files, + std::vector<const void*>& removed_symbols, + /*out*/ size_t* num_symbols); + // Notify native tools (e.g. libunwind) that DEX file has been opened. -void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) - REQUIRES(!Locks::native_debug_interface_lock_); +void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile); // Notify native tools (e.g. libunwind) that DEX file has been closed. -void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) - REQUIRES(!Locks::native_debug_interface_lock_); +void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile); // Notify native tools (e.g. libunwind) that JIT has compiled a new method. // The method will make copy of the passed ELF file (to shrink it to the minimum size). void AddNativeDebugInfoForJit(Thread* self, const void* code_ptr, - const std::vector<uint8_t>& symfile) - REQUIRES(!Locks::native_debug_interface_lock_); + const std::vector<uint8_t>& symfile, + PackElfFileForJITFunction pack, + InstructionSet isa, + const InstructionSetFeatures* features); // Notify native tools (e.g. libunwind) that JIT code has been garbage collected. -void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) - REQUIRES(!Locks::native_debug_interface_lock_); +void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr); // Returns approximate memory used by debug info for JIT code. 
-size_t GetJitMiniDebugInfoMemUsage() - REQUIRES(!Locks::native_debug_interface_lock_); +size_t GetJitMiniDebugInfoMemUsage(); } // namespace art diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index e43d771270..03c97f47f0 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -58,7 +58,7 @@ void* Jit::jit_library_handle_ = nullptr; void* Jit::jit_compiler_handle_ = nullptr; void* (*Jit::jit_load_)(void) = nullptr; void (*Jit::jit_unload_)(void*) = nullptr; -bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr; +bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr; void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr; bool (*Jit::jit_generate_debug_info_)(void*) = nullptr; void (*Jit::jit_update_options_)(void*) = nullptr; @@ -242,7 +242,7 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) { return true; } -bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { +bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) { DCHECK(Runtime::Current()->UseJitCompilation()); DCHECK(!method->IsRuntimeMethod()); @@ -272,7 +272,7 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { VLOG(jit) << "Compiling method " << ArtMethod::PrettyMethod(method_to_compile) << " osr=" << std::boolalpha << osr; - bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr); + bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr); code_cache_->DoneCompiling(method_to_compile, self, osr); if (!success) { VLOG(jit) << "Failed to compile method " @@ -291,6 +291,12 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { return success; } +void Jit::WaitForWorkersToBeCreated() { + if (thread_pool_ != nullptr) { + thread_pool_->WaitForWorkersToBeCreated(); + } +} + void Jit::DeleteThreadPool() { Thread* self = Thread::Current(); DCHECK(Runtime::Current()->IsShuttingDown(self)); @@ -549,6 +555,7 @@ class JitCompileTask final : public Task { enum class TaskKind { kAllocateProfile, kCompile, + kCompileBaseline, kCompileOsr, }; @@ -568,10 +575,12 @@ class JitCompileTask final : public Task { ScopedObjectAccess soa(self); switch (kind_) { case TaskKind::kCompile: + case TaskKind::kCompileBaseline: case TaskKind::kCompileOsr: { Runtime::Current()->GetJit()->CompileMethod( method_, self, + /* baseline= */ (kind_ == TaskKind::kCompileBaseline), /* osr= */ (kind_ == TaskKind::kCompileOsr)); break; } diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 7ce5f07672..e5c976669e 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -161,7 +161,7 @@ class Jit { // Create JIT itself. static Jit* Create(JitCodeCache* code_cache, JitOptions* options); - bool CompileMethod(ArtMethod* method, Thread* self, bool osr) + bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) REQUIRES_SHARED(Locks::mutator_lock_); const JitCodeCache* GetCodeCache() const { @@ -174,6 +174,7 @@ class Jit { void CreateThreadPool(); void DeleteThreadPool(); + void WaitForWorkersToBeCreated(); // Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative // loggers. 
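Aside on the JIT changes above: jit_compile_method_ now takes two independent booleans, (baseline, osr), and JitCompileTask picks them from its kind. A minimal sketch of that mapping, with the TaskKind names taken from the diff and the helper function being hypothetical:

#include <cassert>

// Shape of the new entry point (see the function pointer in the diff):
//   bool (*jit_compile_method_)(void* handle, ArtMethod* method, Thread* self,
//                               bool baseline, bool osr);
enum class TaskKind { kAllocateProfile, kCompile, kCompileBaseline, kCompileOsr };

struct CompileFlags { bool baseline; bool osr; };

CompileFlags FlagsForKind(TaskKind kind) {
  assert(kind != TaskKind::kAllocateProfile);  // Not a compilation request.
  return CompileFlags{/*baseline=*/ kind == TaskKind::kCompileBaseline,
                      /*osr=*/ kind == TaskKind::kCompileOsr};
}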
@@ -304,7 +305,7 @@ class Jit { static void* jit_compiler_handle_; static void* (*jit_load_)(void); static void (*jit_unload_)(void*); - static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool); + static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool); static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count); static void (*jit_update_options_)(void*); static bool (*jit_generate_debug_info_)(void*); diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 185ae3b2ac..679ca43e98 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -871,6 +871,9 @@ inline void Class::InitializeClassVisitor::operator()(ObjPtr<Object> obj, } inline void Class::SetAccessFlags(uint32_t new_access_flags) { + if (kIsDebugBuild) { + SetAccessFlagsDCheck(new_access_flags); + } // Called inside a transaction when setting pre-verified flag during boot image compilation. if (Runtime::Current()->IsActiveTransaction()) { SetField32<true>(AccessFlagsOffset(), new_access_flags); diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 83d76a98cd..c5ed1bf4dd 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -206,6 +206,10 @@ void Class::SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self } } + if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) { + CHECK(h_this->WasVerificationAttempted()) << h_this->PrettyClassAndClassLoader(); + } + if (!class_linker_initialized) { // When the class linker is being initialized its single threaded and by definition there can be // no waiters. During initialization classes may appear temporary but won't be retired as their @@ -1461,5 +1465,12 @@ template void Class::GetAccessFlagsDCheck<kVerifyReads>(); template void Class::GetAccessFlagsDCheck<kVerifyWrites>(); template void Class::GetAccessFlagsDCheck<kVerifyAll>(); +void Class::SetAccessFlagsDCheck(uint32_t new_access_flags) { + uint32_t old_access_flags = GetField32<kVerifyNone>(AccessFlagsOffset()); + // kAccVerificationAttempted is retained. + CHECK((old_access_flags & kAccVerificationAttempted) == 0 || + (new_access_flags & kAccVerificationAttempted) != 0); +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 66b140523d..d5aa514432 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -1306,6 +1306,8 @@ class MANAGED Class final : public Object { template<VerifyObjectFlags kVerifyFlags> void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_); + void SetAccessFlagsDCheck(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_); + // Check that the pointer size matches the one in the class linker. 
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 3e5003ce13..892d4cc9e1 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -271,7 +271,7 @@ static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sd #endif } -static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) { +static void VMRuntime_registerNativeAllocationInternal(JNIEnv* env, jobject, jint bytes) { if (UNLIKELY(bytes < 0)) { ScopedObjectAccess soa(env); ThrowRuntimeException("allocation size negative %d", bytes); @@ -280,11 +280,7 @@ static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes)); } -static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) { - Runtime::Current()->RegisterSensitiveThread(); -} - -static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) { +static void VMRuntime_registerNativeFreeInternal(JNIEnv* env, jobject, jint bytes) { if (UNLIKELY(bytes < 0)) { ScopedObjectAccess soa(env); ThrowRuntimeException("allocation size negative %d", bytes); @@ -293,6 +289,18 @@ static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) { Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes)); } +static jint VMRuntime_getNotifyNativeInterval(JNIEnv*, jclass) { + return Runtime::Current()->GetHeap()->GetNotifyNativeInterval(); +} + +static void VMRuntime_notifyNativeAllocationsInternal(JNIEnv* env, jobject) { + Runtime::Current()->GetHeap()->NotifyNativeAllocations(env); +} + +static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) { + Runtime::Current()->RegisterSensitiveThread(); +} + static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) { Runtime* runtime = Runtime::Current(); runtime->UpdateProcessState(static_cast<ProcessState>(process_state)); @@ -710,9 +718,11 @@ static JNINativeMethod gMethods[] = { FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"), NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"), NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"), - NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"), + NATIVE_METHOD(VMRuntime, registerNativeAllocationInternal, "(I)V"), + NATIVE_METHOD(VMRuntime, registerNativeFreeInternal, "(I)V"), + NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"), + NATIVE_METHOD(VMRuntime, notifyNativeAllocationsInternal, "()V"), NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"), - NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"), NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"), NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"), NATIVE_METHOD(VMRuntime, runHeapTasks, "()V"), diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index b7ac1e8fe3..9ce47490e7 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -240,11 +240,6 @@ static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) { runtime->PreZygoteFork(); - if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) { - // Tracing active, pause it. - Trace::Pause(); - } - // Grab thread before fork potentially makes Thread::pthread_key_self_ unusable. 
return reinterpret_cast<jlong>(ThreadForEnv(env)); } diff --git a/runtime/oat.cc b/runtime/oat.cc index e931b28a31..d7c968f9d1 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -79,8 +79,7 @@ OatHeader::OatHeader(InstructionSet instruction_set, quick_generic_jni_trampoline_offset_(0), quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0), - quick_to_interpreter_bridge_offset_(0), - boot_image_checksum_(0) { + quick_to_interpreter_bridge_offset_(0) { // Don't want asserts in header as they would be checked in each file that includes it. But the // fields are private, so we check inside a method. static_assert(sizeof(magic_) == sizeof(kOatMagic), @@ -316,16 +315,6 @@ void OatHeader::SetQuickToInterpreterBridgeOffset(uint32_t offset) { quick_to_interpreter_bridge_offset_ = offset; } -uint32_t OatHeader::GetBootImageChecksum() const { - CHECK(IsValid()); - return boot_image_checksum_; -} - -void OatHeader::SetBootImageChecksum(uint32_t boot_image_checksum) { - CHECK(IsValid()); - boot_image_checksum_ = boot_image_checksum; -} - uint32_t OatHeader::GetKeyValueStoreSize() const { CHECK(IsValid()); return key_value_store_size_; diff --git a/runtime/oat.h b/runtime/oat.h index b09c81e3ad..ded148914f 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -31,8 +31,8 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - // Last oat version changed reason: Pass boot class path to LoadBootImage. - static constexpr uint8_t kOatVersion[] = { '1', '6', '5', '\0' }; + // Last oat version changed reason: Partial boot image. + static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' }; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; static constexpr const char* kDebuggableKey = "debuggable"; @@ -40,6 +40,7 @@ class PACKED(4) OatHeader { static constexpr const char* kCompilerFilter = "compiler-filter"; static constexpr const char* kClassPathKey = "classpath"; static constexpr const char* kBootClassPathKey = "bootclasspath"; + static constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums"; static constexpr const char* kConcurrentCopying = "concurrent-copying"; static constexpr const char* kCompilationReasonKey = "compilation-reason"; @@ -93,9 +94,6 @@ class PACKED(4) OatHeader { InstructionSet GetInstructionSet() const; uint32_t GetInstructionSetFeaturesBitmap() const; - uint32_t GetBootImageChecksum() const; - void SetBootImageChecksum(uint32_t boot_image_checksum); - uint32_t GetKeyValueStoreSize() const; const uint8_t* GetKeyValueStore() const; const char* GetStoreValueByKey(const char* key) const; @@ -137,8 +135,6 @@ class PACKED(4) OatHeader { uint32_t quick_resolution_trampoline_offset_; uint32_t quick_to_interpreter_bridge_offset_; - uint32_t boot_image_checksum_; - uint32_t key_value_store_size_; uint8_t key_value_store_[0]; // note variable width data at end diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 5c5523d9c8..de4826f417 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -581,9 +581,9 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e const char* dex_file_location_data = reinterpret_cast<const char*>(oat); oat += dex_file_location_size; - std::string dex_file_location = ResolveRelativeEncodedDexLocation( - abs_dex_location, - std::string(dex_file_location_data, dex_file_location_size)); + std::string dex_file_location(dex_file_location_data, dex_file_location_size); + std::string 
dex_file_name = + ResolveRelativeEncodedDexLocation(abs_dex_location, dex_file_location); uint32_t dex_file_checksum; if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) { @@ -638,7 +638,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e error_msg, uncompressed_dex_files_.get()); } else { - loaded = dex_file_loader.Open(dex_file_location.c_str(), + loaded = dex_file_loader.Open(dex_file_name.c_str(), dex_file_location, /*verify=*/ false, /*verify_checksum=*/ false, @@ -819,7 +819,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping); std::string canonical_location = - DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str()); + DexFileLoader::GetDexCanonicalLocation(dex_file_name.c_str()); // Create the OatDexFile and add it to the owning container. OatDexFile* oat_dex_file = new OatDexFile(this, diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 6f32b98466..8b81bb9bfc 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -419,7 +419,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& // starts up. LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. " << "Allow oat file use. This is potentially dangerous."; - } else if (file.GetOatHeader().GetBootImageChecksum() != image_info->boot_image_checksum) { + } else if (!image_info->ValidateBootClassPathChecksums(file)) { VLOG(oat) << "Oat image checksum does not match image checksum."; return kOatBootImageOutOfDate; } @@ -560,6 +560,13 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() { return required_dex_checksums_found_ ? 
&cached_required_dex_checksums_ : nullptr; } +bool OatFileAssistant::ImageInfo::ValidateBootClassPathChecksums(const OatFile& oat_file) const { + const char* oat_boot_class_path_checksums = + oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey); + return oat_boot_class_path_checksums != nullptr && + oat_boot_class_path_checksums == boot_class_path_checksums; +} + std::unique_ptr<OatFileAssistant::ImageInfo> OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) { CHECK(error_msg != nullptr); @@ -567,14 +574,11 @@ OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string Runtime* runtime = Runtime::Current(); std::unique_ptr<ImageInfo> info(new ImageInfo()); info->location = runtime->GetImageLocation(); - - std::unique_ptr<ImageHeader> image_header( - gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg)); - if (image_header == nullptr) { + info->boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums( + runtime->GetBootClassPath(), info->location, isa, error_msg); + if (info->boot_class_path_checksums.empty()) { return nullptr; } - - info->boot_image_checksum = image_header->GetImageChecksum(); return info; } diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index 09c9d3b3cb..def55b879a 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -246,8 +246,10 @@ class OatFileAssistant { private: struct ImageInfo { - uint32_t boot_image_checksum = 0; + bool ValidateBootClassPathChecksums(const OatFile& oat_file) const; + std::string location; + std::string boot_class_path_checksums; static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg); diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 17ff3a244d..4a042598ba 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -151,6 +151,10 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .IntoKey(M::LongGCLogThreshold) .Define("-XX:DumpGCPerformanceOnShutdown") .IntoKey(M::DumpGCPerformanceOnShutdown) + .Define("-XX:DumpRegionInfoBeforeGC") + .IntoKey(M::DumpRegionInfoBeforeGC) + .Define("-XX:DumpRegionInfoAfterGC") + .IntoKey(M::DumpRegionInfoAfterGC) .Define("-XX:DumpJITInfoOnShutdown") .IntoKey(M::DumpJITInfoOnShutdown) .Define("-XX:IgnoreMaxFootprint") diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h index 2ffaf98103..c7731f44d2 100644 --- a/runtime/runtime-inl.h +++ b/runtime/runtime-inl.h @@ -28,6 +28,7 @@ #include "gc_root-inl.h" #include "interpreter/mterp/mterp.h" #include "obj_ptr-inl.h" +#include "scoped_thread_state_change-inl.h" #include "thread_list.h" namespace art { @@ -90,12 +91,23 @@ inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) } template<typename Action> -void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) { - MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_); - lamda(); - Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { - thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); - }, nullptr); +void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) { + Thread* self = Thread::Current(); + if (Runtime::Current()->IsShuttingDown(self) || Locks::mutator_lock_->IsExclusiveHeld(self)) { + MutexLock tll_mu(self, *Locks::thread_list_lock_); + lambda(); + Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { + 
thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); + }, nullptr); + } else { + ScopedThreadStateChange tsc(self, kSuspended); + ScopedSuspendAll ssa(__FUNCTION__); + MutexLock tll_mu(self, *Locks::thread_list_lock_); + lambda(); + Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) { + thread->tls32_.use_mterp.store(interpreter::CanUseMterp()); + }, nullptr); + } } } // namespace art diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 69ef2fb213..bd0e5a4815 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -34,6 +34,7 @@ #include <cstdio> #include <cstdlib> #include <limits> +#include <thread> #include <vector> #include "android-base/strings.h" @@ -233,6 +234,7 @@ Runtime::Runtime() class_linker_(nullptr), signal_catcher_(nullptr), java_vm_(nullptr), + thread_pool_ref_count_(0u), fault_message_(nullptr), threads_being_born_(0), shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)), @@ -320,7 +322,8 @@ Runtime::~Runtime() { } if (dump_gc_performance_on_shutdown_) { - heap_->CalculateWeightedAllocatedBytes(); + heap_->CalculatePreGcWeightedAllocatedBytes(); + heap_->CalculatePostGcWeightedAllocatedBytes(); uint64_t process_cpu_end_time = ProcessCpuNanoTime(); ScopedLogSeverity sls(LogSeverity::INFO); // This can't be called from the Heap destructor below because it @@ -335,13 +338,24 @@ Runtime::~Runtime() { << " out of process CPU time " << PrettyDuration(process_cpu_time) << " (" << ratio << ")" << "\n"; - double weighted_allocated_bytes = heap_->GetWeightedAllocatedBytes() / process_cpu_time; - LOG_STREAM(INFO) << "Weighted bytes allocated over CPU time: " - << " (" << PrettySize(weighted_allocated_bytes) << ")" + double pre_gc_weighted_allocated_bytes = + heap_->GetPreGcWeightedAllocatedBytes() / process_cpu_time; + double post_gc_weighted_allocated_bytes = + heap_->GetPostGcWeightedAllocatedBytes() / process_cpu_time; + + LOG_STREAM(INFO) << "Pre GC weighted bytes allocated over CPU time: " + << " (" << PrettySize(pre_gc_weighted_allocated_bytes) << ")"; + LOG_STREAM(INFO) << "Post GC weighted bytes allocated over CPU time: " + << " (" << PrettySize(post_gc_weighted_allocated_bytes) << ")" << "\n"; } + WaitForThreadPoolWorkersToStart(); + if (jit_ != nullptr) { + // Wait for the workers to be created since there can't be any threads attaching during + // shutdown. + jit_->WaitForWorkersToBeCreated(); // Stop the profile saver thread before marking the runtime as shutting down. // The saver will try to dump the profiles before being stopped and that // requires holding the mutator lock. @@ -390,6 +404,8 @@ Runtime::~Runtime() { // JIT compiler threads. jit_->DeleteThreadPool(); } + DeleteThreadPool(); + CHECK(thread_pool_ == nullptr); // Make sure our internal threads are dead before we start tearing down things they're using. GetRuntimeCallbacks()->StopDebugger(); @@ -920,6 +936,18 @@ void Runtime::InitNonZygoteOrPostFork( // Create the thread pools.
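Returning briefly to the oat_file_assistant.cc hunk above: ValidateBootClassPathChecksums() reads the new kBootClassPathChecksumsKey entry from the oat key-value store and compares it, as a plain string, against the runtime's own checksums. A self-contained sketch of that lookup-and-compare shape, with std::map standing in for the packed oat key-value store and all names hypothetical:

#include <map>
#include <string>

// Look up a key in the store; nullptr means the oat file predates the key.
const char* GetStoreValueByKey(const std::map<std::string, std::string>& store,
                               const std::string& key) {
  auto it = store.find(key);
  return it == store.end() ? nullptr : it->second.c_str();
}

// An absent key counts as out of date, exactly like a mismatch would.
bool ValidateChecksums(const std::map<std::string, std::string>& store,
                       const std::string& runtime_checksums) {
  const char* stored = GetStoreValueByKey(store, "bootclasspath-checksums");
  return stored != nullptr && runtime_checksums == stored;
}

Storing a checksums string rather than the old single uint32_t field is presumably what lets a partial boot image be validated against any prefix of the boot class path.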
heap_->CreateThreadPool(); + { + ScopedTrace timing("CreateThreadPool"); + constexpr size_t kStackSize = 64 * KB; + constexpr size_t kMaxRuntimeWorkers = 4u; + const size_t num_workers = + std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers); + MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_); + CHECK(thread_pool_ == nullptr); + thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize)); + thread_pool_->StartWorkers(Thread::Current()); + } + // Reset the gc performance data at zygote fork so that the GCs // before fork aren't attributed to an app. heap_->ResetGcPerformanceInfo(); @@ -964,8 +992,8 @@ void Runtime::StartDaemonThreads() { VLOG(startup) << "Runtime::StartDaemonThreads exiting"; } -static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames, - const std::vector<std::string>& dex_locations, +static size_t OpenDexFiles(ArrayRef<const std::string> dex_filenames, + ArrayRef<const std::string> dex_locations, std::vector<std::unique_ptr<const DexFile>>* dex_files) { DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr"; size_t failure_count = 0; @@ -1239,7 +1267,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { xgc_option.gcstress_, xgc_option.measure_, runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM), - runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs)); + runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs), + runtime_options.Exists(Opt::DumpRegionInfoBeforeGC), + runtime_options.Exists(Opt::DumpRegionInfoAfterGC)); if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) { LOG(ERROR) << "Dex file fallback disabled, cannot continue without image."; @@ -1422,6 +1452,21 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor()); } } + if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) { + // The boot image did not contain all boot class path components. Load the rest. + DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size()); + size_t start = heap_->GetBootImageSpaces().size(); + DCHECK_LT(start, GetBootClassPath().size()); + std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path; + if (runtime_options.Exists(Opt::BootClassPathDexList)) { + extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList)); + } else { + OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start), + ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start), + &extra_boot_class_path); + } + class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path)); + } if (IsJavaDebuggable()) { // Now that we have loaded the boot image, deoptimize its methods if we are running // debuggable, as the code may have been compiled non-debuggable. 
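On the partial-boot-image block above: when the image covers only a prefix of the boot class path, the runtime now opens the remaining components from dex files, using ArrayRef::SubArray to slice off the tail. The same tail-slicing, sketched with C++20 std::span in place of ART's ArrayRef and with made-up jar names:

#include <iostream>
#include <span>
#include <string>
#include <vector>

int main() {
  // Hypothetical boot class path; suppose the image covered the first two.
  std::vector<std::string> boot_class_path = {"core-oj", "core-libart", "conscrypt"};
  size_t image_component_count = 2;  // heap_->GetBootImageSpaces().size() in ART.
  std::span<const std::string> rest =
      std::span<const std::string>(boot_class_path).subspan(image_component_count);
  for (const std::string& jar : rest) {
    std::cout << "open extra boot dex file: " << jar << "\n";  // Prints conscrypt.
  }
  return 0;
}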
@@ -1432,7 +1477,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { if (runtime_options.Exists(Opt::BootClassPathDexList)) { boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList)); } else { - OpenDexFiles(GetBootClassPath(), GetBootClassPathLocations(), &boot_class_path); + OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()), + ArrayRef<const std::string>(GetBootClassPathLocations()), + &boot_class_path); } if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) { LOG(ERROR) << "Could not initialize without image: " << error_msg; @@ -2629,4 +2676,45 @@ void Runtime::DeoptimizeBootImage() { GetClassLinker()->VisitClasses(&visitor); } } + +Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage() + : thread_pool_(Runtime::Current()->AcquireThreadPool()) {} + +Runtime::ScopedThreadPoolUsage::~ScopedThreadPoolUsage() { + Runtime::Current()->ReleaseThreadPool(); +} + +bool Runtime::DeleteThreadPool() { + // Make sure workers are started to prevent thread shutdown errors. + WaitForThreadPoolWorkersToStart(); + std::unique_ptr<ThreadPool> thread_pool; + { + MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_); + if (thread_pool_ref_count_ == 0) { + thread_pool = std::move(thread_pool_); + } + } + return thread_pool != nullptr; +} + +ThreadPool* Runtime::AcquireThreadPool() { + MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_); + ++thread_pool_ref_count_; + return thread_pool_.get(); +} + +void Runtime::ReleaseThreadPool() { + MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_); + CHECK_GT(thread_pool_ref_count_, 0u); + --thread_pool_ref_count_; +} + +void Runtime::WaitForThreadPoolWorkersToStart() { + // Need to make sure workers are created before deleting the pool. + ScopedThreadPoolUsage stpu; + if (stpu.GetThreadPool() != nullptr) { + stpu.GetThreadPool()->WaitForWorkersToBeCreated(); + } +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index 76cfcd19d6..00158b86e4 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -669,7 +669,7 @@ class Runtime { // It ensures that two calls do not interfere with each other and // it makes it possible to DCHECK that thread local flag is correct. template<typename Action> - static void DoAndMaybeSwitchInterpreter(Action lamda); + static void DoAndMaybeSwitchInterpreter(Action lambda); // Returns the build fingerprint, if set. Otherwise an empty string is returned. std::string GetFingerprint() { @@ -792,6 +792,28 @@ class Runtime { return verifier_logging_threshold_ms_; } + // Atomically delete the thread pool if the reference count is 0. + bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_); + + // Wait for all the thread workers to be attached. + void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_); + + // Scoped usage of the runtime thread pool. Prevents the pool from being + // deleted. Note that the thread pool is only for startup and gets deleted after. + class ScopedThreadPoolUsage { + public: + ScopedThreadPoolUsage(); + ~ScopedThreadPoolUsage(); + + // Return the thread pool. + ThreadPool* GetThreadPool() const { + return thread_pool_; + } + + private: + ThreadPool* const thread_pool_; + }; + private: static void InitPlatformSignalHandlers(); @@ -828,6 +850,9 @@ class Runtime { // friend). 
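The ScopedThreadPoolUsage / AcquireThreadPool / ReleaseThreadPool trio above is a classic refcount-guarded deletion pattern: DeleteThreadPool() only takes ownership when no scoped user is alive. A standalone sketch of that pattern with a plain std::mutex where ART uses Locks::runtime_thread_pool_lock_; names here are illustrative only.

#include <cstddef>
#include <memory>
#include <mutex>

struct Pool {};  // Stand-in for the worker thread pool.

class PoolOwner {
 public:
  Pool* Acquire() {
    std::lock_guard<std::mutex> lock(mu_);
    ++ref_count_;
    return pool_.get();
  }

  void Release() {
    std::lock_guard<std::mutex> lock(mu_);
    --ref_count_;
  }

  // Deletes the pool only if nobody holds a reference; returns success.
  bool Delete() {
    std::unique_ptr<Pool> doomed;
    {
      std::lock_guard<std::mutex> lock(mu_);
      if (ref_count_ == 0u) {
        doomed = std::move(pool_);
      }
    }
    return doomed != nullptr;  // Destructor runs outside the lock.
  }

 private:
  std::mutex mu_;
  size_t ref_count_ = 0u;
  std::unique_ptr<Pool> pool_ = std::make_unique<Pool>();
};

// RAII guard: the pool cannot be deleted while one of these is in scope.
class ScopedUsage {
 public:
  explicit ScopedUsage(PoolOwner* owner) : owner_(owner), pool_(owner->Acquire()) {}
  ~ScopedUsage() { owner_->Release(); }
  Pool* GetPool() const { return pool_; }

 private:
  PoolOwner* const owner_;
  Pool* const pool_;
};

Note that the doomed pool is destroyed outside the lock; tearing down worker threads while holding the lock they may need would risk deadlock.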
std::string GetFaultMessage(); + ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_); + void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_); + // A pointer to the active runtime or null. static Runtime* instance_; @@ -911,6 +936,10 @@ class Runtime { std::unique_ptr<jit::JitCodeCache> jit_code_cache_; std::unique_ptr<jit::JitOptions> jit_options_; + // Runtime thread pool. The pool is only for startup and gets deleted after. + std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_); + size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_); + // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed // lock-free, so needs to be atomic. std::atomic<std::string*> fault_message_; @@ -1115,6 +1144,7 @@ class Runtime { // Note: See comments on GetFaultMessage. friend std::string GetFaultMessageForAbortLogging(); + friend class ScopedThreadPoolUsage; DISALLOW_COPY_AND_ASSIGN(Runtime); }; diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc index f2e5012991..d08be72402 100644 --- a/runtime/runtime_callbacks_test.cc +++ b/runtime/runtime_callbacks_test.cc @@ -147,6 +147,8 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava) self->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); ASSERT_TRUE(started); + // Make sure the workers are done starting so we don't get callbacks for them. + runtime_->WaitForThreadPoolWorkersToStart(); cb_.state = CallbackState::kBase; // Ignore main thread attach. diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 2b2919eff4..222c821df9 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -64,6 +64,8 @@ RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \ RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \ ThreadSuspendTimeout, ThreadList::kDefaultThreadSuspendTimeout) RUNTIME_OPTIONS_KEY (Unit, DumpGCPerformanceOnShutdown) +RUNTIME_OPTIONS_KEY (Unit, DumpRegionInfoBeforeGC) +RUNTIME_OPTIONS_KEY (Unit, DumpRegionInfoAfterGC) RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown) RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint) RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode) diff --git a/runtime/thread.cc b/runtime/thread.cc index 8bec2d9673..a97e4ccbf5 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -160,6 +160,7 @@ void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) { } void Thread::InitTlsEntryPoints() { + ScopedTrace trace("InitTlsEntryPoints"); // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 
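The ScopedTrace probes added throughout thread startup here (InitTlsEntryPoints, Thread::Init, InitInterpreterTls, InitStackHwm, Thread::Attach) rely on a simple RAII shape: construction marks the start of a section, destruction marks its end, so every early return is covered automatically. ART's ScopedTrace emits atrace begin/end events; a rough standalone equivalent that just times the scope to stdout:

#include <chrono>
#include <iostream>
#include <string>

// Illustrative stand-in for ART's ScopedTrace: reports on destruction,
// whichever path exits the scope.
class ScopedTrace {
 public:
  explicit ScopedTrace(std::string name)
      : name_(std::move(name)), start_(std::chrono::steady_clock::now()) {}

  ~ScopedTrace() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::cout << name_ << ": " << us << "us\n";
  }

 private:
  const std::string name_;
  const std::chrono::steady_clock::time_point start_;
};

bool InitStackHwm() {
  ScopedTrace trace("InitStackHwm");  // Reported even on an early return.
  return true;
}

int main() {
  InitStackHwm();
  return 0;
}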
uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints); uintptr_t* end = reinterpret_cast<uintptr_t*>( @@ -903,6 +904,8 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en tlsPtr_.pthread_self = pthread_self(); CHECK(is_started_); + ScopedTrace trace("Thread::Init"); + SetUpAlternateSignalStack(); if (!InitStackHwm()) { return false; @@ -912,7 +915,10 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en RemoveSuspendTrigger(); InitCardTable(); InitTid(); - interpreter::InitInterpreterTls(this); + { + ScopedTrace trace2("InitInterpreterTls"); + interpreter::InitInterpreterTls(this); + } #ifdef ART_TARGET_ANDROID __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this; @@ -936,6 +942,7 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en } } + ScopedTrace trace3("ThreadList::Register"); thread_list->Register(this); return true; } @@ -943,6 +950,7 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en template <typename PeerAction> Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) { Runtime* runtime = Runtime::Current(); + ScopedTrace trace("Thread::Attach"); if (runtime == nullptr) { LOG(ERROR) << "Thread attaching to non-existent runtime: " << ((thread_name != nullptr) ? thread_name : "(Unnamed)"); @@ -950,6 +958,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_ } Thread* self; { + ScopedTrace trace2("Thread birth"); MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); if (runtime->IsShuttingDownLocked()) { LOG(WARNING) << "Thread attaching while runtime is shutting down: " << @@ -1251,6 +1260,7 @@ static void GetThreadStack(pthread_t thread, } bool Thread::InitStackHwm() { + ScopedTrace trace("InitStackHwm"); void* read_stack_base; size_t read_stack_size; size_t read_guard_size; @@ -4158,7 +4168,11 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) { CHECK(new_exception != nullptr); - Runtime::Current()->SetAsyncExceptionsThrown(); + { + StackHandleScope<1> hs(Thread::Current()); + auto h_exception = hs.NewHandleWrapper(&new_exception); + Runtime::Current()->SetAsyncExceptionsThrown(); + } if (kIsDebugBuild) { // Make sure we are in a checkpoint. MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index de698c269f..e1c756d6a8 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -1,3 +1,4 @@ + /* * Copyright (C) 2012 The Android Open Source Project * @@ -86,7 +87,7 @@ void ThreadPoolWorker::SetPthreadPriority(int priority) { void ThreadPoolWorker::Run() { Thread* self = Thread::Current(); Task* task = nullptr; - thread_pool_->creation_barier_.Wait(self); + thread_pool_->creation_barier_.Pass(self); while ((task = thread_pool_->GetTask(self)) != nullptr) { task->Run(self); task->Finalize(); @@ -150,7 +151,7 @@ void ThreadPool::CreateThreads() { MutexLock mu(self, task_queue_lock_); shutting_down_ = false; // Add one since the caller of constructor waits on the barrier too. 
- creation_barier_.Init(self, max_active_workers_ + 1); + creation_barier_.Init(self, max_active_workers_); while (GetThreadCount() < max_active_workers_) { const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(), GetThreadCount()); @@ -158,8 +159,16 @@ new ThreadPoolWorker(this, worker_name, worker_stack_size_)); } } - // Wait for all of the threads to attach. - creation_barier_.Wait(Thread::Current()); +} + +void ThreadPool::WaitForWorkersToBeCreated() { + creation_barier_.Increment(Thread::Current(), 0); +} + +const std::vector<ThreadPoolWorker*>& ThreadPool::GetWorkers() { + // Wait for all the workers to be created before returning them. + WaitForWorkersToBeCreated(); + return threads_; } void ThreadPool::DeleteThreads() { diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index f55d72ec30..0a2a50c9e1 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -101,9 +101,7 @@ class ThreadPool { return threads_.size(); } - const std::vector<ThreadPoolWorker*>& GetWorkers() const { - return threads_; - } + const std::vector<ThreadPoolWorker*>& GetWorkers(); // Broadcast to the workers and tell them to empty out the work queue. void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_); @@ -154,6 +152,9 @@ class ThreadPool { // Set the "nice" priority for threads in the pool. void SetPthreadPriority(int priority); + // Wait for workers to be created. + void WaitForWorkersToBeCreated(); + protected: // get a task to run, blocks if there are no tasks left virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_); diff --git a/runtime/trace.cc b/runtime/trace.cc index f6c36cf989..ce955d8991 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -517,106 +517,6 @@ void Trace::Shutdown() { } } -void Trace::Pause() { - bool stop_alloc_counting = false; - Runtime* runtime = Runtime::Current(); - Trace* the_trace = nullptr; - - Thread* const self = Thread::Current(); - pthread_t sampling_pthread = 0U; - { - MutexLock mu(self, *Locks::trace_lock_); - if (the_trace_ == nullptr) { - LOG(ERROR) << "Trace pause requested, but no trace currently running"; - return; - } else { - the_trace = the_trace_; - sampling_pthread = sampling_pthread_; - } - } - - if (sampling_pthread != 0U) { - { - MutexLock mu(self, *Locks::trace_lock_); - the_trace_ = nullptr; - } - CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown"); - sampling_pthread_ = 0U; - { - MutexLock mu(self, *Locks::trace_lock_); - the_trace_ = the_trace; - } - } - - if (the_trace != nullptr) { - gc::ScopedGCCriticalSection gcs(self, - gc::kGcCauseInstrumentation, - gc::kCollectorTypeInstrumentation); - ScopedSuspendAll ssa(__FUNCTION__); - stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0; - - if (the_trace->trace_mode_ == TraceMode::kSampling) { - MutexLock mu(self, *Locks::thread_list_lock_); - runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr); - } else { - runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey); - runtime->GetInstrumentation()->RemoveListener( - the_trace, - instrumentation::Instrumentation::kMethodEntered | - instrumentation::Instrumentation::kMethodExited | - instrumentation::Instrumentation::kMethodUnwind); - } - } - - if (stop_alloc_counting) { - // Can be racy since SetStatsEnabled is not guarded by any locks.
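On the creation_barier_ changes above: workers now call Pass() (decrement and signal, never block) while WaitForWorkersToBeCreated() does the blocking wait via Increment(self, 0), so pool construction itself no longer stalls until every worker has attached. The split between non-blocking passers and a blocking waiter, sketched with standard primitives under hypothetical names:

#include <condition_variable>
#include <mutex>

// Rough counterpart of ART's creation barrier: each worker passes the
// barrier once it has attached; a waiter blocks until all have passed.
class CreationBarrier {
 public:
  explicit CreationBarrier(int worker_count) : count_(worker_count) {}

  // Called by each worker exactly once; never blocks.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }

  // Called by the equivalent of WaitForWorkersToBeCreated(); returns once
  // every worker has passed.
  void WaitUntilAllPassed() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return count_ == 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_;
};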
- Runtime::Current()->SetStatsEnabled(false); - } -} - -void Trace::Resume() { - Thread* self = Thread::Current(); - Trace* the_trace; - { - MutexLock mu(self, *Locks::trace_lock_); - if (the_trace_ == nullptr) { - LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request"; - return; - } - the_trace = the_trace_; - } - - Runtime* runtime = Runtime::Current(); - - // Enable count of allocs if specified in the flags. - bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0; - - { - gc::ScopedGCCriticalSection gcs(self, - gc::kGcCauseInstrumentation, - gc::kCollectorTypeInstrumentation); - ScopedSuspendAll ssa(__FUNCTION__); - - // Reenable. - if (the_trace->trace_mode_ == TraceMode::kSampling) { - CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread, - reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread"); - } else { - runtime->GetInstrumentation()->AddListener(the_trace, - instrumentation::Instrumentation::kMethodEntered | - instrumentation::Instrumentation::kMethodExited | - instrumentation::Instrumentation::kMethodUnwind); - // TODO: In full-PIC mode, we don't need to fully deopt. - runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey); - } - } - - // Can't call this when holding the mutator lock. - if (enable_stats) { - runtime->SetStatsEnabled(true); - } -} - TracingMode Trace::GetMethodTracingMode() { MutexLock mu(Thread::Current(), *Locks::trace_lock_); if (the_trace_ == nullptr) { diff --git a/runtime/trace.h b/runtime/trace.h index 108996231d..582f756ca9 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -156,9 +156,6 @@ class Trace final : public instrumentation::InstrumentationListener { REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !Locks::trace_lock_); - static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_); - static void Resume() REQUIRES(!Locks::trace_lock_); - // Stop tracing. This will finish the trace and write it to file/send it via DDMS. static void Stop() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); diff --git a/test/175-alloc-big-bignums/expected.txt b/test/175-alloc-big-bignums/expected.txt new file mode 100644 index 0000000000..f75da10caf --- /dev/null +++ b/test/175-alloc-big-bignums/expected.txt @@ -0,0 +1 @@ +Test complete diff --git a/test/175-alloc-big-bignums/info.txt b/test/175-alloc-big-bignums/info.txt new file mode 100644 index 0000000000..8f6bcc3a55 --- /dev/null +++ b/test/175-alloc-big-bignums/info.txt @@ -0,0 +1,11 @@ +Allocate large numbers of huge BigIntegers in rapid succession. Most of the +associated memory will be in the C++ heap. This makes sure that we trigger +the garbage collector often enough to prevent us from running out of memory. + +The test allocates roughly 10GB of native memory, approximately 1MB of which +will be live at any point. Basically all native memory deallocation is +triggered by Java garbage collection. + +This test is a lot nastier than it looks. In particular, failure on target tends +to exhaust device memory, and kill off all processes on the device, including the +adb daemon :-( . 
diff --git a/test/175-alloc-big-bignums/src/Main.java b/test/175-alloc-big-bignums/src/Main.java new file mode 100644 index 0000000000..5fbeb46068 --- /dev/null +++ b/test/175-alloc-big-bignums/src/Main.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.math.BigInteger; + +// This is motivated by the assumption that BigInteger allocates malloc memory +// underneath. That's true (in 2018) on Android. + +public class Main { + public static void main(String[] args) throws Exception { + final int nIters = 20_000; // Presumed < 1_000_000. + final BigInteger big2_20 = BigInteger.valueOf(1024*1024); // 2^20 + BigInteger huge = BigInteger.valueOf(1).shiftLeft(4_000_000); // ~0.5MB + for (int i = 0; i < nIters; ++i) { // 10 GB total + huge = huge.add(BigInteger.ONE); + } + if (huge.bitLength() != 4_000_001) { + System.out.println("Wrong answer length: " + huge.bitLength()); + } else if (huge.mod(big2_20).compareTo(BigInteger.valueOf(nIters)) != 0) { + System.out.println("Wrong answer: ..." + huge.mod(big2_20)); + } else { + System.out.println("Test complete"); + } + } +} diff --git a/test/1919-vminit-thread-start-timing/src/art/Test1919.java b/test/1919-vminit-thread-start-timing/src/art/Test1919.java index 3d5c079f74..f6b770f7cf 100644 --- a/test/1919-vminit-thread-start-timing/src/art/Test1919.java +++ b/test/1919-vminit-thread-start-timing/src/art/Test1919.java @@ -21,10 +21,12 @@ public class Test1919 { public static void run() { for (Event e : getEvents()) { - if (PRINT_ALL_THREADS || - e.thr.equals(Thread.currentThread()) || - e.thr.getName().equals("JVMTI_THREAD-Test1919")) { - System.out.println(e.name + ": " + e.thr.getName()); + if (e.thr != null) { + if (PRINT_ALL_THREADS || + e.thr.equals(Thread.currentThread()) || + e.thr.getName().equals("JVMTI_THREAD-Test1919")) { + System.out.println(e.name + ": " + e.thr.getName()); + } } } } diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc index 726a7a86ae..dfb08c1621 100644 --- a/test/1934-jvmti-signal-thread/signal_threads.cc +++ b/test/1934-jvmti-signal-thread/signal_threads.cc @@ -47,19 +47,19 @@ extern "C" JNIEXPORT jlong JNICALL Java_art_Test1934_allocNativeMonitor(JNIEnv* jvmti_env, jvmti_env->Allocate(sizeof(NativeMonitor), reinterpret_cast<unsigned char**>(&mon)))) { - return -1l; + return -1L; } if (JvmtiErrorToException(env, jvmti_env, jvmti_env->CreateRawMonitor("test-1934 start", &mon->start_monitor))) { - return -1l; + return -1L; } if (JvmtiErrorToException(env, jvmti_env, jvmti_env->CreateRawMonitor("test-1934 continue", &mon->continue_monitor))) { - return -1l; + return -1L; } mon->should_continue = false; mon->should_start = false; @@ -92,7 +92,7 @@ extern "C" JNIEXPORT void Java_art_Test1934_nativeWaitForOtherThread(JNIEnv* env while (!mon->should_continue) { if (JvmtiErrorToException(env, jvmti_env, - jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) { + 
jvmti_env->RawMonitorWait(mon->continue_monitor, -1L))) { JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor)); return; } @@ -112,7 +112,7 @@ extern "C" JNIEXPORT void Java_art_Test1934_nativeDoInterleaved(JNIEnv* env, while (!mon->should_start) { if (JvmtiErrorToException(env, jvmti_env, - jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) { + jvmti_env->RawMonitorWait(mon->start_monitor, -1L))) { return; } } diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc index 17ccd9a781..00827cf8d5 100644 --- a/test/566-polymorphic-inlining/polymorphic_inline.cc +++ b/test/566-polymorphic-inlining/polymorphic_inline.cc @@ -46,7 +46,7 @@ static void do_checks(jclass cls, const char* method_name) { usleep(1000); } // Will either ensure it's compiled or do the compilation itself. - jit->CompileMethod(method, soa.Self(), /* osr */ false); + jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false); } CodeInfo info(header); diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc index b2b363447f..dc0e94cbc7 100644 --- a/test/570-checker-osr/osr.cc +++ b/test/570-checker-osr/osr.cc @@ -128,7 +128,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env, // Sleep to yield to the compiler thread. usleep(1000); // Will either ensure it's compiled or do the compilation itself. - jit->CompileMethod(m, Thread::Current(), /* osr */ true); + jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true); } }); } diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc index 52367c7731..82c82c6132 100644 --- a/test/626-const-class-linking/clear_dex_cache_types.cc +++ b/test/626-const-class-linking/clear_dex_cache_types.cc @@ -41,6 +41,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_nativeSkipVerification(JNIEnv*, jcla if (status == ClassStatus::kResolved) { ObjectLock<mirror::Class> lock(soa.Self(), klass); klass->SetStatus(klass, ClassStatus::kVerified, soa.Self()); + klass->SetVerificationAttempted(); } else { LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status; } diff --git a/test/918-fields/expected.txt b/test/918-fields/expected.txt index af786159cc..0114ccc998 100644 --- a/test/918-fields/expected.txt +++ b/test/918-fields/expected.txt @@ -2,9 +2,9 @@ class java.lang.Math 25 false -[value, I, null] -class java.lang.Integer -18 +[bytesTransferred, I, null] +class java.io.InterruptedIOException +1 false [this$0, Lart/Test918;, null] class art.Test918$Foo @@ -18,3 +18,7 @@ false class art.Test918$Generics 0 false +[privateValue, I, null] +class art.Test918$Generics +2 +false diff --git a/test/918-fields/src/art/Test918.java b/test/918-fields/src/art/Test918.java index ca23c0357d..5328b0bbdb 100644 --- a/test/918-fields/src/art/Test918.java +++ b/test/918-fields/src/art/Test918.java @@ -16,6 +16,7 @@ package art; +import java.io.InterruptedIOException; import java.lang.reflect.Field; import java.util.Arrays; @@ -26,10 +27,11 @@ public class Test918 { public static void doTest() throws Exception { testField(Math.class, "PI"); - testField(Integer.class, "value"); + testField(InterruptedIOException.class, "bytesTransferred"); testField(Foo.class, "this$0"); testField(Bar.class, "VAL"); testField(Generics.class, "generics"); + testField(Generics.class, "privateValue"); } private static void testField(Class<?> base, String fieldName) @@ -71,5 +73,6 @@ public 
class Test918 { private static class Generics<T> { T generics; + private int privateValue = 42; } } diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 5d07601005..e3157ef39f 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -47,12 +47,8 @@ ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libnativebridgetest-target # Also need libopenjdkjvmti. ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar -ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar +ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += \ + $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar) # All tests require the host executables. The tests also depend on the core images, but on # specific version depending on the compiler. @@ -74,6 +70,7 @@ ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \ + $(HOST_CORE_DEX_LOCATIONS) \ ifneq ($(HOST_PREFER_32_BIT),true) ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \ diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc index 65127fcab1..55631a9651 100644 --- a/test/common/runtime_state.cc +++ b/test/common/runtime_state.cc @@ -227,7 +227,7 @@ static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::m // Make sure there is a profiling info, required by the compiler. ProfilingInfo::Create(self, method, /* retry_allocation */ true); // Will either ensure it's compiled or do the compilation itself. - jit->CompileMethod(method, self, /* osr */ false); + jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false); } } } diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index 16106ab1c5..25b8b4b55b 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -51,6 +51,7 @@ RELOCATE="n" STRIP_DEX="n" SECONDARY_DEX="" TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb) +TIMEOUT_DUMPER=timeout_dumper # Value in seconds if [ "$ART_USE_READ_BARRIER" != "false" ]; then TIME_OUT_VALUE=2400 # 40 minutes. @@ -548,7 +549,10 @@ if [ "$USE_JVM" = "y" ]; then exit fi -bpath_modules="core-oj core-libart core-simple conscrypt okhttp bouncycastle" +# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk +# because that's what we use for compiling the core.art image. +# It may contain additional modules from TEST_CORE_JARS. +bpath_modules="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt" if [ "${HOST}" = "y" ]; then framework="${ANDROID_HOST_OUT}/framework" if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then @@ -688,6 +692,8 @@ if [ "$BIONIC" = "y" ]; then echo "linux_bionic-x86 target doesn't seem to have been built!" 
>&2 exit 1 fi + # Set timeout_dumper manually so it works even with apex's + TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/timeout_dumper fi # Prevent test from silently falling back to interpreter in no-prebuild mode. This happens @@ -1026,7 +1032,8 @@ else # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump # before abort. However, dumping threads might deadlock, so we also use the "-k" # option to definitely kill the child. - cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s timeout_dumper $cmdline" + # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime. + cmdline="timeout --foreground -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline" fi if [ "$DEV_MODE" = "y" ]; then diff --git a/test/knownfailures.json b/test/knownfailures.json index ae20557e2f..879f2fd79d 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -447,19 +447,63 @@ }, { "tests": [ + "004-ThreadStress", + "130-hprof", + "579-inline-infinite", + "1946-list-descriptors" + ], + "description": ["Too slow to finish in the timeout"], + "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress" + }, + { + "tests": [ + "911-get-stack-trace" + ], + "description": ["Tests that fail when run with step-stress for unknown reasons."], + "bug": "b/120995005", + "variant": "jvmti-stress | step-stress" + }, + { + "tests": [ + "004-SignalTest", + "004-StackWalk", + "064-field-access", + "083-compiler-regressions", + "098-ddmc", + "107-int-math2", + "129-ThreadGetId", + "135-MirandaDispatch", + "132-daemon-locks-shutdown", + "163-app-image-methods", + "607-daemon-stress", + "674-hiddenapi", + "687-deopt", + "904-object-allocation" + ], + "description": ["Tests that sometimes fail when run with jvmti-stress for unknown reasons."], + "bug": "b/120995005", + "variant": "jvmti-stress | trace-stress | field-stress | step-stress" + }, + { + "tests": [ + "018-stack-overflow", "137-cfi", "595-profile-saving", + "597-deopt-busy-loop", + "597-deopt-new-string", + "660-clinit", "900-hello-plugin", "909-attach-agent", + "924-threads", "981-dedup-original-dex", "1900-track-alloc" ], - "description": ["Tests that require exact knowledge of the number of plugins and agents."], + "description": ["Tests that require exact knowledge of the deoptimization state, the ", + "number of plugins and agents, or breaks other openjdkjvmti assumptions."], "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress" }, { "tests": [ - "132-daemon-locks-shutdown", "607-daemon-stress", "602-deoptimizeable", "121-simple-suspend-check", @@ -569,6 +613,12 @@ "env_vars": {"SANITIZE_HOST": "address"} }, { + "tests": "175-alloc-big-bignums", + "description": "ASAN runs out of memory due to huge allocations.", + "variant": "host", + "env_vars": {"SANITIZE_HOST": "address"} + }, + { "tests": "202-thread-oome", "description": "ASAN aborts when large thread stacks are requested.", "variant": "host", @@ -1111,5 +1161,11 @@ "tests": ["454-get-vreg", "457-regs"], "variant": "baseline", "description": ["Tests are expected to fail with baseline."] + }, + { + "tests": ["708-jit-cache-churn"], + "variant": "jit-on-first-use", + "bug": "b/120112467", + "description": [ "Fails on Android Build hosts with uncaught std::bad_alloc." 
] } ] diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc index e123e9f081..cd7af1064d 100644 --- a/test/ti-stress/stress.cc +++ b/test/ti-stress/stress.cc @@ -157,14 +157,12 @@ class ScopedClassInfo { : jvmtienv_(jvmtienv), class_(c), name_(nullptr), - generic_(nullptr), file_(nullptr), debug_ext_(nullptr) {} ~ScopedClassInfo() { if (class_ != nullptr) { jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_)); - jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_)); jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_)); jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_)); } @@ -173,12 +171,11 @@ class ScopedClassInfo { bool Init() { if (class_ == nullptr) { name_ = const_cast<char*>("<NONE>"); - generic_ = const_cast<char*>("<NONE>"); return true; } else { jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_); jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_); - return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE && + return jvmtienv_->GetClassSignature(class_, &name_, nullptr) == JVMTI_ERROR_NONE && ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY && ret1 != JVMTI_ERROR_INVALID_CLASS && ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY && @@ -192,9 +189,6 @@ class ScopedClassInfo { const char* GetName() const { return name_; } - const char* GetGeneric() const { - return generic_; - } const char* GetSourceDebugExtension() const { if (debug_ext_ == nullptr) { return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>"; @@ -214,7 +208,6 @@ class ScopedClassInfo { jvmtiEnv* jvmtienv_; jclass class_; char* name_; - char* generic_; char* file_; char* debug_ext_; }; @@ -229,14 +222,12 @@ class ScopedMethodInfo { class_info_(nullptr), name_(nullptr), signature_(nullptr), - generic_(nullptr), first_line_(-1) {} ~ScopedMethodInfo() { DeleteLocalRef(env_, declaring_class_); jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_)); jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_)); - jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_)); } bool Init() { @@ -257,7 +248,7 @@ class ScopedMethodInfo { return false; } return class_info_->Init() && - (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE); + (jvmtienv_->GetMethodName(method_, &name_, &signature_, nullptr) == JVMTI_ERROR_NONE); } const ScopedClassInfo& GetDeclaringClassInfo() const { @@ -276,10 +267,6 @@ class ScopedMethodInfo { return signature_; } - const char* GetGeneric() const { - return generic_; - } - jint GetFirstLine() const { return first_line_; } @@ -292,7 +279,6 @@ class ScopedMethodInfo { std::unique_ptr<ScopedClassInfo> class_info_; char* name_; char* signature_; - char* generic_; jint first_line_; friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m); @@ -306,20 +292,18 @@ class ScopedFieldInfo { field_(field), class_info_(nullptr), name_(nullptr), - type_(nullptr), - generic_(nullptr) {} + type_(nullptr) {} ~ScopedFieldInfo() { jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_)); jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_)); - jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_)); } bool Init() { class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_)); return class_info_->Init() && (jvmtienv_->GetFieldName( - declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE); + declaring_class_, field_, &name_, &type_, nullptr) == JVMTI_ERROR_NONE); } const ScopedClassInfo& 
GetDeclaringClassInfo() const { @@ -338,10 +322,6 @@ class ScopedFieldInfo { return type_; } - const char* GetGeneric() const { - return generic_; - } - private: jvmtiEnv* jvmtienv_; jclass declaring_class_; @@ -349,7 +329,6 @@ class ScopedFieldInfo { std::unique_ptr<ScopedClassInfo> class_info_; char* name_; char* type_; - char* generic_; friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m); }; diff --git a/tools/bootjars.sh b/tools/bootjars.sh index ad6ee6b058..9f2282764e 100755 --- a/tools/bootjars.sh +++ b/tools/bootjars.sh @@ -72,8 +72,10 @@ if [[ $core_jars_only == y ]]; then # FIXME: The soong invocation we're using for getting the variables does not give us anything # defined in Android.common_path.mk, otherwise we would just use HOST-/TARGET_TEST_CORE_JARS. - # The core_jars_list must match the TEST_CORE_JARS variable in the Android.common_path.mk . - core_jars_list="core-oj core-libart core-simple conscrypt okhttp bouncycastle" + # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk + # because that's what we use for compiling the core.art image. + # It may contain additional modules from TEST_CORE_JARS. + core_jars_list="core-oj core-libart core-simple" core_jars_suffix= if [[ $mode == target ]]; then core_jars_suffix=-testdex diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh new file mode 100755 index 0000000000..94ccc41331 --- /dev/null +++ b/tools/build_linux_bionic.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This will build a target using linux_bionic. It can be called with normal make +# flags. +# +# TODO This runs a 'm clean' prior to building the targets in order to ensure +# that obsolete kati files don't mess up the build. + +if [[ -z $ANDROID_BUILD_TOP ]]; then + pushd . +else + pushd $ANDROID_BUILD_TOP +fi + +if [ ! -d art ]; then + echo "Script needs to be run at the root of the android tree" + exit 1 +fi + +source build/envsetup.sh >&/dev/null # for get_build_var +# Soong needs a bunch of variables set and will not run if they are missing. +# The default values of these variables is only contained in make, so use +# nothing to create the variables then remove all the other artifacts. +build/soong/soong_ui.bash --make-mode nothing +if [ $? != 0 ]; then + exit 1 +fi + +out_dir=$(get_build_var OUT_DIR) +host_out=$(get_build_var HOST_OUT) + +# TODO(b/31559095) Figure out a better way to do this. +# +# There is no good way to force soong to generate host-bionic builds currently +# so this is a hacky workaround. 
+tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX) + +cat $out_dir/soong/soong.variables > ${tmp_soong_var} +build/soong/soong_ui.bash --make-mode clean +mkdir -p $out_dir/soong + +python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables +import json +import sys +x = json.load(open(sys.argv[1])) +x['Allow_missing_dependencies'] = True +x['HostArch'] = 'x86_64' +x['CrossHost'] = 'linux_bionic' +x['CrossHostArch'] = 'x86_64' +if 'CrossHostSecondaryArch' in x: + del x['CrossHostSecondaryArch'] +json.dump(x, open(sys.argv[2], mode='w')) +END + +rm $tmp_soong_var + +build/soong/soong_ui.bash --make-mode --skip-make $@ diff --git a/tools/dist_linux_bionic.sh b/tools/dist_linux_bionic.sh new file mode 100755 index 0000000000..4c7ba1ca3f --- /dev/null +++ b/tools/dist_linux_bionic.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# Builds the given targets using linux-bionic and moves the output files to the +# DIST_DIR. Takes normal make arguments. + +if [[ -z $ANDROID_BUILD_TOP ]]; then + pushd . +else + pushd $ANDROID_BUILD_TOP +fi + +if [[ -z $DIST_DIR ]]; then + echo "DIST_DIR must be set!" + exit 1 +fi + +if [ ! -d art ]; then + echo "Script needs to be run at the root of the android tree" + exit 1 +fi + +source build/envsetup.sh >&/dev/null # for get_build_var +out_dir=$(get_build_var OUT_DIR) + +./art/tools/build_linux_bionic.sh $@ + +mkdir -p $DIST_DIR +cp -R ${out_dir}/soong/host/* $DIST_DIR/ diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt index f97dd4fc7f..3c65b01e02 100644 --- a/tools/libcore_failures.txt +++ b/tools/libcore_failures.txt @@ -155,41 +155,6 @@ bug: 25437292 }, { - description: "Missing resource in classpath", - result: EXEC_FAILED, - names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetBoolean", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetByteArray", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetDouble", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetFloat", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetInt", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetLong", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testKeys", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testNodeExists", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPut", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutBoolean", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutByteArray", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutDouble", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutFloat", - 
"libcore.java.util.prefs.OldAbstractPreferencesTest#testPutInt", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutLong", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemove", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemoveNode", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync", - "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding", - "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles", - "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String", - "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode", - "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree", - "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush", - "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync", - "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"], - bug: 120526172 -}, -{ description: "Only work with --mode=activity", result: EXEC_FAILED, names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ] @@ -233,5 +198,30 @@ modes: [device], bug: 116446372, names: ["libcore.libcore.io.FdsanTest#testSocket"] +}, +{ + description: "Host implementation of android_getaddrinfo differs from device implementation", + result: EXEC_FAILED, + modes: [host], + bug: 121230364, + names: [ + "libcore.libcore.net.InetAddressUtilsTest#parseNumericAddress[8]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[10]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[11]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[12]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[5]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[6]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[7]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[8]", + "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[9]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[10]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[11]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[12]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[5]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[6]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[7]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[8]", + "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[9]" + ] } ] diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg index 29cca39a8a..8df8433f75 100644 --- a/tools/luci/config/cr-buildbucket.cfg +++ b/tools/luci/config/cr-buildbucket.cfg @@ -27,8 +27,6 @@ buckets { swarming { hostname: "chromium-swarm.appspot.com" builder_defaults { - dimensions: "cores:8" - dimensions: "cpu:x86-64" dimensions: "pool:luci.art.ci" service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com" execution_timeout_secs: 10800 # 3h diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg index ce222932d7..60e8404c41 100644 --- a/tools/luci/config/luci-milo.cfg +++ b/tools/luci/config/luci-milo.cfg @@ -6,6 +6,7 @@ consoles { repo_url: "https://android.googlesource.com/platform/art" refs: "refs/heads/master" 
manifest_name: "REVISION" + include_experimental_builds: true builders { name: "buildbucket/luci.art.ci/angler-armv7-debug" diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh index f4a2dc1a6a..c7503bbd1b 100755 --- a/tools/run-jdwp-tests.sh +++ b/tools/run-jdwp-tests.sh @@ -55,9 +55,10 @@ function boot_classpath_arg { done } -# Note: This must match the TEST_CORE_JARS in Android.common_path.mk +# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk # because that's what we use for compiling the core.art image. -BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle" +# It may contain additional modules from TEST_CORE_JARS. +BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt" vm_args="" art="$android_root/bin/art" diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh index 63f1fce13f..68c4fd8563 100755 --- a/tools/run-libcore-tests.sh +++ b/tools/run-libcore-tests.sh @@ -57,9 +57,10 @@ function boot_classpath_arg { done } -# Note: This must match the TEST_CORE_JARS in Android.common_path.mk +# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk # because that's what we use for compiling the core.art image. -BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle" +# It may contain additional modules from TEST_CORE_JARS. +BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt" DEPS="core-tests jsr166-tests mockito-target" diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc index 96d165c5c7..e04aefb26c 100644 --- a/tools/timeout_dumper/timeout_dumper.cc +++ b/tools/timeout_dumper/timeout_dumper.cc @@ -29,6 +29,7 @@ #include <thread> #include <memory> #include <set> +#include <string> #include <android-base/file.h> #include <android-base/logging.h> @@ -103,9 +104,22 @@ std::unique_ptr<std::string> FindAddr2line() { } } - std::string path = std::string(".") + kAddr2linePath; - if (access(path.c_str(), X_OK) == 0) { - return std::make_unique<std::string>(path); + { + std::string path = std::string(".") + kAddr2linePath; + if (access(path.c_str(), X_OK) == 0) { + return std::make_unique<std::string>(path); + } + } + + { + using android::base::Dirname; + + std::string exec_dir = android::base::GetExecutableDirectory(); + std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir)))); + std::string path = derived_top + kAddr2linePath; + if (access(path.c_str(), X_OK) == 0) { + return std::make_unique<std::string>(path); + } } constexpr const char* kHostAddr2line = "/usr/bin/addr2line"; @@ -356,6 +370,91 @@ std::set<pid_t> PtraceSiblings(pid_t pid) { return ret; } +void DumpABI(pid_t forked_pid) { + enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 }; +#if defined(__arm__) + constexpr ABI kDumperABI = ABI::kArm; +#elif defined(__aarch64__) + constexpr ABI kDumperABI = ABI::kArm64; +#elif defined(__mips__) && !defined(__LP64__) + constexpr ABI kDumperABI = ABI::kMips; +#elif defined(__mips__) && defined(__LP64__) + constexpr ABI kDumperABI = ABI::kMips64; +#elif defined(__i386__) + constexpr ABI kDumperABI = ABI::kX86; +#elif defined(__x86_64__) + constexpr ABI kDumperABI = ABI::kX86_64; +#else +#error Unsupported architecture +#endif + + char data[1024]; // Should be more than enough. 
+ struct iovec io_vec; + io_vec.iov_base = &data; + io_vec.iov_len = 1024; + ABI to_print; + if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) { + LOG(ERROR) << "Could not get registers to determine abi."; + // Use 64-bit as default. + switch (kDumperABI) { + case ABI::kArm: + case ABI::kArm64: + to_print = ABI::kArm64; + break; + case ABI::kMips: + case ABI::kMips64: + to_print = ABI::kMips64; + break; + case ABI::kX86: + case ABI::kX86_64: + to_print = ABI::kX86_64; + break; + default: + __builtin_unreachable(); + } + } else { + // Check the length of the data. Assume that it's the same arch as the tool. + switch (kDumperABI) { + case ABI::kArm: + case ABI::kArm64: + to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64; + break; + case ABI::kMips: + case ABI::kMips64: + to_print = ABI::kMips64; // TODO Figure out how this should work. + break; + case ABI::kX86: + case ABI::kX86_64: + to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64; + break; + default: + __builtin_unreachable(); + } + } + std::string abi_str; + switch (to_print) { + case ABI::kArm: + abi_str = "arm"; + break; + case ABI::kArm64: + abi_str = "arm64"; + break; + case ABI::kMips: + abi_str = "mips"; + break; + case ABI::kMips64: + abi_str = "mips64"; + break; + case ABI::kX86: + abi_str = "x86"; + break; + case ABI::kX86_64: + abi_str = "x86_64"; + break; + } + std::cerr << "ABI: '" << abi_str << "'" << std::endl; +} + } // namespace ptrace template <typename T> @@ -495,19 +594,22 @@ void DumpThread(pid_t pid, } void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) { + LOG(ERROR) << "Timeout for process " << forked_pid; + CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0)); std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid); tids.insert(forked_pid); + ptrace::DumpABI(forked_pid); + // Check whether we have and should use addr2line. - std::unique_ptr<std::string> addr2line_path = addr2line::FindAddr2line(); - if (addr2line_path != nullptr) { - LOG(ERROR) << "Found addr2line at " << *addr2line_path; - } else { - LOG(ERROR) << "Did not find usable addr2line"; + std::unique_ptr<std::string> addr2line_path; + if (kUseAddr2line) { + addr2line_path = addr2line::FindAddr2line(); + if (addr2line_path == nullptr) { + LOG(ERROR) << "Did not find usable addr2line"; + } } - bool use_addr2line = kUseAddr2line && addr2line_path != nullptr; - LOG(ERROR) << (use_addr2line ? "U" : "Not u") << "sing addr2line"; if (!WaitForMainSigStop(saw_wif_stopped_for_main)) { LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid; @@ -520,11 +622,7 @@ void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_ } for (pid_t tid : tids) { - DumpThread(forked_pid, - tid, - use_addr2line ? addr2line_path.get() : nullptr, - " ", - backtrace_map.get()); + DumpThread(forked_pid, tid, addr2line_path.get(), " ", backtrace_map.get()); } } diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc index 96100b20d4..46ab8aa0e3 100644 --- a/tools/veridex/veridex.cc +++ b/tools/veridex/veridex.cc @@ -126,6 +126,7 @@ class Veridex { static int Run(int argc, char** argv) { VeridexOptions options; ParseArgs(&options, argc, argv); + android::base::InitLogging(argv); if (!options.dex_file) { LOG(ERROR) << "Required argument '" << kDexFileOption << "' not provided."; |