summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--adbconnection/adbconnection.cc11
-rw-r--r--build/Android.bp8
-rw-r--r--build/Android.gtest.mk3
-rw-r--r--build/art.go3
-rw-r--r--compiler/debug/elf_debug_info_writer.h4
-rw-r--r--compiler/dex/verification_results.cc4
-rw-r--r--compiler/driver/compiler_driver.cc33
-rw-r--r--compiler/driver/compiler_driver.h6
-rw-r--r--compiler/driver/compiler_options.cc1
-rw-r--r--compiler/driver/compiler_options.h8
-rw-r--r--compiler/driver/compiler_options_map-inl.h6
-rw-r--r--compiler/driver/compiler_options_map.def3
-rw-r--r--compiler/optimizing/bounds_check_elimination_test.cc13
-rw-r--r--compiler/optimizing/code_generator.cc19
-rw-r--r--compiler/optimizing/code_generator.h2
-rw-r--r--compiler/optimizing/code_generator_arm64.cc8
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc6
-rw-r--r--compiler/optimizing/code_generator_mips.cc6
-rw-r--r--compiler/optimizing/code_generator_mips64.cc6
-rw-r--r--compiler/optimizing/code_generator_x86.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc6
-rw-r--r--compiler/optimizing/induction_var_range_test.cc6
-rw-r--r--compiler/optimizing/instruction_builder.cc28
-rw-r--r--compiler/optimizing/instruction_builder.h3
-rw-r--r--compiler/optimizing/nodes.h16
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc4
-rw-r--r--compiler/utils/x86/assembler_x86.cc165
-rw-r--r--compiler/utils/x86/assembler_x86.h10
-rw-r--r--compiler/utils/x86/assembler_x86_test.cc16
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc166
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h10
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc16
-rw-r--r--dex2oat/dex2oat.cc25
-rw-r--r--dex2oat/dex2oat_test.cc289
-rw-r--r--dex2oat/linker/image_writer.cc465
-rw-r--r--dex2oat/linker/image_writer.h83
-rw-r--r--dex2oat/linker/oat_writer.cc3
-rw-r--r--dex2oat/linker/oat_writer_test.cc68
-rw-r--r--dexlayout/dex_visualize.cc2
-rw-r--r--dexlayout/dexdiag_test.cc11
-rw-r--r--dexlayout/dexlayout.cc6
-rw-r--r--dexlayout/dexlayout_main.cc2
-rw-r--r--dexlist/dexlist.cc2
-rw-r--r--dt_fd_forward/dt_fd_forward.cc13
-rw-r--r--libartbase/base/bit_string_test.cc12
-rw-r--r--libartbase/base/bit_struct.h14
-rw-r--r--libartbase/base/bit_struct_test.cc50
-rw-r--r--libartbase/base/bit_utils_test.cc95
-rw-r--r--libartbase/base/common_art_test.cc4
-rw-r--r--libartbase/base/file_magic.cc2
-rw-r--r--libartbase/base/file_utils_test.cc6
-rw-r--r--libartbase/base/mem_map.cc8
-rw-r--r--libartbase/base/mem_map_test.cc134
-rw-r--r--libartbase/base/membarrier.cc2
-rw-r--r--libartbase/base/scoped_flock.cc5
-rw-r--r--libartbase/base/scoped_flock_test.cc2
-rw-r--r--libartbase/base/unix_file/fd_file.cc2
-rw-r--r--libartbase/base/unix_file/fd_file_test.cc3
-rw-r--r--libartbase/base/zip_archive.cc6
-rw-r--r--libartbase/base/zip_archive_test.cc5
-rw-r--r--libdexfile/dex/art_dex_file_loader.cc38
-rw-r--r--libdexfile/dex/art_dex_file_loader_test.cc28
-rw-r--r--libdexfile/dex/code_item_accessors_test.cc12
-rw-r--r--libdexfile/dex/compact_dex_file.cc2
-rw-r--r--libdexfile/dex/compact_dex_file_test.cc20
-rw-r--r--libdexfile/dex/dex_file_loader.cc20
-rw-r--r--libdexfile/dex/dex_file_loader_test.cc14
-rw-r--r--libdexfile/dex/dex_file_verifier.cc23
-rw-r--r--libdexfile/dex/dex_file_verifier_test.cc28
-rw-r--r--libdexfile/dex/dex_instruction_test.cc18
-rw-r--r--libdexfile/dex/type_lookup_table.cc2
-rw-r--r--libprofile/profile/profile_compilation_info.cc30
-rw-r--r--libprofile/profile/profile_compilation_info_test.cc194
-rw-r--r--oatdump/oatdump.cc62
-rw-r--r--openjdkjvmti/Android.bp1
-rw-r--r--openjdkjvmti/OpenjdkJvmTi.cc12
-rw-r--r--openjdkjvmti/art_jvmti.h4
-rw-r--r--openjdkjvmti/deopt_manager.cc2
-rw-r--r--openjdkjvmti/events-inl.h1
-rw-r--r--openjdkjvmti/events.cc16
-rw-r--r--openjdkjvmti/fixed_up_dex_file.cc22
-rw-r--r--openjdkjvmti/object_tagging.cc34
-rw-r--r--openjdkjvmti/object_tagging.h23
-rw-r--r--openjdkjvmti/ti_class.cc9
-rw-r--r--openjdkjvmti/ti_class_definition.cc8
-rw-r--r--openjdkjvmti/ti_ddms.cc3
-rw-r--r--openjdkjvmti/ti_extension.cc42
-rw-r--r--openjdkjvmti/ti_heap.cc10
-rw-r--r--openjdkjvmti/ti_logging.cc71
-rw-r--r--openjdkjvmti/ti_logging.h102
-rw-r--r--openjdkjvmti/ti_method.cc2
-rw-r--r--openjdkjvmti/ti_monitor.cc6
-rw-r--r--openjdkjvmti/ti_object.cc2
-rw-r--r--openjdkjvmti/ti_redefine.cc22
-rw-r--r--openjdkjvmti/ti_search.cc14
-rw-r--r--openjdkjvmti/ti_stack.cc18
-rw-r--r--openjdkjvmti/ti_thread.cc4
-rw-r--r--openjdkjvmti/transform.cc2
-rw-r--r--profman/profile_assistant_test.cc4
-rw-r--r--profman/profman.cc10
-rw-r--r--runtime/Android.bp7
-rw-r--r--runtime/arch/arch_test.cc6
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc2
-rw-r--r--runtime/arch/arm64/callee_save_frame_arm64.h2
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc3
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S194
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc4
-rw-r--r--runtime/arch/mips64/entrypoints_init_mips64.cc2
-rw-r--r--runtime/arch/stub_test.cc2
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc2
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.cc21
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.h2
-rw-r--r--runtime/arch/x86/instruction_set_features_x86_test.cc36
-rw-r--r--runtime/arch/x86_64/entrypoints_init_x86_64.cc2
-rw-r--r--runtime/art_field.cc2
-rw-r--r--runtime/art_method.cc4
-rw-r--r--runtime/base/mem_map_arena_pool.cc2
-rw-r--r--runtime/base/mutex.cc12
-rw-r--r--runtime/cha.cc12
-rw-r--r--runtime/class_linker-inl.h13
-rw-r--r--runtime/class_linker.cc156
-rw-r--r--runtime/class_linker.h16
-rw-r--r--runtime/class_linker_test.cc20
-rw-r--r--runtime/class_loader_context.cc12
-rw-r--r--runtime/class_loader_context_test.cc6
-rw-r--r--runtime/class_loader_utils.h2
-rw-r--r--runtime/class_root.h3
-rw-r--r--runtime/class_table-inl.h1
-rw-r--r--runtime/common_runtime_test.h4
-rw-r--r--runtime/common_throws.cc20
-rw-r--r--runtime/debug_print.cc4
-rw-r--r--runtime/debugger.cc21
-rw-r--r--runtime/dex/dex_file_annotations.cc2
-rw-r--r--runtime/dex2oat_environment_test.h4
-rw-r--r--runtime/dexopt_test.cc15
-rw-r--r--runtime/elf_file.cc20
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h22
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc2
-rw-r--r--runtime/entrypoints/quick/quick_default_init_entrypoints.h2
-rw-r--r--runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc10
-rw-r--r--runtime/entrypoints/quick/quick_field_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_throw_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc16
-rw-r--r--runtime/entrypoints_order_test.cc4
-rw-r--r--runtime/gc/accounting/atomic_stack.h4
-rw-r--r--runtime/gc/accounting/bitmap.cc4
-rw-r--r--runtime/gc/accounting/card_table.cc4
-rw-r--r--runtime/gc/accounting/mod_union_table.cc2
-rw-r--r--runtime/gc/accounting/read_barrier_table.h4
-rw-r--r--runtime/gc/accounting/remembered_set.cc2
-rw-r--r--runtime/gc/accounting/space_bitmap.cc4
-rw-r--r--runtime/gc/allocator/rosalloc.cc6
-rw-r--r--runtime/gc/collector/concurrent_copying-inl.h16
-rw-r--r--runtime/gc/collector/concurrent_copying.cc81
-rw-r--r--runtime/gc/collector/immune_spaces.cc2
-rw-r--r--runtime/gc/collector/immune_spaces_test.cc42
-rw-r--r--runtime/gc/collector/mark_sweep.cc12
-rw-r--r--runtime/gc/collector/semi_space.cc3
-rw-r--r--runtime/gc/heap-inl.h8
-rw-r--r--runtime/gc/heap.cc32
-rw-r--r--runtime/gc/heap_test.cc6
-rw-r--r--runtime/gc/reference_processor.cc10
-rw-r--r--runtime/gc/reference_queue.cc6
-rw-r--r--runtime/gc/space/bump_pointer_space.cc2
-rw-r--r--runtime/gc/space/dlmalloc_space.cc2
-rw-r--r--runtime/gc/space/image_space.cc127
-rw-r--r--runtime/gc/space/image_space.h10
-rw-r--r--runtime/gc/space/image_space_test.cc11
-rw-r--r--runtime/gc/space/large_object_space.cc10
-rw-r--r--runtime/gc/space/malloc_space.cc3
-rw-r--r--runtime/gc/space/region_space-inl.h15
-rw-r--r--runtime/gc/space/region_space.cc8
-rw-r--r--runtime/gc/space/region_space.h9
-rw-r--r--runtime/gc/space/rosalloc_space_random_test.cc2
-rw-r--r--runtime/gc/space/rosalloc_space_static_test.cc2
-rw-r--r--runtime/gc/space/space_test.h1
-rw-r--r--runtime/gc/system_weak_test.cc10
-rw-r--r--runtime/gc/verification.cc3
-rw-r--r--runtime/handle_scope-inl.h2
-rw-r--r--runtime/hidden_api.h4
-rw-r--r--runtime/hprof/hprof.cc9
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/image.h45
-rw-r--r--runtime/indirect_reference_table.cc8
-rw-r--r--runtime/instrumentation.cc71
-rw-r--r--runtime/instrumentation_test.cc30
-rw-r--r--runtime/intern_table-inl.h10
-rw-r--r--runtime/interpreter/interpreter.cc6
-rw-r--r--runtime/interpreter/interpreter_cache.h22
-rw-r--r--runtime/interpreter/interpreter_common.cc21
-rw-r--r--runtime/interpreter/interpreter_common.h139
-rw-r--r--runtime/interpreter/interpreter_switch_impl-inl.h (renamed from runtime/interpreter/interpreter_switch_impl.cc)45
-rw-r--r--runtime/interpreter/interpreter_switch_impl0.cc30
-rw-r--r--runtime/interpreter/interpreter_switch_impl1.cc30
-rw-r--r--runtime/interpreter/interpreter_switch_impl2.cc30
-rw-r--r--runtime/interpreter/interpreter_switch_impl3.cc30
-rw-r--r--runtime/interpreter/mterp/arm/floating_point.S8
-rw-r--r--runtime/interpreter/mterp/arm/invoke.S8
-rw-r--r--runtime/interpreter/mterp/arm/main.S36
-rw-r--r--runtime/interpreter/mterp/arm/object.S2
-rw-r--r--runtime/interpreter/mterp/arm64/invoke.S8
-rw-r--r--runtime/interpreter/mterp/arm64/main.S35
-rw-r--r--runtime/interpreter/mterp/arm64/object.S2
-rw-r--r--runtime/interpreter/mterp/common/gen_setup.py37
-rwxr-xr-xruntime/interpreter/mterp/gen_mterp.py4
-rw-r--r--runtime/interpreter/mterp/mterp.cc100
-rw-r--r--runtime/interpreter/mterp/mterp.h11
-rw-r--r--runtime/interpreter/mterp/x86/arithmetic.S53
-rw-r--r--runtime/interpreter/mterp/x86/invoke.S10
-rw-r--r--runtime/interpreter/mterp/x86/main.S76
-rw-r--r--runtime/interpreter/mterp/x86_64/arithmetic.S50
-rw-r--r--runtime/interpreter/mterp/x86_64/invoke.S10
-rw-r--r--runtime/interpreter/mterp/x86_64/main.S74
-rw-r--r--runtime/interpreter/unstarted_runtime.cc8
-rw-r--r--runtime/interpreter/unstarted_runtime_test.cc4
-rw-r--r--runtime/jdwp/jdwp_handler.cc3
-rw-r--r--runtime/jit/jit.cc12
-rw-r--r--runtime/jit/jit_code_cache.cc33
-rw-r--r--runtime/jit/profile_saver.cc18
-rw-r--r--runtime/jit/profiling_info.h2
-rw-r--r--runtime/jni/check_jni.cc2
-rw-r--r--runtime/jni/jni_internal.cc20
-rw-r--r--runtime/jni/jni_internal_test.cc6
-rw-r--r--runtime/method_handles.cc7
-rw-r--r--runtime/mirror/array-inl.h1
-rw-r--r--runtime/mirror/class-inl.h10
-rw-r--r--runtime/mirror/class.cc2
-rw-r--r--runtime/mirror/class.h2
-rw-r--r--runtime/mirror/dex_cache-inl.h23
-rw-r--r--runtime/mirror/dex_cache.h21
-rw-r--r--runtime/mirror/dex_cache_test.cc2
-rw-r--r--runtime/mirror/object-inl.h6
-rw-r--r--runtime/mirror/object-readbarrier-inl.h2
-rw-r--r--runtime/mirror/object.cc2
-rw-r--r--runtime/mirror/string-inl.h2
-rw-r--r--runtime/monitor.cc14
-rw-r--r--runtime/monitor_android.cc2
-rw-r--r--runtime/monitor_test.cc2
-rw-r--r--runtime/native/dalvik_system_DexFile.cc17
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc5
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc6
-rw-r--r--runtime/native/dalvik_system_VMStack.cc2
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc5
-rw-r--r--runtime/native/java_lang_Class.cc2
-rw-r--r--runtime/native/java_lang_Thread.cc2
-rw-r--r--runtime/native/java_lang_VMClassLoader.cc1
-rw-r--r--runtime/native/java_lang_invoke_MethodHandleImpl.cc2
-rw-r--r--runtime/native/sun_misc_Unsafe.cc4
-rw-r--r--runtime/non_debuggable_classes.cc1
-rw-r--r--runtime/oat.h5
-rw-r--r--runtime/oat_file.cc76
-rw-r--r--runtime/oat_file.h10
-rw-r--r--runtime/oat_file_assistant.cc39
-rw-r--r--runtime/oat_file_assistant.h1
-rw-r--r--runtime/oat_file_assistant_test.cc48
-rw-r--r--runtime/oat_file_manager.cc14
-rw-r--r--runtime/oat_file_test.cc27
-rw-r--r--runtime/proxy_test.h4
-rw-r--r--runtime/quick_exception_handler.cc9
-rw-r--r--runtime/runtime-inl.h11
-rw-r--r--runtime/runtime.cc55
-rw-r--r--runtime/runtime.h15
-rw-r--r--runtime/runtime_android.cc6
-rw-r--r--runtime/runtime_callbacks_test.cc12
-rw-r--r--runtime/runtime_linux.cc6
-rw-r--r--runtime/signal_catcher.cc4
-rw-r--r--runtime/stack_map.h2
-rw-r--r--runtime/subtype_check.h20
-rw-r--r--runtime/subtype_check_bits.h6
-rw-r--r--runtime/subtype_check_bits_and_status.h8
-rw-r--r--runtime/subtype_check_info_test.cc150
-rw-r--r--runtime/subtype_check_test.cc26
-rw-r--r--runtime/thread.cc45
-rw-r--r--runtime/thread.h54
-rw-r--r--runtime/thread_list.cc9
-rw-r--r--runtime/thread_pool.cc4
-rw-r--r--runtime/thread_pool_test.cc2
-rw-r--r--runtime/trace.cc2
-rw-r--r--runtime/transaction.cc1
-rw-r--r--runtime/vdex_file.cc16
-rw-r--r--runtime/vdex_file_test.cc8
-rw-r--r--runtime/verifier/method_verifier.cc30
-rw-r--r--runtime/verifier/method_verifier_test.cc2
-rw-r--r--runtime/verifier/reg_type.cc10
-rw-r--r--runtime/verifier/reg_type_cache-inl.h8
-rw-r--r--runtime/verifier/reg_type_cache.cc4
-rw-r--r--runtime/verifier/reg_type_test.cc50
-rw-r--r--runtime/verifier/verifier_deps.cc10
-rw-r--r--test/1957-error-ext/expected.txt4
-rw-r--r--test/1957-error-ext/info.txt1
-rw-r--r--test/1957-error-ext/lasterror.cc112
-rwxr-xr-xtest/1957-error-ext/run18
-rw-r--r--test/1957-error-ext/src/Main.java21
-rw-r--r--test/1957-error-ext/src/art/Redefinition.java91
-rw-r--r--test/1957-error-ext/src/art/Test1957.java86
-rw-r--r--test/602-deoptimizeable/info.txt7
-rw-r--r--test/602-deoptimizeable/src/Main.java33
-rw-r--r--test/626-const-class-linking/clear_dex_cache_types.cc1
-rw-r--r--test/687-deopt/expected.txt1
-rw-r--r--test/687-deopt/info.txt2
-rw-r--r--test/687-deopt/src/Main.java53
-rw-r--r--test/905-object-free/src/art/Test905.java62
-rw-r--r--test/Android.bp1
-rw-r--r--test/StringLiterals/StringLiterals.java40
-rw-r--r--test/knownfailures.json10
-rw-r--r--tools/ahat/Android.mk18
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java3
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java59
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java35
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/CsvWriter.java49
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java3
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java19
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java5
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java5
-rw-r--r--tools/class2greylist/test/Android.mk4
-rw-r--r--tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java65
-rw-r--r--tools/cpp-define-generator/asm_defines.cc2
-rw-r--r--tools/cpp-define-generator/globals.def5
-rw-r--r--tools/cpp-define-generator/thread.def6
-rw-r--r--tools/jfuzz/jfuzz.cc4
-rw-r--r--tools/ti-fast/tifast.cc2
-rw-r--r--tools/titrace/instruction_decoder.cc2
-rw-r--r--tools/veridex/flow_analysis.cc2
324 files changed, 4286 insertions, 2686 deletions
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index c716d92a9d..ba25393f0d 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -20,6 +20,7 @@
#include "android-base/endian.h"
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -251,6 +252,8 @@ void AdbConnectionState::StartDebuggerThreads() {
runtime->StartThreadBirth();
}
ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
+ // Note: Using pthreads instead of std::thread to not abort when the thread cannot be
+ // created (exception support required).
pthread_t pthread;
std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
started_debugger_threads_ = true;
@@ -268,7 +271,7 @@ void AdbConnectionState::StartDebuggerThreads() {
runtime->EndThreadBirth();
return;
}
- data.release();
+ data.release(); // NOLINT pthreads API.
}
static bool FlagsSet(int16_t data, int16_t flags) {
@@ -426,11 +429,11 @@ void AdbConnectionState::SendAgentFds(bool require_handshake) {
cmsg->cmsg_type = SCM_RIGHTS;
// Duplicate the fds before sending them.
- android::base::unique_fd read_fd(dup(adb_connection_socket_));
+ android::base::unique_fd read_fd(art::DupCloexec(adb_connection_socket_));
CHECK_NE(read_fd.get(), -1) << "Failed to dup read_fd_: " << strerror(errno);
- android::base::unique_fd write_fd(dup(adb_connection_socket_));
+ android::base::unique_fd write_fd(art::DupCloexec(adb_connection_socket_));
CHECK_NE(write_fd.get(), -1) << "Failed to dup write_fd: " << strerror(errno);
- android::base::unique_fd write_lock_fd(dup(adb_write_event_fd_));
+ android::base::unique_fd write_lock_fd(art::DupCloexec(adb_write_event_fd_));
CHECK_NE(write_lock_fd.get(), -1) << "Failed to dup write_lock_fd: " << strerror(errno);
dt_fd_forward::FdSet {
diff --git a/build/Android.bp b/build/Android.bp
index 9797268680..09d3a183f9 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -18,9 +18,12 @@ bootstrap_go_package {
}
art_clang_tidy_errors = [
+ "android-cloexec-dup",
+ "android-cloexec-open",
"bugprone-argument-comment",
"bugprone-lambda-function-name",
"bugprone-unused-raii", // Protect scoped things like MutexLock.
+ "bugprone-unused-return-value",
"bugprone-virtual-near-miss",
"modernize-use-bool-literals",
"modernize-use-nullptr",
@@ -34,9 +37,12 @@ art_clang_tidy_errors = [
"misc-unused-using-decls",
]
// Should be: strings.Join(art_clang_tidy_errors, ",").
-art_clang_tidy_errors_str = "bugprone-argument-comment"
+art_clang_tidy_errors_str = "android-cloexec-dup"
+ + ",android-cloexec-open"
+ + ",bugprone-argument-comment"
+ ",bugprone-lambda-function-name"
+ ",bugprone-unused-raii"
+ + ",bugprone-unused-return-value"
+ ",bugprone-virtual-near-miss"
+ ",modernize-redundant-void-arg"
+ ",modernize-use-bool-literals"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4badc5a907..e2a0a391ab 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,6 +61,7 @@ GTEST_DEX_DIRECTORIES := \
StaticLeafMethods \
Statics \
StaticsFromCode \
+ StringLiterals \
Transaction \
XandY
@@ -174,7 +175,7 @@ ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods Prof
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dexanalyze_test_DEX_DEPS := MultiDex
ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed StringLiterals
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
diff --git a/build/art.go b/build/art.go
index 4edf41ee55..0b5ee4435c 100644
--- a/build/art.go
+++ b/build/art.go
@@ -154,8 +154,7 @@ func hostFlags(ctx android.BaseContext) []string {
if len(ctx.AConfig().SanitizeHost()) > 0 {
// art/test/137-cfi/cfi.cc
// error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess'
- // error: stack frame size of 6520 bytes in function 'art::interpreter::ExecuteSwitchImplCpp'
- hostFrameSizeLimit = 7400
+ hostFrameSizeLimit = 6400
}
cflags = append(cflags,
fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit),
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index fe05992960..bb550b3060 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -372,10 +372,10 @@ class ElfCompilationUnitWriter {
}
// Base class.
- mirror::Class* base_class = type->GetSuperClass();
+ ObjPtr<mirror::Class> base_class = type->GetSuperClass();
if (base_class != nullptr) {
info_.StartTag(DW_TAG_inheritance);
- base_class_references.emplace(info_.size(), base_class);
+ base_class_references.emplace(info_.size(), base_class.Ptr());
info_.WriteRef4(DW_AT_type, 0);
info_.WriteUdata(DW_AT_data_member_location, 0);
info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1e0b94de81..dd947d90b7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -79,7 +79,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
if (inserted) {
// Successfully added, release the unique_ptr since we no longer have ownership.
DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get());
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
} else {
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
@@ -117,7 +117,7 @@ void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) {
/*expected*/ nullptr,
verified_method.get()) ==
AtomicMap::InsertResult::kInsertResultSuccess) {
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c5416d5a3d..864b215a90 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -708,25 +708,42 @@ void CompilerDriver::Resolve(jobject class_loader,
}
}
-static void ResolveConstStrings(CompilerDriver* driver,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ TimingLogger* timings) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
+ size_t num_instructions = 0u;
for (const DexFile* dex_file : dex_files) {
dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
for (ClassAccessor accessor : dex_file->GetClasses()) {
- if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
+ if (!IsClassToCompile(accessor.GetDescriptor())) {
// Compilation is skipped, do not resolve const-string in code of this class.
// FIXME: Make sure that inlining honors this. b/26687569
continue;
}
+
+ const bool is_startup_class =
+ profile_compilation_info_ != nullptr &&
+ profile_compilation_info_->ContainsClass(*dex_file, accessor.GetClassIdx());
+
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const bool is_clinit = (method.GetAccessFlags() & kAccConstructor) != 0 &&
+ (method.GetAccessFlags() & kAccStatic) != 0;
+ const bool is_startup_clinit = is_startup_class && is_clinit;
+
+ if (only_startup_strings &&
+ profile_compilation_info_ != nullptr &&
+ (!profile_compilation_info_->GetMethodHotness(method.GetReference()).IsStartup() &&
+ !is_startup_clinit)) {
+ continue;
+ }
+
// Resolve const-strings in the code. Done to have deterministic allocation behavior. Right
// now this is single-threaded for simplicity.
// TODO: Collect the relevant string indices in parallel, then allocate them sequentially
@@ -740,6 +757,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
: inst->VRegB_31c());
ObjPtr<mirror::String> string = class_linker->ResolveString(string_index, dex_cache);
CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
+ ++num_instructions;
break;
}
@@ -750,6 +768,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
}
}
}
+ VLOG(compiler) << "Resolved " << num_instructions << " const string instructions";
}
// Initialize type check bit strings for check-cast and instance-of in the code. Done to have
@@ -897,8 +916,10 @@ void CompilerDriver::PreCompile(jobject class_loader,
if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
// Resolve strings from const-string. Do this now to have a deterministic image.
- ResolveConstStrings(this, dex_files, timings);
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
+ } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
}
Verify(class_loader, dex_files, timings);
@@ -1146,7 +1167,7 @@ static void MaybeAddToImageClasses(Thread* self,
if (klass->IsArrayClass()) {
MaybeAddToImageClasses(self, klass->GetComponentType(), image_classes);
}
- klass.Assign(klass->GetSuperClass());
+ klass = klass->GetSuperClass();
}
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 343f67c6d5..9a83e55c96 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -430,6 +430,12 @@ class CompilerDriver {
typedef AtomicDexRefMap<MethodReference, CompiledMethod*> MethodTable;
private:
+ // Resolve const string literals that are loaded from dex code. If only_startup_strings is
+ // specified, only methods that are marked startup in the profile are resolved.
+ void ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ /*inout*/ TimingLogger* timings);
+
// All method references that this compiler has compiled.
MethodTable compiled_methods_;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 3ab9afc5d6..6b0e45629b 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -69,6 +69,7 @@ CompilerOptions::CompilerOptions()
force_determinism_(false),
deduplicate_code_(true),
count_hotness_in_compiled_code_(false),
+ resolve_startup_const_strings_(false),
register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
passes_to_run_(nullptr) {
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index e9cbf74428..4a6bbfaae6 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -313,6 +313,10 @@ class CompilerOptions final {
return count_hotness_in_compiled_code_;
}
+ bool ResolveStartupConstStrings() const {
+ return resolve_startup_const_strings_;
+ }
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -392,6 +396,10 @@ class CompilerOptions final {
// won't be atomic for performance reasons, so we accept races, just like in interpreter.
bool count_hotness_in_compiled_code_;
+ // Whether we eagerly resolve all of the const strings that are loaded from startup methods in the
+ // profile.
+ bool resolve_startup_const_strings_;
+
RegisterAllocator::Strategy register_allocation_strategy_;
// If not null, specifies optimization passes which will be run instead of defaults.
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index d4a582fb35..5a844959c4 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -80,6 +80,7 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
if (map.Exists(Base::CountHotnessInCompiledCode)) {
options->count_hotness_in_compiled_code_ = true;
}
+ map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_);
if (map.Exists(Base::DumpTimings)) {
options->dump_timings_ = true;
@@ -184,6 +185,11 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.template WithType<std::string>()
.IntoKey(Map::RegisterAllocationStrategy)
+ .Define("--resolve-startup-const-strings=_")
+ .template WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(Map::ResolveStartupConstStrings)
+
.Define("--verbose-methods=_")
.template WithType<ParseStringList<','>>()
.IntoKey(Map::VerboseMethods);
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 238cd465df..a593240365 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -52,13 +52,14 @@ COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
+COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
// TODO: Add type parser.
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
-COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
+COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
COMPILER_OPTIONS_KEY (Unit, CountHotnessInCompiledCode)
COMPILER_OPTIONS_KEY (Unit, DumpTimings)
COMPILER_OPTIONS_KEY (Unit, DumpPassTimings)
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 7c29df877a..e15161e093 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -598,9 +598,10 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
HInstruction* new_array = new (allocator) HNewArray(
- constant_10,
- constant_10,
- 0);
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (allocator) HGoto());
@@ -977,7 +978,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (GetAllocator()) HGoto());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d440cf3e4c..d8e442c642 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1653,19 +1653,12 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
EmitJitRootPatches(code, roots_data);
}
-QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
- ScopedObjectAccess soa(Thread::Current());
- if (array_klass == nullptr) {
- // This can only happen for non-primitive arrays, as primitive arrays can always
- // be resolved.
- return kQuickAllocArrayResolved32;
- }
-
- switch (array_klass->GetComponentSize()) {
- case 1: return kQuickAllocArrayResolved8;
- case 2: return kQuickAllocArrayResolved16;
- case 4: return kQuickAllocArrayResolved32;
- case 8: return kQuickAllocArrayResolved64;
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+ switch (new_array->GetComponentSizeShift()) {
+ case 0: return kQuickAllocArrayResolved8;
+ case 1: return kQuickAllocArrayResolved16;
+ case 2: return kQuickAllocArrayResolved32;
+ case 3: return kQuickAllocArrayResolved64;
}
LOG(FATAL) << "Unreachable";
return kQuickAllocArrayResolved;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4e73e0bdcb..3f560780ce 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -636,7 +636,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void GenerateNop() = 0;
- static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
protected:
// Patch info used for recording locations of required linker patches and their targets,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0a28f6557a..a9acf90762 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1205,6 +1205,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// mr : Runtime reserved.
// ip1 : VIXL core temp.
// ip0 : VIXL core temp.
+ // x18 : Platform register.
//
// Blocked fp registers:
// d31 : VIXL fp temp.
@@ -1213,6 +1214,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
while (!reserved_core_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
}
+ blocked_core_registers_[X18] = true;
CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
while (!reserved_fp_registers.IsEmpty()) {
@@ -5004,10 +5006,8 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8bd4af50f3..d5b734d55a 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5043,10 +5043,8 @@ void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 1f0e200cb7..c6d0f3f618 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -8702,10 +8702,8 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 0005d8f6b6..039b3ca3ff 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -6633,10 +6633,8 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ca1e93b75e..9f34a51d84 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4525,10 +4525,8 @@ void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e6643fb08c..dac2dba605 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4371,10 +4371,8 @@ void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index e5bc6ef22c..223e08e1b4 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -701,7 +701,11 @@ TEST_F(InductionVarRangeTest, MaxValue) {
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ x_,
+ /* length= */ x_,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
entry_block_->AddInstruction(new_array);
HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b576f8399d..bd94789144 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1842,15 +1842,27 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction,
graph_->SetHasBoundsChecks(true);
}
+HNewArray* HInstructionBuilder::BuildNewArray(uint32_t dex_pc,
+ dex::TypeIndex type_index,
+ HInstruction* length) {
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(type_index));
+ DCHECK_EQ(descriptor[0], '[');
+ size_t component_type_shift = Primitive::ComponentSizeShift(Primitive::GetType(descriptor[1]));
+
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc, component_type_shift);
+ AppendInstruction(new_array);
+ return new_array;
+}
+
HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
const InstructionOperands& operands) {
const size_t number_of_operands = operands.GetNumberOfOperands();
HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(object);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
DCHECK_EQ(descriptor[0], '[') << descriptor;
char primitive = descriptor[1];
@@ -1863,13 +1875,13 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
for (size_t i = 0; i < number_of_operands; ++i) {
HInstruction* value = LoadLocal(operands.GetOperand(i), type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(new_array, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
- latest_result_ = object;
+ latest_result_ = new_array;
- return object;
+ return new_array;
}
template <typename T>
@@ -2892,10 +2904,8 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
- HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
break;
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index af1b86ca6f..2ab2139216 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -179,6 +179,9 @@ class HInstructionBuilder : public ValueObject {
uint32_t call_site_idx,
const InstructionOperands& operands);
+ // Builds a new array node.
+ HNewArray* BuildNewArray(uint32_t dex_pc, dex::TypeIndex type_index, HInstruction* length);
+
// Builds a new array node and the instructions that fill it.
HNewArray* BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 97b50d36da..6ebe89eae1 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4849,10 +4849,11 @@ class HNeg final : public HUnaryOperation {
class HNewArray final : public HExpression<2> {
public:
- HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+ HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cls);
SetRawInputAt(1, length);
+ SetPackedField<ComponentSizeShiftField>(component_size_shift);
}
bool IsClonable() const override { return true; }
@@ -4874,10 +4875,23 @@ class HNewArray final : public HExpression<2> {
return InputAt(1);
}
+ size_t GetComponentSizeShift() {
+ return GetPackedField<ComponentSizeShiftField>();
+ }
+
DECLARE_INSTRUCTION(NewArray);
protected:
DEFAULT_COPY_CONSTRUCTOR(NewArray);
+
+ private:
+ static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
+ static constexpr size_t kNumberOfNewArrayPackedBits =
+ kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
+ static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+ using ComponentSizeShiftField =
+ BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShift>;
};
class HAdd final : public HBinaryOperation {
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 85e4326494..0d279ede19 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@ const char* const VixlJniHelpersResults[] = {
" f0: f1bc 0f00 cmp.w ip, #0\n",
" f4: bf18 it ne\n",
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
- " fa: f8d9 c08c ldr.w ip, [r9, #140] ; 0x8c\n",
+ " fa: f8d9 c094 ldr.w ip, [r9, #148] ; 0x94\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
" 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -153,7 +153,7 @@ const char* const VixlJniHelpersResults[] = {
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2d4 ldr.w ip, [r9, #724] ; 0x2d4\n",
+ " 224: f8d9 c2dc ldr.w ip, [r9, #732] ; 0x2dc\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 86f9010ea3..2d1e451232 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -59,6 +59,98 @@ std::ostream& operator<<(std::ostream& os, const Address& addr) {
}
}
+uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm ) {
+ // VEX Byte 1
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B
+ }
+
+ // VEX.mmmmm
+ switch (mmmmm) {
+ case 1:
+ // implied 0F leading opcode byte
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // implied leading 0F 38 opcode byte
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // implied leading OF 3A opcode byte
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+ return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) {
+ uint8_t vex_prefix = 0;
+ // VEX Byte 2
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+ // VEX.vvvv
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15-static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ Register vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp
+ switch (pp) {
+ case 0:
+ // SIMD Pefix - None
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
+
void X86Assembler::call(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xFF);
@@ -179,6 +271,60 @@ void X86Assembler::movntl(const Address& dst, Register src) {
EmitOperand(src, dst);
}
+void X86Assembler::blsi(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(3, src);
+}
+
+void X86Assembler::blsmsk(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(2, src);
+}
+
+void X86Assembler::blsr(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(1, src);
+}
+
void X86Assembler::bswapl(Register dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -1267,6 +1413,25 @@ void X86Assembler::pand(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst, src);
}
+void X86Assembler::andn(Register dst, Register src1, Register src2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(src1),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field
+ EmitUint8(0xF2);
+ EmitRegisterOperand(dst, src2);
+}
+
void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 5ac9236d6b..275e5c1234 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -337,6 +337,10 @@ class X86Assembler final : public Assembler {
void movntl(const Address& dst, Register src);
+ void blsi(Register dst, Register src); // no addr variant (for now)
+ void blsmsk(Register dst, Register src); // no addr variant (for now)
+ void blsr(Register dst, Register src); // no addr varianr (for now)
+
void bswapl(Register dst);
void bsfl(Register dst, Register src);
@@ -500,6 +504,7 @@ class X86Assembler final : public Assembler {
void andps(XmmRegister dst, const Address& src);
void pand(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void andn(Register dst, Register src1, Register src2); // no addr variant (for now)
void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andnps(XmmRegister dst, XmmRegister src);
void pandn(XmmRegister dst, XmmRegister src);
@@ -837,6 +842,11 @@ class X86Assembler final : public Assembler {
void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
void EmitGenericShift(int rm, const Operand& operand, Register shifter);
+ // Emit a 3 byte VEX Prefix
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w , int l , X86ManagedRegister operand, int pp);
+
ConstantArea constant_area_;
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index ad75174d23..1d8bfe7fa7 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -349,6 +349,18 @@ TEST_F(AssemblerX86Test, RepMovsw) {
DriverStr(expected, "rep_movsw");
}
+TEST_F(AssemblerX86Test, Blsmask) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86Test, Blsi) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86Test, Blsr) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
TEST_F(AssemblerX86Test, Bsfl) {
DriverStr(RepeatRR(&x86::X86Assembler::bsfl, "bsfl %{reg2}, %{reg1}"), "bsfl");
}
@@ -657,6 +669,10 @@ TEST_F(AssemblerX86Test, PAnd) {
DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
}
+TEST_F(AssemblerX86Test, Andn) {
+ DriverStr(RepeatRRR(&x86::X86Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
+
TEST_F(AssemblerX86Test, AndnPD) {
DriverStr(RepeatFF(&x86::X86Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index bd31561937..ae68fe934e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -64,6 +64,99 @@ std::ostream& operator<<(std::ostream& os, const Address& addr) {
}
}
+uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
+ // VEX Byte 1
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B
+ }
+
+ // VEX.mmmmm
+ switch (mmmmm) {
+ case 1:
+ // implied 0F leading opcode byte
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // implied leading 0F 38 opcode byte
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // implied leading OF 3A opcode byte
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+
+ return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) {
+ // VEX Byte 2
+ uint8_t vex_prefix = 0;
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+ // VEX.vvvv
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15-static_cast<int>(vvvv.AsFloatRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ CpuRegister vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp
+ switch (pp) {
+ case 0:
+ // SIMD Pefix - None
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
+
void X86_64Assembler::call(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1483,6 +1576,25 @@ void X86_64Assembler::pand(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+void X86_64Assembler::andn(CpuRegister dst, CpuRegister src1, CpuRegister src2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(dst.NeedsRex(),
+ /*x=*/ false,
+ src2.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(src1.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field
+ EmitUint8(0xF2);
+ EmitRegisterOperand(dst.LowBits(), src2.LowBits());
+}
+
void X86_64Assembler::andnpd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -3260,6 +3372,60 @@ void X86_64Assembler::setcc(Condition condition, CpuRegister dst) {
EmitUint8(0xC0 + dst.LowBits());
}
+void X86_64Assembler::blsi(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(3, src.LowBits());
+}
+
+void X86_64Assembler::blsmsk(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(2, src.LowBits());
+}
+
+void X86_64Assembler::blsr(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(1, src.LowBits());
+}
+
void X86_64Assembler::bswapl(CpuRegister dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex(false, false, false, false, dst.NeedsRex());
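The three EmitVexByte* helpers above compose into a three-byte VEX prefix. As a hand-worked check (a standalone sketch that mirrors the patch's bit layout rather than calling its helpers), andn rax, rcx, rdx encodes as VEX.LZ.0F38.W1 F2 /r:

    #include <cstdint>

    // Byte zero: 0xC4 selects the three-byte VEX form.
    constexpr uint8_t kByteZero = 0xC4;
    // Byte one: inverted R/X/B bits all set (no REX-extended registers),
    // mmmmm = 2 selecting the 0F 38 opcode map.
    constexpr uint8_t kByteOne = 0x80 | 0x40 | 0x20 | 0x02;  // 0xE2
    // Byte two: W = 1 (64-bit operands), inverted vvvv for rcx (register 1),
    // L = 0 and pp = 0.
    constexpr uint8_t kByteTwo = 0x80 | (((15 - 1) & 0x0F) << 3);  // 0xF0
    // Opcode 0xF2 and ModRM 0xC2 (mod = 11, reg = rax, rm = rdx) follow,
    // giving the full sequence C4 E2 F0 F2 C2.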
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e696635e62..ff13ea3293 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -543,6 +543,7 @@ class X86_64Assembler final : public Assembler {
void andps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void pand(XmmRegister dst, XmmRegister src);
+ void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2);
void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andnps(XmmRegister dst, XmmRegister src);
void pandn(XmmRegister dst, XmmRegister src);
@@ -796,6 +797,10 @@ class X86_64Assembler final : public Assembler {
void bsfq(CpuRegister dst, CpuRegister src);
void bsfq(CpuRegister dst, const Address& src);
+ void blsi(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsmsk(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsr(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+
void bsrl(CpuRegister dst, CpuRegister src);
void bsrl(CpuRegister dst, const Address& src);
void bsrq(CpuRegister dst, CpuRegister src);
@@ -951,6 +956,11 @@ class X86_64Assembler final : public Assembler {
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
+ // Helpers for emitting a 3-byte VEX prefix.
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp);
+
ConstantArea constant_area_;
DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
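A note on the is_two_byte parameter declared above (general VEX background): the shorter 0xC5 prefix only applies when the inverted X, B and W bits would all be zero and the opcode map is 0F; the BMI instructions added here use the 0F 38 map with W = 1, so they always take the three-byte form. A hypothetical predicate, assuming the patch's boolean conventions:

    // True when the two-byte VEX prefix (0xC5) can replace the three-byte
    // form (0xC4): no extended index/base registers, W = 0, and the 0F map.
    bool CanUseTwoByteVex(bool x, bool b, bool w, int mmmmm) {
      return !x && !b && !w && mmmmm == 1;
    }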
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index fe42f9b19b..528e037bdc 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1414,7 +1414,9 @@ TEST_F(AssemblerX86_64Test, Andpd) {
TEST_F(AssemblerX86_64Test, Pand) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
}

+TEST_F(AssemblerX86_64Test, Andn) {
+ DriverStr(RepeatRRR(&x86_64::X86_64Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
+
TEST_F(AssemblerX86_64Test, andnpd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
}
@@ -1785,6 +1787,18 @@ TEST_F(AssemblerX86_64Test, RetAndLeave) {
DriverFn(&ret_and_leave_fn, "retleave");
}
+TEST_F(AssemblerX86_64Test, Blsmsk) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86_64Test, Blsi) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86_64Test, Blsr) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
TEST_F(AssemblerX86_64Test, Bswapl) {
DriverStr(Repeatr(&x86_64::X86_64Assembler::bswapl, "bswap %{reg}"), "bswapl");
}
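For reference when reading the three tests just added (architectural semantics, not code from the patch), the BMI1 bit-manipulation instructions reduce to simple identities:

    #include <cstdint>

    uint64_t Blsi(uint64_t x)   { return x & (~x + 1); }  // isolate lowest set bit (x & -x)
    uint64_t Blsmsk(uint64_t x) { return x ^ (x - 1); }   // mask through lowest set bit
    uint64_t Blsr(uint64_t x)   { return x & (x - 1); }   // clear lowest set bit

For x = 0xF0, these yield 0x10, 0x1F and 0xE0 respectively.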
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 71cdfd2c08..f0f2b3e397 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -477,6 +477,9 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" compiling the apk. If specified, the string will be embedded verbatim in");
UsageError(" the key value store of the oat file.");
UsageError("");
+ UsageError(" --resolve-startup-const-strings=true|false: If true, the compiler eagerly");
+ UsageError(" resolves strings referenced by const-string instructions in startup methods.");
+ UsageError("");
UsageError(" Example: --compilation-reason=install");
UsageError("");
std::cerr << "See log for usage error information\n";
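The new flag is meant to be combined with a profile and an app image; a hypothetical invocation (other arguments elided, flag names as exercised by the dex2oat tests later in this patch):

    dex2oat ... --profile-file=base.prof --app-image-file=base.art \
        --resolve-startup-const-strings=true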
@@ -586,8 +589,10 @@ class WatchDog {
const char* reason = "dex2oat watch dog thread waiting";
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
while (!shutting_down_) {
- int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
- if (rc == ETIMEDOUT) {
+ int rc = pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts);
+ if (rc == EINTR) {
+ continue;
+ } else if (rc == ETIMEDOUT) {
Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
timeout_in_milliseconds_/1000));
} else if (rc != 0) {
@@ -657,21 +662,21 @@ class Dex2Oat final {
if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
// We want to just exit on non-debug builds, not bringing the runtime down
// in an orderly fashion. So release the following fields.
- driver_.release();
- image_writer_.release();
+ driver_.release(); // NOLINT
+ image_writer_.release(); // NOLINT
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
new std::vector<MemMap>(std::move(opened_dex_files_maps_)); // Leak MemMaps.
for (std::unique_ptr<File>& vdex_file : vdex_files_) {
- vdex_file.release();
+ vdex_file.release(); // NOLINT
}
for (std::unique_ptr<File>& oat_file : oat_files_) {
- oat_file.release();
+ oat_file.release(); // NOLINT
}
- runtime_.release();
- verification_results_.release();
- key_value_store_.release();
+ runtime_.release(); // NOLINT
+ verification_results_.release(); // NOLINT
+ key_value_store_.release(); // NOLINT
}
}
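Background on the watchdog change above: TEMP_FAILURE_RETRY only retries expressions that return -1 with errno set to EINTR, but pthread functions report errors through their return value and never set errno, so the old wrapper never actually retried. A minimal sketch of the corrected pattern:

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    // pthread_cond_timedwait() returns an error code directly (never -1),
    // so EINTR must be tested against the return value, not errno.
    int WaitWithRetry(pthread_cond_t* cond, pthread_mutex_t* mu, const timespec* ts) {
      int rc;
      do {
        rc = pthread_cond_timedwait(cond, mu, ts);
      } while (rc == EINTR);
      return rc;  // 0, ETIMEDOUT, or another error code
    }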
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index a1fed5f6d9..898940a948 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -39,6 +39,7 @@
#include "dex/dex_file_loader.h"
#include "dex2oat_environment_test.h"
#include "dex2oat_return_codes.h"
+#include "intern_table-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "profile/profile_compilation_info.h"
@@ -136,14 +137,13 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -156,14 +156,13 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
if (!test_accepts_odex_file_on_failure) {
// Verify there's no loadable odex file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() == nullptr);
}
@@ -324,26 +323,26 @@ class Dex2oatSwapTest : public Dex2oatTest {
};
TEST_F(Dex2oatSwapTest, DoNotUseSwapDefaultSingleSmall) {
- RunTest(false /* use_fd */, false /* expect_use */);
- RunTest(true /* use_fd */, false /* expect_use */);
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false);
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false);
}
TEST_F(Dex2oatSwapTest, DoNotUseSwapSingle) {
- RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
- RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
}
TEST_F(Dex2oatSwapTest, DoNotUseSwapSmall) {
- RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
- RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
}
TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
- RunTest(false /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
- RunTest(true /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ true,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
}
@@ -369,7 +368,7 @@ class Dex2oatSwapUseTest : public Dex2oatSwapTest {
void GrabResult1() {
if (!kIsTargetBuild) {
native_alloc_1_ = ParseNativeAlloc();
- swap_1_ = ParseSwap(false /* expected */);
+ swap_1_ = ParseSwap(/*expected=*/ false);
} else {
native_alloc_1_ = std::numeric_limits<size_t>::max();
swap_1_ = 0;
@@ -379,7 +378,7 @@ class Dex2oatSwapUseTest : public Dex2oatSwapTest {
void GrabResult2() {
if (!kIsTargetBuild) {
native_alloc_2_ = ParseNativeAlloc();
- swap_2_ = ParseSwap(true /* expected */);
+ swap_2_ = ParseSwap(/*expected=*/ true);
} else {
native_alloc_2_ = 0;
swap_2_ = std::numeric_limits<size_t>::max();
@@ -449,15 +448,15 @@ TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
// investigate (b/29259363).
TEST_DISABLED_FOR_X86();
- RunTest(false /* use_fd */,
- false /* expect_use */);
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ false);
GrabResult1();
std::string output_1 = output_;
output_ = "";
- RunTest(false /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
GrabResult2();
std::string output_2 = output_;
@@ -513,14 +512,13 @@ class Dex2oatVeryLargeTest : public Dex2oatTest {
}
// Host/target independent checks.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_GT(app_image_file.length(), 0u);
@@ -636,7 +634,9 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const std::string& dex_location,
size_t num_classes,
uint32_t checksum) {
- int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ int profile_test_fd = open(test_profile.c_str(),
+ O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
+ 0644);
CHECK_GE(profile_test_fd, 0);
ProfileCompilationInfo info;
@@ -662,7 +662,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
std::vector<std::unique_ptr<const DexFile>> dex_files;
const ArtDexFileLoader dex_file_loader;
ASSERT_TRUE(dex_file_loader.Open(
- location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+ location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location,
@@ -714,8 +714,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
CompileProfileOdex(dex_location,
odex_location,
app_image_file,
- /* use_fd */ false,
- /* num_profile_classes */ 0);
+ /*use_fd=*/ false,
+ /*num_profile_classes=*/ 0);
CheckValidity();
ASSERT_TRUE(success_);
// Don't check the result since CheckResult relies on the class being in the profile.
@@ -727,8 +727,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
CompileProfileOdex(dex_location,
odex_location,
app_image_file,
- /* use_fd */ false,
- /* num_profile_classes */ 1);
+ /*use_fd=*/ false,
+ /*num_profile_classes=*/ 1);
CheckValidity();
ASSERT_TRUE(success_);
CheckResult(dex_location, odex_location, app_image_file);
@@ -756,8 +756,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
CompileProfileOdex(dex_location,
odex_location,
app_image_file_name,
- /* use_fd */ true,
- /* num_profile_classes */ 1,
+ /*use_fd=*/ true,
+ /*num_profile_classes=*/ 1,
{ input_vdex, output_vdex });
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -768,10 +768,10 @@ class Dex2oatLayoutTest : public Dex2oatTest {
CompileProfileOdex(dex_location,
odex_location,
app_image_file_name,
- /* use_fd */ true,
- /* num_profile_classes */ 1,
+ /*use_fd=*/ true,
+ /*num_profile_classes=*/ 1,
{ input_vdex, output_vdex },
- /* expect_success */ true);
+ /*expect_success=*/ true);
EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -784,14 +784,13 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const std::string& app_image_file_name) {
// Host/target independent checks.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -799,7 +798,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
std::vector<std::unique_ptr<const DexFile>> dex_files;
const ArtDexFileLoader dex_file_loader;
ASSERT_TRUE(dex_file_loader.Open(
- location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+ location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -852,11 +851,11 @@ class Dex2oatLayoutTest : public Dex2oatTest {
};
TEST_F(Dex2oatLayoutTest, TestLayout) {
- RunTest(/* app-image */ false);
+ RunTest(/*app_image=*/ false);
}
TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
- RunTest(/* app-image */ true);
+ RunTest(/*app_image=*/ true);
}
TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
@@ -881,8 +880,8 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
odex_location,
CompilerFilter::kQuicken,
{ input_vdex, output_vdex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
// Unquicken by running the verify compiler filter on the vdex file.
@@ -893,8 +892,8 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
odex_location,
CompilerFilter::kVerify,
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
CheckResult(dex_location, odex_location);
@@ -922,8 +921,8 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
odex_location,
CompilerFilter::kQuicken,
{ input_vdex, output_vdex, "--compact-dex-level=fast"},
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -935,8 +934,8 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
odex_location2,
CompilerFilter::kVerify,
{ input_vdex, output_vdex, "--compact-dex-level=none"},
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
ASSERT_EQ(vdex_file2->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -946,14 +945,13 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
void CheckResult(const std::string& dex_location, const std::string& odex_location) {
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -1326,14 +1324,13 @@ TEST_F(Dex2oatTest, LayoutSections) {
EXPECT_EQ(res, 0);
// Open our generated oat file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex->GetLocation().c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1436,14 +1433,13 @@ TEST_F(Dex2oatTest, GenerateCompactDex) {
{"--compact-dex-level=fast"});
EXPECT_EQ(res, 0);
// Open our generated oat file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1681,14 +1677,13 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailure) {
});
// Open our generated oat file.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
temp_dex.GetFilename().c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1705,7 +1700,7 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailureMultiDex) {
// Create a multidex file with only one dex that gets rejected for cdex conversion.
ScratchFile apk_file;
{
- FILE* file = fdopen(dup(apk_file.GetFd()), "w+b");
+ FILE* file = fdopen(DupCloexec(apk_file.GetFd()), "w+b");
ZipWriter writer(file);
// Add vdex to zip.
writer.StartEntry("classes.dex", ZipWriter::kCompress);
@@ -1759,14 +1754,13 @@ TEST_F(Dex2oatTest, VerifyCompilationReason) {
{ "--compilation-reason=install" },
true);
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1785,14 +1779,13 @@ TEST_F(Dex2oatTest, VerifyNoCompilationReason) {
{},
true);
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1816,21 +1809,20 @@ TEST_F(Dex2oatTest, DontExtract) {
{
// Check the vdex doesn't have dex.
std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_location.c_str(),
- /*writable*/ false,
- /*low_4gb*/ false,
- /*unquicken*/ false,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg));
ASSERT_TRUE(vdex != nullptr);
EXPECT_FALSE(vdex->HasDexSection()) << output_;
}
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr) << dex_location;
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1847,7 +1839,7 @@ TEST_F(Dex2oatTest, DontExtract) {
std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_location.c_str()));
ASSERT_TRUE(vdex_file != nullptr);
ASSERT_GT(vdex_file->GetLength(), 0u);
- FILE* file = fdopen(dup(dm_file.GetFd()), "w+b");
+ FILE* file = fdopen(DupCloexec(dm_file.GetFd()), "w+b");
ZipWriter writer(file);
auto write_all_bytes = [&](File* file) {
std::unique_ptr<uint8_t[]> bytes(new uint8_t[file->GetLength()]);
@@ -1929,8 +1921,8 @@ TEST_F(Dex2oatTest, QuickenedInput) {
// Disable cdex since we want to compare against the original dex file
// after unquickening.
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
// Unquicken by running the verify compiler filter on the vdex file and verify it matches.
std::string odex_location2 = GetOdexDir() + "/unquickened.odex";
@@ -1944,8 +1936,8 @@ TEST_F(Dex2oatTest, QuickenedInput) {
CompilerFilter::kVerify,
// Disable cdex to avoid needing to write out the shared section.
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_unquickened->Flush(), 0) << "Could not flush and close vdex file";
ASSERT_TRUE(success_);
@@ -1973,7 +1965,7 @@ TEST_F(Dex2oatTest, QuickenedInput) {
TEST_F(Dex2oatTest, CompactDexInvalidSource) {
ScratchFile invalid_dex;
{
- FILE* file = fdopen(dup(invalid_dex.GetFd()), "w+b");
+ FILE* file = fdopen(DupCloexec(invalid_dex.GetFd()), "w+b");
ZipWriter writer(file);
writer.StartEntry("classes.dex", ZipWriter::kAlign32);
DexFile::Header header = {};
@@ -2015,7 +2007,7 @@ TEST_F(Dex2oatTest, CompactDexInZip) {
// Create a zip containing the invalid dex.
ScratchFile invalid_dex_zip;
{
- FILE* file = fdopen(dup(invalid_dex_zip.GetFd()), "w+b");
+ FILE* file = fdopen(DupCloexec(invalid_dex_zip.GetFd()), "w+b");
ZipWriter writer(file);
writer.StartEntry("classes.dex", ZipWriter::kCompress);
ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
@@ -2062,14 +2054,13 @@ TEST_F(Dex2oatTest, AppImageNoProfile) {
[](const OatFile&) {});
// Open our generated oat file.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
odex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ImageHeader header = {};
@@ -2082,6 +2073,86 @@ TEST_F(Dex2oatTest, AppImageNoProfile) {
EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
}
+TEST_F(Dex2oatTest, AppImageResolveStrings) {
+ using Hotness = ProfileCompilationInfo::MethodHotness;
+ // Create a profile with the startup method marked.
+ ScratchFile profile_file;
+ std::vector<uint16_t> methods;
+ std::vector<dex::TypeIndex> classes;
+ {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("StringLiterals"));
+ for (ClassAccessor accessor : dex->GetClasses()) {
+ if (accessor.GetDescriptor() == std::string("LStringLiterals$StartupClass;")) {
+ classes.push_back(accessor.GetClassIdx());
+ }
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ std::string method_name(dex->GetMethodName(dex->GetMethodId(method.GetIndex())));
+ if (method_name == "startUpMethod") {
+ methods.push_back(method.GetIndex());
+ }
+ }
+ }
+ ASSERT_GT(classes.size(), 0u);
+ ASSERT_GT(methods.size(), 0u);
+ // Build the profile from the collected classes and methods.
+ ProfileCompilationInfo info;
+ info.AddClassesForDex(dex.get(), classes.begin(), classes.end());
+ info.AddMethodsForDex(Hotness::kFlagStartup, dex.get(), methods.begin(), methods.end());
+ // Save the profile since we want to use it with dex2oat to produce an oat file.
+ ASSERT_TRUE(info.Save(profile_file.GetFd()));
+ }
+ const std::string out_dir = GetScratchDir();
+ const std::string odex_location = out_dir + "/base.odex";
+ const std::string app_image_location = out_dir + "/base.art";
+ GenerateOdexForTest(GetTestDexFileName("StringLiterals"),
+ odex_location,
+ CompilerFilter::Filter::kSpeedProfile,
+ { "--app-image-file=" + app_image_location,
+ "--resolve-startup-const-strings=true",
+ "--profile-file=" + profile_file.GetFilename()},
+ /*expect_success=*/ true,
+ /*use_fd=*/ false,
+ [](const OatFile&) {});
+ // Open our generated oat file.
+ std::string error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+ odex_location.c_str(),
+ odex_location.c_str(),
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ odex_location.c_str(),
+ /*reservation=*/ nullptr,
+ &error_msg));
+ ASSERT_TRUE(odex_file != nullptr);
+ // Check that the app image intern table contains only the "startup" strings.
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<gc::space::ImageSpace> space =
+ gc::space::ImageSpace::CreateFromAppImage(app_image_location.c_str(),
+ odex_file.get(),
+ &error_msg);
+ ASSERT_TRUE(space != nullptr) << error_msg;
+ std::set<std::string> seen;
+ InternTable intern_table;
+ intern_table.AddImageStringsToTable(space.get(), [&](InternTable::UnorderedSet& interns)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (const GcRoot<mirror::String>& str : interns) {
+ seen.insert(str.Read()->ToModifiedUtf8());
+ }
+ });
+ // Normal methods
+ EXPECT_TRUE(seen.find("Loading ") != seen.end());
+ EXPECT_TRUE(seen.find("Starting up") != seen.end());
+ EXPECT_TRUE(seen.find("abcd.apk") != seen.end());
+ EXPECT_TRUE(seen.find("Unexpected error") == seen.end());
+ EXPECT_TRUE(seen.find("Shutting down!") == seen.end());
+ // Class initializers
+ EXPECT_TRUE(seen.find("Startup init") != seen.end());
+ EXPECT_TRUE(seen.find("Other class init") == seen.end());
+ }
+}
+
TEST_F(Dex2oatClassLoaderContextTest, StoredClassLoaderContext) {
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
const std::string out_dir = GetScratchDir();
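The image_writer.cc changes below replace raw image offsets with tagged base/offset pairs, where a low bit on the base value marks DexCache native references (see the Has/Set/ClearDexCacheNativeRefTag calls in the hunks that follow). A minimal sketch of that kind of low-bit tagging, assuming the tagged values are at least 2-byte aligned so bit 0 is free:

    #include <cstdint>

    constexpr uintptr_t kNativeRefTag = 1u;  // bit 0 carries the flag

    constexpr uintptr_t SetTag(uintptr_t value)   { return value | kNativeRefTag; }
    constexpr uintptr_t ClearTag(uintptr_t value) { return value & ~kNativeRefTag; }
    constexpr bool HasTag(uintptr_t value)        { return (value & kNativeRefTag) != 0u; }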
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 6410c7a6d9..60a4a32e7e 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -27,7 +27,6 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
-#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/globals.h"
@@ -88,14 +87,6 @@ using ::art::mirror::String;
namespace art {
namespace linker {
-static inline size_t RelocationIndex(size_t relocation_offset, PointerSize target_ptr_size) {
- static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
- "Expecting heap GC roots and references to have the same size.");
- DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(target_ptr_size));
- DCHECK_ALIGNED(relocation_offset, sizeof(GcRoot<mirror::Object>));
- return relocation_offset / sizeof(GcRoot<mirror::Object>);
-}
-
static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
ImageHeader::StorageMode image_storage_mode,
/*out*/ std::vector<uint8_t>* storage) {
@@ -233,7 +224,7 @@ bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
// Used to store information that will later be used to calculate image
// offsets to string references in the AppImage.
- std::vector<RefInfoPair> string_ref_info;
+ std::vector<HeapReferencePointerInfo> string_ref_info;
if (ClassLinker::kAppImageMayContainStrings && compile_app_image_) {
// Count the number of string fields so we can allocate the appropriate
// amount of space in the image section.
@@ -282,36 +273,31 @@ bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
TimingLogger::ScopedTiming t("AppImage:CalculateImageOffsets", timings);
ScopedObjectAccess soa(self);
- size_t managed_string_refs = 0,
- native_string_refs = 0;
+ size_t managed_string_refs = 0;
+ size_t native_string_refs = 0;
/*
* Iterate over the string reference info and calculate image offsets.
- * The first element of the pair is the object the reference belongs to
- * and the second element is the offset to the field. If the offset has
- * a native ref tag 1) the object is a DexCache and 2) the offset needs
- * to be calculated using the relocation information for the DexCache's
- * strings array.
+ * The first element of the pair is either the object the reference belongs
+ * to or the beginning of the native reference array it is located in. In
+ * the first case the second element is the offset of the field relative to
+ * the object's base address. In the second case, it is the index of the
+ * StringDexCacheType object in the array.
*/
- for (const RefInfoPair& ref_info : string_ref_info) {
- uint32_t image_offset;
+ for (const HeapReferencePointerInfo& ref_info : string_ref_info) {
+ uint32_t base_offset;
- if (HasNativeRefTag(ref_info.second)) {
+ if (HasDexCacheNativeRefTag(ref_info.first)) {
++native_string_refs;
-
- // Only DexCaches can contain native references to Java strings.
- ObjPtr<mirror::DexCache> dex_cache(ref_info.first->AsDexCache());
-
- // No need to set or clear native ref tags. The existing tag will be
- // carried forward.
- image_offset = native_object_relocations_[dex_cache->GetStrings()].offset +
- ref_info.second;
+ auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTag(
+ ref_info.first));
+ base_offset = SetDexCacheNativeRefTag(GetImageOffset(obj_ptr));
} else {
++managed_string_refs;
- image_offset = GetImageOffset(ref_info.first) + ref_info.second;
+ base_offset = GetImageOffset(reinterpret_cast<mirror::Object*>(ref_info.first));
}
- string_reference_offsets_.push_back(image_offset);
+ string_reference_offsets_.emplace_back(base_offset, ref_info.second);
}
CHECK_EQ(image_infos_.back().num_string_references_,
@@ -325,18 +311,16 @@ bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
// This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
// bin size sums being calculated.
TimingLogger::ScopedTiming t("AllocMemory", timings);
- if (!AllocMemory()) {
- return false;
- }
-
- return true;
+ return AllocMemory();
}
class ImageWriter::CollectStringReferenceVisitor {
public:
explicit CollectStringReferenceVisitor(const ImageWriter& image_writer)
- : dex_cache_string_ref_counter_(0),
- image_writer_(image_writer) {}
+ : image_writer_(image_writer),
+ curr_obj_(nullptr),
+ string_ref_info_(0),
+ dex_cache_string_ref_counter_(0) {}
// Used to prevent repeated null checks in the code that calls the visitor.
ALWAYS_INLINE
@@ -362,7 +346,7 @@ class ImageWriter::CollectStringReferenceVisitor {
}
}
- // Collects info for Java fields that reference Java Strings.
+ // Collects info for managed fields that reference managed Strings.
ALWAYS_INLINE
void operator() (ObjPtr<mirror::Object> obj,
MemberOffset member_offset,
@@ -373,7 +357,8 @@ class ImageWriter::CollectStringReferenceVisitor {
member_offset);
if (image_writer_.IsValidAppImageStringReference(referred_obj)) {
- string_ref_info_.emplace_back(obj.Ptr(), member_offset.Uint32Value());
+ string_ref_info_.emplace_back(reinterpret_cast<uintptr_t>(obj.Ptr()),
+ member_offset.Uint32Value());
}
}
@@ -384,84 +369,104 @@ class ImageWriter::CollectStringReferenceVisitor {
operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
}
+ void AddStringRefInfo(uint32_t first, uint32_t second) {
+ string_ref_info_.emplace_back(first, second);
+ }
+
+ std::vector<HeapReferencePointerInfo>&& MoveRefInfo() {
+ return std::move(string_ref_info_);
+ }
+
// Used by the wrapper function to obtain a native reference count.
- size_t GetDexCacheStringRefCounter() const {
+ size_t GetDexCacheStringRefCount() const {
return dex_cache_string_ref_counter_;
}
- // Resets the native reference count.
- void ResetDexCacheStringRefCounter() {
+ void SetObject(ObjPtr<mirror::Object> obj) {
+ curr_obj_ = obj;
dex_cache_string_ref_counter_ = 0;
}
- ObjPtr<mirror::Object> curr_obj_;
- mutable std::vector<RefInfoPair> string_ref_info_;
-
private:
- mutable size_t dex_cache_string_ref_counter_;
const ImageWriter& image_writer_;
+ ObjPtr<mirror::Object> curr_obj_;
+ mutable std::vector<HeapReferencePointerInfo> string_ref_info_;
+ mutable size_t dex_cache_string_ref_counter_;
};
-std::vector<ImageWriter::RefInfoPair> ImageWriter::CollectStringReferenceInfo() const
+std::vector<ImageWriter::HeapReferencePointerInfo> ImageWriter::CollectStringReferenceInfo() const
REQUIRES_SHARED(Locks::mutator_lock_) {
gc::Heap* const heap = Runtime::Current()->GetHeap();
CollectStringReferenceVisitor visitor(*this);
+ /*
+ * References to managed strings can occur either in the managed heap or in
+ * native memory regions. Information about managed references is collected
+ * by the CollectStringReferenceVisitor and directly added to the internal
+ * info vector.
+ *
+ * Native references to managed strings can only occur through DexCache
+ * objects, which is verified by VerifyNativeGCRootInvariants(). Because
+ * these native references are wrapped in std::atomic objects,
+ * VisitReferences() can't hand the visitor the address of the reference
+ * itself; it loads the reference into a temporary variable and passes the
+ * address of that temporary to the visitor. As a consequence, the visitor
+ * can't uniquely identify the location of the string reference.
+ *
+ * Due to these limitations, the visitor will only count the number of
+ * managed strings reachable through the native references of a DexCache
+ * object. If there are any such strings, this function will then iterate
+ * over the native references, test the string for membership in the
+ * AppImage, and add the tagged DexCache pointer and string array offset to
+ * the info vector if necessary.
+ */
heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsInBootImage(object.Ptr())) {
- // Many native GC roots are wrapped in std::atomics. Due to the
- // semantics of atomic objects we can't actually visit the addresses of
- // the native GC roots. Instead the visiting functions will pass the
- // visitor the address of a temporary copy of the native GC root and, if
- // it is changed, copy it back into the original location.
- //
- // This analysis requires the actual address of the native GC root so
- // we will only count them in the visitor and then collect them manually
- // afterwards. This count will then be used to verify that we collected
- // all native GC roots.
- visitor.curr_obj_ = object;
- if (object->IsDexCache()) {
- object->VisitReferences</* kVisitNativeRoots */ true,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
-
- ObjPtr<mirror::DexCache> dex_cache = object->AsDexCache();
- size_t new_native_ref_counter = 0;
-
- for (size_t string_index = 0; string_index < dex_cache->NumStrings(); ++string_index) {
- mirror::StringDexCachePair dc_pair = dex_cache->GetStrings()[string_index].load();
- mirror::Object* referred_obj = dc_pair.object.AddressWithoutBarrier()->AsMirrorPtr();
-
- if (IsValidAppImageStringReference(referred_obj)) {
- ++new_native_ref_counter;
-
- uint32_t string_vector_offset =
- (string_index * sizeof(mirror::StringDexCachePair)) +
- offsetof(mirror::StringDexCachePair, object);
+ visitor.SetObject(object);
- visitor.string_ref_info_.emplace_back(object.Ptr(),
- SetNativeRefTag(string_vector_offset));
+ if (object->IsDexCache()) {
+ object->VisitReferences</* kVisitNativeRoots= */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+
+ if (visitor.GetDexCacheStringRefCount() > 0) {
+ size_t string_info_collected = 0;
+
+ ObjPtr<mirror::DexCache> dex_cache = object->AsDexCache();
+ DCHECK_LE(visitor.GetDexCacheStringRefCount(), dex_cache->NumStrings());
+
+ for (uint32_t index = 0; index < dex_cache->NumStrings(); ++index) {
+ // GetResolvedString() can't be used here due to the circular
+ // nature of the cache and the collision detection this requires.
+ ObjPtr<mirror::String> referred_string =
+ dex_cache->GetStrings()[index].load().object.Read();
+
+ if (IsValidAppImageStringReference(referred_string)) {
+ ++string_info_collected;
+ visitor.AddStringRefInfo(
+ SetDexCacheNativeRefTag(reinterpret_cast<uintptr_t>(object.Ptr())), index);
+ }
}
- }
- CHECK_EQ(visitor.GetDexCacheStringRefCounter(), new_native_ref_counter);
+ DCHECK_EQ(string_info_collected, visitor.GetDexCacheStringRefCount());
+ }
} else {
- object->VisitReferences</* kVisitNativeRoots */ false,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ object->VisitReferences</* kVisitNativeRoots= */ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
}
-
- visitor.ResetDexCacheStringRefCounter();
}
});
- return std::move(visitor.string_ref_info_);
+ return visitor.MoveRefInfo();
}
class ImageWriter::NativeGCRootInvariantVisitor {
public:
explicit NativeGCRootInvariantVisitor(const ImageWriter& image_writer) :
+ curr_obj_(nullptr), class_violation_(false), class_loader_violation_(false),
image_writer_(image_writer) {}
ALWAYS_INLINE
@@ -478,12 +483,12 @@ class ImageWriter::NativeGCRootInvariantVisitor {
ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
if (curr_obj_->IsClass()) {
- class_violation = class_violation ||
- image_writer_.IsValidAppImageStringReference(referred_obj);
+ class_violation_ = class_violation_ ||
+ image_writer_.IsValidAppImageStringReference(referred_obj);
} else if (curr_obj_->IsClassLoader()) {
- class_loader_violation = class_loader_violation ||
- image_writer_.IsValidAppImageStringReference(referred_obj);
+ class_loader_violation_ = class_loader_violation_ ||
+ image_writer_.IsValidAppImageStringReference(referred_obj);
} else if (!curr_obj_->IsDexCache()) {
LOG(FATAL) << "Dex2Oat:AppImage | " <<
@@ -504,12 +509,12 @@ class ImageWriter::NativeGCRootInvariantVisitor {
// Returns true iff the only reachable native string references are through DexCache objects.
bool InvariantsHold() const {
- return !(class_violation || class_loader_violation);
+ return !(class_violation_ || class_loader_violation_);
}
ObjPtr<mirror::Object> curr_obj_;
- mutable bool class_violation = false,
- class_loader_violation = false;
+ mutable bool class_violation_;
+ mutable bool class_loader_violation_;
private:
const ImageWriter& image_writer_;
@@ -525,9 +530,9 @@ void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mu
visitor.curr_obj_ = object;
if (!IsInBootImage(object.Ptr())) {
- object->VisitReferences</* kVisitNativeRefernces */ true,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ object->VisitReferences</* kVisitNativeReferences= */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
}
});
@@ -538,12 +543,12 @@ void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mu
* Build the error string
*/
- if (UNLIKELY(visitor.class_violation)) {
+ if (UNLIKELY(visitor.class_violation_)) {
error_str << "Class";
error = true;
}
- if (UNLIKELY(visitor.class_loader_violation)) {
+ if (UNLIKELY(visitor.class_loader_violation_)) {
if (error) {
error_str << ", ";
}
@@ -552,8 +557,8 @@ void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mu
}
CHECK(visitor.InvariantsHold()) <<
- "Native GC root invariant failure. String refs reachable through the following objects: " <<
- error_str.str();
+ "Native GC root invariant failure. String ref invariants don't hold for the following " <<
+ "object types: " << error_str.str();
}
void ImageWriter::CopyMetadata() {
@@ -563,7 +568,7 @@ void ImageWriter::CopyMetadata() {
const ImageInfo& image_info = image_infos_.back();
std::vector<ImageSection> image_sections = image_info.CreateImageSections().second;
- uint32_t* sfo_section_base = reinterpret_cast<uint32_t*>(
+ auto* sfo_section_base = reinterpret_cast<AppImageReferenceOffsetInfo*>(
image_info.image_.Begin() +
image_sections[ImageHeader::kSectionStringReferenceOffsets].Offset());
@@ -672,22 +677,6 @@ bool ImageWriter::Write(int image_fd,
return false;
}
- // Write out relocations.
- size_t relocations_position_in_file = bitmap_position_in_file + bitmap_section.Size();
- ArrayRef<const uint8_t> relocations = MaybeCompressData(
- ArrayRef<const uint8_t>(image_info.relocation_bitmap_),
- image_storage_mode_,
- &compressed_data);
- image_header->sections_[ImageHeader::kSectionImageRelocations] =
- ImageSection(bitmap_section.Offset() + bitmap_section.Size(), relocations.size());
- if (!image_file->PwriteFully(relocations.data(),
- relocations.size(),
- relocations_position_in_file)) {
- PLOG(ERROR) << "Failed to write image file relocations " << image_filename;
- image_file->Erase();
- return false;
- }
-
int err = image_file->Flush();
if (err < 0) {
PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err;
@@ -708,9 +697,7 @@ bool ImageWriter::Write(int image_fd,
}
if (VLOG_IS_ON(compiler)) {
- size_t separately_written_section_size = bitmap_section.Size() +
- image_header->GetImageRelocationsSection().Size() +
- sizeof(ImageHeader);
+ size_t separately_written_section_size = bitmap_section.Size() + sizeof(ImageHeader);
size_t total_uncompressed_size = raw_image_data.size() + separately_written_section_size,
total_compressed_size = image_data.size() + separately_written_section_size;
@@ -721,7 +708,7 @@ bool ImageWriter::Write(int image_fd,
}
}
- CHECK_EQ(relocations_position_in_file + relocations.size(),
+ CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
static_cast<size_t>(image_file->GetLength()));
if (image_file->FlushCloseOrErase() != 0) {
@@ -2342,9 +2329,15 @@ std::pair<size_t, std::vector<ImageSection>> ImageWriter::ImageInfo::CreateImage
// Round up to the alignment of the offsets we are going to store.
cur_pos = RoundUp(class_table_section.End(), sizeof(uint32_t));
+ // The size of string_reference_offsets_ can't be used here because it hasn't
+ // been filled with AppImageReferenceOffsetInfo objects yet. The
+ // num_string_references_ value is calculated separately, before we can
+ // compute the actual offsets.
const ImageSection& string_reference_offsets =
sections[ImageHeader::kSectionStringReferenceOffsets] =
- ImageSection(cur_pos, sizeof(uint32_t) * num_string_references_);
+ ImageSection(cur_pos,
+ sizeof(typename decltype(string_reference_offsets_)::value_type) *
+ num_string_references_);
// Return the number of bytes described by these sections, and the sections
// themselves.
@@ -2366,8 +2359,6 @@ void ImageWriter::CreateHeader(size_t oat_index) {
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
*bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
- // The relocations section shall be finished later as we do not know its actual size yet.
-
if (VLOG_IS_ON(compiler)) {
LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
size_t idx = 0;
@@ -2394,7 +2385,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
// Create the header, leave 0 for data size since we will fill this in as we are writing the
// image.
- ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
+ new (image_info.image_.Begin()) ImageHeader(
PointerToLowMemUInt32(image_info.image_begin_),
image_end,
sections.data(),
@@ -2411,28 +2402,6 @@ void ImageWriter::CreateHeader(size_t oat_index) {
static_cast<uint32_t>(target_ptr_size_),
image_storage_mode_,
/*data_size*/0u);
-
- // Resize relocation bitmap for recording reference/pointer relocations.
- size_t number_of_relocation_locations = RelocationIndex(image_end, target_ptr_size_);
- DCHECK(image_info.relocation_bitmap_.empty());
- image_info.relocation_bitmap_.resize(
- BitsToBytesRoundUp(number_of_relocation_locations * (compile_app_image_ ? 2u : 1u)));
- // Record header relocations.
- RecordImageRelocation(&header->image_begin_, oat_index);
- RecordImageRelocation(&header->oat_file_begin_, oat_index);
- RecordImageRelocation(&header->oat_data_begin_, oat_index);
- RecordImageRelocation(&header->oat_data_end_, oat_index);
- RecordImageRelocation(&header->oat_file_end_, oat_index);
- if (compile_app_image_) {
- RecordImageRelocation(&header->boot_image_begin_, oat_index, /* app_to_boot_image */ true);
- RecordImageRelocation(&header->boot_oat_begin_, oat_index, /* app_to_boot_image */ true);
- } else {
- DCHECK_EQ(header->boot_image_begin_, 0u);
- DCHECK_EQ(header->boot_oat_begin_, 0u);
- }
- RecordImageRelocation(&header->image_roots_, oat_index);
- // Skip non-null check for `patch_delta_` as it is actually 0 but still needs to be recorded.
- RecordImageRelocation</* kCheckNotNull */ false>(&header->patch_delta_, oat_index);
}
ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
@@ -2492,28 +2461,23 @@ class ImageWriter::FixupRootVisitor : public RootVisitor {
ImageWriter* const image_writer_;
};
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index) {
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
for (size_t i = 0; i < ImTable::kSize; ++i) {
ArtMethod* method = orig->Get(i, target_ptr_size_);
void** address = reinterpret_cast<void**>(copy->AddressOfElement(i, target_ptr_size_));
- CopyAndFixupPointer(address, method, oat_index);
+ CopyAndFixupPointer(address, method);
DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method));
}
}
-void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig,
- ImtConflictTable* copy,
- size_t oat_index) {
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
const size_t count = orig->NumEntries(target_ptr_size_);
for (size_t i = 0; i < count; ++i) {
ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
- CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_),
- interface_method,
- oat_index);
- CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_),
- implementation_method,
- oat_index);
+ CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method);
+ CopyAndFixupPointer(
+ copy->AddressOfImplementationMethod(i, target_ptr_size_), implementation_method);
DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_),
NativeLocationInImage(interface_method));
DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_),
@@ -2538,8 +2502,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
memcpy(dest, pair.first, sizeof(ArtField));
CopyAndFixupReference(
reinterpret_cast<ArtField*>(dest)->GetDeclaringClassAddressWithoutBarrier(),
- reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass(),
- oat_index);
+ reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass());
break;
}
case NativeObjectRelocationType::kRuntimeMethod:
@@ -2572,15 +2535,14 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case NativeObjectRelocationType::kIMTable: {
ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
- CopyAndFixupImTable(orig_imt, dest_imt, oat_index);
+ CopyAndFixupImTable(orig_imt, dest_imt);
break;
}
case NativeObjectRelocationType::kIMTConflictTable: {
auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
CopyAndFixupImtConflictTable(
orig_table,
- new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_),
- oat_index);
+ new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
break;
}
}
@@ -2590,10 +2552,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
- CopyAndFixupPointer(reinterpret_cast<void**>(&image_header->image_methods_[i]),
- method,
- oat_index,
- PointerSize::k32);
+ CopyAndFixupPointer(
+ reinterpret_cast<void**>(&image_header->image_methods_[i]), method, PointerSize::k32);
}
FixupRootVisitor root_visitor(this);
@@ -2618,9 +2578,6 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty()); // Inserted at the beginning.
- for (const GcRoot<mirror::String>& slot : temp_intern_table.strong_interns_.tables_[0]) {
- RecordImageRelocation(&slot, oat_index);
- }
}
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
@@ -2649,9 +2606,6 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
ReaderMutexLock lock(self, temp_class_table.lock_);
DCHECK(!temp_class_table.classes_.empty());
DCHECK(!temp_class_table.classes_[0].empty()); // The ClassSet was inserted at the beginning.
- for (const ClassTable::TableSlot& slot : temp_class_table.classes_[0]) {
- RecordImageRelocation(&slot, oat_index);
- }
}
}
@@ -2668,15 +2622,13 @@ void ImageWriter::CopyAndFixupObjects() {
void ImageWriter::FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
- Bin array_type,
- size_t oat_index) {
+ Bin array_type) {
CHECK(klass->IsArrayClass());
CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
// Fixup int and long pointers for the ArtMethod or ArtField arrays.
const size_t num_elements = arr->GetLength();
- CopyAndFixupReference(dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()),
- arr->GetClass(),
- oat_index);
+ CopyAndFixupReference(
+ dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()), arr->GetClass());
auto* dest_array = down_cast<mirror::PointerArray*>(dst);
for (size_t i = 0, count = num_elements; i < count; ++i) {
void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
@@ -2698,7 +2650,7 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst,
UNREACHABLE();
}
}
- CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem, oat_index);
+ CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
}
}
@@ -2729,14 +2681,14 @@ void ImageWriter::CopyAndFixupObject(Object* obj) {
// safe since we mark all of the objects that may reference non immune objects as gray.
CHECK(dst->AtomicSetMarkBit(0, 1));
}
- FixupObject(obj, dst, oat_index);
+ FixupObject(obj, dst);
}
// Rewrite all the references in the copied object to point to their image address equivalent
class ImageWriter::FixupVisitor {
public:
- FixupVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
- : image_writer_(image_writer), copy_(copy), oat_index_(oat_index) {
+ FixupVisitor(ImageWriter* image_writer, Object* copy)
+ : image_writer_(image_writer), copy_(copy) {
}
// Ignore class roots since we don't have a way to map them to the destination. These are handled
@@ -2751,9 +2703,7 @@ class ImageWriter::FixupVisitor {
ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Copy the reference and record the fixup if necessary.
image_writer_->CopyAndFixupReference(
- copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset),
- ref.Ptr(),
- oat_index_);
+ copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset), ref);
}
// java.lang.ref.Reference visitor.
@@ -2766,13 +2716,12 @@ class ImageWriter::FixupVisitor {
protected:
ImageWriter* const image_writer_;
mirror::Object* const copy_;
- size_t oat_index_;
};
class ImageWriter::FixupClassVisitor final : public FixupVisitor {
public:
- FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
- : FixupVisitor(image_writer, copy, oat_index) {}
+ FixupClassVisitor(ImageWriter* image_writer, Object* copy)
+ : FixupVisitor(image_writer, copy) {}
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2828,14 +2777,13 @@ T* ImageWriter::NativeCopyLocation(T* obj) {
class ImageWriter::NativeLocationVisitor {
public:
- NativeLocationVisitor(ImageWriter* image_writer, size_t oat_index)
- : image_writer_(image_writer),
- oat_index_(oat_index) {}
+ explicit NativeLocationVisitor(ImageWriter* image_writer)
+ : image_writer_(image_writer) {}
template <typename T>
T* operator()(T* ptr, void** dest_addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (ptr != nullptr) {
- image_writer_->CopyAndFixupPointer(dest_addr, ptr, oat_index_);
+ image_writer_->CopyAndFixupPointer(dest_addr, ptr);
}
// TODO: The caller shall overwrite the value stored by CopyAndFixupPointer()
// with the value we return here. We should try to avoid the duplicate work.
@@ -2844,12 +2792,11 @@ class ImageWriter::NativeLocationVisitor {
private:
ImageWriter* const image_writer_;
- const size_t oat_index_;
};
-void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index) {
- orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this, oat_index));
- FixupClassVisitor visitor(this, copy, oat_index);
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
+ orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
+ FixupClassVisitor visitor(this, copy);
ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
if (kBitstringSubtypeCheckEnabled && compile_app_image_) {
@@ -2877,7 +2824,7 @@ void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oa
copy->SetClinitThreadId(static_cast<pid_t>(0));
}
-void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
+void ImageWriter::FixupObject(Object* orig, Object* copy) {
DCHECK(orig != nullptr);
DCHECK(copy != nullptr);
if (kUseBakerReadBarrier) {
@@ -2889,13 +2836,13 @@ void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
if (it != pointer_arrays_.end()) {
// Should only need to fixup every pointer array exactly once.
- FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second, oat_index);
+ FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
pointer_arrays_.erase(it);
return;
}
}
if (orig->IsClass()) {
- FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy), oat_index);
+ FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
} else {
ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
Runtime::Current()->GetClassLinker()->GetClassRoots();
@@ -2905,11 +2852,9 @@ void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
auto* dest = down_cast<mirror::Executable*>(copy);
auto* src = down_cast<mirror::Executable*>(orig);
ArtMethod* src_method = src->GetArtMethod();
- CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method, oat_index);
+ CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method);
} else if (klass == GetClassRoot<mirror::DexCache>(class_roots)) {
- FixupDexCache(down_cast<mirror::DexCache*>(orig),
- down_cast<mirror::DexCache*>(copy),
- oat_index);
+ FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
} else if (klass->IsClassLoaderClass()) {
mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
// If src is a ClassLoader, set the class table to null so that it gets recreated by the
@@ -2920,7 +2865,7 @@ void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
// roots.
copy_loader->SetAllocator(nullptr);
}
- FixupVisitor visitor(this, copy, oat_index);
+ FixupVisitor visitor(this, copy);
orig->VisitReferences(visitor, visitor);
}
}
@@ -2928,8 +2873,7 @@ void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
template <typename T>
void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
std::atomic<mirror::DexCachePair<T>>* new_array,
- uint32_t array_index,
- size_t oat_index) {
+ uint32_t array_index) {
static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
"Size check for removing std::atomic<>.");
mirror::DexCachePair<T>* orig_pair =
@@ -2937,15 +2881,14 @@ void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>*
mirror::DexCachePair<T>* new_pair =
reinterpret_cast<mirror::DexCachePair<T>*>(&new_array[array_index]);
CopyAndFixupReference(
- new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read(), oat_index);
+ new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read());
new_pair->index = orig_pair->index;
}
template <typename T>
void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
std::atomic<mirror::NativeDexCachePair<T>>* new_array,
- uint32_t array_index,
- size_t oat_index) {
+ uint32_t array_index) {
static_assert(
sizeof(std::atomic<mirror::NativeDexCachePair<T>>) == sizeof(mirror::NativeDexCachePair<T>),
"Size check for removing std::atomic<>.");
@@ -2956,9 +2899,8 @@ void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair
reinterpret_cast<DexCache::ConversionPair64*>(new_array) + array_index;
*new_pair = *orig_pair; // Copy original value and index.
if (orig_pair->first != 0u) {
- CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
- reinterpret_cast64<void*>(orig_pair->first),
- oat_index);
+ CopyAndFixupPointer(
+ reinterpret_cast<void**>(&new_pair->first), reinterpret_cast64<void*>(orig_pair->first));
}
} else {
DexCache::ConversionPair32* orig_pair =
@@ -2967,26 +2909,22 @@ void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair
reinterpret_cast<DexCache::ConversionPair32*>(new_array) + array_index;
*new_pair = *orig_pair; // Copy original value and index.
if (orig_pair->first != 0u) {
- CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
- reinterpret_cast32<void*>(orig_pair->first),
- oat_index);
+ CopyAndFixupPointer(
+ reinterpret_cast<void**>(&new_pair->first), reinterpret_cast32<void*>(orig_pair->first));
}
}
}
void ImageWriter::FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
GcRoot<mirror::CallSite>* new_array,
- uint32_t array_index,
- size_t oat_index) {
- CopyAndFixupReference(new_array[array_index].AddressWithoutBarrier(),
- orig_array[array_index].Read(),
- oat_index);
+ uint32_t array_index) {
+ CopyAndFixupReference(
+ new_array[array_index].AddressWithoutBarrier(), orig_array[array_index].Read());
}
template <typename EntryType>
void ImageWriter::FixupDexCacheArray(DexCache* orig_dex_cache,
DexCache* copy_dex_cache,
- size_t oat_index,
MemberOffset array_offset,
uint32_t size) {
EntryType* orig_array = orig_dex_cache->GetFieldPtr64<EntryType*>(array_offset);
@@ -2994,45 +2932,37 @@ void ImageWriter::FixupDexCacheArray(DexCache* orig_dex_cache,
if (orig_array != nullptr) {
// Though the DexCache array fields are usually treated as native pointers, we clear
// the top 32 bits for 32-bit targets.
- CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, oat_index, PointerSize::k64);
+ CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, PointerSize::k64);
EntryType* new_array = NativeCopyLocation(orig_array);
for (uint32_t i = 0; i != size; ++i) {
- FixupDexCacheArrayEntry(orig_array, new_array, i, oat_index);
+ FixupDexCacheArrayEntry(orig_array, new_array, i);
}
}
}
-void ImageWriter::FixupDexCache(DexCache* orig_dex_cache,
- DexCache* copy_dex_cache,
- size_t oat_index) {
+void ImageWriter::FixupDexCache(DexCache* orig_dex_cache, DexCache* copy_dex_cache) {
FixupDexCacheArray<mirror::StringDexCacheType>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::StringsOffset(),
orig_dex_cache->NumStrings());
FixupDexCacheArray<mirror::TypeDexCacheType>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::ResolvedTypesOffset(),
orig_dex_cache->NumResolvedTypes());
FixupDexCacheArray<mirror::MethodDexCacheType>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::ResolvedMethodsOffset(),
orig_dex_cache->NumResolvedMethods());
FixupDexCacheArray<mirror::FieldDexCacheType>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::ResolvedFieldsOffset(),
orig_dex_cache->NumResolvedFields());
FixupDexCacheArray<mirror::MethodTypeDexCacheType>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::ResolvedMethodTypesOffset(),
orig_dex_cache->NumResolvedMethodTypes());
FixupDexCacheArray<GcRoot<mirror::CallSite>>(orig_dex_cache,
copy_dex_cache,
- oat_index,
DexCache::ResolvedCallSitesOffset(),
orig_dex_cache->NumResolvedCallSites());
@@ -3141,9 +3071,8 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
- CopyAndFixupReference(copy->GetDeclaringClassAddressWithoutBarrier(),
- orig->GetDeclaringClassUnchecked(),
- oat_index);
+ CopyAndFixupReference(
+ copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
@@ -3156,7 +3085,7 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
if (orig_table != nullptr) {
// Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
quick_code = GetOatAddress(StubType::kQuickIMTConflictTrampoline);
- CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table, oat_index);
+ CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table);
} else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
} else {
@@ -3190,9 +3119,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
// Note this is not the code_ pointer, that is handled above.
copy->SetEntryPointFromJniPtrSize(
GetOatAddress(StubType::kJNIDlsymLookup), target_ptr_size_);
- MemberOffset offset = ArtMethod::EntryPointFromJniOffset(target_ptr_size_);
- const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
- RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ compile_app_image_);
} else {
CHECK(copy->GetDataPtrSize(target_ptr_size_) == nullptr);
}
@@ -3200,9 +3126,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
}
if (quick_code != nullptr) {
copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
- MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(target_ptr_size_);
- const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
- RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootOatFile(quick_code));
}
}
@@ -3366,55 +3289,15 @@ ImageWriter::ImageInfo::ImageInfo()
: intern_table_(new InternTable),
class_table_(new ClassTable) {}
-template <bool kCheckNotNull /* = true */>
-void ImageWriter::RecordImageRelocation(const void* dest,
- size_t oat_index,
- bool app_to_boot_image /* = false */) {
- // Check that we're not recording a relocation for null.
- if (kCheckNotNull) {
- DCHECK(reinterpret_cast<const uint32_t*>(dest)[0] != 0u);
- }
- // Calculate the offset within the image.
- ImageInfo* image_info = &image_infos_[oat_index];
- DCHECK(image_info->image_.HasAddress(dest))
- << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
- << "-" << static_cast<const void*>(image_info->image_.End())
- << " does not contain " << dest;
- size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
- size_t image_end = image_header->GetClassTableSection().End();
- DCHECK_LT(offset, image_end);
- // Calculate the location index.
- size_t size = RelocationIndex(image_end, target_ptr_size_);
- size_t index = RelocationIndex(offset, target_ptr_size_);
- if (app_to_boot_image) {
- index += size;
- }
- // Mark the location in the bitmap.
- DCHECK(compile_app_image_ || !app_to_boot_image);
- MemoryRegion region(image_info->relocation_bitmap_.data(), image_info->relocation_bitmap_.size());
- BitMemoryRegion bit_region(region, /* bit_offset */ 0u, compile_app_image_ ? 2u * size : size);
- DCHECK(!bit_region.LoadBit(index));
- bit_region.StoreBit(index, /* value*/ true);
-}
-
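The helper deleted above packed its bookkeeping into ImageInfo::relocation_bitmap_: one bit per pointer-size-aligned location, with app images doubling the bitmap so that relocations pointing into the boot image land in the second half. A minimal sketch of that indexing, assuming RelocationIndex() simply divides an offset by the pointer size:

    // Sketch only; RelocationIndex() is assumed here to be offset / pointer_size.
    size_t size  = image_end / static_cast<size_t>(target_ptr_size_);  // bits per half
    size_t index = offset / static_cast<size_t>(target_ptr_size_);
    if (app_to_boot_image) {
      index += size;  // Second half: references out into the boot image.
    }
    bit_region.StoreBit(index, /*value=*/ true);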
template <typename DestType>
-void ImageWriter::CopyAndFixupReference(DestType* dest,
- ObjPtr<mirror::Object> src,
- size_t oat_index) {
+void ImageWriter::CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src) {
static_assert(std::is_same<DestType, mirror::CompressedReference<mirror::Object>>::value ||
std::is_same<DestType, mirror::HeapReference<mirror::Object>>::value,
"DestType must be a Compressed-/HeapReference<Object>.");
dest->Assign(GetImageAddress(src.Ptr()));
- if (src != nullptr) {
- RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootImage(src.Ptr()));
- }
}
-void ImageWriter::CopyAndFixupPointer(void** target,
- void* value,
- size_t oat_index,
- PointerSize pointer_size) {
+void ImageWriter::CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size) {
void* new_value = NativeLocationInImage(value);
if (pointer_size == PointerSize::k32) {
*reinterpret_cast<uint32_t*>(target) = reinterpret_cast32<uint32_t>(new_value);
@@ -3422,24 +3305,22 @@ void ImageWriter::CopyAndFixupPointer(void** target,
*reinterpret_cast<uint64_t*>(target) = reinterpret_cast64<uint64_t>(new_value);
}
DCHECK(value != nullptr);
- RecordImageRelocation(target, oat_index, /* app_to_boot_image */ IsInBootImage(value));
}
-void ImageWriter::CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+void ImageWriter::CopyAndFixupPointer(void** target, void* value)
REQUIRES_SHARED(Locks::mutator_lock_) {
- CopyAndFixupPointer(target, value, oat_index, target_ptr_size_);
+ CopyAndFixupPointer(target, value, target_ptr_size_);
}
void ImageWriter::CopyAndFixupPointer(
- void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size) {
+ void* object, MemberOffset offset, void* value, PointerSize pointer_size) {
void** target =
reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(object) + offset.Uint32Value());
- return CopyAndFixupPointer(target, value, oat_index, pointer_size);
+ return CopyAndFixupPointer(target, value, pointer_size);
}
-void ImageWriter::CopyAndFixupPointer(
- void* object, MemberOffset offset, void* value, size_t oat_index) {
- return CopyAndFixupPointer(object, offset, value, oat_index, target_ptr_size_);
+void ImageWriter::CopyAndFixupPointer(void* object, MemberOffset offset, void* value) {
+ return CopyAndFixupPointer(object, offset, value, target_ptr_size_);
}
} // namespace linker
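Taken together, every CopyAndFixup*/Fixup* entry point in this file loses its oat_index parameter: with RecordImageRelocation() gone, GetImageAddress() and NativeLocationInImage() already determine the destination on their own, so the index carried nothing the writer still needs. The net effect at a typical call site:

    // Before: image_writer_->CopyAndFixupPointer(dest_addr, ptr, oat_index_);
    // After:  image_writer_->CopyAndFixupPointer(dest_addr, ptr);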
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 93e4be5558..e019a501a2 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -26,6 +26,7 @@
#include <set>
#include <stack>
#include <string>
+#include <unordered_map>
#include "art_method.h"
#include "base/bit_utils.h"
@@ -287,9 +288,8 @@ class ImageWriter final {
/*
* Creates ImageSection objects that describe most of the sections of a
- * boot or AppImage. The following sections are not included:
+ * boot or AppImage. The following sections are not included:
* - ImageHeader::kSectionImageBitmap
- * - ImageHeader::kSectionImageRelocations
*
* In addition, the ImageHeader is not covered here.
*
@@ -397,12 +397,6 @@ class ImageWriter final {
// Class table associated with this image for serialization.
std::unique_ptr<ClassTable> class_table_;
-
- // Relocations of references/pointers. For boot image, it contains one bit
- // for each location that can be relocated. For app image, it contains twice
- // that many bits, first half contains relocations within this image and the
- // second half contains relocations for references to the boot image.
- std::vector<uint8_t> relocation_bitmap_;
};
// We use the lock word to store the offset of the object in the image.
@@ -496,11 +490,9 @@ class ImageWriter final {
void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index)
+ void CopyAndFixupImTable(ImTable* orig, ImTable* copy)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupImtConflictTable(ImtConflictTable* orig,
- ImtConflictTable* copy,
- size_t oat_index)
+ void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
@@ -517,45 +509,37 @@ class ImageWriter final {
*/
void CopyMetadata();
- template <bool kCheckNotNull = true>
- void RecordImageRelocation(const void* dest, size_t oat_index, bool app_to_boot_image = false);
- void FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index)
+ void FixupClass(mirror::Class* orig, mirror::Class* copy)
REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupObject(mirror::Object* orig, mirror::Object* copy, size_t oat_index)
+ void FixupObject(mirror::Object* orig, mirror::Object* copy)
REQUIRES_SHARED(Locks::mutator_lock_);
template <typename T>
void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
std::atomic<mirror::DexCachePair<T>>* new_array,
- uint32_t array_index,
- size_t oat_index)
+ uint32_t array_index)
REQUIRES_SHARED(Locks::mutator_lock_);
template <typename T>
void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
std::atomic<mirror::NativeDexCachePair<T>>* new_array,
- uint32_t array_index,
- size_t oat_index)
+ uint32_t array_index)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
GcRoot<mirror::CallSite>* new_array,
- uint32_t array_index,
- size_t oat_index)
+ uint32_t array_index)
REQUIRES_SHARED(Locks::mutator_lock_);
template <typename EntryType>
void FixupDexCacheArray(mirror::DexCache* orig_dex_cache,
mirror::DexCache* copy_dex_cache,
- size_t oat_index,
MemberOffset array_offset,
uint32_t size)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCache(mirror::DexCache* orig_dex_cache,
- mirror::DexCache* copy_dex_cache,
- size_t oat_index)
+ mirror::DexCache* copy_dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
- Bin array_type,
- size_t oat_index)
+ Bin array_type)
REQUIRES_SHARED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
@@ -598,19 +582,27 @@ class ImageWriter final {
REQUIRES_SHARED(Locks::mutator_lock_);
/*
- * A pair containing the information necessary to calculate the position of a
- * managed object's field or native reference inside an AppImage.
+ * This type holds the information necessary for calculating
+ * AppImageReferenceOffsetInfo values after the object relocations have been
+ * computed.
*
- * The first element of this pair is a raw mirror::Object pointer because its
- * usage will cross a suspend point and ObjPtr would produce a false positive.
+ * The first element will always be a pointer to a managed object. If the
+ * pointer has been tagged (testable with HasDexCacheNativeRefTag), it
+ * indicates that the referenced object is a DexCache object that requires
+ * special handling during loading, and the second element has no meaningful
+ * value. If the pointer isn't tagged, then the second element is an
+ * object-relative offset to a field containing a string reference.
*
- * The second element is an offset either into the object or into the string
- * array of a DexCache object.
+ * Note that it is possible for an untagged DexCache pointer to occur in the
+ * first position if it has a managed reference that needs to be updated.
*
* TODO (chriswailes): Add a note indicating the source line where we ensure
* that no moving garbage collection will occur.
+ *
+ * TODO (chriswailes): Replace with std::variant once ART is building with
+ * C++17
*/
- typedef std::pair<mirror::Object*, uint32_t> RefInfoPair;
+ typedef std::pair<uintptr_t, uint32_t> HeapReferencePointerInfo;
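A hedged sketch of the tagging contract this comment describes; the real helpers (HasDexCacheNativeRefTag and friends) are defined elsewhere and may use a different bit:

    // Illustrative only: a low-bit tag on an otherwise aligned pointer.
    constexpr uintptr_t kNativeRefTag = 1u;

    inline uintptr_t TagDexCacheRef(mirror::DexCache* cache) {
      return reinterpret_cast<uintptr_t>(cache) | kNativeRefTag;
    }
    inline bool IsTaggedRef(uintptr_t first) {
      return (first & kNativeRefTag) != 0u;
    }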
/*
* Collects the info necessary for calculating image offsets to string field
@@ -630,23 +622,18 @@ class ImageWriter final {
* references that will be included in the AppImage. This allows us to both
* allocate enough memory for storing the offsets and correctly calculate the
* offsets of various objects into the image. Once the image offset
- * calculations are done for Java objects the reference object/offset pairs
+ * calculations are done for managed objects the reference object/offset pairs
* are translated to image offsets. The CopyMetadata function then copies
* these offsets into the image.
- *
- * A vector containing pairs of object pointers and offsets. The offsets are
- * tagged to indicate if the offset is for a field of a mirror object or a
- * native reference. If the offset is tagged as a native reference it must
- * have come from a DexCache's string array.
*/
- std::vector<RefInfoPair> CollectStringReferenceInfo() const
+ std::vector<HeapReferencePointerInfo> CollectStringReferenceInfo() const
REQUIRES_SHARED(Locks::mutator_lock_);
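As a sketch, the two-phase flow described above, in terms of names declared in this header:

    // 1. With the mutator lock held (so objects cannot move), gather raw records:
    std::vector<HeapReferencePointerInfo> refs = CollectStringReferenceInfo();
    // 2. Once image offsets are assigned, translate each record into an
    //    AppImageReferenceOffsetInfo and append it to string_reference_offsets_.
    // 3. CopyMetadata() then writes the finished offsets into the image.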
/*
* Ensures that assumptions about native GC roots and AppImages hold.
*
* This function verifies the following condition(s):
- * - Native references to Java strings are only reachable through DexCache
+ * - Native references to managed strings are only reachable through DexCache
* objects
*/
void VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
@@ -711,18 +698,18 @@ class ImageWriter final {
// Copy a reference, fixing it up to point into the target image.
template <typename DestType>
- void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src, size_t oat_index)
+ void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src)
REQUIRES_SHARED(Locks::mutator_lock_);
// Copy a native pointer, fixing it up to point into the target image.
- void CopyAndFixupPointer(void** target, void* value, size_t oat_index, PointerSize pointer_size)
+ void CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+ void CopyAndFixupPointer(void** target, void* value)
REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupPointer(
- void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size)
+ void* object, MemberOffset offset, void* value, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupPointer(void* object, MemberOffset offset, void* value, size_t oat_index)
+ void CopyAndFixupPointer(void* object, MemberOffset offset, void* value)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
@@ -789,7 +776,7 @@ class ImageWriter final {
mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
// Offsets into the image that indicate where string references are recorded.
- std::vector<uint32_t> string_reference_offsets_;
+ std::vector<AppImageReferenceOffsetInfo> string_reference_offsets_;
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
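AppImageReferenceOffsetInfo itself is defined outside this hunk (in image.h); it presumably widens each record from the bare uint32_t offset to a pair that also identifies where the reference lives. The line below is a guess at its shape, not the real definition:

    // Hypothetical; see image.h for the actual type.
    // using AppImageReferenceOffsetInfo = std::pair<uint32_t, uint32_t>;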
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index acd49d5b45..23c486d05b 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -26,6 +26,7 @@
#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "base/file_magic.h"
+#include "base/file_utils.h"
#include "base/indenter.h"
#include "base/logging.h" // For VLOG
#include "base/os.h"
@@ -3430,7 +3431,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
&error_msg);
} else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
- int dup_fd = dup(raw_file->Fd());
+ int dup_fd = DupCloexec(raw_file->Fd());
if (dup_fd < 0) {
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
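DupCloexec() comes from base/file_utils.h (newly included above) and duplicates the descriptor with close-on-exec set atomically, so the dex file descriptor is no longer inherited by anything dex2oat exec()s. On Linux it reduces to a single fcntl(), as the local copy in dt_fd_forward.cc later in this diff shows:

    // Linux path of DupCloexec(), per the copy in dt_fd_forward.cc below:
    int dup_fd = fcntl(raw_file->Fd(), F_DUPFD_CLOEXEC, /*lowest_fd=*/ 0);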
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index f764b42bc8..83fb17cf05 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -87,7 +88,7 @@ class OatTest : public CommonCompilerTest {
void SetupCompiler(const std::vector<std::string>& compiler_options) {
std::string error_msg;
if (!compiler_options_->ParseCompilerOptions(compiler_options,
- false /* ignore_unrecognized */,
+ /*ignore_unrecognized=*/ false,
&error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
@@ -176,7 +177,7 @@ class OatTest : public CommonCompilerTest {
oat_rodata,
&key_value_store,
verify,
- /* update_input_vdex */ false,
+ /*update_input_vdex=*/ false,
CopyOption::kOnlyIfCompressed,
&opened_dex_files_maps,
&opened_dex_files)) {
@@ -236,7 +237,7 @@ class OatTest : public CommonCompilerTest {
}
if (!oat_writer.WriteHeader(elf_writer->GetStream(),
- /* image_file_location_oat_checksum */ 42U)) {
+ /*image_file_location_oat_checksum=*/ 42U)) {
return false;
}
@@ -404,14 +405,13 @@ TEST_F(OatTest, WriteRead) {
if (kCompile) { // OatWriter strips the code, regenerate to compare
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ true,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ true,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
@@ -522,18 +522,17 @@ TEST_F(OatTest, EmptyTextSection) {
tmp_oat.GetFile(),
dex_files,
key_value_store,
- /* verify */ false);
+ /*verify=*/ false);
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat_file != nullptr);
EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -604,14 +603,13 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
+ /*executable=*/ false,
low_4gb,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
if (low_4gb) {
@@ -727,7 +725,7 @@ void OatTest::TestZipFileInput(bool verify) {
input_filenames,
key_value_store,
verify,
- /* profile_compilation_info */ nullptr);
+ /*profile_compilation_info=*/ nullptr);
if (verify) {
ASSERT_FALSE(success);
@@ -735,14 +733,13 @@ void OatTest::TestZipFileInput(bool verify) {
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -769,7 +766,7 @@ void OatTest::TestZipFileInput(bool verify) {
{
// Test using the AddZipDexFileSource() interface with the zip file handle.
- File zip_fd(dup(zip_file.GetFd()), /* check_usage */ false);
+ File zip_fd(DupCloexec(zip_file.GetFd()), /*check_usage=*/ false);
ASSERT_NE(-1, zip_fd.Fd());
ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
@@ -785,14 +782,13 @@ void OatTest::TestZipFileInput(bool verify) {
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -843,7 +839,7 @@ void OatTest::TestZipFileInputWithEmptyDex() {
oat_file.GetFile(),
input_filenames,
key_value_store,
- /* verify */ false,
+ /*verify=*/ false,
profile_compilation_info.get());
ASSERT_FALSE(success);
}
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 4a36744e97..27cec8d951 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -53,7 +53,7 @@ class Dumper {
bool OpenAndPrintHeader(size_t dex_index) {
// Open the file and emit the gnuplot prologue.
- out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "w");
+ out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "we");
if (out_file_ == nullptr) {
return false;
}
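The extra 'e' in the fopen() mode string is the glibc/bionic extension for O_CLOEXEC, so the gnuplot output stream is not leaked across exec(). Roughly:

    // "we" on glibc/bionic is approximately:
    //   open(path, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0666), wrapped in a FILE*.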
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index d3bfd144c3..47ef0a5cf3 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -68,14 +68,13 @@ class DexDiagTest : public CommonRuntimeTest {
EXPECT_TRUE(!oat_location.empty());
std::cout << "==" << oat_location << std::endl;
std::string error_msg;
- std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ false,
- /* abs_dex_location= */ nullptr,
- /* reservation= */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
EXPECT_TRUE(oat != nullptr) << error_msg;
return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index db6945fbaa..8905aa31c4 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1554,7 +1554,7 @@ void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
// Overwrite the existing vector with the new ordering; note that the sets of objects are
// equivalent, but the order changes. This is why this is not a memory leak.
// TODO: Consider cleaning this up with a shared_ptr.
- class_datas[class_data_index].release();
+ class_datas[class_data_index].release(); // NOLINT b/117926937
class_datas[class_data_index].reset(class_data);
++class_data_index;
}
@@ -1570,7 +1570,7 @@ void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
// Overwrite the existing vector with the new ordering; note that the sets of objects are
// equivalent, but the order changes. This is why this is not a memory leak.
// TODO: Consider cleaning this up with a shared_ptr.
- class_defs[i].release();
+ class_defs[i].release(); // NOLINT b/117926937
class_defs[i].reset(new_class_def_order[i]);
}
}
@@ -1671,7 +1671,7 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
// Now that we know what order we want the string data in, reorder them.
size_t data_index = 0;
for (dex_ir::StringId* string_id : string_ids) {
- string_datas[data_index].release();
+ string_datas[data_index].release(); // NOLINT b/117926937
string_datas[data_index].reset(string_id->DataItem());
++data_index;
}
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index d212e71f06..41b60da133 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -190,7 +190,7 @@ int DexlayoutDriver(int argc, char** argv) {
// Open profile file.
std::unique_ptr<ProfileCompilationInfo> profile_info;
if (options.profile_file_name_) {
- int profile_fd = open(options.profile_file_name_, O_RDONLY);
+ int profile_fd = open(options.profile_file_name_, O_RDONLY | O_CLOEXEC);
if (profile_fd < 0) {
PLOG(ERROR) << "Can't open " << options.profile_file_name_;
return 1;
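Requesting O_CLOEXEC at open() time sets the flag atomically, closing the window that the two-step alternative leaves open:

    // Racy alternative the flag avoids: another thread may fork()+exec()
    // between these two calls and inherit the descriptor.
    int profile_fd = open(options.profile_file_name_, O_RDONLY);
    fcntl(profile_fd, F_SETFD, FD_CLOEXEC);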
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index adb6a54d8a..7d3ae71d5f 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -257,7 +257,7 @@ int dexlistDriver(int argc, char** argv) {
// Open alternative output file.
if (gOptions.outputFileName) {
- gOutFile = fopen(gOptions.outputFileName, "w");
+ gOutFile = fopen(gOptions.outputFileName, "we");
if (!gOutFile) {
PLOG(ERROR) << "Can't open " << gOptions.outputFileName;
free(gOptions.argCopy);
diff --git a/dt_fd_forward/dt_fd_forward.cc b/dt_fd_forward/dt_fd_forward.cc
index 116cdf84ed..a99f7850c0 100644
--- a/dt_fd_forward/dt_fd_forward.cc
+++ b/dt_fd_forward/dt_fd_forward.cc
@@ -105,12 +105,21 @@ static void SendListenMessage(const android::base::unique_fd& fd) {
TEMP_FAILURE_RETRY(send(fd, kListenStartMessage, sizeof(kListenStartMessage), MSG_EOR));
}
+// Copied from file_utils so that we do not need to depend on libartbase.
+static int DupCloexec(int fd) {
+#if defined(__linux__)
+ return fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
+ return dup(fd);
+#endif
+}
+
jdwpTransportError FdForwardTransport::SetupListen(int listen_fd) {
std::lock_guard<std::mutex> lk(state_mutex_);
if (!ChangeState(TransportState::kClosed, TransportState::kListenSetup)) {
return ERR(ILLEGAL_STATE);
} else {
- listen_fd_.reset(dup(listen_fd));
+ listen_fd_.reset(DupCloexec(listen_fd));
SendListenMessage(listen_fd_);
CHECK(ChangeState(TransportState::kListenSetup, TransportState::kListening));
return OK;
@@ -339,7 +348,7 @@ IOResult FdForwardTransport::ReceiveFdsFromSocket(bool* do_handshake) {
write_lock_fd_.reset(out_fds.write_lock_fd_);
// We got the fds. Send ack.
- close_notify_fd_.reset(dup(listen_fd_));
+ close_notify_fd_.reset(DupCloexec(listen_fd_));
SendAcceptMessage(close_notify_fd_);
return IOResult::kOk;
diff --git a/libartbase/base/bit_string_test.cc b/libartbase/base/bit_string_test.cc
index 89a71a1894..45f4d4ec0d 100644
--- a/libartbase/base/bit_string_test.cc
+++ b/libartbase/base/bit_string_test.cc
@@ -110,17 +110,17 @@ TEST(InstanceOfBitString, ReadWrite) {
ASSERT_EQ(BitString::kCapacity, 3u);
EXPECT_BITSTRING_STR("BitString[]", bs);
- bs = SetBitStringCharAt(bs, /*i*/0, /*val*/1u);
+ bs = SetBitStringCharAt(bs, /*i=*/0, /*val=*/1u);
EXPECT_BITSTRING_STR("BitString[1]", bs);
- bs = SetBitStringCharAt(bs, /*i*/1, /*val*/2u);
+ bs = SetBitStringCharAt(bs, /*i=*/1, /*val=*/2u);
EXPECT_BITSTRING_STR("BitString[1,2]", bs);
- bs = SetBitStringCharAt(bs, /*i*/2, /*val*/3u);
+ bs = SetBitStringCharAt(bs, /*i=*/2, /*val=*/3u);
EXPECT_BITSTRING_STR("BitString[1,2,3]", bs);
// There should be at least "kCapacity" # of checks here, 1 for each unique position.
- EXPECT_EQ(MakeBitStringChar(/*idx*/0, /*val*/1u), bs[0]);
- EXPECT_EQ(MakeBitStringChar(/*idx*/1, /*val*/2u), bs[1]);
- EXPECT_EQ(MakeBitStringChar(/*idx*/2, /*val*/3u), bs[2]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/0, /*val=*/1u), bs[0]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/1, /*val=*/2u), bs[1]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/2, /*val=*/3u), bs[2]);
// Each maximal value should be tested here for each position.
uint32_t max_bitstring_ints[] = {
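This mechanical '/*name*/' to '/*name=*/' rewrite, repeated across the test files below, uses the spelling that clang-tidy's bugprone-argument-comment check understands: with the trailing '=', the tool verifies the comment against the actual parameter name.

    // A mismatched name such as /*j=*/0 would now be flagged:
    bs = SetBitStringCharAt(bs, /*i=*/0, /*val=*/1u);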
diff --git a/libartbase/base/bit_struct.h b/libartbase/base/bit_struct.h
index 9814fd4f3a..292eca0e0c 100644
--- a/libartbase/base/bit_struct.h
+++ b/libartbase/base/bit_struct.h
@@ -274,13 +274,13 @@ using BitStructUint =
// If a standard-layout union contains several standard-layout structs that share a common
// initial sequence ... it is permitted to inspect the common initial sequence of
// any of the standard-layout struct members.
-#define BITSTRUCT_DEFINE_START(name, bitwidth) \
- union name { \
- art::detail::DefineBitStructSize<(bitwidth)> _; \
- static constexpr size_t BitStructSizeOf() { return (bitwidth); } \
- name& operator=(const name& other) { _ = other._; return *this; } \
- name(const name& other) : _(other._) {} \
- name() = default; \
+#define BITSTRUCT_DEFINE_START(name, bitwidth) \
+ union name { /* NOLINT */ \
+ art::detail::DefineBitStructSize<(bitwidth)> _; \
+ static constexpr size_t BitStructSizeOf() { return (bitwidth); } \
+ name& operator=(const name& other) { _ = other._; return *this; } /* NOLINT */ \
+ name(const name& other) : _(other._) {} \
+ name() = default; \
~name() = default;
// End the definition of a bitstruct, and insert a sanity check
diff --git a/libartbase/base/bit_struct_test.cc b/libartbase/base/bit_struct_test.cc
index 577682ccce..a2389ebfc7 100644
--- a/libartbase/base/bit_struct_test.cc
+++ b/libartbase/base/bit_struct_test.cc
@@ -73,7 +73,7 @@ struct CustomBitStruct {
TEST(BitStructs, Custom) {
CustomBitStruct expected(0b1111);
- BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f{};
+ BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f{};
EXPECT_EQ(1u, sizeof(f));
@@ -85,9 +85,9 @@ TEST(BitStructs, Custom) {
EXPECT_EQ(AsUint(f), 0b11110000u);
}
-BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8)
- BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a;
- BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b;
+BITSTRUCT_DEFINE_START(TestTwoCustom, /* size= */ 8)
+ BitStructField<CustomBitStruct, /*lsb=*/0, /*width=*/4> f4_a;
+ BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f4_b;
BITSTRUCT_DEFINE_END(TestTwoCustom);
TEST(BitStructs, TwoCustom) {
@@ -122,7 +122,7 @@ TEST(BitStructs, TwoCustom) {
}
TEST(BitStructs, Number) {
- BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn{};
+ BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4> bsn{};
EXPECT_EQ(2u, sizeof(bsn));
bsn = 0b1111;
@@ -135,20 +135,20 @@ TEST(BitStructs, Number) {
EXPECT_EQ(AsUint(bsn), 0b11110000u);
}
-BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8)
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size= */ 8)
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/7> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
BITSTRUCT_DEFINE_END(TestBitStruct);
TEST(BitStructs, Test1) {
{
// Check minimal size selection is correct.
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/7> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
EXPECT_EQ(1u, sizeof(i3));
EXPECT_EQ(1u, sizeof(u4));
@@ -216,12 +216,12 @@ TEST(BitStructs, Test1) {
}
}
-BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32)
- BitStructUint</*lsb*/0, /*width*/3> u3;
- BitStructUint</*lsb*/3, /*width*/10> u10;
- BitStructUint</*lsb*/13, /*width*/19> u19;
+BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size= */ 32)
+ BitStructUint</*lsb=*/0, /*width=*/3> u3;
+ BitStructUint</*lsb=*/3, /*width=*/10> u10;
+ BitStructUint</*lsb=*/13, /*width=*/19> u19;
- BitStructUint</*lsb*/0, /*width*/32> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/32> alias_all;
BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
// static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
@@ -255,11 +255,11 @@ TEST(BitStructs, Mixed) {
EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
}
-BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size */ 8)
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size= */ 8)
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/8> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/8> alias_all;
BITSTRUCT_DEFINE_END(TestBitStruct_u8);
TEST(BitStructs, FieldAssignment) {
@@ -283,11 +283,11 @@ TEST(BitStructs, FieldAssignment) {
}
}
-BITSTRUCT_DEFINE_START(NestedStruct, /* size */ 64)
- BitStructField<MixedSizeBitStruct, /*lsb*/0> mixed_lower;
- BitStructField<MixedSizeBitStruct, /*lsb*/32> mixed_upper;
+BITSTRUCT_DEFINE_START(NestedStruct, /* size= */ 64)
+ BitStructField<MixedSizeBitStruct, /*lsb=*/0> mixed_lower;
+ BitStructField<MixedSizeBitStruct, /*lsb=*/32> mixed_upper;
- BitStructUint</*lsb*/0, /*width*/64> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/64> alias_all;
BITSTRUCT_DEFINE_END(NestedStruct);
TEST(BitStructs, NestedFieldAssignment) {
diff --git a/libartbase/base/bit_utils_test.cc b/libartbase/base/bit_utils_test.cc
index 3a80600b57..91fc3b0bb9 100644
--- a/libartbase/base/bit_utils_test.cc
+++ b/libartbase/base/bit_utils_test.cc
@@ -353,89 +353,92 @@ static_assert(MaskLeastSignificant<int8_t>(8) == 0xFF, "TestMaskLeastSignificant
static_assert(MaskLeastSignificant<uint64_t>(63) == (std::numeric_limits<uint64_t>::max() >> 1u),
"TestMaskLeastSignificant#6");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
-static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/0) == 0xFF, "TestBitFieldClear#1");
+static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
"TestBitFieldClear#2");
-static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
"TestBitFieldClear#3");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5");
-static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6");
-static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/2) == 0b11111100, "TestBitFieldClear#4");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/3) == 0b11111000, "TestBitFieldClear#5");
+static_assert(BitFieldClear(0xFF, /*lsb=*/1, /*width=*/3) == 0b11110001, "TestBitFieldClear#6");
+static_assert(BitFieldClear(0xFF, /*lsb=*/2, /*width=*/3) == 0b11100011, "TestBitFieldClear#7");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1");
-static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/0) == 0x0, "TestBitFieldExtract#1");
+static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldExtract#2");
-static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32)
== std::numeric_limits<int32_t>::max(),
"TestBitFieldExtract#3");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/2) == 0b00000011,
"TestBitFieldExtract#4");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#5");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/1, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#6");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/2, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#7");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/3, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#8");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/8, /*width=*/3) == 0b00000000,
"TestBitFieldExtract#9");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/7, /*width=*/3) == 0b00000001,
"TestBitFieldExtract#10");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/6, /*width=*/3) == 0b00000011,
"TestBitFieldExtract#11");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13");
-static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14");
-static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15");
-static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16");
-static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17");
-static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18");
-static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19");
-static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/2) == -1, "TestBitFieldExtract#12");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/3) == -1, "TestBitFieldExtract#13");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/1, /*width=*/3) == -1, "TestBitFieldExtract#14");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/2, /*width=*/3) == -1, "TestBitFieldExtract#15");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/3, /*width=*/3) == -1, "TestBitFieldExtract#16");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/8, /*width=*/3) == 0b00000000,
+ "TestBitFieldExtract#17");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/7, /*width=*/3) == 0b00000001,
+ "TestBitFieldExtract#18");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/6, /*width=*/3) == 0b00000011,
+ "TestBitFieldExtract#19");
+static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
== 0b00001010,
"TestBitFieldExtract#20");
-static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
== static_cast<int8_t>(0b11111010),
"TestBitFieldExtract#21");
-static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF,
+static_assert(BitFieldInsert(0xFF, /*data=*/0x0, /*lsb=*/0, /*width=*/0) == 0xFF,
"TestBitFieldInsert#1");
static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#2");
static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#3");
static_assert(BitFieldInsert(0u,
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#4");
static_assert(BitFieldInsert(-(-0),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#5");
-static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldInsert(0x00, /*data=*/0b11u, /*lsb=*/0, /*width=*/2) == 0b00000011,
"TestBitFieldInsert#6");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/0, /*width=*/3) == 0b00000111,
"TestBitFieldInsert#7");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/1, /*width=*/3) == 0b00001110,
"TestBitFieldInsert#8");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/2, /*width=*/3) == 0b00011100,
"TestBitFieldInsert#9");
-static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100,
+static_assert(BitFieldInsert(0b01011100, /*data=*/0b1101u, /*lsb=*/4, /*width=*/4) == 0b11011100,
"TestBitFieldInsert#10");
template <typename Container>
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index b65710bc00..987ceb64df 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -62,7 +62,7 @@ ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix)
: ScratchFile(other.GetFilename() + suffix) {}
ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) {
- int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
+ int fd = open(filename_.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
CHECK_NE(-1, fd);
file_.reset(new File(fd, GetFilename(), true));
}
@@ -251,7 +251,7 @@ std::unique_ptr<const DexFile> CommonArtTestImpl::LoadExpectSingleDexFile(const
static constexpr bool kVerifyChecksum = true;
const ArtDexFileLoader dex_file_loader;
if (!dex_file_loader.Open(
- location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
+ location, location, /* verify= */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
diff --git a/libartbase/base/file_magic.cc b/libartbase/base/file_magic.cc
index d8d843beeb..1471c59b73 100644
--- a/libartbase/base/file_magic.cc
+++ b/libartbase/base/file_magic.cc
@@ -31,7 +31,7 @@ using android::base::StringPrintf;
File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
CHECK(magic != nullptr);
- File fd(filename, O_RDONLY, /* check_usage */ false);
+ File fd(filename, O_RDONLY, /* check_usage= */ false);
if (fd.Fd() == -1) {
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
return File();
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 2a7273b85e..f7c9c5e264 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -71,12 +71,12 @@ TEST_F(FileUtilsTest, GetAndroidRootSafe) {
// Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
UniqueCPtr<char> root_dup(strdup(android_root_env.c_str()));
char* dir = dirname(root_dup.get());
- ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, /* overwrite */ 1));
std::string android_root2 = GetAndroidRootSafe(&error_msg);
EXPECT_STREQ(dir, android_root2.c_str());
// Set a bogus value for ANDROID_ROOT. This should be an error.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
EXPECT_EQ(GetAndroidRootSafe(&error_msg), "");
// Unset ANDROID_ROOT and see that it still returns something (as libart code is running).
@@ -90,7 +90,7 @@ TEST_F(FileUtilsTest, GetAndroidRootSafe) {
// Reset ANDROID_ROOT, as other things may depend on it.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), /* overwrite */ 1));
}
TEST_F(FileUtilsTest, ReplaceFileExtension) {
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 06a168d761..532ca28b50 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -394,7 +394,7 @@ MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return Invalid();
}
const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
- return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+ return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
}
template<typename A, typename B>
@@ -696,8 +696,8 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
tail_name,
tail_prot,
MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
- /* fd */ -1,
- /* offset */ 0,
+ /* fd= */ -1,
+ /* offset= */ 0,
error_msg,
use_debug_name);
}
@@ -771,7 +771,7 @@ MemMap MemMap::TakeReservedMemory(size_t byte_count) {
uint8_t* begin = Begin();
ReleaseReservedMemory(byte_count); // Performs necessary DCHECK()s on this reservation.
size_t base_size = RoundUp(byte_count, kPageSize);
- return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+ return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse= */ false);
}
void MemMap::ReleaseReservedMemory(size_t byte_count) {
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index bf143d472d..5815cf99e7 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -53,7 +53,7 @@ class MemMapTest : public CommonArtTest {
// Find a valid map address and unmap it before returning.
std::string error_msg;
MemMap map = MemMap::MapAnonymous("temp",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ,
low_4gb,
@@ -68,7 +68,7 @@ class MemMapTest : public CommonArtTest {
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
@@ -165,17 +165,17 @@ TEST_F(MemMapTest, Start) {
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
void* source_addr = source.Begin();
@@ -200,21 +200,21 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
5 * kPageSize, // Need to make it larger
// initially so we know
// there won't be mappings
// in the way when we move
// source.
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -246,17 +246,17 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -285,11 +285,11 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way when we move source.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
@@ -299,7 +299,7 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
dest.Begin() + kPageSize,
2 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -332,20 +332,20 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
0,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -358,7 +358,7 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) {
reinterpret_cast<uint8_t*>(kPageSize),
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
nullptr);
ASSERT_FALSE(map.IsValid());
}
@@ -368,20 +368,20 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
0,
PROT_READ,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -394,12 +394,12 @@ TEST_F(MemMapTest, MapFile32Bit) {
constexpr size_t kMapSize = kPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
- MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+ MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
+ /*start=*/0,
+ /*low_4gb=*/true,
scratch_file.GetFilename().c_str(),
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
@@ -413,23 +413,23 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
CommonInit();
std::string error_msg;
// Find a valid address.
- uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
+ uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
// Map at an address that should work, which should succeed.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
valid_address,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -439,7 +439,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
reinterpret_cast<uint8_t*>(map1.BaseBegin()),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
@@ -469,12 +469,12 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
memset(&data[2 * kPageSize], 0xaa, kPageSize);
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
- MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+ MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
+ /*start=*/0,
+ /*low_4gb=*/true,
scratch_file.GetFilename().c_str(),
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
@@ -522,7 +522,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
reinterpret_cast<uint8_t*>(start_addr),
size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_msg);
if (map.IsValid()) {
break;
@@ -543,7 +543,7 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
reinterpret_cast<uint8_t*>(ptr),
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -558,7 +558,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -571,7 +571,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
reinterpret_cast<uint8_t*>(0xF0000000),
0x20000000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -585,9 +585,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) {
nullptr,
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ false,
+ /* reservation= */ nullptr,
&error_msg);
ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -595,9 +595,9 @@ TEST_F(MemMapTest, MapAnonymousReuse) {
reinterpret_cast<uint8_t*>(map.BaseBegin()),
0x10000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ true,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ true,
+ /* reservation= */ nullptr,
&error_msg);
ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -609,10 +609,10 @@ TEST_F(MemMapTest, CheckNoGaps) {
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
MemMap map = MemMap::MapAnonymous("MapAnonymous0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize * kNumPages,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -627,7 +627,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
map_base,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -635,7 +635,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
map_base + kPageSize,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -643,7 +643,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
map_base + kPageSize * 2,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -672,10 +672,10 @@ TEST_F(MemMapTest, AlignBy) {
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
14 * page_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(m0.IsValid());
uint8_t* base0 = m0.Begin();
@@ -778,10 +778,10 @@ TEST_F(MemMapTest, Reservation) {
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
MemMap reservation = MemMap::MapAnonymous("Test reservation",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kMapSize,
PROT_NONE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(reservation.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -791,14 +791,14 @@ TEST_F(MemMapTest, Reservation) {
static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
uint8_t* addr1 = reservation.Begin();
MemMap map1 = MemMap::MapFileAtAddress(addr1,
- /* byte_count */ kChunk1Size,
+ /* byte_count= */ kChunk1Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start */ 0,
- /* low_4gb */ false,
+ /* start= */ 0,
+ /* low_4gb= */ false,
scratch_file.GetFilename().c_str(),
- /* reuse */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
@@ -816,10 +816,10 @@ TEST_F(MemMapTest, Reservation) {
uint8_t* addr2 = reservation.Begin();
MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
addr2,
- /* byte_count */ kChunk2Size,
+ /* byte_count= */ kChunk2Size,
PROT_READ,
- /* low_4gb */ false,
- /* reuse */ false,
+ /* low_4gb= */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
@@ -833,14 +833,14 @@ TEST_F(MemMapTest, Reservation) {
const size_t kChunk3Size = reservation.Size() - 1u;
uint8_t* addr3 = reservation.Begin();
MemMap map3 = MemMap::MapFileAtAddress(addr3,
- /* byte_count */ kChunk3Size,
+ /* byte_count= */ kChunk3Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
- /* low_4gb */ false,
+ /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /* low_4gb= */ false,
scratch_file.GetFilename().c_str(),
- /* reuse */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map3.IsValid()) << error_msg;
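The mem_map_test.cc hunks above (and most of the hunks that follow) mechanically convert bare argument comments such as /* low_4gb */ false into the /*param=*/ form. The payoff is tooling: clang-tidy's bugprone-argument-comment check matches the /*param=*/ spelling against the parameter name in the declaration, so a stale comment becomes a warning instead of silent misdocumentation. A minimal sketch of the convention, with a hypothetical MapRegion declaration standing in for MemMap::MapAnonymous:

    #include <cstddef>
    #include <string>
    #include <sys/mman.h>

    // Hypothetical declaration, for illustration only.
    void* MapRegion(const char* name, void* addr, size_t byte_count,
                    int prot, bool low_4gb, std::string* error_msg);

    void Demo(std::string* msg) {
      // Old style: a free-form comment; no tool verifies it.
      MapRegion("demo", nullptr, 4096, PROT_READ, /* low_4gb */ false, msg);
      // New style: the trailing '=' marks a checkable argument comment, so
      // renaming or reordering a parameter flags the call site rather than
      // leaving the comment quietly wrong.
      MapRegion("demo", /*addr=*/ nullptr, /*byte_count=*/ 4096, PROT_READ,
                /*low_4gb=*/ false, msg);
    }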
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
index 490dbf3fa0..def949e3a7 100644
--- a/libartbase/base/membarrier.cc
+++ b/libartbase/base/membarrier.cc
@@ -29,7 +29,7 @@
#include <linux/membarrier.h>
#define CHECK_MEMBARRIER_CMD(art_value, membarrier_value) \
- static_assert(static_cast<int>(art_value) == membarrier_value, "Bad value for " # art_value)
+ static_assert(static_cast<int>(art_value) == (membarrier_value), "Bad value for " # art_value)
CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kQuery, MEMBARRIER_CMD_QUERY);
CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kGlobal, MEMBARRIER_CMD_SHARED);
CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
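The one-character membarrier.cc fix above is standard macro hygiene: the expansion compares its argument with ==, so an argument containing a lower-precedence operator would change meaning once pasted in. A contrived sketch (BAD_CHECK and GOOD_CHECK are made-up names for illustration):

    #define BAD_CHECK(v, expected)  static_assert((v) == expected, "mismatch")
    #define GOOD_CHECK(v, expected) static_assert((v) == (expected), "mismatch")

    // With expected written as 1 | 2, BAD_CHECK expands to
    //   static_assert((3) == 1 | 2, "mismatch");
    // which parses as ((3) == 1) | 2 -- nonzero, so it always passes.
    // GOOD_CHECK compares against the intended value 3 instead.
    GOOD_CHECK(3, 1 | 2);

The MEMBARRIER_CMD_* enumerators are simple tokens today, so the parentheses are purely defensive; they keep CHECK_MEMBARRIER_CMD correct if a kernel header ever defines one of them as a compound expression.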
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index d679328cef..2f16fb2820 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
+#include "file_utils.h"
#include "unix_file/fd_file.h"
namespace art {
@@ -40,7 +41,7 @@ using android::base::StringPrintf;
// to acquire a lock, and the unlock / close in the corresponding
// destructor. Callers should explicitly flush files they're writing to if
// that is the desired behaviour.
- std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+ std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, /* auto_flush= */ false));
if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return nullptr;
@@ -98,7 +99,7 @@ ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
// destructor. Callers should explicitly flush files they're writing to if
// that is the desired behaviour.
ScopedFlock locked_file(
- new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+ new LockedFile(DupCloexec(fd), path, /* check_usage= */ false, read_only_mode));
if (locked_file->Fd() == -1) {
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
locked_file->GetPath().c_str(), strerror(errno));
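The scoped_flock.cc hunks swap dup(fd) for ART's DupCloexec(fd) helper, hence the new file_utils.h include. A rough reimplementation, for illustration only (the real helper lives in libartbase), of what a DupCloexec-style function does: duplicate the descriptor with close-on-exec already set, so there is no window between dup() and a separate fcntl(F_SETFD) call in which a concurrent fork()+exec() could leak the descriptor into a child process:

    #include <fcntl.h>

    // Sketch of a DupCloexec-style helper; not the actual ART implementation.
    inline int DupCloexecSketch(int fd) {
      // F_DUPFD_CLOEXEC duplicates fd atomically with FD_CLOEXEC set; the
      // third argument is the lowest descriptor number to consider.
      return fcntl(fd, F_DUPFD_CLOEXEC, /*lowest_fd=*/ 0);
    }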
diff --git a/libartbase/base/scoped_flock_test.cc b/libartbase/base/scoped_flock_test.cc
index f9ac1e0230..22356cd096 100644
--- a/libartbase/base/scoped_flock_test.cc
+++ b/libartbase/base/scoped_flock_test.cc
@@ -38,7 +38,7 @@ TEST_F(ScopedFlockTest, TestLocking) {
// Attempt to acquire a second lock on the same file. This must fail.
ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
O_RDONLY,
- /* block */ false,
+ /* block= */ false,
&error_msg);
ASSERT_TRUE(second_lock.get() == nullptr);
ASSERT_TRUE(!error_msg.empty());
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index de60277432..76894c6a8a 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -431,7 +431,7 @@ bool FdFile::Unlink() {
bool is_current = false;
{
struct stat this_stat, current_stat;
- int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY));
+ int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY | O_CLOEXEC));
if (cur_fd > 0) {
// File still exists.
if (fstat(fd_, &this_stat) == 0 && fstat(cur_fd, &current_stat) == 0) {
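The fd_file.cc change applies the same close-on-exec discipline at open() time. Passing O_CLOEXEC in the flags is atomic, whereas open() followed by fcntl(fd, F_SETFD, FD_CLOEXEC) leaves a window in which another thread's fork()+exec() inherits the descriptor. A minimal sketch of the pattern (OpenForReadingSketch is a made-up name; TEMP_FAILURE_RETRY is the glibc/bionic retry-on-EINTR macro used throughout this code):

    #include <fcntl.h>
    #include <unistd.h>

    inline int OpenForReadingSketch(const char* path) {
      // Restart the call if interrupted by a signal; request close-on-exec
      // atomically with the open itself.
      return TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_CLOEXEC));
    }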
diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc
index 9c39bb50ec..3a9cf59148 100644
--- a/libartbase/base/unix_file/fd_file_test.cc
+++ b/libartbase/base/unix_file/fd_file_test.cc
@@ -15,6 +15,7 @@
*/
#include "base/common_art_test.h" // For ScratchFile
+#include "base/file_utils.h"
#include "gtest/gtest.h"
#include "fd_file.h"
#include "random_access_file_test.h"
@@ -25,7 +26,7 @@ class FdFileTest : public RandomAccessFileTest {
protected:
RandomAccessFile* MakeTestFile() override {
FILE* tmp = tmpfile();
- int fd = dup(fileno(tmp));
+ int fd = art::DupCloexec(fileno(tmp));
fclose(tmp);
return new FdFile(fd, false);
}
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 174d22792a..f5761cfbec 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -75,10 +75,10 @@ MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
name += " extracted in memory from ";
name += zip_filename;
MemMap map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
GetUncompressedLength(),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
@@ -138,7 +138,7 @@ MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* erro
MAP_PRIVATE,
zip_fd,
offset,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
name.c_str(),
error_msg);
diff --git a/libartbase/base/zip_archive_test.cc b/libartbase/base/zip_archive_test.cc
index b9238811ca..969cf1297c 100644
--- a/libartbase/base/zip_archive_test.cc
+++ b/libartbase/base/zip_archive_test.cc
@@ -23,6 +23,7 @@
#include <memory>
#include "base/common_art_test.h"
+#include "file_utils.h"
#include "os.h"
#include "unix_file/fd_file.h"
@@ -41,7 +42,7 @@ TEST_F(ZipArchiveTest, FindAndExtract) {
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
- std::unique_ptr<File> file(new File(dup(tmp.GetFd()), tmp.GetFilename(), false));
+ std::unique_ptr<File> file(new File(DupCloexec(tmp.GetFd()), tmp.GetFilename(), false));
ASSERT_TRUE(file.get() != nullptr);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
@@ -49,7 +50,7 @@ TEST_F(ZipArchiveTest, FindAndExtract) {
file.reset(nullptr);
uint32_t computed_crc = crc32(0L, Z_NULL, 0);
- int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
+ int fd = open(tmp.GetFilename().c_str(), O_RDONLY | O_CLOEXEC);
ASSERT_NE(-1, fd);
const size_t kBufSize = 32768;
uint8_t buf[kBufSize];
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 4f73967353..20a519bf99 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -95,7 +95,7 @@ bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename,
File fd;
if (zip_fd != -1) {
if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
- fd = File(DupCloexec(zip_fd), false /* check_usage */);
+ fd = File(DupCloexec(zip_fd), /* check_usage= */ false);
}
} else {
fd = OpenAndReadMagic(filename, &magic, error_msg);
@@ -142,9 +142,9 @@ bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename,
if (IsMagicValid(magic)) {
std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
filename,
- /* verify */ false,
- /* verify_checksum */ false,
- /* mmap_shared */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
+ /* mmap_shared= */ false,
error_msg));
if (dex_file == nullptr) {
return false;
@@ -167,16 +167,16 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
return OpenCommon(base,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
location_checksum,
oat_dex_file,
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
@@ -199,8 +199,8 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& locatio
uint8_t* begin = map.Begin();
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
location_checksum,
kNoOatDexFile,
@@ -208,7 +208,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& locatio
verify_checksum,
error_msg,
std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
+ /*verify_result=*/ nullptr);
// Opening CompactDex is only supported from vdex files.
if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
*error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
@@ -240,7 +240,7 @@ bool ArtDexFileLoader::Open(const char* filename,
location,
verify,
verify_checksum,
- /* mmap_shared */ false,
+ /* mmap_shared= */ false,
error_msg));
if (dex_file.get() != nullptr) {
dex_files->push_back(std::move(dex_file));
@@ -290,7 +290,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
CHECK(!location.empty());
MemMap map;
{
- File delayed_close(fd, /* check_usage */ false);
+ File delayed_close(fd, /* check_usage= */ false);
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
if (fstat(fd, &sbuf) == -1) {
@@ -308,7 +308,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
mmap_shared ? MAP_SHARED : MAP_PRIVATE,
fd,
0,
- /*low_4gb*/false,
+ /*low_4gb=*/false,
location.c_str(),
error_msg);
if (!map.IsValid()) {
@@ -330,8 +330,8 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
dex_header->checksum_,
kNoOatDexFile,
@@ -339,7 +339,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
verify_checksum,
error_msg,
std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
+ /*verify_result=*/ nullptr);
// Opening CompactDex is only supported from vdex files.
if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
@@ -407,8 +407,8 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
size_t size = map.Size();
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
zip_entry->GetCrc32(),
kNoOatDexFile,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index a7d03637b1..f7a20629f5 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -217,9 +217,9 @@ TEST_F(ArtDexFileLoaderTest, GetMethodSignature) {
std::string plain_method = std::string("GetMethodSignature.") + r.name;
ASSERT_EQ(plain_method,
- raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ false));
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ false));
ASSERT_EQ(r.pretty_method,
- raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ true));
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ true));
}
}
@@ -332,8 +332,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_DataDir) {
std::string error_msg;
bool success = loader.Open(data_location_path.c_str(),
data_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -360,8 +360,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemDir) {
std::string error_msg;
bool success = loader.Open(system_location_path.c_str(),
system_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -388,8 +388,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemFrameworkDir) {
std::string error_msg;
bool success = loader.Open(system_framework_location_path.c_str(),
system_framework_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -416,8 +416,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_DataDir_MultiDex) {
std::string error_msg;
bool success = loader.Open(data_multi_location_path.c_str(),
data_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -445,8 +445,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemDir_MultiDex) {
std::string error_msg;
bool success = loader.Open(system_multi_location_path.c_str(),
system_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -474,8 +474,8 @@ TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemFrameworkDir_MultiDex) {
std::string error_msg;
bool success = loader.Open(system_framework_multi_location_path.c_str(),
system_framework_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index 2bb4dde649..87f4bab672 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -45,10 +45,10 @@ std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex, std::vector<uint8
std::unique_ptr<const DexFile> dex(dex_file_loader.Open(data->data(),
data->size(),
"location",
- /*location_checksum*/ 123,
- /*oat_dex_file*/nullptr,
- /*verify*/false,
- /*verify_checksum*/false,
+ /*location_checksum=*/ 123,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
&error_msg));
CHECK(dex != nullptr) << error_msg;
return dex;
@@ -56,11 +56,11 @@ std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex, std::vector<uint8
TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
std::vector<uint8_t> standard_dex_data;
- std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex*/false,
+ std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex=*/false,
&standard_dex_data));
ASSERT_TRUE(standard_dex != nullptr);
std::vector<uint8_t> compact_dex_data;
- std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true,
+ std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex=*/true,
&compact_dex_data));
ASSERT_TRUE(compact_dex != nullptr);
static constexpr uint16_t kRegisterSize = 2;
diff --git a/libdexfile/dex/compact_dex_file.cc b/libdexfile/dex/compact_dex_file.cc
index 302b59ee91..641c523158 100644
--- a/libdexfile/dex/compact_dex_file.cc
+++ b/libdexfile/dex/compact_dex_file.cc
@@ -100,7 +100,7 @@ CompactDexFile::CompactDexFile(const uint8_t* base,
location_checksum,
oat_dex_file,
std::move(container),
- /*is_compact_dex*/ true),
+ /*is_compact_dex=*/ true),
debug_info_offsets_(DataBegin() + GetHeader().debug_info_offsets_pos_,
GetHeader().debug_info_base_,
GetHeader().debug_info_offsets_table_offset_) {}
diff --git a/libdexfile/dex/compact_dex_file_test.cc b/libdexfile/dex/compact_dex_file_test.cc
index 517c5873ed..799967e255 100644
--- a/libdexfile/dex/compact_dex_file_test.cc
+++ b/libdexfile/dex/compact_dex_file_test.cc
@@ -68,11 +68,11 @@ TEST(CompactDexFileTest, CodeItemFields) {
uint16_t out_outs_size;
uint16_t out_tries_size;
uint32_t out_insns_size_in_code_units;
- code_item->DecodeFields</*kDecodeOnlyInstructionCount*/false>(&out_insns_size_in_code_units,
- &out_registers_size,
- &out_ins_size,
- &out_outs_size,
- &out_tries_size);
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/false>(&out_insns_size_in_code_units,
+ &out_registers_size,
+ &out_ins_size,
+ &out_outs_size,
+ &out_tries_size);
ASSERT_EQ(registers_size, out_registers_size);
ASSERT_EQ(ins_size, out_ins_size);
ASSERT_EQ(outs_size, out_outs_size);
@@ -80,11 +80,11 @@ TEST(CompactDexFileTest, CodeItemFields) {
ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
++out_insns_size_in_code_units; // Force value to change.
- code_item->DecodeFields</*kDecodeOnlyInstructionCount*/true>(&out_insns_size_in_code_units,
- /*registers_size*/ nullptr,
- /*ins_size*/ nullptr,
- /*outs_size*/ nullptr,
- /*tries_size*/ nullptr);
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/true>(&out_insns_size_in_code_units,
+ /*registers_size=*/ nullptr,
+ /*ins_size=*/ nullptr,
+ /*outs_size=*/ nullptr,
+ /*tries_size=*/ nullptr);
ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
};
static constexpr uint32_t kMax32 = std::numeric_limits<uint32_t>::max();
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 4aafc665ee..3667c8c289 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -222,16 +222,16 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
std::string* error_msg) const {
return OpenCommon(base,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0,
location,
location_checksum,
oat_dex_file,
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
std::unique_ptr<const DexFile> DexFileLoader::OpenWithDataSection(
@@ -255,8 +255,8 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenWithDataSection(
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
bool DexFileLoader::OpenAll(
@@ -290,7 +290,7 @@ bool DexFileLoader::OpenAll(
size,
location,
dex_header->checksum_,
- /*oat_dex_file*/ nullptr,
+ /*oat_dex_file=*/ nullptr,
verify,
verify_checksum,
error_msg));
@@ -410,11 +410,11 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
std::unique_ptr<const DexFile> dex_file = OpenCommon(
map.data(),
map.size(),
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
zip_entry->GetCrc32(),
- /*oat_dex_file*/ nullptr,
+ /*oat_dex_file=*/ nullptr,
verify,
verify_checksum,
error_msg,
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index 53786171cc..9c61d1ac5f 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -221,7 +221,7 @@ static bool OpenDexFilesBase64(const char* base64,
bool success = dex_file_loader.OpenAll(dex_bytes->data(),
dex_bytes->size(),
location,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
error_code,
error_msg,
@@ -256,9 +256,9 @@ static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base
dex_bytes->size(),
location,
location_checksum,
- /* oat_dex_file */ nullptr,
- /* verify */ true,
- /* verify_checksum */ true,
+ /* oat_dex_file= */ nullptr,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_message));
if (expect_success) {
CHECK(dex_file != nullptr) << error_message;
@@ -348,7 +348,7 @@ TEST_F(DexFileLoaderTest, Version40Rejected) {
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -367,7 +367,7 @@ TEST_F(DexFileLoaderTest, Version41Rejected) {
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -386,7 +386,7 @@ TEST_F(DexFileLoaderTest, ZeroLengthDexRejected) {
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index f273c84027..499a89b2ab 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -341,42 +341,43 @@ bool DexFileVerifier::CheckHeader() {
bool result =
CheckValidOffsetAndSize(header_->link_off_,
header_->link_size_,
- 0 /* unaligned */,
+ /* alignment= */ 0,
"link") &&
CheckValidOffsetAndSize(header_->map_off_,
header_->map_off_,
- 4,
+ /* alignment= */ 4,
"map") &&
CheckValidOffsetAndSize(header_->string_ids_off_,
header_->string_ids_size_,
- 4,
+ /* alignment= */ 4,
"string-ids") &&
CheckValidOffsetAndSize(header_->type_ids_off_,
header_->type_ids_size_,
- 4,
+ /* alignment= */ 4,
"type-ids") &&
CheckSizeLimit(header_->type_ids_size_, DexFile::kDexNoIndex16, "type-ids") &&
CheckValidOffsetAndSize(header_->proto_ids_off_,
header_->proto_ids_size_,
- 4,
+ /* alignment= */ 4,
"proto-ids") &&
CheckSizeLimit(header_->proto_ids_size_, DexFile::kDexNoIndex16, "proto-ids") &&
CheckValidOffsetAndSize(header_->field_ids_off_,
header_->field_ids_size_,
- 4,
+ /* alignment= */ 4,
"field-ids") &&
CheckValidOffsetAndSize(header_->method_ids_off_,
header_->method_ids_size_,
- 4,
+ /* alignment= */ 4,
"method-ids") &&
CheckValidOffsetAndSize(header_->class_defs_off_,
header_->class_defs_size_,
- 4,
+ /* alignment= */ 4,
"class-defs") &&
CheckValidOffsetAndSize(header_->data_off_,
header_->data_size_,
- 0, // Unaligned, spec doesn't talk about it, even though size
- // is supposed to be a multiple of 4.
+ // Unaligned, spec doesn't talk about it, even though size
+ // is supposed to be a multiple of 4.
+ /* alignment= */ 0,
"data");
return result;
}
@@ -1197,7 +1198,7 @@ bool DexFileVerifier::CheckIntraClassDataItem() {
ClassAccessor::Method method(*dex_file_, field.ptr_pos_);
if (!CheckIntraClassDataItemMethods(&method,
accessor.NumDirectMethods(),
- nullptr /* direct_it */,
+ /* direct_method= */ nullptr,
0u,
&have_class,
&class_type_index,
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index a22a457dbe..c3180f0660 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -107,8 +107,8 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
bool success = dex_file_loader.OpenAll(dex_bytes.get(),
length,
location,
- /* verify */ true,
- /* verify_checksum */ true,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_code,
error_msg,
&tmp);
@@ -1621,13 +1621,13 @@ TEST_F(DexFileVerifierTest, Checksum) {
dex_file->Begin(),
dex_file->Size(),
"good checksum, no verify",
- /*verify_checksum*/ false,
+ /*verify_checksum=*/ false,
&error_msg));
EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
"good checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
// Bad checksum: !verify_checksum passes; verify_checksum fails.
@@ -1638,13 +1638,13 @@ TEST_F(DexFileVerifierTest, Checksum) {
dex_file->Begin(),
dex_file->Size(),
"bad checksum, no verify",
- /*verify_checksum*/ false,
+ /*verify_checksum=*/ false,
&error_msg));
EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
"bad checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
EXPECT_NE(error_msg.find("Bad checksum"), std::string::npos) << error_msg;
}
@@ -1691,7 +1691,7 @@ TEST_F(DexFileVerifierTest, BadStaticMethodName) {
dex_file->Begin(),
dex_file->Size(),
"bad static method name",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1735,7 +1735,7 @@ TEST_F(DexFileVerifierTest, BadVirtualMethodName) {
dex_file->Begin(),
dex_file->Size(),
"bad virtual method name",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1779,7 +1779,7 @@ TEST_F(DexFileVerifierTest, BadClinitSignature) {
dex_file->Begin(),
dex_file->Size(),
"bad clinit signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1823,7 +1823,7 @@ TEST_F(DexFileVerifierTest, BadClinitSignatureAgain) {
dex_file->Begin(),
dex_file->Size(),
"bad clinit signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1860,7 +1860,7 @@ TEST_F(DexFileVerifierTest, BadInitSignature) {
dex_file->Begin(),
dex_file->Size(),
"bad init signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -2063,7 +2063,7 @@ TEST_F(DexFileVerifierTest, InvokeCustomDexSamples) {
dex_file->Begin(),
dex_file->Size(),
"good checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
// TODO(oth): Test corruptions (b/35308502)
}
@@ -2110,7 +2110,7 @@ TEST_F(DexFileVerifierTest, BadStaticFieldInitialValuesArray) {
dex_file->Begin(),
dex_file->Size(),
"bad static field initial values array",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -2166,7 +2166,7 @@ TEST_F(DexFileVerifierTest, GoodStaticFieldInitialValuesArray) {
dex_file->Begin(),
dex_file->Size(),
"good static field initial values array",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
diff --git a/libdexfile/dex/dex_instruction_test.cc b/libdexfile/dex/dex_instruction_test.cc
index 6ce9dbafc8..02400f4a14 100644
--- a/libdexfile/dex/dex_instruction_test.cc
+++ b/libdexfile/dex/dex_instruction_test.cc
@@ -71,10 +71,13 @@ static void Build4rcc(uint16_t num_args, uint16_t method_idx, uint16_t proto_idx
TEST(Instruction, PropertiesOf45cc) {
uint16_t instruction[4];
- Build45cc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
- 0xcafe /* arg_regs */, instruction);
+ Build45cc(/* num_args= */ 4u,
+ /* method_idx= */ 16u,
+ /* proto_idx= */ 32u,
+ /* arg_regs= */ 0xcafe,
+ instruction);
- DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+ DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
@@ -106,10 +109,13 @@ TEST(Instruction, PropertiesOf45cc) {
TEST(Instruction, PropertiesOf4rcc) {
uint16_t instruction[4];
- Build4rcc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
- 0xcafe /* arg_regs */, instruction);
+ Build4rcc(/* num_args= */ 4u,
+ /* method_idx= */ 16u,
+ /* proto_idx= */ 32u,
+ /* arg_regs_start= */ 0xcafe,
+ instruction);
- DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+ DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
diff --git a/libdexfile/dex/type_lookup_table.cc b/libdexfile/dex/type_lookup_table.cc
index 00ec358b02..7d80a2e7f7 100644
--- a/libdexfile/dex/type_lookup_table.cc
+++ b/libdexfile/dex/type_lookup_table.cc
@@ -94,7 +94,7 @@ TypeLookupTable TypeLookupTable::Open(const uint8_t* dex_data_pointer,
DCHECK_ALIGNED(raw_data, alignof(Entry));
const Entry* entries = reinterpret_cast<const Entry*>(raw_data);
size_t mask_bits = CalculateMaskBits(num_class_defs);
- return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries */ nullptr);
+ return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries= */ nullptr);
}
uint32_t TypeLookupTable::Lookup(const char* str, uint32_t hash) const {
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 2ebde5e06d..6bd49a43eb 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -190,8 +190,8 @@ bool ProfileCompilationInfo::AddClasses(const std::set<DexCacheResolvedClasses>&
bool ProfileCompilationInfo::MergeWith(const std::string& filename) {
std::string error;
int flags = O_RDONLY | O_NOFOLLOW | O_CLOEXEC;
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -221,8 +221,8 @@ bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_inv
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let the kernel decide when to write to disk.
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -259,8 +259,8 @@ bool ProfileCompilationInfo::Save(const std::string& filename, uint64_t* bytes_w
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let the kernel decide when to write to disk.
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
@@ -1393,8 +1393,8 @@ bool ProfileCompilationInfo::RemapProfileIndex(
// verify_checksum is false because we want to differentiate between a missing dex data and
// a mismatched checksum.
const DexFileData* dex_data = FindDexData(other_profile_line_header.dex_location,
- 0u,
- false /* verify_checksum */);
+ /* checksum= */ 0u,
+ /* verify_checksum= */ false);
if ((dex_data != nullptr) && (dex_data->checksum != other_profile_line_header.checksum)) {
LOG(WARNING) << "Checksum mismatch for dex " << other_profile_line_header.dex_location;
return false;
@@ -1481,8 +1481,8 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other,
// verify_checksum is false because we want to differentiate between a missing dex data and
// a mismatched checksum.
const DexFileData* dex_data = FindDexData(other_dex_data->profile_key,
- 0u,
- /* verify_checksum */ false);
+ /* checksum= */ 0u,
+ /* verify_checksum= */ false);
if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
return false;
@@ -1829,7 +1829,7 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
profile_key,
- /*method_idx*/ 0,
+ /*checksum=*/ 0,
method_idx,
max_method);
}
@@ -1975,20 +1975,20 @@ void ProfileCompilationInfo::DexFileData::SetMethodHotness(size_t index,
MethodHotness::Flag flags) {
DCHECK_LT(index, num_method_ids);
if ((flags & MethodHotness::kFlagStartup) != 0) {
- method_bitmap.StoreBit(MethodBitIndex(/*startup*/ true, index), /*value*/ true);
+ method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ true, index), /*value=*/ true);
}
if ((flags & MethodHotness::kFlagPostStartup) != 0) {
- method_bitmap.StoreBit(MethodBitIndex(/*startup*/ false, index), /*value*/ true);
+ method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ false, index), /*value=*/ true);
}
}
ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHotnessInfo(
uint32_t dex_method_index) const {
MethodHotness ret;
- if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ true, dex_method_index))) {
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ true, dex_method_index))) {
ret.AddFlag(MethodHotness::kFlagStartup);
}
- if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ false, dex_method_index))) {
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ false, dex_method_index))) {
ret.AddFlag(MethodHotness::kFlagPostStartup);
}
auto it = method_map.find(dex_method_index);
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 417abaa435..a2bfe5028d 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -43,22 +43,22 @@ class ProfileCompilationInfoTest : public CommonArtTest {
protected:
bool AddMethod(const std::string& dex_location,
uint32_t checksum,
- uint16_t method_index,
+ uint16_t method_idx,
ProfileCompilationInfo* info) {
return info->AddMethodIndex(Hotness::kFlagHot,
dex_location,
checksum,
- method_index,
+ method_idx,
kMaxMethodIds);
}
bool AddMethod(const std::string& dex_location,
uint32_t checksum,
- uint16_t method_index,
+ uint16_t method_idx,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
ProfileCompilationInfo* info) {
return info->AddMethod(
- dex_location, checksum, method_index, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
+ dex_location, checksum, method_idx, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
}
bool AddClass(const std::string& dex_location,
@@ -115,9 +115,9 @@ class ProfileCompilationInfoTest : public CommonArtTest {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */2, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum= */2, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
return pmi;
}
@@ -148,8 +148,8 @@ class ProfileCompilationInfoTest : public CommonArtTest {
ScratchFile profile;
ProfileCompilationInfo saved_info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -207,8 +207,8 @@ TEST_F(ProfileCompilationInfoTest, SaveFd) {
ProfileCompilationInfo saved_info;
// Save a few methods.
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -221,9 +221,9 @@ TEST_F(ProfileCompilationInfoTest, SaveFd) {
// Save more methods.
for (uint16_t i = 0; i < 100; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location3", /* checksum */ 3, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location3", /* checksum= */ 3, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -240,19 +240,19 @@ TEST_F(ProfileCompilationInfoTest, AddMethodsAndClassesFail) {
ScratchFile profile;
ProfileCompilationInfo info;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info));
// Trying to add info for an existing file but with a different checksum.
- ASSERT_FALSE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info));
+ ASSERT_FALSE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info));
}
TEST_F(ProfileCompilationInfoTest, MergeFail) {
ScratchFile profile;
ProfileCompilationInfo info1;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
// Use the same file, change the checksum.
ProfileCompilationInfo info2;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
ASSERT_FALSE(info1.MergeWith(info2));
}
@@ -262,10 +262,10 @@ TEST_F(ProfileCompilationInfoTest, MergeFdFail) {
ScratchFile profile;
ProfileCompilationInfo info1;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
// Use the same file, change the checksum.
ProfileCompilationInfo info2;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
ASSERT_TRUE(info1.Save(profile.GetFd()));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -280,13 +280,13 @@ TEST_F(ProfileCompilationInfoTest, SaveMaxMethods) {
ProfileCompilationInfo saved_info;
// Save the maximum number of methods
for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
// Save the maximum number of classes
for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
- ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -390,7 +390,7 @@ TEST_F(ProfileCompilationInfoTest, UnexpectedContent) {
ProfileCompilationInfo saved_info;
// Save a handful of methods.
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -415,9 +415,9 @@ TEST_F(ProfileCompilationInfoTest, SaveInlineCaches) {
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the
// class from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -431,11 +431,11 @@ TEST_F(ProfileCompilationInfoTest, SaveInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- loaded_info.GetMethod("dex_location4", /* checksum */ 4, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -448,7 +448,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -459,7 +459,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
MakeMegamorphic(&pmi_extra);
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -477,7 +477,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
@@ -491,7 +491,7 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -502,7 +502,7 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
MakeMegamorphic(&pmi_extra);
for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
// Mark all inline caches with missing types and add them to the profile again.
@@ -510,7 +510,7 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
SetIsMissingTypes(&missing_types);
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -528,7 +528,7 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
}
@@ -542,8 +542,8 @@ TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) {
// Modify the checksum to trigger a mismatch.
pmi2.dex_references[0].dex_checksum++;
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /*method_idx*/ 0, pmi1, &info));
- ASSERT_FALSE(AddMethod("dex_location2", /* checksum */ 2, /*method_idx*/ 0, pmi2, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /*method_idx=*/ 0, pmi1, &info));
+ ASSERT_FALSE(AddMethod("dex_location2", /* checksum= */ 2, /*method_idx=*/ 0, pmi2, &info));
}
// Verify that profiles behave correctly even if the methods are added in a different
@@ -556,8 +556,8 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
@@ -567,8 +567,8 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
- pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
- pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
+ pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
@@ -579,15 +579,15 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
// Profile 1 and Profile 2 get the same methods but in a different order.
// This will result in different dex numbers being assigned.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &info));
}
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
ASSERT_TRUE(AddMethod(
- "dex_location2", /* checksum */ 2, method_idx, pmi_reindexed, &info_reindexed));
+ "dex_location2", /* checksum= */ 2, method_idx, pmi_reindexed, &info_reindexed));
ASSERT_TRUE(AddMethod(
- "dex_location1", /* checksum */ 1, method_idx, pmi_reindexed, &info_reindexed));
+ "dex_location1", /* checksum= */ 1, method_idx, pmi_reindexed, &info_reindexed));
}
ProfileCompilationInfo info_backup;
@@ -597,11 +597,11 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ASSERT_TRUE(info.Equals(info_backup));
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- info.GetMethod("dex_location2", /* checksum */ 2, method_idx);
+ info.GetMethod("dex_location2", /* dex_checksum= */ 2, method_idx);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -612,34 +612,34 @@ TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimit) {
// Save a few methods.
for (uint16_t i = 0; i < std::numeric_limits<uint8_t>::max(); i++) {
std::string dex_location = std::to_string(i);
- ASSERT_TRUE(AddMethod(dex_location, /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod(dex_location, /* checksum= */ 1, /* method_idx= */ i, &info));
}
// We only support at most 255 dex files.
ASSERT_FALSE(AddMethod(
- /*dex_location*/ "256", /* checksum */ 1, /* method_idx */ 0, &info));
+ /*dex_location=*/ "256", /* checksum= */ 1, /* method_idx= */ 0, &info));
}
TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
// Create a megamorphic inline cache.
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
pmi,
&info_megamorphic));
// Create a profile with no inline caches (for the same method).
ProfileCompilationInfo info_no_inline_cache;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
&info_no_inline_cache));
// Merge the megamorphic cache into the empty one.
@@ -653,23 +653,23 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
// Create an inline cache with missing types
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
pmi,
&info_megamorphic));
// Create a profile with no inline caches (for the same method).
ProfileCompilationInfo info_no_inline_cache;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
&info_no_inline_cache));
// Merge the missing type cache into the empty one.
@@ -766,26 +766,26 @@ TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
TEST_F(ProfileCompilationInfoTest, LoadFromZipCompress) {
TestProfileLoadFromZip("primary.prof",
ZipWriter::kCompress | ZipWriter::kAlign32,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipUnCompress) {
TestProfileLoadFromZip("primary.prof",
ZipWriter::kAlign32,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipUnAligned) {
TestProfileLoadFromZip("primary.prof",
0,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadZipEntry) {
TestProfileLoadFromZip("invalid.profile.entry",
0,
- /*should_succeed*/true,
- /*should_succeed_with_empty_profile*/true);
+ /*should_succeed=*/true,
+ /*should_succeed_with_empty_profile=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadProfile) {
@@ -835,7 +835,7 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOk) {
info.AddMethodIndex(Hotness::kFlagHot,
old_name,
dex->GetLocationChecksum(),
- /* method_idx */ 0,
+ /* method_idx= */ 0,
dex->NumMethodIds());
}
@@ -845,7 +845,7 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOk) {
// Verify that we find the methods when searched with the original dex files.
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi != nullptr);
}
}
@@ -856,9 +856,9 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButNoUpdate) {
ProfileCompilationInfo info;
info.AddMethodIndex(Hotness::kFlagHot,
"my.app",
- /* checksum */ 123,
- /* method_idx */ 0,
- /* num_method_ids */ 10);
+ /* checksum= */ 123,
+ /* method_idx= */ 0,
+ /* num_method_ids= */ 10);
// Update the profile keys based on the original dex files
ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
@@ -867,13 +867,13 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButNoUpdate) {
// location.
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi == nullptr);
}
// Verify that we can find the original entry.
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod("my.app", /* checksum */ 123, /* method_idx */ 0);
+ info.GetMethod("my.app", /* dex_checksum= */ 123, /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi != nullptr);
}
@@ -892,7 +892,7 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyFail) {
info.AddMethodIndex(Hotness::kFlagHot,
old_name,
dex->GetLocationChecksum(),
- /* method_idx */ 0,
+ /* method_idx= */ 0,
dex->NumMethodIds());
}
@@ -900,8 +900,8 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyFail) {
// This will cause the rename to fail because an existing entry would already have that name.
info.AddMethodIndex(Hotness::kFlagHot,
dex_files[0]->GetLocation(),
- /* checksum */ 123,
- /* method_idx */ 0,
+ /* checksum= */ 123,
+ /* method_idx= */ 0,
dex_files[0]->NumMethodIds());
ASSERT_FALSE(info.UpdateProfileKeys(dex_files));
@@ -916,10 +916,10 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the classes from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -941,8 +941,12 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
// Dex locations 2 and 4 should have been filtered out.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2", /* checksum */ 2, method_idx));
- ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx));
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2",
+ /* dex_checksum= */ 2,
+ method_idx));
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4",
+ /* dex_checksum= */ 4,
+ method_idx));
}
// Dex location 1 should have all the inline caches referencing dex location 2 set to
@@ -950,7 +954,7 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// The methods for dex location 1 should be in the profile data.
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ method_idx);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
// Verify the inline cache.
@@ -989,8 +993,8 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
ProfileCompilationInfo::OfflineProfileMethodInfo expected_pmi(ic_map);
// The dex references should not have dex_location2 in the list.
- expected_pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
- expected_pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+ expected_pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+ expected_pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
// Now check that we get back what we expect.
ASSERT_TRUE(*loaded_pmi1 == expected_pmi);
@@ -1006,10 +1010,10 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingRemoveAll) {
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the classes from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1038,9 +1042,9 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingKeepAll) {
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the
// classes from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1060,13 +1064,13 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingKeepAll) {
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
}
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx);
+ loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, method_idx);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -1081,8 +1085,8 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingWithClasses) {
ProfileCompilationInfo saved_info;
uint16_t item_count = 1000;
for (uint16_t i = 0; i < item_count; i++) {
- ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1101,7 +1105,7 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingWithClasses) {
// Compute the expectation.
ProfileCompilationInfo expected_info;
for (uint16_t i = 0; i < item_count; i++) {
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &expected_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &expected_info));
}
// Validate the expectation.
@@ -1112,7 +1116,7 @@ TEST_F(ProfileCompilationInfoTest, FilteredLoadingWithClasses) {
TEST_F(ProfileCompilationInfoTest, ClearData) {
ProfileCompilationInfo info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
}
ASSERT_FALSE(IsEmpty(info));
info.ClearData();
@@ -1122,7 +1126,7 @@ TEST_F(ProfileCompilationInfoTest, ClearData) {
TEST_F(ProfileCompilationInfoTest, ClearDataAndSave) {
ProfileCompilationInfo info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
}
info.ClearData();
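
The churn in these tests is the move from free-form "/* name */ value" argument comments to the "/*name=*/ value" form. A minimal sketch of why the trailing '=' matters, assuming a checker in the spirit of clang-tidy's bugprone-argument-comment (the exact tooling is an assumption, not named by this change):

#include <cstdint>

// Hypothetical declaration mirroring the shape of the test helper above.
bool AddMethod(const char* dex_location, uint32_t checksum, uint16_t method_idx);

void Caller() {
  // Checked form: the name before '=' must match the parameter, so a typo
  // such as /*chksum=*/ 1 would be diagnosed.
  AddMethod("dex_location1", /*checksum=*/ 1, /*method_idx=*/ 0);
  // Legacy form: plain prose, ignored by such a checker entirely.
  AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ 0);
}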
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index feb05d60db..86a36f292b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1860,14 +1860,13 @@ class ImageDumper {
oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(oat_location);
}
if (oat_file == nullptr) {
- oat_file = OatFile::Open(/* zip_fd= */ -1,
+ oat_file = OatFile::Open(/*zip_fd=*/ -1,
oat_location,
oat_location,
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ false,
- /* abs_dex_location= */ nullptr,
- /* reservation= */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg);
}
if (oat_file == nullptr) {
@@ -1955,7 +1954,6 @@ class ImageDumper {
const auto& intern_section = image_header_.GetInternedStringsSection();
const auto& class_table_section = image_header_.GetClassTableSection();
const auto& bitmap_section = image_header_.GetImageBitmapSection();
- const auto& relocations_section = image_header_.GetImageRelocationsSection();
stats_.header_bytes = header_bytes;
@@ -1995,11 +1993,7 @@ class ImageDumper {
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
- // There should be no space between the bitmap and relocations.
- CHECK_EQ(bitmap_section.Offset() + bitmap_section.Size(), relocations_section.Offset());
-
stats_.bitmap_bytes += bitmap_section.Size();
- stats_.relocations_bytes += relocations_section.Size();
stats_.art_field_bytes += field_section.Size();
stats_.art_method_bytes += method_section.Size();
stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
@@ -2102,9 +2096,9 @@ class ImageDumper {
}
}
- static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
+ static void DumpFields(std::ostream& os, mirror::Object* obj, ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Class* super = klass->GetSuperClass();
+ ObjPtr<mirror::Class> super = klass->GetSuperClass();
if (super != nullptr) {
DumpFields(os, obj, super);
}
@@ -2432,7 +2426,6 @@ class ImageDumper {
size_t interned_strings_bytes;
size_t class_table_bytes;
size_t bitmap_bytes;
- size_t relocations_bytes;
size_t alignment_bytes;
size_t managed_code_bytes;
@@ -2462,7 +2455,6 @@ class ImageDumper {
interned_strings_bytes(0),
class_table_bytes(0),
bitmap_bytes(0),
- relocations_bytes(0),
alignment_bytes(0),
managed_code_bytes(0),
managed_code_bytes_ignoring_deduplication(0),
@@ -2626,7 +2618,6 @@ class ImageDumper {
"interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
"class_table_bytes = %8zd (%2.0f%% of art file bytes)\n"
"bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "relocations_bytes = %8zd (%2.0f%% of art file bytes)\n"
"alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
header_bytes, PercentOfFileBytes(header_bytes),
object_bytes, PercentOfFileBytes(object_bytes),
@@ -2638,13 +2629,12 @@ class ImageDumper {
PercentOfFileBytes(interned_strings_bytes),
class_table_bytes, PercentOfFileBytes(class_table_bytes),
bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
- relocations_bytes, PercentOfFileBytes(relocations_bytes),
alignment_bytes, PercentOfFileBytes(alignment_bytes))
<< std::flush;
CHECK_EQ(file_bytes,
header_bytes + object_bytes + art_field_bytes + art_method_bytes +
dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
- bitmap_bytes + relocations_bytes + alignment_bytes);
+ bitmap_bytes + alignment_bytes);
}
os << "object_bytes breakdown:\n";
@@ -2758,14 +2748,13 @@ static int DumpImages(Runtime* runtime, OatDumperOptions* options, std::ostream*
// We need to map the oat file in the low 4gb or else the fixup won't be able to fit oat file
// pointers into 32-bit pointer-sized ArtMethods.
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
options->app_oat_,
options->app_oat_,
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ true,
- /* abs_dex_location= */ nullptr,
- /* reservation= */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ true,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2882,14 +2871,13 @@ static int DumpOat(Runtime* runtime,
<< "oatdump might fail if the oat file does not contain the dex code.";
}
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_filename,
- /* reservation= */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2908,14 +2896,13 @@ static int SymbolizeOat(const char* oat_filename,
std::string& output_name,
bool no_bits) {
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_filename,
- /* reservation= */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2956,14 +2943,13 @@ class IMTDumper {
if (oat_filename != nullptr) {
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base= */ nullptr,
- /* executable= */ false,
+ /*executable=*/ false,
/*low_4gb=*/false,
dex_filename,
- /* reservation= */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index d8902d60d1..7621d48f6e 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -41,6 +41,7 @@ cc_defaults {
"ti_field.cc",
"ti_heap.cc",
"ti_jni.cc",
+ "ti_logging.cc",
"ti_method.cc",
"ti_monitor.cc",
"ti_object.cc",
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 48f326a54d..a2fabbfe83 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -58,6 +58,7 @@
#include "ti_field.h"
#include "ti_heap.h"
#include "ti_jni.h"
+#include "ti_logging.h"
#include "ti_method.h"
#include "ti_monitor.h"
#include "ti_object.h"
@@ -787,7 +788,7 @@ class JvmtiFunctions {
classes,
&error_msg);
if (res != OK) {
- LOG(WARNING) << "FAILURE TO RETRANFORM " << error_msg;
+ JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANFORM " << error_msg;
}
return res;
}
@@ -806,7 +807,7 @@ class JvmtiFunctions {
class_definitions,
&error_msg);
if (res != OK) {
- LOG(WARNING) << "FAILURE TO REDEFINE " << error_msg;
+ JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
}
return res;
}
@@ -1195,7 +1196,7 @@ class JvmtiFunctions {
#undef ADD_CAPABILITY
gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
- /*added*/true);
+ /*added=*/true);
return ret;
}
@@ -1219,7 +1220,7 @@ class JvmtiFunctions {
#undef DEL_CAPABILITY
gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
- /*added*/false);
+ /*added=*/false);
return OK;
}
@@ -1489,7 +1490,8 @@ ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, j
local_data(nullptr),
ti_version(version),
capabilities(),
- event_info_mutex_("jvmtiEnv_EventInfoMutex") {
+ event_info_mutex_("jvmtiEnv_EventInfoMutex"),
+ last_error_mutex_("jvmtiEnv_LastErrorMutex", art::LockLevel::kGenericBottomLock) {
object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
functions = &gJvmtiInterface;
}
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 1218e3b9a7..7433e54eda 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -102,6 +102,10 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// RW lock to protect access to all of the event data.
art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::string last_error_ GUARDED_BY(last_error_mutex_);
+ // Lock guarding access to the last error message.
+ art::Mutex last_error_mutex_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+
ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint ti_version);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index d20c756522..8bac38a355 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -289,7 +289,7 @@ class ScopedDeoptimizationContext : public art::ValueObject {
uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
art::gc::kCollectorTypeCriticalSection);
art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
- /*long_suspend*/ false);
+ /*long_suspend=*/ false);
}
~ScopedDeoptimizationContext()
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index ca66556bb0..8e06fe3a24 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -25,6 +25,7 @@
#include "events.h"
#include "jni/jni_internal.h"
#include "nativehelper/scoped_local_ref.h"
+#include "runtime-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "ti_breakpoint.h"
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 300a0094d4..48df53a143 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -517,7 +517,7 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
val);
}
}
@@ -545,7 +545,7 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
val);
}
}
@@ -572,7 +572,7 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_TRUE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_TRUE),
val);
// Match RI behavior of just throwing away original exception if a new one is thrown.
if (LIKELY(!self->IsExceptionPending())) {
@@ -777,7 +777,7 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
context.get(),
/*out*/ out_method,
/*out*/ dex_pc);
- clf.WalkStack(/* include_transitions */ false);
+ clf.WalkStack(/* include_transitions= */ false);
}
// Call-back when an exception is thrown.
@@ -793,8 +793,8 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
FindCatchMethodsFromThrow(self, exception_object, &catch_method, &catch_pc);
uint32_t dex_pc = 0;
art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
- /* check_suspended */ true,
- /* abort_on_error */ art::kIsDebugBuild);
+ /* check_suspended= */ true,
+ /* abort_on_error= */ art::kIsDebugBuild);
ScopedLocalRef<jobject> exception(jnienv,
AddLocalRef<jobject>(jnienv, exception_object.Get()));
RunEventCallback<ArtJvmtiEvent::kException>(
@@ -819,8 +819,8 @@ class JvmtiMethodTraceListener final : public art::instrumentation::Instrumentat
art::JNIEnvExt* jnienv = self->GetJniEnv();
uint32_t dex_pc;
art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
- /* check_suspended */ true,
- /* abort_on_error */ art::kIsDebugBuild);
+ /* check_suspended= */ true,
+ /* abort_on_error= */ art::kIsDebugBuild);
ScopedLocalRef<jobject> exception(jnienv,
AddLocalRef<jobject>(jnienv, exception_object.Get()));
RunEventCallback<ArtJvmtiEvent::kExceptionCatch>(
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index aedec270b5..6745d91d53 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -67,7 +67,9 @@ static void DoDexUnquicken(const art::DexFile& new_dex_file,
const art::DexFile& original_dex_file) {
const art::VdexFile* vdex = GetVdex(original_dex_file);
if (vdex != nullptr) {
- vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
+ vdex->UnquickenDexFile(new_dex_file,
+ original_dex_file,
+ /* decompile_return_instruction= */ true);
}
new_dex_file.UnhideApis();
}
@@ -79,7 +81,7 @@ static void DCheckVerifyDexFile(const art::DexFile& dex) {
dex.Begin(),
dex.Size(),
"FixedUpDexFile_Verification.dex",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error)) {
LOG(FATAL) << "Failed to verify de-quickened dex file: " << error;
}
@@ -113,9 +115,9 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi
options.class_filter_.insert(descriptor);
}
art::DexLayout dex_layout(options,
- /*info*/ nullptr,
- /*out_file*/ nullptr,
- /*header*/ nullptr);
+ /*info=*/ nullptr,
+ /*out_file=*/ nullptr,
+ /*header=*/ nullptr);
std::unique_ptr<art::DexContainer> dex_container;
bool result = dex_layout.ProcessDexFile(
original.GetLocation().c_str(),
@@ -136,11 +138,11 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi
new_dex_file = dex_file_loader.Open(
data.data(),
data.size(),
- /*location*/"Unquickening_dexfile.dex",
- /*location_checksum*/0,
- /*oat_dex_file*/nullptr,
- /*verify*/false,
- /*verify_checksum*/false,
+ /*location=*/"Unquickening_dexfile.dex",
+ /*location_checksum=*/0,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
&error);
if (new_dex_file == nullptr) {
diff --git a/openjdkjvmti/object_tagging.cc b/openjdkjvmti/object_tagging.cc
index 1562fb6eb6..0a51bf2f6b 100644
--- a/openjdkjvmti/object_tagging.cc
+++ b/openjdkjvmti/object_tagging.cc
@@ -43,6 +43,34 @@ namespace openjdkjvmti {
// Instantiate for jlong = JVMTI tags.
template class JvmtiWeakTable<jlong>;
+void ObjectTagTable::Allow() {
+ JvmtiWeakTable<jlong>::Allow();
+ SendDelayedFreeEvents();
+}
+
+void ObjectTagTable::Broadcast(bool broadcast_for_checkpoint) {
+ JvmtiWeakTable<jlong>::Broadcast(broadcast_for_checkpoint);
+ if (!broadcast_for_checkpoint) {
+ SendDelayedFreeEvents();
+ }
+}
+
+void ObjectTagTable::SendDelayedFreeEvents() {
+ std::vector<jlong> to_send;
+ {
+ art::MutexLock mu(art::Thread::Current(), lock_);
+ to_send.swap(null_tags_);
+ }
+ for (jlong t : to_send) {
+ SendSingleFreeEvent(t);
+ }
+}
+
+void ObjectTagTable::SendSingleFreeEvent(jlong tag) {
+ event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(
+ jvmti_env_, art::Thread::Current(), tag);
+}
+
bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
@@ -50,6 +78,7 @@ bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
}
return JvmtiWeakTable<jlong>::Set(obj, new_tag);
}
+
bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
@@ -61,9 +90,10 @@ bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
bool ObjectTagTable::DoesHandleNullOnSweep() {
return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
}
+
void ObjectTagTable::HandleNullSweep(jlong tag) {
- event_handler_->DispatchEventOnEnv<ArtJvmtiEvent::kObjectFree>(
- jvmti_env_, art::Thread::Current(), tag);
+ art::MutexLock mu(art::Thread::Current(), lock_);
+ null_tags_.push_back(tag);
}
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 4181302f3a..ca05a05541 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -48,7 +48,18 @@ class EventHandler;
class ObjectTagTable final : public JvmtiWeakTable<jlong> {
public:
ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
- : event_handler_(event_handler), jvmti_env_(env) {}
+ : lock_("Object tag table lock", art::LockLevel::kGenericBottomLock),
+ event_handler_(event_handler),
+ jvmti_env_(env) {}
+
+ // Denotes that weak-refs are visible on all threads. Used by semi-space.
+ void Allow() override
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ // Used by cms and the checkpoint system.
+ void Broadcast(bool broadcast_for_checkpoint) override
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
bool Set(art::mirror::Object* obj, jlong tag) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -77,6 +88,16 @@ class ObjectTagTable final : public JvmtiWeakTable<jlong> {
void HandleNullSweep(jlong tag) override;
private:
+ void SendDelayedFreeEvents()
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ void SendSingleFreeEvent(jlong tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_, !lock_);
+
+ art::Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+ std::vector<jlong> null_tags_ GUARDED_BY(lock_);
EventHandler* event_handler_;
ArtJvmTiEnv* jvmti_env_;
};
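
Net effect of the two object-tagging hunks: HandleNullSweep no longer dispatches ObjectFree callbacks from inside the sweep; it only queues the tag under a new bottom-level lock, and the queue is drained once weak refs are visible again (Allow, or a non-checkpoint Broadcast). A standalone sketch of that queue-then-flush shape, with std::mutex standing in for art::Mutex; note the batch is swapped out so no lock is held while callbacks run:

#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

using jlong = int64_t;  // stand-in for the JNI typedef

class DelayedFreeEvents {
 public:
  // Called during sweeping; must not run user callbacks.
  void Queue(jlong tag) {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_.push_back(tag);
  }
  // Called once weak refs are accessible again.
  void Flush(const std::function<void(jlong)>& send) {
    std::vector<jlong> batch;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      batch.swap(pending_);  // take ownership, then release the lock
    }
    for (jlong tag : batch) {
      send(tag);  // callbacks run with no lock held
    }
  }
 private:
  std::mutex mutex_;
  std::vector<jlong> pending_;
};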
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index f6113dfecc..e4692707f4 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -73,6 +73,7 @@
#include "thread_list.h"
#include "ti_class_definition.h"
#include "ti_class_loader-inl.h"
+#include "ti_logging.h"
#include "ti_phase.h"
#include "ti_redefine.h"
#include "transform.h"
@@ -113,8 +114,8 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
checksum,
std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
+ /*verify=*/true,
+ /*verify_checksum=*/true,
&error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
@@ -932,8 +933,8 @@ jvmtiError ClassUtil::GetClassLoaderClassDescriptors(jvmtiEnv* env,
return ERR(ILLEGAL_ARGUMENT);
} else if (!jnienv->IsInstanceOf(loader,
art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
- LOG(ERROR) << "GetClassLoaderClassDescriptors is only implemented for BootClassPath and "
- << "dalvik.system.BaseDexClassLoader class loaders";
+ JVMTI_LOG(ERROR, env) << "GetClassLoaderClassDescriptors is only implemented for "
+ << "BootClassPath and dalvik.system.BaseDexClassLoader class loaders";
// TODO Possibly returning OK with no classes would be better, since these loaders cannot
// have any real classes associated with them.
return ERR(NOT_IMPLEMENTED);
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 895e73450e..9e8288f997 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -246,17 +246,17 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
mmap_name += name_;
std::string error;
dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
dequick_size,
PROT_NONE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
&error);
mmap_name += "-TEMP";
temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
dequick_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
&error);
if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
// Need to save the initial dexfile so we don't need to search for it in the fault-handler.
diff --git a/openjdkjvmti/ti_ddms.cc b/openjdkjvmti/ti_ddms.cc
index bf063faf7b..9de5cbc3ea 100644
--- a/openjdkjvmti/ti_ddms.cc
+++ b/openjdkjvmti/ti_ddms.cc
@@ -39,6 +39,7 @@
#include "debugger.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
+#include "ti_logging.h"
namespace openjdkjvmti {
@@ -69,7 +70,7 @@ jvmtiError DDMSUtil::HandleChunk(jvmtiEnv* env,
data_arr,
/*out*/reinterpret_cast<uint32_t*>(type_out),
/*out*/&out_data)) {
- LOG(WARNING) << "Something went wrong with handling the ddm chunk.";
+ JVMTI_LOG(WARNING, env) << "Something went wrong with handling the ddm chunk.";
return ERR(INTERNAL);
} else {
jvmtiError error = OK;
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index c61d6e585c..5d398844b2 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -39,7 +39,9 @@
#include "ti_class.h"
#include "ti_ddms.h"
#include "ti_heap.h"
+#include "ti_logging.h"
#include "ti_monitor.h"
+
#include "thread-inl.h"
namespace openjdkjvmti {
@@ -272,6 +274,44 @@ jvmtiError ExtensionUtil::GetExtensionFunctions(jvmtiEnv* env,
if (error != ERR(NONE)) {
return error;
}
+
+ // GetLastError extension
+ error = add_extension(
+ reinterpret_cast<jvmtiExtensionFunction>(LogUtil::GetLastError),
+ "com.android.art.misc.get_last_error_message",
+ "In some cases the jvmti plugin will log data about errors to the android logcat. These can"
+ " be useful to tools so we make (some) of the messages available here as well. This will"
+ " fill the given 'msg' buffer with the last non-fatal message associated with this"
+ " jvmti-env. Note this is best-effort only, not all log messages will be accessible through"
+ " this API. This will return the last error-message from all threads. Care should be taken"
+ " interpreting the return value when used with a multi-threaded program. The error message"
+ " will only be cleared by a call to 'com.android.art.misc.clear_last_error_message' and will"
+ " not be cleared by intervening successful calls. If no (tracked) error message has been"
+ " sent since the last call to clear_last_error_message this API will return"
+ " JVMTI_ERROR_ABSENT_INFORMATION. Not all failures will cause an error message to be"
+ " recorded.",
+ {
+ { "msg", JVMTI_KIND_ALLOC_BUF, JVMTI_TYPE_CCHAR, false },
+ },
+ {
+ ERR(NULL_POINTER),
+ ERR(ABSENT_INFORMATION),
+ });
+ if (error != ERR(NONE)) {
+ return error;
+ }
+
+ // ClearLastError extension
+ error = add_extension(
+ reinterpret_cast<jvmtiExtensionFunction>(LogUtil::ClearLastError),
+ "com.android.art.misc.clear_last_error_message",
+ "Clears the error message returned by 'com.android.art.misc.get_last_error_message'.",
+ { },
+ { });
+ if (error != ERR(NONE)) {
+ return error;
+ }
+
// Copy into output buffer.
*extension_count_ptr = ext_vector.size();
@@ -424,7 +464,7 @@ jvmtiError ExtensionUtil::SetExtensionEventCallback(jvmtiEnv* env,
}
}
return event_handler->SetEvent(art_env,
- /*event_thread*/nullptr,
+ /*thread=*/nullptr,
static_cast<ArtJvmtiEvent>(extension_event_index),
mode);
}
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 6c79a602c3..01ef4c6286 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -981,7 +981,9 @@ class FollowReferencesHelper final {
// TODO: We don't have this info.
if (thread != nullptr) {
ref_info->jni_local.depth = 0;
- art::ArtMethod* method = thread->GetCurrentMethod(nullptr, false /* abort_on_error */);
+ art::ArtMethod* method = thread->GetCurrentMethod(nullptr,
+ /* check_suspended= */ true,
+ /* abort_on_error= */ false);
if (method != nullptr) {
ref_info->jni_local.method = art::jni::EncodeArtMethod(method);
}
@@ -1012,7 +1014,7 @@ class FollowReferencesHelper final {
ref_info->stack_local.slot = static_cast<jint>(java_info.GetVReg());
const art::StackVisitor* visitor = java_info.GetVisitor();
ref_info->stack_local.location =
- static_cast<jlocation>(visitor->GetDexPc(false /* abort_on_failure */));
+ static_cast<jlocation>(visitor->GetDexPc(/* abort_on_failure= */ false));
ref_info->stack_local.depth = static_cast<jint>(visitor->GetFrameDepth());
art::ArtMethod* method = visitor->GetMethod();
if (method != nullptr) {
@@ -1173,7 +1175,7 @@ class FollowReferencesHelper final {
stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_SUPERCLASS,
nullptr,
klass,
- klass->GetSuperClass());
+ klass->GetSuperClass().Ptr());
if (stop_reports_) {
return;
}
@@ -1447,7 +1449,7 @@ jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
}
jvmtiError HeapUtil::ForceGarbageCollection(jvmtiEnv* env ATTRIBUTE_UNUSED) {
- art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
return ERR(NONE);
}
diff --git a/openjdkjvmti/ti_logging.cc b/openjdkjvmti/ti_logging.cc
new file mode 100644
index 0000000000..1d24d3b6b6
--- /dev/null
+++ b/openjdkjvmti/ti_logging.cc
@@ -0,0 +1,71 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_logging.h"
+
+#include "art_jvmti.h"
+
+#include "base/mutex.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+jvmtiError LogUtil::GetLastError(jvmtiEnv* env, char** data) {
+ if (env == nullptr || data == nullptr) {
+ return ERR(INVALID_ENVIRONMENT);
+ }
+ ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+ art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+ if (tienv->last_error_.empty()) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ char* out;
+ jvmtiError err = tienv->Allocate(tienv->last_error_.size() + 1,
+ reinterpret_cast<unsigned char**>(&out));
+ if (err != OK) {
+ return err;
+ }
+ strcpy(out, tienv->last_error_.c_str());
+ *data = out;
+ return OK;
+}
+
+jvmtiError LogUtil::ClearLastError(jvmtiEnv* env) {
+ if (env == nullptr) {
+ return ERR(INVALID_ENVIRONMENT);
+ }
+ ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+ art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+ tienv->last_error_.clear();
+ return OK;
+}
+
+} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_logging.h b/openjdkjvmti/ti_logging.h
new file mode 100644
index 0000000000..31b51bb126
--- /dev/null
+++ b/openjdkjvmti/ti_logging.h
@@ -0,0 +1,102 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_TI_LOGGING_H_
+#define ART_OPENJDKJVMTI_TI_LOGGING_H_
+
+#include "art_jvmti.h"
+
+#include <ostream>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+// NB Uses implementation details of android-base/logging.h.
+#define JVMTI_LOG(severity, env) \
+ ::openjdkjvmti::JvmtiLogMessage((env), \
+ __FILE__, \
+ __LINE__, \
+ ::android::base::DEFAULT, \
+ SEVERITY_LAMBDA(severity), \
+ _LOG_TAG_INTERNAL, \
+ -1)
+
+class JvmtiLogMessage {
+ public:
+ JvmtiLogMessage(jvmtiEnv* env,
+ const char* file,
+ unsigned int line,
+ android::base::LogId id,
+ android::base::LogSeverity severity,
+ const char* tag,
+ int error)
+ : env_(ArtJvmTiEnv::AsArtJvmTiEnv(env)),
+ real_log_(file, line, id, severity, tag, error),
+ real_log_stream_(real_log_.stream()) {
+ DCHECK(env_ != nullptr);
+ }
+
+ ~JvmtiLogMessage() {
+ art::MutexLock mu(art::Thread::Current(), env_->last_error_mutex_);
+ env_->last_error_ = save_stream_.str();
+ }
+
+ template<typename T>
+ JvmtiLogMessage& operator<<(T t) {
+ (real_log_stream_ << t);
+ (save_stream_ << t);
+ return *this;
+ }
+
+ private:
+ ArtJvmTiEnv* env_;
+ android::base::LogMessage real_log_;
+ // Lifetime of real_log_stream_ is lifetime of real_log_.
+ std::ostream& real_log_stream_;
+ std::ostringstream save_stream_;
+
+ DISALLOW_COPY_AND_ASSIGN(JvmtiLogMessage);
+};
+
+class LogUtil {
+ public:
+ static jvmtiError ClearLastError(jvmtiEnv* env);
+ static jvmtiError GetLastError(jvmtiEnv* env, char** data);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_OPENJDKJVMTI_TI_LOGGING_H_
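
JVMTI_LOG tees every << into the real logcat message and into a per-env buffer that the destructor publishes under last_error_mutex_, which is what the two new extension functions read and clear. A sketch of agent-side use; only the extension id string comes from this change, the rest is the standard jvmti.h extension-function protocol, with error handling and the full per-record deallocation abbreviated:

#include <cstring>
#include <jvmti.h>

// Returns an env->Allocate'd copy of the last ART jvmti error message, or
// nullptr if the extension is missing or no message has been recorded.
// The caller must env->Deallocate the returned buffer.
char* GetArtLastError(jvmtiEnv* env) {
  jint count = 0;
  jvmtiExtensionFunctionInfo* infos = nullptr;
  if (env->GetExtensionFunctions(&count, &infos) != JVMTI_ERROR_NONE) {
    return nullptr;
  }
  char* msg = nullptr;
  for (jint i = 0; i < count; i++) {
    if (strcmp(infos[i].id, "com.android.art.misc.get_last_error_message") == 0) {
      // Extension functions take the env first, then the declared params.
      infos[i].func(env, &msg);
    }
  }
  // NB a complete agent must also Deallocate the id/description/params/errors
  // of every info record, per the GetExtensionFunctions contract.
  env->Deallocate(reinterpret_cast<unsigned char*>(infos));
  return msg;
}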
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 295894157c..7d69c89d1e 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -547,7 +547,7 @@ class CommonLocalVariableClosure : public art::Closure {
return;
}
bool needs_instrument = !visitor.IsShadowFrame();
- uint32_t pc = visitor.GetDexPc(/*abort_on_failure*/ false);
+ uint32_t pc = visitor.GetDexPc(/*abort_on_failure=*/ false);
if (pc == art::dex::kDexNoIndex) {
// Cannot figure out current PC.
result_ = ERR(OPAQUE_FRAME);
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index f71328a6b6..aac7233303 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -191,7 +191,7 @@ class JvmtiMonitor {
// Reacquire the mutex/monitor; also go to sleep if we were suspended.
// TODO Add an extension to wait without suspension as well.
- MonitorEnter(self, /*suspend*/ true);
+ MonitorEnter(self, /*suspend=*/ true);
CHECK(owner_.load(std::memory_order_relaxed) == self);
DCHECK_EQ(1u, count_);
// Reset the count.
@@ -261,7 +261,7 @@ jvmtiError MonitorUtil::RawMonitorEnterNoSuspend(jvmtiEnv* env ATTRIBUTE_UNUSED,
JvmtiMonitor* monitor = DecodeMonitor(id);
art::Thread* self = art::Thread::Current();
- monitor->MonitorEnter(self, /*suspend*/false);
+ monitor->MonitorEnter(self, /*suspend=*/false);
return ERR(NONE);
}
@@ -274,7 +274,7 @@ jvmtiError MonitorUtil::RawMonitorEnter(jvmtiEnv* env ATTRIBUTE_UNUSED, jrawMoni
JvmtiMonitor* monitor = DecodeMonitor(id);
art::Thread* self = art::Thread::Current();
- monitor->MonitorEnter(self, /*suspend*/true);
+ monitor->MonitorEnter(self, /*suspend=*/true);
return ERR(NONE);
}
diff --git a/openjdkjvmti/ti_object.cc b/openjdkjvmti/ti_object.cc
index 89ce35256d..344ae88546 100644
--- a/openjdkjvmti/ti_object.cc
+++ b/openjdkjvmti/ti_object.cc
@@ -92,7 +92,7 @@ jvmtiError ObjectUtil::GetObjectMonitorUsage(
{
art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
art::ScopedThreadSuspension sts(self, art::kNative);
- art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend*/false);
+ art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend=*/false);
art::ObjPtr<art::mirror::Object> target(self->DecodeJObject(obj));
// This gets the list of threads trying to lock or wait on the monitor.
art::MonitorInfo info(target.Ptr());
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index db2b143022..7525c021f5 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -152,7 +152,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
ObsoleteMap* obsolete_maps)
: StackVisitor(thread,
- /*context*/nullptr,
+ /*context=*/nullptr,
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
allocator_(allocator),
obsoleted_methods_(obsoleted_methods),
@@ -305,10 +305,10 @@ art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
std::string* error_msg) {
art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
error_msg);
if (LIKELY(map.IsValid())) {
memcpy(map.Begin(), data.data(), data.size());
@@ -445,8 +445,8 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
checksum,
std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
+ /*verify=*/true,
+ /*verify_checksum=*/true,
error_msg_));
if (dex_file.get() == nullptr) {
os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
@@ -1117,10 +1117,10 @@ bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter&
dex_file_.get(),
hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
- dex_file_->GetClassDef(0), /*class_def*/
- nullptr, /*compiler_callbacks*/
- true, /*allow_soft_failures*/
- /*log_level*/
+ /*class_def=*/ dex_file_->GetClassDef(0),
+ /*callbacks=*/ nullptr,
+ /*allow_soft_failures=*/ true,
+ /*log_level=*/
art::verifier::HardFailLogMode::kLogWarning,
art::Runtime::Current()->GetTargetSdkVersion(),
&error);
@@ -1288,7 +1288,7 @@ bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
}
void Redefiner::ClassRedefinition::ReleaseDexFile() {
- dex_file_.release();
+ dex_file_.release(); // NOLINT b/117926937
}
void Redefiner::ReleaseAllDexFiles() {
@@ -1367,7 +1367,7 @@ jvmtiError Redefiner::Run() {
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
- art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend=*/true);
for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
ClassRedefinition& redef = data.GetRedefinition();
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 1189b1dec5..2187825746 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -52,6 +52,7 @@
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "thread_list.h"
+#include "ti_logging.h"
#include "ti_phase.h"
#include "well_known_classes.h"
@@ -213,7 +214,7 @@ void SearchUtil::Unregister() {
runtime->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gSearchCallback);
}
-jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_UNUSED,
+jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env,
const char* segment) {
art::Runtime* current = art::Runtime::Current();
if (current == nullptr) {
@@ -229,9 +230,14 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
const art::ArtDexFileLoader dex_file_loader;
- if (!dex_file_loader.Open(
- segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
- LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
+ if (!dex_file_loader.Open(segment,
+ segment,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
+ &error_msg,
+ &dex_files)) {
+ JVMTI_LOG(WARNING, env) << "Could not open " << segment << " for boot classpath extension: "
+ << error_msg;
return ERR(ILLEGAL_ARGUMENT);
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 5a98755c67..5de4a81f5e 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -57,6 +57,7 @@
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
+#include "ti_logging.h"
#include "ti_thread.h"
#include "thread-current-inl.h"
#include "thread_list.h"
@@ -150,7 +151,7 @@ struct GetStackTraceVectorClosure : public art::Closure {
frames.push_back(info);
};
auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
start_result = visitor.start;
stop_result = visitor.stop;
@@ -218,7 +219,7 @@ struct GetStackTraceDirectClosure : public art::Closure {
++index;
};
auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
}
jvmtiFrameInfo* frame_buffer;
@@ -330,7 +331,7 @@ struct GetAllStackTracesVectorClosure : public art::Closure {
thread_frames->push_back(info);
};
auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
}
art::Barrier barrier;
@@ -910,7 +911,7 @@ struct MonitorInfoClosure : public art::Closure {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
// Find the monitors on the stack.
MonitorVisitor visitor(target);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
// Find any other monitors, including ones acquired in native code.
art::RootInfo root_info(art::kRootVMInternal);
target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
@@ -1097,7 +1098,7 @@ jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth)
} while (true);
}
-jvmtiError StackUtil::PopFrame(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
+jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
art::Thread* self = art::Thread::Current();
art::Thread* target;
do {
@@ -1131,9 +1132,10 @@ jvmtiError StackUtil::PopFrame(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
kWalkKind)) {
- LOG(WARNING) << "Disallowing frame pop due to in-progress class-load/prepare. Frame at depth "
- << tls_data->disable_pop_frame_depth << " was marked as un-poppable by the "
- << "jvmti plugin. See b/117615146 for more information.";
+ JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
+ << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
+ << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
+ << "more information.";
return ERR(OPAQUE_FRAME);
}
// We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index a0e5b5c926..2131120a11 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -812,7 +812,7 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
runtime->EndThreadBirth();
return ERR(INTERNAL);
}
- data.release();
+ data.release(); // NOLINT pthreads API.
return ERR(NONE);
}
@@ -857,7 +857,7 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
bool timeout = true;
art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
target_jthread,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
art::SuspendReason::kForUserCode,
&timeout);
if (ret_target == nullptr && !timeout) {
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index d87ca56b85..653f944d7c 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -76,7 +76,7 @@ class TransformationFaultHandler final : public art::FaultHandler {
art::LockLevel::kSignalHandlingLock),
class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
uninitialized_class_definitions_lock_) {
- manager->AddHandler(this, /* generated_code */ false);
+ manager->AddHandler(this, /* generated_code= */ false);
}
~TransformationFaultHandler() {
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 31dfbc03e7..e9d3290faa 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -1192,7 +1192,7 @@ TEST_F(ProfileAssistantTest, MergeProfilesWithFilter) {
// Run profman and pass the dex file with --apk-fd.
android::base::unique_fd apk_fd(
- open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY)); // NOLINT
ASSERT_GE(apk_fd.get(), 0);
std::string profman_cmd = GetProfmanCmd();
@@ -1270,7 +1270,7 @@ TEST_F(ProfileAssistantTest, CopyAndUpdateProfileKey) {
// Run profman and pass the dex file with --apk-fd.
android::base::unique_fd apk_fd(
- open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY)); // NOLINT
ASSERT_GE(apk_fd.get(), 0);
std::string profman_cmd = GetProfmanCmd();
diff --git a/profman/profman.cc b/profman/profman.cc
index d989c8c849..734cdf498e 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -477,7 +477,7 @@ class ProfMan final {
std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
if (!filename.empty()) {
- fd = open(filename.c_str(), O_RDWR);
+ fd = open(filename.c_str(), O_RDWR | O_CLOEXEC);
if (fd < 0) {
LOG(ERROR) << "Cannot open " << filename << strerror(errno);
return nullptr;
@@ -641,7 +641,7 @@ class ProfMan final {
bool GetClassNamesAndMethods(const std::string& profile_file,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
std::set<std::string>* out_lines) {
- int fd = open(profile_file.c_str(), O_RDONLY);
+ int fd = open(profile_file.c_str(), O_RDONLY | O_CLOEXEC);
if (!FdIsValid(fd)) {
LOG(ERROR) << "Cannot open " << profile_file << strerror(errno);
return false;
@@ -1022,7 +1022,7 @@ class ProfMan final {
int fd = reference_profile_file_fd_;
if (!FdIsValid(fd)) {
CHECK(!reference_profile_file_.empty());
- fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC, 0644);
if (fd < 0) {
LOG(ERROR) << "Cannot open " << reference_profile_file_ << strerror(errno);
return kInvalidFd;
@@ -1155,7 +1155,9 @@ class ProfMan final {
}
}
// ShouldGenerateTestProfile confirms !test_profile_.empty().
- int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ int profile_test_fd = open(test_profile_.c_str(),
+ O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
+ 0644);
if (profile_test_fd < 0) {
LOG(ERROR) << "Cannot open " << test_profile_ << strerror(errno);
return -1;
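
All of profman's open() calls now pass O_CLOEXEC, so profile descriptors are not inherited by any child the process later exec()s. The effect in isolation (POSIX semantics, hypothetical helper name):

#include <fcntl.h>

// Open a profile for reading without leaking the fd across exec(): the kernel
// sets the close-on-exec flag atomically at open time, avoiding the race of a
// separate fcntl(fd, F_SETFD, FD_CLOEXEC) call after the fact.
int OpenProfileRead(const char* path) {
  return open(path, O_RDONLY | O_CLOEXEC);  // < 0 on error, errno set
}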
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 33ad987ad6..5d99187669 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -96,7 +96,10 @@ libart_cc_defaults {
"interpreter/interpreter_cache.cc",
"interpreter/interpreter_common.cc",
"interpreter/interpreter_intrinsics.cc",
- "interpreter/interpreter_switch_impl.cc",
+ "interpreter/interpreter_switch_impl0.cc",
+ "interpreter/interpreter_switch_impl1.cc",
+ "interpreter/interpreter_switch_impl2.cc",
+ "interpreter/interpreter_switch_impl3.cc",
"interpreter/lock_count_data.cc",
"interpreter/shadow_frame.cc",
"interpreter/unstarted_runtime.cc",
@@ -379,6 +382,7 @@ libart_cc_defaults {
],
header_libs: [
"art_cmdlineparser_headers",
+ "cpp-define-generator-definitions",
"libnativehelper_header_only",
"jni_platform_headers",
],
@@ -639,7 +643,6 @@ art_cc_test {
],
header_libs: [
"art_cmdlineparser_headers", // For parsed_options_test.
- "cpp-define-generator-definitions",
],
include_dirs: [
"external/zlib",
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index dcc3affb6b..12ad84b2dd 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -17,17 +17,11 @@
#include <stdint.h>
#include "art_method-inl.h"
-#include "asm_defines.h"
#include "base/callee_save_type.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "common_runtime_test.h"
#include "quick/quick_method_frame_info.h"
-// Static asserts to check the values of generated #defines for assembly.
-#define ASM_DEFINE(NAME, EXPR) static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
-#include "asm_defines.def"
-#undef ASM_DEFINE
-
namespace art {
class ArchTest : public CommonRuntimeTest {
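The block removed from arch_test.cc was an instance of the X-macro pattern: asm_defines.def lists ASM_DEFINE(NAME, EXPR) entries once, and each includer supplies its own expansion; with the new cpp-define-generator-definitions header library the checks live elsewhere. A self-contained sketch of the same expansion, with an inlined entry standing in for the .def file (the value assumes an LP64 target):

    // Stand-in for a generated assembly #define (illustrative).
    #define POINTER_SIZE 8

    // Expand each entry into a compile-time check, mirroring the
    // static_assert expansion that used to live in arch_test.cc.
    #define ASM_DEFINE(NAME, EXPR) \
      static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
    ASM_DEFINE(POINTER_SIZE, sizeof(void*))
    #undef ASM_DEFINE

    int main() { return 0; }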
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2c5465e120..c1a03abd96 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -174,7 +174,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg12 = nullptr; // Cannot use register 12 (IP) to pass arguments.
qpoints->pReadBarrierMarkReg13 = nullptr; // Cannot use register 13 (SP) to pass arguments.
qpoints->pReadBarrierMarkReg14 = nullptr; // Cannot use register 14 (LR) to pass arguments.
diff --git a/runtime/arch/arm64/callee_save_frame_arm64.h b/runtime/arch/arm64/callee_save_frame_arm64.h
index bc36bfabec..a5aea2a573 100644
--- a/runtime/arch/arm64/callee_save_frame_arm64.h
+++ b/runtime/arch/arm64/callee_save_frame_arm64.h
@@ -54,7 +54,7 @@ static constexpr uint32_t kArm64CalleeSaveEverythingSpills =
(1 << art::arm64::X9) | (1 << art::arm64::X10) | (1 << art::arm64::X11) |
(1 << art::arm64::X12) | (1 << art::arm64::X13) | (1 << art::arm64::X14) |
(1 << art::arm64::X15) | (1 << art::arm64::X16) | (1 << art::arm64::X17) |
- (1 << art::arm64::X18) | (1 << art::arm64::X19);
+ (1 << art::arm64::X19);
static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 4c43b7ed3d..22f0c28f45 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -103,7 +103,6 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
qpoints->pReadBarrierMarkReg14 = is_active ? art_quick_read_barrier_mark_reg14 : nullptr;
qpoints->pReadBarrierMarkReg15 = is_active ? art_quick_read_barrier_mark_reg15 : nullptr;
qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
- qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
@@ -190,7 +189,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMarkReg16 = nullptr; // IP0 is used as a temp by the asm stub.
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
}
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 96ceecfe9b..9f3377ed1e 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -289,36 +289,33 @@
#endif
// Save FP registers.
- // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned.
- str d0, [sp, #8]
- stp d1, d2, [sp, #16]
- stp d3, d4, [sp, #32]
- stp d5, d6, [sp, #48]
- stp d7, d8, [sp, #64]
- stp d9, d10, [sp, #80]
- stp d11, d12, [sp, #96]
- stp d13, d14, [sp, #112]
- stp d15, d16, [sp, #128]
- stp d17, d18, [sp, #144]
- stp d19, d20, [sp, #160]
- stp d21, d22, [sp, #176]
- stp d23, d24, [sp, #192]
- stp d25, d26, [sp, #208]
- stp d27, d28, [sp, #224]
- stp d29, d30, [sp, #240]
- str d31, [sp, #256]
+ stp d0, d1, [sp, #16]
+ stp d2, d3, [sp, #32]
+ stp d4, d5, [sp, #48]
+ stp d6, d7, [sp, #64]
+ stp d8, d9, [sp, #80]
+ stp d10, d11, [sp, #96]
+ stp d12, d13, [sp, #112]
+ stp d14, d15, [sp, #128]
+ stp d16, d17, [sp, #144]
+ stp d18, d19, [sp, #160]
+ stp d20, d21, [sp, #176]
+ stp d22, d23, [sp, #192]
+ stp d24, d25, [sp, #208]
+ stp d26, d27, [sp, #224]
+ stp d28, d29, [sp, #240]
+ stp d30, d31, [sp, #256]
// Save core registers.
- SAVE_REG x0, 264
- SAVE_TWO_REGS x1, x2, 272
- SAVE_TWO_REGS x3, x4, 288
- SAVE_TWO_REGS x5, x6, 304
- SAVE_TWO_REGS x7, x8, 320
- SAVE_TWO_REGS x9, x10, 336
- SAVE_TWO_REGS x11, x12, 352
- SAVE_TWO_REGS x13, x14, 368
- SAVE_TWO_REGS x15, x16, 384
- SAVE_TWO_REGS x17, x18, 400
+ SAVE_TWO_REGS x0, x1, 272
+ SAVE_TWO_REGS x2, x3, 288
+ SAVE_TWO_REGS x4, x5, 304
+ SAVE_TWO_REGS x6, x7, 320
+ SAVE_TWO_REGS x8, x9, 336
+ SAVE_TWO_REGS x10, x11, 352
+ SAVE_TWO_REGS x12, x13, 368
+ SAVE_TWO_REGS x14, x15, 384
+ SAVE_TWO_REGS x16, x17, 400 // Do not save the platform register.
SAVE_TWO_REGS x19, x20, 416
SAVE_TWO_REGS x21, x22, 432
SAVE_TWO_REGS x23, x24, 448
@@ -351,35 +348,33 @@
.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
// Restore FP registers.
- // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
- ldr d0, [sp, #8]
- ldp d1, d2, [sp, #16]
- ldp d3, d4, [sp, #32]
- ldp d5, d6, [sp, #48]
- ldp d7, d8, [sp, #64]
- ldp d9, d10, [sp, #80]
- ldp d11, d12, [sp, #96]
- ldp d13, d14, [sp, #112]
- ldp d15, d16, [sp, #128]
- ldp d17, d18, [sp, #144]
- ldp d19, d20, [sp, #160]
- ldp d21, d22, [sp, #176]
- ldp d23, d24, [sp, #192]
- ldp d25, d26, [sp, #208]
- ldp d27, d28, [sp, #224]
- ldp d29, d30, [sp, #240]
- ldr d31, [sp, #256]
+ ldp d0, d1, [sp, #16]
+ ldp d2, d3, [sp, #32]
+ ldp d4, d5, [sp, #48]
+ ldp d6, d7, [sp, #64]
+ ldp d8, d9, [sp, #80]
+ ldp d10, d11, [sp, #96]
+ ldp d12, d13, [sp, #112]
+ ldp d14, d15, [sp, #128]
+ ldp d16, d17, [sp, #144]
+ ldp d18, d19, [sp, #160]
+ ldp d20, d21, [sp, #176]
+ ldp d22, d23, [sp, #192]
+ ldp d24, d25, [sp, #208]
+ ldp d26, d27, [sp, #224]
+ ldp d28, d29, [sp, #240]
+ ldp d30, d31, [sp, #256]
// Restore core registers, except x0.
- RESTORE_TWO_REGS x1, x2, 272
- RESTORE_TWO_REGS x3, x4, 288
- RESTORE_TWO_REGS x5, x6, 304
- RESTORE_TWO_REGS x7, x8, 320
- RESTORE_TWO_REGS x9, x10, 336
- RESTORE_TWO_REGS x11, x12, 352
- RESTORE_TWO_REGS x13, x14, 368
- RESTORE_TWO_REGS x15, x16, 384
- RESTORE_TWO_REGS x17, x18, 400
+ RESTORE_REG x1, 280
+ RESTORE_TWO_REGS x2, x3, 288
+ RESTORE_TWO_REGS x4, x5, 304
+ RESTORE_TWO_REGS x6, x7, 320
+ RESTORE_TWO_REGS x8, x9, 336
+ RESTORE_TWO_REGS x10, x11, 352
+ RESTORE_TWO_REGS x12, x13, 368
+ RESTORE_TWO_REGS x14, x15, 384
+ RESTORE_TWO_REGS x16, x17, 400 // Do not restore the platform register.
RESTORE_TWO_REGS x19, x20, 416
RESTORE_TWO_REGS x21, x22, 432
RESTORE_TWO_REGS x23, x24, 448
@@ -391,7 +386,7 @@
.endm
.macro RESTORE_SAVE_EVERYTHING_FRAME
- RESTORE_REG x0, 264
+ RESTORE_REG x0, 272
RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
.endm
@@ -1116,7 +1111,8 @@ ENTRY art_quick_do_long_jump
ldp x12, x13, [x0, #96]
ldp x14, x15, [x0, #112]
// Do not load IP0 (x16) and IP1 (x17), these shall be clobbered below.
- ldp x18, x19, [x0, #144] // X18 and xSELF.
+ // Don't load the platform register (x18) either.
+ ldr x19, [x0, #152] // xSELF.
ldp x20, x21, [x0, #160] // For Baker RB, wMR (w20) is reloaded below.
ldp x22, x23, [x0, #176]
ldp x24, x25, [x0, #192]
@@ -2293,8 +2289,8 @@ ENTRY art_quick_instrumentation_exit
mov xLR, #0 // Clobber LR for later checks.
SETUP_SAVE_EVERYTHING_FRAME
- add x3, sp, #8 // Pass floating-point result pointer, in kSaveEverything frame.
- add x2, sp, #264 // Pass integer result pointer, in kSaveEverything frame.
+ add x3, sp, #16 // Pass floating-point result pointer, in kSaveEverything frame.
+ add x2, sp, #272 // Pass integer result pointer, in kSaveEverything frame.
mov x1, sp // Pass SP.
mov x0, xSELF // Pass Thread.
bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res*, fpr_res*)
@@ -2496,7 +2492,8 @@ ENTRY \name
.Lslow_rb_\name:
/*
* Allocate 44 stack slots * 8 = 352 bytes:
- * - 20 slots for core registers X0-15, X17-X19, LR
+ * - 19 slots for core registers X0-15, X17, X19, LR
+ * - 1 slot padding
* - 24 slots for floating-point registers D0-D7 and D16-D31
*/
// We must not clobber IP1 since code emitted for HLoadClass and HLoadString
@@ -2510,8 +2507,8 @@ ENTRY \name
SAVE_TWO_REGS x10, x11, 80
SAVE_TWO_REGS x12, x13, 96
SAVE_TWO_REGS x14, x15, 112
- SAVE_TWO_REGS x17, x18, 128 // Skip x16, i.e. IP0.
- SAVE_TWO_REGS x19, xLR, 144 // Save also return address.
+ SAVE_TWO_REGS x17, x19, 128 // Skip x16, i.e. IP0, and x18, the platform register.
+ SAVE_REG xLR, 144 // Save also return address.
// Save all potentially live caller-save floating-point registers.
stp d0, d1, [sp, #160]
stp d2, d3, [sp, #176]
@@ -2544,8 +2541,8 @@ ENTRY \name
POP_REGS_NE x10, x11, 80, \xreg
POP_REGS_NE x12, x13, 96, \xreg
POP_REGS_NE x14, x15, 112, \xreg
- POP_REGS_NE x17, x18, 128, \xreg
- POP_REGS_NE x19, xLR, 144, \xreg // Restore also return address.
+ POP_REGS_NE x17, x19, 128, \xreg
+ POP_REG_NE xLR, 144, \xreg // Restore also return address.
// Restore floating-point registers.
ldp d0, d1, [sp, #160]
ldp d2, d3, [sp, #176]
@@ -2588,7 +2585,7 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15
// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 ip0 is blocked
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18
+// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18 x18 is blocked
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21
@@ -2629,7 +2626,7 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
SELECT_X_OR_W_FOR_MACRO \macro_for_register, x15, w15, \xreg
\macro_for_reserved_register // IP0 is reserved
\macro_for_reserved_register // IP1 is reserved
- SELECT_X_OR_W_FOR_MACRO \macro_for_register, x18, w18, \xreg
+ \macro_for_reserved_register // x18 is reserved
SELECT_X_OR_W_FOR_MACRO \macro_for_register, x19, w19, \xreg
SELECT_X_OR_W_FOR_MACRO \macro_for_register, x20, w20, \xreg
SELECT_X_OR_W_FOR_MACRO \macro_for_register, x21, w21, \xreg
@@ -2673,13 +2670,12 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
.macro READ_BARRIER_MARK_INTROSPECTION_SLOW_PATH ldr_offset
/*
- * Allocate 44 stack slots * 8 = 352 bytes:
- * - 19 slots for core registers X0-15, X18-X19, LR
- * - 1 slot padding
+ * Allocate 42 stack slots * 8 = 336 bytes:
+ * - 18 slots for core registers X0-15, X19, LR
* - 24 slots for floating-point registers D0-D7 and D16-D31
*/
// Save all potentially live caller-save core registers.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 352
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 336
SAVE_TWO_REGS x2, x3, 16
SAVE_TWO_REGS x4, x5, 32
SAVE_TWO_REGS x6, x7, 48
@@ -2687,21 +2683,21 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
SAVE_TWO_REGS x10, x11, 80
SAVE_TWO_REGS x12, x13, 96
SAVE_TWO_REGS x14, x15, 112
- SAVE_TWO_REGS x18, x19, 128 // Skip x16, x17, i.e. IP0, IP1.
- SAVE_REG xLR, 144 // Save return address, skip padding at 152.
+ // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
+ SAVE_TWO_REGS x19, xLR, 128 // Save return address.
// Save all potentially live caller-save floating-point registers.
- stp d0, d1, [sp, #160]
- stp d2, d3, [sp, #176]
- stp d4, d5, [sp, #192]
- stp d6, d7, [sp, #208]
- stp d16, d17, [sp, #224]
- stp d18, d19, [sp, #240]
- stp d20, d21, [sp, #256]
- stp d22, d23, [sp, #272]
- stp d24, d25, [sp, #288]
- stp d26, d27, [sp, #304]
- stp d28, d29, [sp, #320]
- stp d30, d31, [sp, #336]
+ stp d0, d1, [sp, #144]
+ stp d2, d3, [sp, #160]
+ stp d4, d5, [sp, #176]
+ stp d6, d7, [sp, #192]
+ stp d16, d17, [sp, #208]
+ stp d18, d19, [sp, #224]
+ stp d20, d21, [sp, #240]
+ stp d22, d23, [sp, #256]
+ stp d24, d25, [sp, #272]
+ stp d26, d27, [sp, #288]
+ stp d28, d29, [sp, #304]
+ stp d30, d31, [sp, #320]
mov x0, xIP0
bl artReadBarrierMark // artReadBarrierMark(obj)
@@ -2716,26 +2712,26 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
RESTORE_TWO_REGS x10, x11, 80
RESTORE_TWO_REGS x12, x13, 96
RESTORE_TWO_REGS x14, x15, 112
- RESTORE_TWO_REGS x18, x19, 128 // Skip x16, x17, i.e. IP0, IP1.
- RESTORE_REG xLR, 144 // Restore return address.
+ // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
+ RESTORE_TWO_REGS x19, xLR, 128 // Restore return address.
// Restore caller-save floating-point registers.
- ldp d0, d1, [sp, #160]
- ldp d2, d3, [sp, #176]
- ldp d4, d5, [sp, #192]
- ldp d6, d7, [sp, #208]
- ldp d16, d17, [sp, #224]
- ldp d18, d19, [sp, #240]
- ldp d20, d21, [sp, #256]
- ldp d22, d23, [sp, #272]
- ldp d24, d25, [sp, #288]
- ldp d26, d27, [sp, #304]
- ldp d28, d29, [sp, #320]
- ldp d30, d31, [sp, #336]
+ ldp d0, d1, [sp, #144]
+ ldp d2, d3, [sp, #160]
+ ldp d4, d5, [sp, #176]
+ ldp d6, d7, [sp, #192]
+ ldp d16, d17, [sp, #208]
+ ldp d18, d19, [sp, #224]
+ ldp d20, d21, [sp, #240]
+ ldp d22, d23, [sp, #256]
+ ldp d24, d25, [sp, #272]
+ ldp d26, d27, [sp, #288]
+ ldp d28, d29, [sp, #304]
+ ldp d30, d31, [sp, #320]
ldr x0, [lr, #\ldr_offset] // Load the instruction.
adr xIP1, .Lmark_introspection_return_switch
bfi xIP1, x0, #3, #5 // Calculate switch case address.
- RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 352
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 336
br xIP1
.endm
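The quick_entrypoints_arm64.S hunks rework the kSaveEverything frame so that x18, the platform register, is never saved or restored, and every FP store pair now starts at a 16-byte-aligned offset. A hedged C++ sketch of the resulting layout, with offsets taken directly from the stp/SAVE_TWO_REGS operands above (the helper names are illustrative):

    #include <cstddef>

    constexpr size_t kFpBase = 16;  // d0 now lives at sp + 16.
    constexpr size_t FpOffset(unsigned n) { return kFpBase + 8 * n; }  // d0..d31

    // x0..x17 follow the FP block; x19 and up shift down one slot
    // because x18 is skipped.
    constexpr size_t CoreOffset(unsigned n) {
      return n <= 17 ? 272 + 8 * n : 272 + 8 * (n - 1);
    }

    static_assert(FpOffset(0) == 16, "d0 slot is 16-byte aligned");
    static_assert(FpOffset(31) == 264, "d31 is the last FP slot");
    static_assert(CoreOffset(0) == 272, "x0 follows the FP block");
    static_assert(CoreOffset(19) == 416, "x19 reuses what would have been x18's slot");

    int main() { return 0; }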
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 05172dbe43..cbf5681d64 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -184,7 +184,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
// Alloc
- ResetQuickAllocEntryPoints(qpoints, /*is_active*/ false);
+ ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
// Cast
qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
@@ -445,7 +445,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
// Cannot use the following registers to pass arguments:
// 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
// Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 2acfe147f8..741d41a263 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -191,7 +191,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
// Cannot use the following registers to pass arguments:
// 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
// Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index e8df90eccd..de1931794f 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1899,7 +1899,7 @@ TEST_F(StubTest, DISABLED_IMT) {
LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
ImtConflictTable* empty_conflict_table =
- Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
+ Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index ffb0c94cc7..3db4edefa1 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -98,7 +98,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (ESP) to pass arguments.
// x86 has only 8 core registers.
qpoints->pReadBarrierMarkReg08 = nullptr;
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 98462512da..e9e983cda2 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -35,27 +35,39 @@ static constexpr const char* x86_known_variants[] = {
"atom",
"sandybridge",
"silvermont",
+ "kabylake",
};
static constexpr const char* x86_variants_with_ssse3[] = {
"atom",
"sandybridge",
"silvermont",
+ "kabylake",
};
static constexpr const char* x86_variants_with_sse4_1[] = {
"sandybridge",
"silvermont",
+ "kabylake",
};
static constexpr const char* x86_variants_with_sse4_2[] = {
"sandybridge",
"silvermont",
+ "kabylake",
};
static constexpr const char* x86_variants_with_popcnt[] = {
"sandybridge",
"silvermont",
+ "kabylake",
+};
+static constexpr const char* x86_variants_with_avx[] = {
+ "kabylake",
+};
+
+static constexpr const char* x86_variants_with_avx2[] = {
+ "kabylake",
};
X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
@@ -93,9 +105,12 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
bool has_SSE4_2 = FindVariantInArray(x86_variants_with_sse4_2,
arraysize(x86_variants_with_sse4_2),
variant);
- bool has_AVX = false;
- bool has_AVX2 = false;
-
+ bool has_AVX = FindVariantInArray(x86_variants_with_avx,
+ arraysize(x86_variants_with_avx),
+ variant);
+ bool has_AVX2 = FindVariantInArray(x86_variants_with_avx2,
+ arraysize(x86_variants_with_avx2),
+ variant);
bool has_POPCNT = FindVariantInArray(x86_variants_with_popcnt,
arraysize(x86_variants_with_popcnt),
variant);
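This hunk wires the new kabylake variant into AVX/AVX2 detection: each feature has a table of variants known to support it, and FindVariantInArray is a linear scan over that table. A distilled model of the lookup (helper and table names are stand-ins, not ART's exact code):

    #include <cstring>
    #include <cstdio>

    // Linear scan standing in for ART's FindVariantInArray helper.
    static bool HasVariant(const char* const* variants, size_t count,
                           const char* variant) {
      for (size_t i = 0; i < count; ++i) {
        if (strcmp(variants[i], variant) == 0) return true;
      }
      return false;
    }

    static const char* const kVariantsWithAvx2[] = {"kabylake"};

    int main(int argc, char** argv) {
      const char* variant = argc > 1 ? argv[1] : "default";
      bool has_avx2 = HasVariant(kVariantsWithAvx2, 1, variant);
      printf("%s: avx2=%d\n", variant, has_avx2);
      return 0;
    }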
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 6bd626319e..34d908b69a 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -67,6 +67,8 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
bool HasPopCnt() const { return has_POPCNT_; }
+ bool HasAVX2() const { return has_AVX2_; }
+
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index 33eac0f0a6..cdf15af9b9 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -143,4 +143,40 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
}
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromKabylakeVariant) {
+ // Build features for a 32-bit kabylake x86 processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "kabylake", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+ x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 63U);
+
+ // Build features for a 32-bit x86 default processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
+ EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+ EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
+ x86_default_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
+
+ // Build features for a 64-bit x86-64 kabylake processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "kabylake", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 63U);
+
+ EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+ EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+ EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
} // namespace art
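The expected bitmap of 63U in the new test is what one bit per feature yields across the six features in the feature string. The bit assignments below are assumed for illustration only; ART's actual enum may order them differently:

    #include <cstdint>
    #include <cstdio>

    // Assumed one-bit-per-feature encoding (illustrative ordering).
    enum : uint32_t {
      kSsse3  = 1u << 0,
      kSse4_1 = 1u << 1,
      kSse4_2 = 1u << 2,
      kAvx    = 1u << 3,
      kAvx2   = 1u << 4,
      kPopCnt = 1u << 5,
    };

    int main() {
      uint32_t kabylake = kSsse3 | kSse4_1 | kSse4_2 | kAvx | kAvx2 | kPopCnt;
      printf("%u\n", kabylake);  // 63: all six feature bits set.
      return 0;
    }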
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6bae69c495..db011bab62 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -120,7 +120,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (RSP) to pass arguments.
// x86-64 has only 16 core registers.
qpoints->pReadBarrierMarkReg16 = nullptr;
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 6cbd9e4cfc..e20e7f3f5e 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -47,7 +47,7 @@ void ArtField::SetOffset(MemberOffset num_bytes) {
ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
DCHECK(GetDeclaringClass()->IsProxyClass());
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupClass(
- Thread::Current(), descriptor, /* class_loader */ nullptr);
+ Thread::Current(), descriptor, /* class_loader= */ nullptr);
DCHECK(klass != nullptr);
return klass;
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 68ccfee089..4a19b108ab 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -324,12 +324,12 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(
- self, this, nullptr, args, result, /*stay_in_interpreter*/ true);
+ self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
} else {
mirror::Object* receiver =
reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
art::interpreter::EnterInterpreterFromInvoke(
- self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
+ self, this, receiver, args + 1, result, /*stay_in_interpreter=*/ true);
}
} else {
DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 851c23f1cb..50b42d4f7b 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,7 @@ MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
size = RoundUp(size, kPageSize);
std::string error_msg;
MemMap map = MemMap::MapAnonymous(name,
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
low_4gb,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b2ddff3f6a..9952283272 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1028,7 +1028,11 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
guard_.recursion_count_ = 0;
timespec ts;
InitTimeSpec(true, clock, ms, ns, &ts);
- int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
+ int rc;
+ while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
+ continue;
+ }
+
if (rc == ETIMEDOUT) {
timed_out = true;
} else if (rc != 0) {
@@ -1246,9 +1250,9 @@ void Locks::Init() {
#undef UPDATE_CURRENT_LOCK_LEVEL
// List of mutexes that we may hold when accessing a weak ref.
- AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
- AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
- AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
InitConditions();
}
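The mutex.cc change fixes an API mismatch: TEMP_FAILURE_RETRY retries while a call returns -1 with errno set to EINTR, but pthread functions report failure through their return value and never set errno, so the retry condition must test the returned code instead. A standalone sketch of the corrected loop:

    #include <cerrno>
    #include <ctime>
    #include <pthread.h>

    // Retry a timed wait interrupted by a signal. Unlike TEMP_FAILURE_RETRY,
    // this checks the *return value* against EINTR, which is how pthread
    // calls report errors.
    int TimedWaitRetryingOnEintr(pthread_cond_t* cond, pthread_mutex_t* mutex,
                                 const timespec* ts) {
      int rc;
      while ((rc = pthread_cond_timedwait(cond, mutex, ts)) == EINTR) {
        continue;
      }
      return rc;  // 0, ETIMEDOUT, or another error code.
    }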
diff --git a/runtime/cha.cc b/runtime/cha.cc
index d8cb525719..de4aebed36 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -277,7 +277,7 @@ class CHACheckpoint final : public Closure {
};
-static void VerifyNonSingleImplementation(mirror::Class* verify_class,
+static void VerifyNonSingleImplementation(ObjPtr<mirror::Class> verify_class,
uint16_t verify_index,
ArtMethod* excluded_method)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -291,7 +291,7 @@ static void VerifyNonSingleImplementation(mirror::Class* verify_class,
PointerSize image_pointer_size =
Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- mirror::Class* input_verify_class = verify_class;
+ ObjPtr<mirror::Class> input_verify_class = verify_class;
while (verify_class != nullptr) {
if (verify_index >= verify_class->GetVTableLength()) {
@@ -299,7 +299,7 @@ static void VerifyNonSingleImplementation(mirror::Class* verify_class,
}
ArtMethod* verify_method = verify_class->GetVTableEntry(verify_index, image_pointer_size);
if (verify_method != excluded_method) {
- auto construct_parent_chain = [](mirror::Class* failed, mirror::Class* in)
+ auto construct_parent_chain = [](ObjPtr<mirror::Class> failed, ObjPtr<mirror::Class> in)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp = in->PrettyClass();
while (in != failed) {
@@ -363,7 +363,7 @@ void ClassHierarchyAnalysis::CheckVirtualMethodSingleImplementationInfo(
// non-single-implementation already.
VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
method_in_super->GetMethodIndex(),
- nullptr /* excluded_method */);
+ /* excluded_method= */ nullptr);
return;
}
@@ -432,7 +432,7 @@ void ClassHierarchyAnalysis::CheckVirtualMethodSingleImplementationInfo(
// method_in_super might be the single-implementation of another abstract method,
// which should be also invalidated of its single-implementation status.
- mirror::Class* super_super = klass->GetSuperClass()->GetSuperClass();
+ ObjPtr<mirror::Class> super_super = klass->GetSuperClass()->GetSuperClass();
while (super_super != nullptr &&
method_index < super_super->GetVTableLength()) {
ArtMethod* method_in_super_super = super_super->GetVTableEntry(method_index, pointer_size);
@@ -564,7 +564,7 @@ void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
return;
}
- mirror::Class* super_class = klass->GetSuperClass();
+ ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
if (super_class == nullptr) {
return;
}
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2536b23416..7e011371b7 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -22,7 +22,6 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "class_linker.h"
-#include "gc/heap-inl.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
@@ -315,7 +314,7 @@ inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod*
// Check if the invoke type matches the class type.
ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
- if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) {
+ if (CheckInvokeClassMismatch</* kThrow= */ false>(dex_cache, type, method_idx, class_loader)) {
return nullptr;
}
// Check access.
@@ -366,7 +365,7 @@ inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
// Check if the invoke type matches the class type.
ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
- if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) {
+ if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
@@ -439,6 +438,14 @@ inline void ClassLinker::VisitClassTables(const Visitor& visitor) {
}
}
+template <ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<mirror::ObjectArray<mirror::Class>> ClassLinker::GetClassRoots() {
+ ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
+ class_roots_.Read<kReadBarrierOption>();
+ DCHECK(class_roots != nullptr);
+ return class_roots;
+}
+
} // namespace art
#endif // ART_RUNTIME_CLASS_LINKER_INL_H_
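This hunk, together with the class_linker.h hunk further down, applies the usual header/-inl.h split: the class header keeps only the GetClassRoots declaration so that including class_linker.h stays cheap, and the template body moves to class_linker-inl.h for the translation units that actually instantiate it. A generic sketch of the pattern compressed into one file (names illustrative):

    // As in the .h: cheap to include, declaration only.
    template <int kOption>
    int ReadValue();

    // As in the -inl.h: pulled in only by callers that instantiate it.
    template <int kOption>
    inline int ReadValue() {
      return kOption * 2;  // stand-in for the real read-barrier logic
    }

    // A caller translation unit.
    int main() { return ReadValue<21>() == 42 ? 0 : 1; }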
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7549c04b6f..ae812b80cf 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -496,7 +496,7 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Space (LOS) -- see the comment about the dirty card scanning logic in
// art::gc::collector::ConcurrentCopying::MarkingPhase.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
- AllocClass</* kMovable */ false>(
+ AllocClass</* kMovable= */ false>(
self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
java_lang_String->SetStringClass();
mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self);
@@ -1039,8 +1039,8 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!AddImageSpace(image_space,
ScopedNullHandle<mirror::ClassLoader>(),
- /*dex_elements*/nullptr,
- /*dex_location*/nullptr,
+ /*dex_elements=*/nullptr,
+ /*dex_location=*/nullptr,
/*out*/&dex_files,
error_msg)) {
return false;
@@ -1127,7 +1127,10 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
}
return true; // Continue with the next Element.
};
- bool error = VisitClassLoaderDexElements(soa, handle, add_element_names, /* error */ false);
+ bool error = VisitClassLoaderDexElements(soa,
+ handle,
+ add_element_names,
+ /* defaultReturn= */ false);
if (error) {
// An error occurred during DexPathList Element visiting.
return false;
@@ -1175,28 +1178,33 @@ class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
/*
* A class used to ensure that all strings in an AppImage have been properly
- * interned.
+ * interned. This check is only ever run in debug builds.
*/
class VerifyStringInterningVisitor {
public:
explicit VerifyStringInterningVisitor(const gc::space::ImageSpace& space) :
- uninterned_string_found_(false),
space_(space),
intern_table_(*Runtime::Current()->GetInternTable()) {}
- ALWAYS_INLINE
void TestObject(ObjPtr<mirror::Object> referred_obj) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (referred_obj != nullptr &&
space_.HasAddress(referred_obj.Ptr()) &&
referred_obj->IsString()) {
ObjPtr<mirror::String> referred_str = referred_obj->AsString();
- uninterned_string_found_ = uninterned_string_found_ ||
- (intern_table_.LookupStrong(Thread::Current(), referred_str) != referred_str);
+
+ if (kIsDebugBuild) {
+ // Saved to temporary variables to aid in debugging.
+ ObjPtr<mirror::String> strong_lookup_result =
+ intern_table_.LookupStrong(Thread::Current(), referred_str);
+ ObjPtr<mirror::String> weak_lookup_result =
+ intern_table_.LookupWeak(Thread::Current(), referred_str);
+
+ DCHECK((strong_lookup_result == referred_str) || (weak_lookup_result == referred_str));
+ }
}
}
- ALWAYS_INLINE
void VisitRootIfNonNull(
mirror::CompressedReference<mirror::Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1205,14 +1213,12 @@ class VerifyStringInterningVisitor {
}
}
- ALWAYS_INLINE
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
TestObject(root->AsMirrorPtr());
}
// Visit Class Fields
- ALWAYS_INLINE
void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
@@ -1234,7 +1240,6 @@ class VerifyStringInterningVisitor {
operator()(ref, mirror::Reference::ReferentOffset(), false);
}
- mutable bool uninterned_string_found_;
const gc::space::ImageSpace& space_;
InternTable& intern_table_;
};
@@ -1244,13 +1249,14 @@ class VerifyStringInterningVisitor {
* properly interned. To be considered properly interned a reference must
* point to the same version of the string that the intern table does.
*/
-bool VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::mutator_lock_) {
+void VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::mutator_lock_) {
const gc::accounting::ContinuousSpaceBitmap* bitmap = space.GetMarkBitmap();
const ImageHeader& image_header = space.GetImageHeader();
const uint8_t* target_base = space.GetMemMap()->Begin();
const ImageSection& objects_section = image_header.GetObjectsSection();
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+
+ auto objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+ auto objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
VerifyStringInterningVisitor visitor(space);
bitmap->VisitMarkedRange(objects_begin,
@@ -1259,21 +1265,19 @@ bool VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::
REQUIRES_SHARED(Locks::mutator_lock_) {
if (space.HasAddress(obj)) {
if (obj->IsDexCache()) {
- obj->VisitReferences</* kVisitNativeRoots */ true,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ obj->VisitReferences</* kVisitNativeRoots= */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
} else {
// Don't visit native roots for non-dex-cache as they can't contain
// native references to strings. This is verified during compilation
// by ImageWriter::VerifyNativeGCRootInvariants.
- obj->VisitReferences</* kVisitNativeRoots */ false,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ obj->VisitReferences</* kVisitNativeRoots= */ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
}
}
});
-
- return !visitor.uninterned_string_found_;
}
// new_class_set is the set of classes that were read from the class table section in the image.
@@ -1290,7 +1294,7 @@ class AppImageLoadingHelper {
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void AddImageInternTable(gc::space::ImageSpace* space)
+ static void HandleAppImageStrings(gc::space::ImageSpace* space)
REQUIRES_SHARED(Locks::mutator_lock_);
static void UpdateInternStrings(
@@ -1374,8 +1378,11 @@ void AppImageLoadingHelper::Update(
}
if (ClassLinker::kAppImageMayContainStrings) {
- AddImageInternTable(space);
- DCHECK(VerifyStringInterning(*space));
+ HandleAppImageStrings(space);
+
+ if (kIsDebugBuild) {
+ VerifyStringInterning(*space);
+ }
}
if (kVerifyArtMethodDeclaringClasses) {
@@ -1390,51 +1397,76 @@ void AppImageLoadingHelper::UpdateInternStrings(
gc::space::ImageSpace* space,
const SafeMap<mirror::String*, mirror::String*>& intern_remap) {
const uint8_t* target_base = space->Begin();
- const ImageSection& sro_section = space->GetImageHeader().GetImageStringReferenceOffsetsSection();
- const size_t num_string_offsets = sro_section.Size() / sizeof(uint32_t);
+ const ImageSection& sro_section =
+ space->GetImageHeader().GetImageStringReferenceOffsetsSection();
+ const size_t num_string_offsets = sro_section.Size() / sizeof(AppImageReferenceOffsetInfo);
VLOG(image)
<< "ClassLinker:AppImage:InternStrings:imageStringReferenceOffsetCount = "
<< num_string_offsets;
- const uint32_t* sro_base =
- reinterpret_cast<const uint32_t*>(target_base + sro_section.Offset());
+ const auto* sro_base =
+ reinterpret_cast<const AppImageReferenceOffsetInfo*>(target_base + sro_section.Offset());
for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
- if (HasNativeRefTag(sro_base[offset_index])) {
- void* raw_field_addr = space->Begin() + ClearNativeRefTag(sro_base[offset_index]);
- mirror::CompressedReference<mirror::Object>* objref_addr =
- reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(raw_field_addr);
- mirror::String* referred_string = objref_addr->AsMirrorPtr()->AsString();
+ uint32_t base_offset = sro_base[offset_index].first;
+
+ if (HasDexCacheNativeRefTag(base_offset)) {
+ base_offset = ClearDexCacheNativeRefTag(base_offset);
+ DCHECK_ALIGNED(base_offset, 2);
+
+ ObjPtr<mirror::DexCache> dex_cache =
+ reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+ uint32_t string_index = sro_base[offset_index].second;
+
+ mirror::StringDexCachePair source = dex_cache->GetStrings()[string_index].load();
+ ObjPtr<mirror::String> referred_string = source.object.Read();
DCHECK(referred_string != nullptr);
- auto it = intern_remap.find(referred_string);
+ auto it = intern_remap.find(referred_string.Ptr());
if (it != intern_remap.end()) {
- objref_addr->Assign(it->second);
+ // This doesn't use SetResolvedString to maintain consistency with how
+ // we load the string. The index from the source string must be
+ // re-used due to the circular nature of the cache. Because we are not
+ // using a helper function, we need to mark the GC card manually.
+ WriteBarrier::ForEveryFieldWrite(dex_cache);
+ dex_cache->GetStrings()[string_index].store(
+ mirror::StringDexCachePair(it->second, source.index));
}
+
} else {
- void* raw_field_addr = space->Begin() + sro_base[offset_index];
- mirror::HeapReference<mirror::Object>* objref_addr =
- reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_field_addr);
- mirror::String* referred_string = objref_addr->AsMirrorPtr()->AsString();
- DCHECK(referred_string != nullptr);
+ uint32_t raw_member_offset = sro_base[offset_index].second;
+ DCHECK_ALIGNED(base_offset, 2);
+ DCHECK_ALIGNED(raw_member_offset, 2);
+
+ ObjPtr<mirror::Object> obj_ptr =
+ reinterpret_cast<mirror::Object*>(space->Begin() + base_offset);
+ MemberOffset member_offset(raw_member_offset);
+ ObjPtr<mirror::String> referred_string =
+ obj_ptr->GetFieldObject<mirror::String,
+ kVerifyNone,
+ kWithoutReadBarrier,
+ /* kIsVolatile= */ false>(member_offset);
+ DCHECK(referred_string != nullptr);
- auto it = intern_remap.find(referred_string);
+ auto it = intern_remap.find(referred_string.Ptr());
if (it != intern_remap.end()) {
- objref_addr->Assign<false>(it->second);
+ obj_ptr->SetFieldObject</* kTransactionActive= */ false,
+ /* kCheckTransaction= */ false,
+ kVerifyNone,
+ /* kIsVolatile= */ false>(member_offset, it->second);
}
}
}
}
-void AppImageLoadingHelper::AddImageInternTable(gc::space::ImageSpace* space) {
+void AppImageLoadingHelper::HandleAppImageStrings(gc::space::ImageSpace* space) {
// Iterate over the string reference offsets stored in the image and intern
// the strings they point to.
ScopedTrace timing("AppImage:InternString");
Thread* const self = Thread::Current();
- Runtime* const runtime = Runtime::Current();
- InternTable* const intern_table = runtime->GetInternTable();
+ InternTable* const intern_table = Runtime::Current()->GetInternTable();
// Add the intern table, removing any conflicts. For conflicts, store the new address in a map
// for faster lookup.
@@ -1442,7 +1474,7 @@ void AppImageLoadingHelper::AddImageInternTable(gc::space::ImageSpace* space) {
SafeMap<mirror::String*, mirror::String*> intern_remap;
intern_table->AddImageStringsToTable(space, [&](InternTable::UnorderedSet& interns)
REQUIRES_SHARED(Locks::mutator_lock_) {
- VLOG(image) << "AppImage:StringsInInternTable = " << interns.size();
+ VLOG(image) << "AppImage:stringsInInternTableSize = " << interns.size();
for (auto it = interns.begin(); it != interns.end(); ) {
ObjPtr<mirror::String> string = it->Read();
ObjPtr<mirror::String> existing = intern_table->LookupWeak(self, string);
@@ -1458,7 +1490,7 @@ void AppImageLoadingHelper::AddImageInternTable(gc::space::ImageSpace* space) {
}
});
- VLOG(image) << "AppImage:ConflictingInternStrings = " << intern_remap.size();
+ VLOG(image) << "AppImage:conflictingInternStrings = " << intern_remap.size();
// For debug builds, always run the code below to get coverage.
if (kIsDebugBuild || !intern_remap.empty()) {
@@ -2241,7 +2273,7 @@ ClassLinker::~ClassLinker() {
for (const ClassLoaderData& data : class_loaders_) {
// CHA unloading analysis is not needed. No negative consequences are expected because
// all the classloaders are deleted at the same time.
- DeleteClassLoader(self, data, false /*cleanup_cha*/);
+ DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
}
class_loaders_.clear();
}
@@ -2345,7 +2377,7 @@ ObjPtr<mirror::Class> ClassLinker::AllocPrimitiveArrayClass(Thread* self,
// in the `klass_` field of one of its instances allocated in the Large-Object
// Space (LOS) -- see the comment about the dirty card scanning logic in
// art::gc::collector::ConcurrentCopying::MarkingPhase.
- return AllocClass</* kMovable */ false>(
+ return AllocClass</* kMovable= */ false>(
self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
}
@@ -3441,7 +3473,7 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
CHECK(dex_cache != nullptr) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
- RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
+ RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr);
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -5012,7 +5044,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
ArtField* art_field = ResolveField(field.GetIndex(),
dex_cache,
class_loader,
- /* is_static */ true);
+ /* is_static= */ true);
if (Runtime::Current()->IsActiveTransaction()) {
value_it.ReadValueToField<true>(art_field);
} else {
@@ -6412,8 +6444,8 @@ void ClassLinker::FillIMTAndConflictTables(ObjPtr<mirror::Class> klass) {
unimplemented_method,
conflict_method,
klass,
- /*create_conflict_tables*/true,
- /*ignore_copied_methods*/false,
+ /*create_conflict_tables=*/true,
+ /*ignore_copied_methods=*/false,
&new_conflict,
&imt_data[0]);
}
@@ -6901,8 +6933,8 @@ void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
unimplemented_method,
imt_conflict_method,
klass.Get(),
- /*create_conflict_table*/false,
- /*ignore_copied_methods*/true,
+ /*create_conflict_tables=*/false,
+ /*ignore_copied_methods=*/true,
/*out*/new_conflict,
/*out*/imt);
}
@@ -8120,7 +8152,7 @@ ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
// Check if the invoke type matches the class type.
if (kResolveMode == ResolveMode::kCheckICCEAndIAE &&
- CheckInvokeClassMismatch</* kThrow */ true>(
+ CheckInvokeClassMismatch</* kThrow= */ true>(
dex_cache.Get(), type, [klass]() { return klass; })) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
@@ -9088,7 +9120,7 @@ void ClassLinker::CleanupClassLoaders() {
}
for (ClassLoaderData& data : to_delete) {
// CHA unloading analysis and SingleImplementation cleanups are required.
- DeleteClassLoader(self, data, true /*cleanup_cha*/);
+ DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
}
}
@@ -9234,11 +9266,11 @@ template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kNoChec
InvokeType type);
// Instantiate ClassLinker::AllocClass.
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ true>(
Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size);
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ false>(
Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size);
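The UpdateInternStrings rework above changes the string-reference-offsets section from single tagged uint32_t offsets to (base, member) pairs read as AppImageReferenceOffsetInfo. A simplified model of the decode step; the pair layout and tag constant are assumptions for illustration, not ART's exact definitions:

    #include <cstdint>
    #include <utility>

    // Assumed shape: first = offset of the owning object (low bit tags a
    // dex cache), second = string index (dex cache) or member offset (object).
    using AppImageReferenceOffsetInfo = std::pair<uint32_t, uint32_t>;

    constexpr uint32_t kDexCacheNativeRefTag = 1u;  // assumed low-bit tag

    constexpr bool HasDexCacheNativeRefTag(uint32_t offset) {
      return (offset & kDexCacheNativeRefTag) != 0u;
    }

    constexpr uint32_t ClearDexCacheNativeRefTag(uint32_t offset) {
      return offset & ~kDexCacheNativeRefTag;
    }

    int main() {
      AppImageReferenceOffsetInfo entry{0x1001u, 7u};  // dex cache + string index
      if (HasDexCacheNativeRefTag(entry.first)) {
        uint32_t base = ClearDexCacheNativeRefTag(entry.first);  // 0x1000
        return (base == 0x1000u && entry.second == 7u) ? 0 : 1;
      }
      return 1;
    }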
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 996427e7c6..7afd575607 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -314,10 +314,7 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ResolveMode kResolveMode>
- ALWAYS_INLINE ArtMethod* ResolveMethod(Thread* self,
- uint32_t method_idx,
- ArtMethod* referrer,
- InvokeType type)
+ ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ArtMethod* ResolveMethodWithoutInvokeType(uint32_t method_idx,
@@ -557,12 +554,7 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ObjPtr<mirror::ObjectArray<mirror::Class>> GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
- class_roots_.Read<kReadBarrierOption>();
- DCHECK(class_roots != nullptr);
- return class_roots;
- }
+ ObjPtr<mirror::ObjectArray<mirror::Class>> GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_);
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table which makes it that the pages
@@ -1041,12 +1033,12 @@ class ClassLinker {
public:
// This slot must become a default conflict method.
static MethodTranslation CreateConflictingMethod() {
- return MethodTranslation(Type::kConflict, /*translation*/nullptr);
+ return MethodTranslation(Type::kConflict, /*translation=*/nullptr);
}
// This slot must become an abstract method.
static MethodTranslation CreateAbstractMethod() {
- return MethodTranslation(Type::kAbstract, /*translation*/nullptr);
+ return MethodTranslation(Type::kAbstract, /*translation=*/nullptr);
}
// Use the given method as the current value for this vtable slot during translation.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab7182a75e..27ac90b5b4 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1034,8 +1034,8 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
// Force initialization to turn the class erroneous.
bool initialized = class_linker_->EnsureInitialized(soa.Self(),
klass,
- /* can_init_fields */ true,
- /* can_init_parents */ true);
+ /* can_init_fields= */ true,
+ /* can_init_parents= */ true);
EXPECT_FALSE(initialized);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1320,15 +1320,15 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx,
clinit,
soa.Self(),
- /* can_run_clinit */ true,
- /* verify_access */ false);
+ /* can_run_clinit= */ true,
+ /* verify_access= */ false);
EXPECT_TRUE(uninit != nullptr);
EXPECT_FALSE(uninit->IsInitialized());
ObjPtr<mirror::Class> init = ResolveVerifyAndClinit(type_idx,
getS0,
soa.Self(),
- /* can_run_clinit */ true,
- /* verify_access */ false);
+ /* can_run_clinit= */ true,
+ /* verify_access= */ false);
EXPECT_TRUE(init != nullptr);
EXPECT_TRUE(init->IsInitialized());
}
@@ -1530,7 +1530,7 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Check that inserting with a UTF16 name works.
- class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
+ class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader= */ nullptr);
}
}
@@ -1699,14 +1699,14 @@ TEST_F(ClassLinkerClassLoaderTest, CreatePathClassLoader) {
jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
- VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
}
TEST_F(ClassLinkerClassLoaderTest, CreateDelegateLastClassLoader) {
jobject class_loader_a = LoadDexInDelegateLastClassLoader("ForClassLoaderA", nullptr);
VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
- VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
}
TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) {
@@ -1753,7 +1753,7 @@ TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) {
VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a);
// Sanity check that we don't find an undefined class.
- VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false);
}
} // namespace art
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 4da0091884..dd10f3c4dd 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -64,10 +64,10 @@ ClassLoaderContext::~ClassLoaderContext() {
// make sure we do not de-allocate them.
for (ClassLoaderInfo& info : class_loader_chain_) {
for (std::unique_ptr<OatFile>& oat_file : info.opened_oat_files) {
- oat_file.release();
+ oat_file.release(); // NOLINT b/117926937
}
for (std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
- dex_file.release();
+ dex_file.release(); // NOLINT b/117926937
}
}
}
@@ -223,7 +223,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
if (!dex_file_loader.Open(location.c_str(),
location.c_str(),
Runtime::Current()->IsVerificationEnabled(),
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg,
&info.opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
@@ -298,12 +298,12 @@ bool ClassLoaderContext::RemoveLocationsFromClassPaths(
}
std::string ClassLoaderContext::EncodeContextForDex2oat(const std::string& base_dir) const {
- return EncodeContext(base_dir, /*for_dex2oat*/ true, /*stored_context*/ nullptr);
+ return EncodeContext(base_dir, /*for_dex2oat=*/ true, /*stored_context=*/ nullptr);
}
std::string ClassLoaderContext::EncodeContextForOatFile(const std::string& base_dir,
ClassLoaderContext* stored_context) const {
- return EncodeContext(base_dir, /*for_dex2oat*/ false, stored_context);
+ return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context);
}
std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
@@ -663,7 +663,7 @@ std::unique_ptr<ClassLoaderContext> ClassLoaderContext::CreateContextForClassLoa
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
- std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false));
+ std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files=*/ false));
if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) {
return result;
} else {
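The NOLINT annotations in the ~ClassLoaderContext hunk mark deliberate leaks: release() detaches the object from its unique_ptr so the destructor never frees it, because the runtime may still hold references to the dex and oat files. A distilled sketch of reset() versus release():

    #include <memory>

    struct DexFileLike { /* owned elsewhere after release */ };

    int main() {
      std::unique_ptr<DexFileLike> owner(new DexFileLike);
      // reset() would delete the object; release() only drops ownership
      // and returns the raw pointer, leaving the object alive. Discarding
      // that pointer, as ~ClassLoaderContext does, deliberately leaks it.
      DexFileLike* raw = owner.release();
      delete raw;  // done here to keep this example leak-free; ART skips this.
      return 0;
    }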
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 5e3f48c100..ea624f1e9c 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -125,7 +125,7 @@ class ClassLoaderContextTest : public CommonRuntimeTest {
std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) {
std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext());
- if (!context->Parse(context_spec, /*parse_checksums*/ true)) {
+ if (!context->Parse(context_spec, /*parse_checksums=*/ true)) {
return nullptr;
}
return context;
@@ -263,7 +263,7 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFiles) {
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
"DLC[" + dex_name + "]");
- ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+ ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
VerifyContextSize(context.get(), 2);
@@ -314,7 +314,7 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) {
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
"DLC[" + dex_name + "]");
- ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+ ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
std::vector<std::unique_ptr<const DexFile>> all_dex_files0 = OpenTestDexFiles("MultiDex");
std::vector<std::unique_ptr<const DexFile>> myclass_dex_files = OpenTestDexFiles("MyClass");
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 78ad568d25..945d659396 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -160,7 +160,7 @@ inline void VisitClassLoaderDexFiles(ScopedObjectAccessAlreadyRunnable& soa,
VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
class_loader,
helper,
- /* default */ nullptr);
+ /* default= */ nullptr);
}
} // namespace art
diff --git a/runtime/class_root.h b/runtime/class_root.h
index 19a78b1ffb..1cd135f2aa 100644
--- a/runtime/class_root.h
+++ b/runtime/class_root.h
@@ -17,7 +17,8 @@
#ifndef ART_RUNTIME_CLASS_ROOT_H_
#define ART_RUNTIME_CLASS_ROOT_H_
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "gc_root-inl.h"
#include "mirror/class.h"
#include "mirror/object_array-inl.h"
#include "obj_ptr-inl.h"
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 6b6fe341e0..a2cdb2c28c 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -19,6 +19,7 @@
#include "class_table.h"
+#include "base/mutex-inl.h"
#include "gc_root-inl.h"
#include "mirror/class.h"
#include "oat_file.h"
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index a5157df36b..c48ab3629c 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -78,8 +78,8 @@ class CommonRuntimeTestImpl : public CommonArtTestImpl {
const ArtDexFileLoader dex_file_loader;
CHECK(dex_file_loader.Open(input_jar.c_str(),
input_jar.c_str(),
- /*verify*/ true,
- /*verify_checksum*/ true,
+ /*verify=*/ true,
+ /*verify_checksum=*/ true,
&error_msg,
&dex_files)) << error_msg;
EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 7199d5e192..7a08cb3df4 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -105,10 +105,10 @@ void ThrowAbstractMethodError(ArtMethod* method) {
}
void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) {
- ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr,
+ ThrowException("Ljava/lang/AbstractMethodError;", /* referrer= */ nullptr,
StringPrintf("abstract method \"%s\"",
dex_file.PrettyMethod(method_idx,
- /* with_signature */ true).c_str()).c_str());
+ /* with_signature= */ true).c_str()).c_str());
}
// ArithmeticException
@@ -324,7 +324,7 @@ void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const cha
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
DCHECK(method != nullptr);
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- /*referrer*/nullptr,
+ /*referrer=*/nullptr,
StringPrintf("Conflicting default method implementations %s",
ArtMethod::PrettyMethod(method).c_str()).c_str());
}
@@ -633,7 +633,7 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
ArtField* field =
Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
break;
}
case Instruction::IGET_QUICK:
@@ -647,9 +647,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
ArtField* field = nullptr;
CHECK_NE(field_idx, DexFile::kDexNoIndex16);
field = Runtime::Current()->GetClassLinker()->ResolveField(
- field_idx, method, /* is_static */ false);
+ field_idx, method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
break;
}
case Instruction::IPUT:
@@ -660,9 +660,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField(
- instr.VRegC_22c(), method, /* is_static */ false);
+ instr.VRegC_22c(), method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
break;
}
case Instruction::IPUT_QUICK:
@@ -676,9 +676,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
ArtField* field = nullptr;
CHECK_NE(field_idx, DexFile::kDexNoIndex16);
field = Runtime::Current()->GetClassLinker()->ResolveField(
- field_idx, method, /* is_static */ false);
+ field_idx, method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
break;
}
case Instruction::AGET:
diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc
index cb334b569f..2939b00e1b 100644
--- a/runtime/debug_print.cc
+++ b/runtime/debug_print.cc
@@ -37,7 +37,7 @@ std::string DescribeSpace(ObjPtr<mirror::Class> klass) {
std::ostringstream oss;
gc::Heap* heap = Runtime::Current()->GetHeap();
gc::space::ContinuousSpace* cs =
- heap->FindContinuousSpaceFromObject(klass, /* fail_ok */ true);
+ heap->FindContinuousSpaceFromObject(klass, /* fail_ok= */ true);
if (cs != nullptr) {
if (cs->IsImageSpace()) {
gc::space::ImageSpace* ispace = cs->AsImageSpace();
@@ -50,7 +50,7 @@ std::string DescribeSpace(ObjPtr<mirror::Class> klass) {
}
} else {
gc::space::DiscontinuousSpace* ds =
- heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true);
+ heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok= */ true);
if (ds != nullptr) {
oss << "discontinuous;" << ds->GetName();
} else {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b679cbe051..099cadc07d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -65,6 +65,7 @@
#include "oat_file.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
+#include "runtime-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread_list.h"
@@ -688,7 +689,7 @@ void Dbg::GoActive() {
runtime->GetInstrumentation()->EnableDeoptimization();
}
instrumentation_events_ = 0;
- gDebuggerActive = true;
+ Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = true; });
Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
LOG(INFO) << "Debugger is active";
}
@@ -726,7 +727,7 @@ void Dbg::Disconnected() {
if (RequiresDeoptimization()) {
runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
}
- gDebuggerActive = false;
+ Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = false; });
Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
&gDebugActiveCallback);
}
@@ -943,7 +944,7 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>* counts) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
VariableSizedHandleScope hs(Thread::Current());
std::vector<Handle<mirror::Class>> classes;
counts->clear();
@@ -964,7 +965,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
std::vector<JDWP::ObjectId>* instances) {
gc::Heap* heap = Runtime::Current()->GetHeap();
// We only want reachable instances, so do a GC.
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
JDWP::JdwpError error;
ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -974,7 +975,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
std::vector<Handle<mirror::Object>> raw_instances;
Runtime::Current()->GetHeap()->GetInstances(hs,
hs.NewHandle(c),
- /* use_is_assignable_from */ false,
+ /* use_is_assignable_from= */ false,
max_count,
raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -986,7 +987,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>* referring_objects) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
JDWP::JdwpError error;
ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
if (o == nullptr) {
@@ -3074,7 +3075,7 @@ void Dbg::PostException(mirror::Throwable* exception_object) {
Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
std::unique_ptr<Context> context(Context::Create());
CatchLocationFinder clf(self, h_exception, context.get());
- clf.WalkStack(/* include_transitions */ false);
+ clf.WalkStack(/* include_transitions= */ false);
JDWP::EventLocation exception_throw_location;
SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
JDWP::EventLocation exception_catch_location;
@@ -3733,7 +3734,7 @@ class ScopedDebuggerThreadSuspension {
bool timed_out;
ThreadList* const thread_list = Runtime::Current()->GetThreadList();
suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kForDebugger,
&timed_out);
}
@@ -4744,7 +4745,7 @@ class HeapChunkContext {
REQUIRES_SHARED(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
- AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
+ AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
}
@@ -4756,7 +4757,7 @@ class HeapChunkContext {
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
- AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
+ AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
}
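
The debugger.cc hunks stop assigning gDebuggerActive directly and instead route the write through Runtime::DoAndMaybeSwitchInterpreter, so interpreter selection is re-evaluated whenever debugger state flips. A rough sketch of the shape of such a helper, assuming it gates the fast mterp interpreter on debugger activity (the real one is declared in runtime.h and may take locks or notify other threads):

    #include <atomic>

    // Illustrative stand-in only, not ART's implementation.
    static std::atomic<bool> gUseMterp{true};
    static bool gDebuggerActive = false;

    template <typename Action>
    void DoAndMaybeSwitchInterpreter(Action act) {
      act();  // e.g. gDebuggerActive = true;
      // A debugger needs the instrumentable switch interpreter, so mterp is
      // disabled while the debugger is active.
      gUseMterp.store(!gDebuggerActive, std::memory_order_release);
    }

    // Usage mirrors the call sites above:
    //   DoAndMaybeSwitchInterpreter([=]() { gDebuggerActive = true; });
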
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index b50a430843..fb63c82a17 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -1251,7 +1251,7 @@ static void DCheckNativeAnnotation(const char* descriptor, jclass cls) {
// WellKnownClasses may not be initialized yet, so `klass` may be null.
if (klass != nullptr) {
// Lookup using the boot class path loader should yield the annotation class.
- CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader= */ nullptr));
}
}
}
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 2cbf557c1f..fbcee3901f 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -87,7 +87,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
GetMultiDexSrc1().c_str(),
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_msg,
&multi1)) << error_msg;
@@ -96,7 +96,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
GetMultiDexSrc2().c_str(),
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_msg,
&multi2)) << error_msg;
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 462620f53a..13f5fcb20e 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -123,14 +123,13 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location,
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
@@ -159,7 +158,7 @@ void DexoptTest::GenerateOdexForTest(const std::string& dex_location,
GenerateOatForTest(dex_location,
odex_location,
filter,
- /* with_alternate_image */ false,
+ /*with_alternate_image=*/ false,
compilation_reason);
}
@@ -177,7 +176,7 @@ void DexoptTest::GenerateOatForTest(const char* dex_location,
}
void DexoptTest::GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter) {
- GenerateOatForTest(dex_location, filter, /* with_alternate_image */ false);
+ GenerateOatForTest(dex_location, filter, /*with_alternate_image=*/ false);
}
void DexoptTest::ReserveImageSpace() {
@@ -207,7 +206,7 @@ void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
reinterpret_cast<uint8_t*>(start),
end - start,
PROT_NONE,
- /* low_4gb*/ false,
+ /* low_4gb=*/ false,
&error_msg));
ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index ce742fe47e..4e5fe5ff0b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -86,7 +86,7 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file,
bool low_4gb,
std::string* error_msg) {
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
- new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
+ new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only= */ false));
if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
@@ -1163,7 +1163,7 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
vaddr_size,
PROT_NONE,
low_4gb,
- /* reuse */ false,
+ /* reuse= */ false,
reservation,
error_msg);
if (!local_reservation.IsValid()) {
@@ -1237,10 +1237,10 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
flags,
file->Fd(),
program_header->p_offset,
- /* low4_gb */ false,
+ /* low_4gb= */ false,
file->GetPath().c_str(),
- /* reuse */ true, // implies MAP_FIXED
- /* reservation */ nullptr,
+ /* reuse= */ true, // implies MAP_FIXED
+ /* reservation= */ nullptr,
error_msg);
if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
@@ -1262,9 +1262,9 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
p_vaddr + program_header->p_filesz,
program_header->p_memsz - program_header->p_filesz,
prot,
- /* low_4gb */ false,
- /* reuse */ true,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ true,
+ /* reservation= */ nullptr,
error_msg);
if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
@@ -1763,7 +1763,7 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, /*out*/std::st
PROT_READ,
MAP_PRIVATE,
file->Fd(),
- /* start */ 0,
+ /* start= */ 0,
low_4gb,
file->GetPath().c_str(),
error_msg);
@@ -1886,7 +1886,7 @@ bool ElfFile::GetLoadedSize(size_t* size, std::string* error_msg) const {
}
bool ElfFile::Strip(File* file, std::string* error_msg) {
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb=*/false, error_msg));
if (elf_file.get() == nullptr) {
return false;
}
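
The elf_file.cc hunks also show the reserve-then-place mapping pattern: the full p_vaddr span is reserved once with PROT_NONE, then each segment is mapped into that reservation with reuse enabled, which implies MAP_FIXED. A bare POSIX sketch of the same idea (ART's MemMap layers naming, alignment, and error strings on top):

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <cstddef>

    // Reserve `total` bytes of address space, then place one file-backed
    // segment at its start with MAP_FIXED (the effect of reuse= true above).
    void* ReserveAndPlace(size_t total, int fd, off_t off, size_t seg_size) {
      void* base = mmap(/*addr=*/ nullptr, total, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/ -1, /*offset=*/ 0);
      if (base == MAP_FAILED) return nullptr;
      void* seg = mmap(base, seg_size, PROT_READ,
                       MAP_PRIVATE | MAP_FIXED, fd, off);
      if (seg == MAP_FAILED) {
        munmap(base, total);  // Release the reservation on failure.
        return nullptr;
      }
      return base;
    }
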
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 62b3d001ba..120a0e9ea9 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -191,7 +191,7 @@ inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
return nullptr;
}
// CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
- return klass->Alloc</*kInstrumented*/true>(
+ return klass->Alloc</*kInstrumented=*/true>(
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
@@ -216,7 +216,7 @@ inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
// Pass in false since the object cannot be finalizable.
// CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
// instrumented.
- return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
+ return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
}
// Pass in false since the object cannot be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
@@ -287,11 +287,11 @@ inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx,
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
- return mirror::Array::Alloc</*kInstrumented*/true>(self,
- klass,
- component_count,
- klass->GetComponentSizeShift(),
- heap->GetCurrentAllocator());
+ return mirror::Array::Alloc</*kInstrumented=*/true>(self,
+ klass,
+ component_count,
+ klass->GetComponentSizeShift(),
+ heap->GetCurrentAllocator());
}
return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
klass->GetComponentSizeShift(), allocator_type);
@@ -530,7 +530,13 @@ ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
UNREACHABLE();
}
case kInterface: {
- uint32_t imt_index = ImTable::GetImtIndex(resolved_method);
+ size_t imt_index;
+ InterpreterCache* tls_cache = self->GetInterpreterCache();
+ if (UNLIKELY(!tls_cache->Get(resolved_method, &imt_index))) {
+ imt_index = ImTable::GetImtIndex(resolved_method);
+ tls_cache->Set(resolved_method, imt_index);
+ }
+ DCHECK_EQ(imt_index, ImTable::GetImtIndex(resolved_method));
PointerSize pointer_size = class_linker->GetImagePointerSize();
ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
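
The kInterface hunk memoizes ImTable::GetImtIndex in the thread-local InterpreterCache, keyed by the resolved method, and DCHECKs that the cached value matches a fresh computation. ART's InterpreterCache is a small fixed-size direct-mapped array rather than a map; the sketch below shows only the generic get-or-compute shape of that fast path:

    #include <unordered_map>

    // Generic memoization shape; illustrative, not the real InterpreterCache.
    template <typename K, typename V, typename Compute>
    V GetOrCompute(std::unordered_map<K, V>& cache, K key, Compute compute) {
      auto it = cache.find(key);
      if (it != cache.end()) {
        return it->second;        // Fast path: thread-local cache hit.
      }
      V value = compute(key);     // Slow path: compute once...
      cache.emplace(key, value);  // ...and remember it for later invokes.
      return value;
    }
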
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5421f69fbd..12136bf476 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -244,7 +244,7 @@ CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, Calle
result.outer_method = outer_caller_and_pc.first;
uintptr_t caller_pc = outer_caller_and_pc.second;
result.caller =
- DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+ DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 8e784c164c..ce12fdee5f 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -31,7 +31,7 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qp
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
// Alloc
- ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
+ ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true);
// Resolution and initialization
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index c782c9c949..2431bce059 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -74,9 +74,9 @@ extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind,
JValue return_value;
return_value.SetJ(0); // we never deoptimize from compiled code with an invoke result.
self->PushDeoptimizationContext(return_value,
- false /* is_reference */,
+ /* is_reference= */ false,
self->GetException(),
- true /* from_code */,
+ /* from_code= */ true,
DeoptimizationMethodType::kDefault);
artDeoptimizeImpl(self, kind, true);
}
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c4d85a3ef8..e9399827f5 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -140,7 +140,7 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(mirror::Class* klas
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_klass = hs.NewHandle(klass);
bool success = class_linker->EnsureInitialized(
- self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+ self, h_klass, /* can_init_fields= */ true, /* can_init_parents= */ true);
if (UNLIKELY(!success)) {
return nullptr;
}
@@ -157,8 +157,8 @@ extern "C" mirror::Class* artResolveTypeFromCode(uint32_t type_idx, Thread* self
ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
caller,
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
}
@@ -175,8 +175,8 @@ extern "C" mirror::Class* artResolveTypeAndVerifyAccessFromCode(uint32_t type_id
ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
caller,
self,
- /* can_run_clinit */ false,
- /* verify_access */ true);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ true);
// Do not StoreTypeInBss(); access check entrypoint is never used together with .bss.
return result.Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d38e3edce9..56232c5cd8 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -392,7 +392,7 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS
constexpr ReadBarrierOption kReadBarrierOption =
kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
obj,
MemberOffset(offset),
ref_addr);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ba7fb6b9db..2e447ec7d7 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -67,7 +67,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
- ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U);
+ ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
self->QuickDeliverException();
}
@@ -75,7 +75,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
+ ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 84631c377e..147249000f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -763,7 +763,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
uint16_t num_regs = accessor.RegistersSize();
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
+ CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -820,7 +820,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
result,
shorty[0] == 'L' || shorty[0] == '[', /* class or array */
force_frame_pop ? nullptr : self->GetException(),
- false /* from_code */,
+ /* from_code= */ false,
DeoptimizationMethodType::kDefault);
// Set special exception to cause deoptimization.
@@ -912,7 +912,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
uint32_t shorty_len = 0;
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
BuildQuickArgumentVisitor local_ref_visitor(
- sp, /* is_static */ false, shorty, shorty_len, &soa, &args);
+ sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
local_ref_visitor.VisitArguments();
DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
@@ -975,7 +975,7 @@ class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
const char* shorty,
uint32_t shorty_len,
size_t arg_pos)
- : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len),
+ : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
cur_pos_(0u),
arg_pos_(arg_pos),
ref_arg_(nullptr) {
@@ -1061,7 +1061,7 @@ std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMetho
<< proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
uint32_t shorty_len = 0;
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
- GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len);
+ GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
ref_args_visitor.VisitArguments();
std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
return ref_args;
@@ -2709,7 +2709,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
conflict_method,
interface_method,
method,
- /*force_new_conflict_method*/false);
+ /*force_new_conflict_method=*/false);
if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
@@ -2784,7 +2784,7 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s
const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
const size_t first_arg = 0;
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
ScopedStackedShadowFramePusher
frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -2877,7 +2877,7 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet
const size_t first_arg = 0;
const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
ScopedStackedShadowFramePusher
frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 50c65ea505..f45197834f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -127,9 +127,7 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, jni_entrypoints, sizeof(size_t));
// Skip across the entrypoints structures.
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, rosalloc_runs, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
sizeof(void*) * kNumRosAllocThreadLocalSizeBracketsInThread);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
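
Dropping mterp_default_ibase and mterp_alt_ibase from Thread::tlsPtr_ collapses three adjacency assertions into one: mterp_current_ibase is now followed directly by rosalloc_runs. A simplified stand-in for what the macro asserts, using offsetof:

    #include <cstddef>

    // Hypothetical two-field layout mirroring the updated check.
    struct TlsPtrSketch {
      void* mterp_current_ibase;
      void* rosalloc_runs;
    };

    static_assert(offsetof(TlsPtrSketch, rosalloc_runs) -
                      offsetof(TlsPtrSketch, mterp_current_ibase) == sizeof(void*),
                  "mterp_current_ibase and rosalloc_runs must be adjacent");
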
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 10af10d1a6..313b2b4fe4 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,10 @@ class AtomicStack {
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bb2beaa94c..80c4c76bd3 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,10 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7cddec6242..9a5bde86b1 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,10 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 40dc6e146a..b4026fc3f3 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -462,7 +462,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* vis
for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
if (obj_ptr->AsMirrorPtr() != nullptr) {
all_null = false;
- visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false);
}
}
count += references.size();
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 8bdf6da6fe..b369a6685e 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,10 @@ class ReadBarrierTable {
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 9dea2f80d1..fba62c3d67 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -75,7 +75,7 @@ class RememberedSetReferenceVisitor {
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
- collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
+ collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false);
DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
}
}
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2946486dfb..76d5d9de7e 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,10 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0dbafde2a5..4e2cf2bf8c 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "rosalloc.h"
+#include "rosalloc-inl.h"
#include <list>
#include <map>
@@ -92,10 +92,10 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3095f9f679..6b394c7061 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,7 +22,7 @@
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
-#include "gc/space/region_space.h"
+#include "gc/space/region_space-inl.h"
#include "gc/verification.h"
#include "lock_word.h"
#include "mirror/class.h"
@@ -76,8 +76,8 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
// we can avoid an expensive CAS.
// For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
// set.
- success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
} else {
success = !bitmap->AtomicTestAndSet(ref);
}
@@ -113,8 +113,8 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(Thread* const self,
}
// This may or may not succeed, which is ok because the object may already be gray.
bool success =
- ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
if (success) {
MutexLock mu(self, immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
@@ -186,7 +186,7 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
UNREACHABLE();
}
} else {
@@ -209,8 +209,8 @@ inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* fr
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
} else {
- ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
- from_ref);
+ ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
}
// Only set the mark bit for baker barrier.
if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 46cc79ce9c..2ae4676cac 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,10 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -488,7 +488,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
// Only change live bytes for full CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
+ cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -601,7 +601,7 @@ void ConcurrentCopying::VerifyGrayImmuneObjects() {
REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
- obj->VisitReferences</*kVisitNativeRoots*/true,
+ obj->VisitReferences</*kVisitNativeRoots=*/true,
kDefaultVerifyFlags,
kWithoutReadBarrier>(visitor, visitor);
}
@@ -669,8 +669,8 @@ void ConcurrentCopying::VerifyNoMissingCardMarks() {
// Objects on clean cards should never have references to newly allocated regions. Note
// that aged cards are also not clean.
if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
- VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
- obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
internal_visitor, internal_visitor);
}
};
@@ -742,7 +742,7 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
Thread* const self = Thread::Current();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
VisitorType visitor(self);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -769,11 +769,11 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
: card;
},
/* card modified visitor */ VoidFunctor());
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardAged);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardAged);
}
}
}
@@ -781,7 +781,7 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
Thread* const self = Thread::Current();
VisitorType visitor(self);
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -791,11 +791,11 @@ void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
// Don't need to scan aged cards since we did these before the pause. Note that scanning cards
// also handles the mod-union table cards.
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardDirty);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardDirty);
if (table != nullptr) {
// Add the cards to the mod-union table so that we can clear cards to save RAM.
table->ProcessCards();
@@ -1376,7 +1376,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
space::RegionSpace* region_space = RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
VerifyNoFromSpaceRefsFieldVisitor visitor(this);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
if (kUseBakerReadBarrier) {
@@ -1558,8 +1558,8 @@ bool ConcurrentCopying::ProcessMarkStackOnce() {
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
- count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
- /* checkpoint_callback */ nullptr);
+ count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+ /* checkpoint_callback= */ nullptr);
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1734,7 +1734,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
AssertToSpaceInvariantFieldVisitor visitor(this);
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
}
@@ -1769,7 +1769,7 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() {
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -1833,7 +1833,7 @@ void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
// Only sweep objects on the live stack.
- SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+ SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
} else {
{
TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
@@ -2060,7 +2060,7 @@ void ConcurrentCopying::ReclaimPhase() {
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Sweep(/* swap_bitmaps */ false);
+ Sweep(/* swap_bitmaps= */ false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -2171,7 +2171,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref
<< " referenced from object " << obj << " at offset " << offset;
}
@@ -2264,12 +2264,12 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref;
}
} else {
// Check to-space invariant in non-moving space.
- AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
+ AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
}
}
}
@@ -2440,7 +2440,7 @@ class ConcurrentCopying::RefFieldsVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
+ collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
}
private:
@@ -2462,7 +2462,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
// Disable the read barrier for a performance reason.
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
@@ -2476,10 +2476,10 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
thread_running_gc_,
ref,
- /*holder*/ obj,
+ /*holder=*/ obj,
offset);
if (to_ref == ref) {
return;
@@ -2553,7 +2553,7 @@ inline void ConcurrentCopying::VisitRoots(
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
// kGrayImmuneObject is true because this is used for the thread flip.
- MarkRoot</*kGrayImmuneObject*/true>(self, root);
+ MarkRoot</*kGrayImmuneObject=*/true>(self, root);
}
}
}
@@ -2702,7 +2702,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self,
if (UNLIKELY(klass == nullptr)) {
// Remove memory protection from the region space and log debugging information.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
}
// There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
// Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
@@ -2716,7 +2716,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self,
size_t bytes_allocated = 0U;
size_t dummy;
bool fall_back_to_non_moving = false;
- mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (LIKELY(to_ref != nullptr)) {
@@ -2790,7 +2790,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self,
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
@@ -3017,7 +3017,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
// AtomicSetReadBarrierState since it will fault if the address is not
// valid.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
}
// Not marked nor on the allocation stack. Try to mark it.
// This may or may not succeed, which is ok.
@@ -3131,7 +3131,7 @@ bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror
} while (!field->CasWeakRelaxed(from_ref, to_ref));
} else {
// TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
- field->Assign</* kIsVolatile */ true>(to_ref);
+ field->Assign</* kIsVolatile= */ true>(to_ref);
}
}
return true;
@@ -3151,7 +3151,7 @@ void ConcurrentCopying::ProcessReferences(Thread* self) {
// We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+ /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
@@ -3169,7 +3169,8 @@ mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* c
ScopedTrace tr(__FUNCTION__);
const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
mirror::Object* ret =
- Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
+ Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
}
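
The concurrent_copying changes apply the same name= comments to non-type template arguments, as in Mark</*kGrayImmuneObject=*/true, ...>. The annotation reads the same for template flags as for ordinary parameters; a small hypothetical example:

    #include <cstdio>

    struct Node { Node* next; };

    // A boolean template flag, annotated at the call site like an argument.
    template <bool kVerbose>
    int Length(const Node* head) {
      int n = 0;
      for (const Node* p = head; p != nullptr; p = p->next) ++n;
      if (kVerbose) {
        std::printf("length=%d\n", n);
      }
      return n;
    }

    // Usage: int n = Length</*kVerbose=*/ false>(head);
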
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3b5961899f..3c20e5156f 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -57,7 +57,7 @@ void ImmuneSpaces::CreateLargestImmuneRegion() {
if (image_oat_file != nullptr) {
intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
reinterpret_cast<uintptr_t>(image_oat_file->End()),
- /*image*/false));
+ /*image=*/false));
}
}
intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 3f85c711e1..0e5fac123e 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -32,7 +32,7 @@ namespace collector {
class DummyOatFile : public OatFile {
public:
- DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+ DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
begin_ = begin;
end_ = end;
}
@@ -45,7 +45,7 @@ class DummyImageSpace : public space::ImageSpace {
std::unique_ptr<DummyOatFile>&& oat_file,
MemMap&& oat_map)
: ImageSpace("DummyImageSpace",
- /*image_location*/"",
+ /*image_location=*/"",
std::move(map),
std::move(live_bitmap),
map.End()),
@@ -87,7 +87,7 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
image_begin,
image_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << error_str;
@@ -100,7 +100,7 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -110,23 +110,23 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
new (map.Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map.Begin()),
- /*image_size*/map.Size(),
+ /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
+ /*image_size=*/map.Size(),
sections,
- /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum*/0u,
+ /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
+ /*oat_checksum=*/0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*boot_image_begin*/0u,
- /*boot_image_size*/0u,
- /*boot_oat_begin*/0u,
- /*boot_oat_size*/0u,
- /*pointer_size*/sizeof(void*),
+ /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*boot_image_begin=*/0u,
+ /*boot_image_size=*/0u,
+ /*boot_oat_begin=*/0u,
+ /*boot_oat_size=*/0u,
+ /*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
- /*storage_size*/0u);
+ /*data_size=*/0u);
return new DummyImageSpace(std::move(map),
std::move(live_bitmap),
std::move(oat_file),
@@ -138,10 +138,10 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
static uint8_t* GetContinuousMemoryRegion(size_t size) {
std::string error_str;
MemMap map = MemMap::MapAnonymous("reserve",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
@@ -163,7 +163,7 @@ class DummySpace : public space::ContinuousSpace {
space::kGcRetentionPolicyNeverCollect,
begin,
end,
- /*limit*/end) {}
+ /*limit=*/end) {}
space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
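
The ImageHeader construction in this test also shows why the trailing = matters once parameters are renamed: the final argument's comment changes from /*storage_size*/ to /*data_size=*/ to track the declaration. A hypothetical minimal case:

    // Hypothetical API whose parameter was renamed storage_size -> data_size.
    inline void WriteBlob(int data_size) { (void)data_size; }

    void Demo() {
      WriteBlob(/*data_size=*/ 0);  // a stale /*storage_size=*/ comment would
                                    // now be flagged against the declaration
    }
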
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5f44a72e19..399f9ff301 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,10 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -283,9 +283,9 @@ void MarkSweep::MarkingPhase() {
// cards (during the call to Heap::ProcessCard) are not reordered
// *after* marking actually starts?
heap_->ProcessCards(GetTimings(),
- /* use_rem_sets */ false,
- /* process_alloc_space_cards */ true,
- /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
+ /* use_rem_sets= */ false,
+ /* process_alloc_space_cards= */ true,
+ /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
MarkReachableObjects();
@@ -446,7 +446,7 @@ class MarkSweep::MarkObjectSlowPath {
!large_object_space->Contains(obj)))) {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8cd484fc48..19b1fc7878 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -251,6 +251,7 @@ void SemiSpace::MarkingPhase() {
ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
+ Runtime::Current()->BroadcastForNewSystemWeaks();
Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
@@ -727,7 +728,7 @@ void SemiSpace::ScanObject(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
MarkObjectVisitor visitor(this);
// Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
}
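
semi_space.cc gains a BroadcastForNewSystemWeaks call after sweeping system weaks, presumably to wake threads blocked until weak-reference state is consistent again. The underlying pattern is a plain condition broadcast; a hedged sketch with hypothetical names:

    #include <condition_variable>
    #include <mutex>

    // Illustrative only; ART uses its own Mutex/ConditionVariable types.
    std::mutex weak_ref_lock;
    std::condition_variable weak_ref_cond;
    bool weak_ref_access_enabled = false;

    void BroadcastForNewSystemWeaks() {
      std::lock_guard<std::mutex> lg(weak_ref_lock);
      weak_ref_access_enabled = true;
      weak_ref_cond.notify_all();  // Wake every thread waiting on the state.
    }
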
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index af9000b27a..e253dfb868 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -129,10 +129,10 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
if (!self->IsExceptionPending()) {
// AllocObject will pick up the new allocator type, and instrumented as true is the safe
// default.
- return AllocObject</*kInstrumented*/true>(self,
- klass,
- byte_count,
- pre_fence_visitor);
+ return AllocObject</*kInstrumented=*/true>(self,
+ klass,
+ byte_count,
+ pre_fence_visitor);
}
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 78e8422887..a31cbe755f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -433,8 +433,8 @@ Heap::Heap(size_t initial_size,
request_begin,
capacity_,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
- /* reuse */ false,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
heap_reservation.IsValid() ? &heap_reservation : nullptr,
&error_str);
}
@@ -463,7 +463,7 @@ Heap::Heap(size_t initial_size,
initial_size,
size,
size,
- /* can_move_objects */ false);
+ /* can_move_objects= */ false);
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
<< non_moving_space_mem_map_begin;
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
@@ -505,11 +505,11 @@ Heap::Heap(size_t initial_size,
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -519,7 +519,7 @@ Heap::Heap(size_t initial_size,
growth_limit_,
capacity_,
name,
- /* can_move_objects */ true));
+ /* can_move_objects= */ true));
CHECK(main_space_backup_.get() != nullptr);
// Add the space so it's accounted for in the heap_begin and heap_end.
AddSpace(main_space_backup_.get());
@@ -634,13 +634,13 @@ Heap::Heap(size_t initial_size,
}
if (MayUseCollector(kCollectorTypeCC)) {
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
- /*young_gen*/false,
+ /*young_gen=*/false,
"",
measure_gc_performance);
if (kEnableGenerationalConcurrentCopyingCollection) {
young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
this,
- /*young_gen*/true,
+ /*young_gen=*/true,
"young",
measure_gc_performance);
}
@@ -671,7 +671,7 @@ Heap::Heap(size_t initial_size,
bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
- MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -696,7 +696,7 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name,
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb*/ true,
+ /* low_4gb=*/ true,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
@@ -1323,7 +1323,7 @@ void Heap::DoPendingCollectorTransition() {
// Invoke CC full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
kGcCauseCollectorTransition,
- /*clear_soft_references*/false);
+ /*clear_soft_references=*/false);
} else {
VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
}
@@ -1783,7 +1783,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
break;
}
// Try to transition the heap if the allocation failure was due to the space being full.
- if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
+ if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
@@ -3870,7 +3870,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
@@ -3916,7 +3916,7 @@ void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+ << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
@@ -4012,7 +4012,7 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
{
static constexpr size_t kMaxFrames = 16u;
FixedSizeBacktrace<kMaxFrames> backtrace;
- backtrace.Collect(/* skip_frames */ 2);
+ backtrace.Collect(/* skip_count= */ 2);
uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
@@ -4023,7 +4023,7 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
if (new_backtrace) {
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(obj);
- CollectGarbage(/* clear_soft_references */ false);
+ CollectGarbage(/* clear_soft_references= */ false);
unique_backtrace_count_.fetch_add(1);
} else {
seen_backtrace_count_.fetch_add(1);
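The CheckGcStressMode hunks above trigger a full GC only when a freshly collected backtrace hashes to a value not seen before. A minimal sketch (not ART code) of that dedup-by-hash pattern, with std::unordered_set standing in for the runtime's seen_backtraces_ set:

#include <cstdint>
#include <unordered_set>

class BacktraceDeduper {
 public:
  // Returns true only the first time a given hash is observed.
  bool IsNew(uint64_t hash) { return seen_.insert(hash).second; }

 private:
  std::unordered_set<uint64_t> seen_;
};

int main() {
  BacktraceDeduper dedup;
  uint64_t unique = 0, repeated = 0;
  for (uint64_t h : {1u, 2u, 1u, 3u, 2u}) {
    (dedup.IsNew(h) ? unique : repeated)++;
  }
  // unique == 3 and repeated == 2, mirroring unique_backtrace_count_ and
  // seen_backtrace_count_ above.
  return unique == 3 && repeated == 2 ? 0 : 1;
}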
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 05a04f21db..a133a1058c 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -37,7 +37,7 @@ class HeapTest : public CommonRuntimeTest {
gc::Heap::kPreferredAllocSpaceBegin,
16 * KB,
PROT_READ,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
@@ -77,7 +77,7 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
}
}
}
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
}
TEST_F(HeapTest, HeapBitmapCapacityTest) {
@@ -91,7 +91,7 @@ TEST_F(HeapTest, HeapBitmapCapacityTest) {
}
TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c212bad530..d4af117e46 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,16 +60,16 @@ static inline MemberOffset GetSlowPathFlagOffset(ObjPtr<mirror::Class> reference
static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
- reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>(
+ reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
slow_path_offset, enabled ? 1 : 0);
}
void ReferenceProcessor::EnableSlowPath() {
- SetSlowPathFlag(/* enabled */ true);
+ SetSlowPathFlag(/* enabled= */ true);
}
void ReferenceProcessor::DisableSlowPath(Thread* self) {
- SetSlowPathFlag(/* enabled */ false);
+ SetSlowPathFlag(/* enabled= */ false);
condition_.Broadcast(self);
}
@@ -238,13 +238,13 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
// do_atomic_update needs to be true because this happens outside of the reference processing
// phase.
- if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
if (UNLIKELY(collector->IsTransactionActive())) {
// In transaction mode, keep the referent alive and avoid any reference processing to avoid the
// issue of rolling back reference processing. do_atomic_update needs to be true because this
// happens outside of the reference processing phase.
if (!referent->IsNull()) {
- collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+ collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
}
return;
}
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index e25e279ea6..5c11e502c2 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -136,7 +136,7 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -158,7 +158,7 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
@@ -187,7 +187,7 @@ void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
if (referent_addr->AsMirrorPtr() != nullptr) {
// do_atomic_update is false because mutators can't access the referent due to the weak ref
// access blocking.
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
}
ref = ref->GetPendingNext();
} while (LIKELY(ref != head));
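ForwardSoftReferences above walks a queue that is chained through each Reference's pending-next field into a circular singly-linked list, so a do/while that stops upon returning to the head visits every element exactly once. A freestanding sketch of that traversal (not ART code):

#include <cassert>

struct Node {
  int value;
  Node* pending_next;
};

int SumCircularList(Node* head) {
  int sum = 0;
  Node* ref = head;
  do {
    sum += ref->value;
    ref = ref->pending_next;
  } while (ref != head);  // stop once we are back at the head
  return sum;
}

int main() {
  Node c{3, nullptr};
  Node b{2, &c};
  Node a{1, &b};
  c.pending_next = &a;  // close the cycle
  assert(SumCircularList(&a) == 6);
  return 0;
}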
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 80af7001ff..497a0c2e5f 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -32,7 +32,7 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 36d2161262..73582a00c0 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -54,7 +54,7 @@ DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
end,
limit,
growth_limit,
- /* create_bitmaps */ true,
+ /* create_bitmaps= */ true,
can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 16359aca10..9e679573bd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -184,7 +184,7 @@ static bool FindImageFilenameImpl(const char* image_location,
bool have_android_data = false;
*dalvik_cache_exists = false;
GetDalvikCache(GetInstructionSetString(image_isa),
- /* create_if_absent */ true,
+ /*create_if_absent=*/ true,
dalvik_cache,
&have_android_data,
dalvik_cache_exists,
@@ -389,7 +389,7 @@ class ImageSpace::Loader {
/*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
validate_oat_file,
@@ -429,9 +429,9 @@ class ImageSpace::Loader {
image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
- if (VLOG_IS_ON(image)) {
- logger.Dump(LOG_STREAM(INFO));
- }
+ }
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG_STREAM(INFO));
}
return space;
}
@@ -509,21 +509,11 @@ class ImageSpace::Loader {
const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
kPageSize);
const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
- const ImageSection& relocations_section = image_header->GetImageRelocationsSection();
- if (relocations_section.Offset() != bitmap_section.Offset() + bitmap_section.Size()) {
+ if (end_of_bitmap != image_file_size) {
*error_msg = StringPrintf(
- "Relocations do not start immediately after bitmap: %u vs. %u + %u.",
- relocations_section.Offset(),
- bitmap_section.Offset(),
- bitmap_section.Size());
- return nullptr;
- }
- const size_t end_of_relocations = end_of_bitmap + relocations_section.Size();
- if (end_of_relocations != image_file_size) {
- *error_msg = StringPrintf(
- "Image file size does not equal end of relocations: size=%" PRIu64 " vs. %zu.",
+ "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
image_file_size,
- end_of_relocations);
+ end_of_bitmap);
return nullptr;
}
@@ -554,7 +544,7 @@ class ImageSpace::Loader {
MAP_PRIVATE,
file->Fd(),
image_bitmap_offset,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
image_filename,
error_msg);
if (!image_bitmap_map.IsValid()) {
@@ -640,10 +630,10 @@ class ImageSpace::Loader {
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
- /* start= */ 0,
- /* low_4gb= */ true,
+ /*start=*/ 0,
+ /*low_4gb=*/ true,
image_filename,
- /* reuse= */ false,
+ /*reuse=*/ false,
image_reservation,
error_msg);
}
@@ -662,8 +652,8 @@ class ImageSpace::Loader {
address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
- /* low_4gb= */ true,
- /* reuse= */ false,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
image_reservation,
error_msg);
if (map.IsValid()) {
@@ -673,8 +663,8 @@ class ImageSpace::Loader {
PROT_READ,
MAP_PRIVATE,
fd,
- /* start= */ 0,
- /* low_4gb= */ false,
+ /*start=*/ 0,
+ /*low_4gb=*/ false,
image_filename,
error_msg);
if (!temp_map.IsValid()) {
@@ -887,7 +877,8 @@ class ImageSpace::Loader {
mirror::Class* klass = obj->AsClass<kVerifyNone>();
// Fixup super class before visiting instance fields which require
// information from their super class to calculate offsets.
- mirror::Class* super_class = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Class* super_class =
+ klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr();
if (super_class != nullptr) {
mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class));
if (new_super_class != super_class && IsInAppImage(new_super_class)) {
@@ -1261,13 +1252,12 @@ class ImageSpace::Loader {
if (oat_reservation != nullptr) {
oat_data_begin += oat_reservation->Begin() - image_header.GetOatFileBegin();
}
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- oat_data_begin,
!Runtime::Current()->IsAotCompiler(),
- /* low_4gb= */ false,
- /* abs_dex_location= */ nullptr,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
oat_reservation,
error_msg));
if (oat_file == nullptr) {
@@ -1277,6 +1267,7 @@ class ImageSpace::Loader {
error_msg->c_str());
return nullptr;
}
+ CHECK(oat_data_begin == oat_file->Begin());
uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
uint32_t image_oat_checksum = image_header.GetOatChecksum();
if (oat_checksum != image_oat_checksum) {
@@ -1341,7 +1332,7 @@ class ImageSpace::BootImageLoader {
/*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
- TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
@@ -1380,7 +1371,7 @@ class ImageSpace::BootImageLoader {
filename = GetSystemImageFilename(location.c_str(), image_isa_);
spaces.push_back(Load(location,
filename,
- /* validate_oat_file= */ false,
+ /*validate_oat_file=*/ false,
&logger,
&image_reservation,
&oat_reservation,
@@ -1396,9 +1387,9 @@ class ImageSpace::BootImageLoader {
MaybeRelocateSpaces(spaces, &logger);
InitRuntimeMethods(spaces);
*extra_reservation = std::move(local_extra_reservation);
+ VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
boot_image_spaces->swap(spaces);
- VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
if (VLOG_IS_ON(image)) {
logger.Dump(LOG_STREAM(INFO));
}
@@ -1411,7 +1402,7 @@ class ImageSpace::BootImageLoader {
/*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
- TimingLogger logger(__PRETTY_FUNCTION__, /* precise= */ true, VLOG_IS_ON(image));
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
DCHECK(DalvikCacheExists());
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
@@ -1528,7 +1519,7 @@ class ImageSpace::BootImageLoader {
PatchedObjectsMap(uint8_t* image_space_begin, size_t size)
: image_space_begin_(image_space_begin),
data_(new uint8_t[BitsToBytesRoundUp(NumLocations(size))]),
- visited_objects_(data_.get(), /* bit_start= */ 0u, NumLocations(size)) {
+ visited_objects_(data_.get(), /*bit_start=*/ 0u, NumLocations(size)) {
DCHECK_ALIGNED(image_space_begin_, kObjectAlignment);
std::memset(data_.get(), 0, BitsToBytesRoundUp(NumLocations(size)));
}
@@ -1539,7 +1530,7 @@ class ImageSpace::BootImageLoader {
ALWAYS_INLINE void MarkVisited(mirror::Object* object) {
DCHECK(!IsVisited(object));
- visited_objects_.StoreBit(GetIndex(object), /* value= */ true);
+ visited_objects_.StoreBit(GetIndex(object), /*value=*/ true);
}
private:
@@ -1564,7 +1555,7 @@ class ImageSpace::BootImageLoader {
: diff_(diff) {}
void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
- PatchGcRoot</* kMayBeNull */ false>(diff_, &field->DeclaringClassRoot());
+ PatchGcRoot</*kMayBeNull=*/ false>(diff_, &field->DeclaringClassRoot());
}
private:
@@ -1629,7 +1620,7 @@ class ImageSpace::BootImageLoader {
static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
// First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
// This should be the only reference field in j.l.Object and we assert that below.
- PatchReferenceField</* kMayBeNull */ false>(klass, mirror::Object::ClassOffset());
+ PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
// Then patch the reference instance fields described by j.l.Class.class.
// Use the sizeof(Object) to determine where these reference fields start;
// this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
@@ -1650,7 +1641,8 @@ class ImageSpace::BootImageLoader {
// we can get a reference to j.l.Object.class and assert that it has only one
// reference instance field (the `klass_` patched above).
if (kIsDebugBuild && klass == class_class) {
- mirror::Class* object_class = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+ ObjPtr<mirror::Class> object_class =
+ klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
}
// Then patch static fields.
@@ -1682,19 +1674,19 @@ class ImageSpace::BootImageLoader {
void VisitPointerArray(mirror::PointerArray* pointer_array)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Fully patch the pointer array, including the `klass_` field.
- PatchReferenceField</* kMayBeNull */ false>(pointer_array, mirror::Object::ClassOffset());
+ PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
int32_t length = pointer_array->GetLength<kVerifyNone>();
for (int32_t i = 0; i != length; ++i) {
ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
- PatchNativePointer<kPointerSize, /* kMayBeNull */ false>(diff_, method_entry);
+ PatchNativePointer<kPointerSize, /*kMayBeNull=*/ false>(diff_, method_entry);
}
}
void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
// Visit all reference fields.
- object->VisitReferences</* kVisitNativeRoots */ false,
+ object->VisitReferences</*kVisitNativeRoots=*/ false,
kVerifyNone,
kWithoutReadBarrier>(*this, *this);
// This function should not be called for classes.
@@ -1711,7 +1703,7 @@ class ImageSpace::BootImageLoader {
ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass->IsTypeOfReferenceClass());
- this->operator()(ref, mirror::Reference::ReferentOffset(), /* is_static= */ false);
+ this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
}
// Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
@@ -1750,8 +1742,8 @@ class ImageSpace::BootImageLoader {
DCHECK(kMayBeNull || old_value != nullptr);
if (!kMayBeNull || old_value != nullptr) {
mirror::Object* new_value = RelocatedAddress(old_value, diff_);
- object->SetFieldObjectWithoutWriteBarrier</* kTransactionActive */ false,
- /* kCheckTransaction */ true,
+ object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+ /*kCheckTransaction=*/ true,
kVerifyNone>(offset, new_value);
}
}
@@ -1836,9 +1828,9 @@ class ImageSpace::BootImageLoader {
if (image_header.GetInternedStringsSection().Size() != 0u) {
const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
size_t read_count;
- InternTable::UnorderedSet temp_set(data, /* make_copy_of_data= */ false, &read_count);
+ InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
for (GcRoot<mirror::String>& slot : temp_set) {
- PatchGcRoot</* kMayBeNull */ false>(diff, &slot);
+ PatchGcRoot</*kMayBeNull=*/ false>(diff, &slot);
}
}
@@ -1847,7 +1839,7 @@ class ImageSpace::BootImageLoader {
if (image_header.GetClassTableSection().Size() != 0u) {
uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
size_t read_count;
- ClassTable::ClassSet temp_set(data, /* make_copy_of_data= */ false, &read_count);
+ ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
DCHECK(!temp_set.empty());
ClassTableVisitor class_table_visitor(diff);
for (ClassTable::TableSlot& slot : temp_set) {
@@ -1935,8 +1927,8 @@ class ImageSpace::BootImageLoader {
ObjPtr<mirror::Executable>::DownCast(MakeObjPtr(object));
ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
ArtMethod* patched_method = RelocatedAddress(unpatched_method, diff);
- as_executable->SetArtMethod</* kTransactionActive */ false,
- /* kCheckTransaction */ true,
+ as_executable->SetArtMethod</*kTransactionActive=*/ false,
+ /*kCheckTransaction=*/ true,
kVerifyNone>(patched_method);
}
}
@@ -2019,8 +2011,8 @@ class ImageSpace::BootImageLoader {
// descriptor (and the associated exclusive lock) to be released when
// we leave Create.
ScopedFlock image = LockedFile::Open(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
+ /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
+ /*block=*/ true,
error_msg);
VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
@@ -2034,7 +2026,7 @@ class ImageSpace::BootImageLoader {
return Loader::Init(image_filename.c_str(),
image_location.c_str(),
validate_oat_file,
- /* oat_file= */ nullptr,
+ /*oat_file=*/ nullptr,
logger,
image_reservation,
oat_reservation,
@@ -2048,14 +2040,13 @@ class ImageSpace::BootImageLoader {
/*out*/ std::vector<std::string>* all_locations,
/*out*/ std::string* error_msg) {
std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd= */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base= */ nullptr,
- /* executable= */ false,
- /* low_4gb= */ false,
- /* abs_dex_location= */ nullptr,
- /* reservation= */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
error_msg));
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
@@ -2109,9 +2100,9 @@ class ImageSpace::BootImageLoader {
reinterpret_cast32<uint8_t*>(addr),
total_size,
PROT_NONE,
- /* low_4gb= */ true,
- /* reuse= */ false,
- /* reservation= */ nullptr,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
error_msg);
if (!image_reservation->IsValid()) {
return false;
@@ -2284,7 +2275,7 @@ bool ImageSpace::LoadBootImage(
// Step 2: Check if we have an existing image in the dalvik cache.
if (loader.HasCache()) {
std::string local_error_msg;
- if (loader.LoadFromDalvikCache(/* validate_oat_file= */ true,
+ if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ true,
extra_reservation_size,
boot_image_spaces,
extra_reservation,
@@ -2304,7 +2295,7 @@ bool ImageSpace::LoadBootImage(
bool compilation_success =
GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
if (compilation_success) {
- if (loader.LoadFromDalvikCache(/* validate_oat_file= */ false,
+ if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
extra_reservation_size,
boot_image_spaces,
extra_reservation,
@@ -2365,10 +2356,10 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
std::string* error_msg) {
return Loader::InitAppImage(image,
image,
- /* validate_oat_file= */ false,
+ /*validate_oat_file=*/ false,
oat_file,
- /* image_reservation= */ nullptr,
- /* oat_reservation= */ nullptr,
+ /*image_reservation=*/ nullptr,
+ /*oat_reservation=*/ nullptr,
error_msg);
}
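The tightened check in the Loader hunks above assumes that, with the relocations section gone, the bitmap is the final section of the image file and the file must end exactly where the bitmap ends. A sketch of that layout arithmetic with made-up section sizes:

#include <cassert>
#include <cstddef>

constexpr size_t kPageSize = 4096;

constexpr size_t RoundUp(size_t x, size_t n) {
  return (x + n - 1) / n * n;
}

int main() {
  const size_t header_size = 512;    // hypothetical sizeof(ImageHeader)
  const size_t data_size = 10000;    // hypothetical GetDataSize()
  const size_t bitmap_size = 2048;   // hypothetical bitmap section size

  // Header plus data is rounded up to a page boundary; the bitmap follows.
  const size_t bitmap_offset = RoundUp(header_size + data_size, kPageSize);
  const size_t end_of_bitmap = bitmap_offset + bitmap_size;

  const size_t file_size = end_of_bitmap;  // a valid image file
  assert(end_of_bitmap == file_size);
  return 0;
}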
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 4db6fdce1d..b940d88de8 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -148,16 +148,6 @@ class ImageSpace : public MemMapSpace {
return Begin() + GetImageHeader().GetImageSize();
}
- // Return the start of the associated oat file.
- uint8_t* GetOatFileBegin() const {
- return GetImageHeader().GetOatFileBegin();
- }
-
- // Return the end of the associated oat file.
- uint8_t* GetOatFileEnd() const {
- return GetImageHeader().GetOatFileEnd();
- }
-
void DumpSections(std::ostream& os) const;
// De-initialize the image-space by undoing the effects in Init().
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index cc70788725..0a35bce0fd 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -43,14 +43,13 @@ TEST_F(DexoptTest, ValidateOatFile) {
args.push_back("--oat-file=" + oat_location);
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat != nullptr) << error_msg;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index b783cfecbb..a7f82f6e36 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,10 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -353,7 +353,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +372,10 @@ FreeListSpace::FreeListSpace(const std::string& name,
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 445560ad8d..189aeb5297 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -19,6 +19,7 @@
#include "android-base/stringprintf.h"
#include "base/logging.h" // For VLOG
+#include "base/mutex-inl.h"
#include "base/utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -109,7 +110,7 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index bda1f1c561..5ff1270b56 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -18,6 +18,9 @@
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#include "region_space.h"
+
+#include "base/mutex-inl.h"
+#include "mirror/object-inl.h"
#include "thread-current-inl.h"
namespace art {
@@ -236,6 +240,15 @@ inline void RegionSpace::WalkInternal(Visitor&& visitor) {
}
}
+template <typename Visitor>
+inline void RegionSpace::Walk(Visitor&& visitor) {
+ WalkInternal</* kToSpaceOnly= */ false>(visitor);
+}
+template <typename Visitor>
+inline void RegionSpace::WalkToSpace(Visitor&& visitor) {
+ WalkInternal</* kToSpaceOnly= */ true>(visitor);
+}
+
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
@@ -409,7 +422,7 @@ inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_alloc
} else {
DCHECK(reg->IsLargeTail());
}
- reg->Clear(/*zero_and_release_pages*/true);
+ reg->Clear(/*zero_and_release_pages=*/true);
if (kForEvac) {
--num_evac_regions_;
} else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index eba6faccb1..31bbfb8f00 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,7 @@ MemMap RegionSpace::CreateMemMap(const std::string& name,
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
@@ -393,7 +393,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
- r->Clear(/*zero_and_release_pages*/false);
+ r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
@@ -656,7 +656,7 @@ void RegionSpace::Clear() {
if (!r->IsFree()) {
--num_non_free_regions_;
}
- r->Clear(/*zero_and_release_pages*/true);
+ r->Clear(/*zero_and_release_pages=*/true);
}
SetNonFreeRegionLimit(0);
DCHECK_EQ(num_non_free_regions_, 0u);
@@ -735,7 +735,7 @@ bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
RevokeThreadLocalBuffersLocked(self);
// Retain sufficient free regions for full evacuation.
- Region* r = AllocateRegion(/*for_evac*/ false);
+ Region* r = AllocateRegion(/*for_evac=*/ false);
if (r != nullptr) {
r->is_a_tlab_ = true;
r->thread_ = self;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 5af1dd3cf7..8810f8ce58 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -205,14 +205,9 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
// Go through all of the blocks and visit the continuous objects.
template <typename Visitor>
- ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
- WalkInternal<false /* kToSpaceOnly */>(visitor);
- }
+ ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
template <typename Visitor>
- ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
- REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true /* kToSpaceOnly */>(visitor);
- }
+ ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
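The Walk/WalkToSpace hunks above (together with the region_space-inl.h hunk earlier) apply a common header split: the class header keeps only the template member declarations, and the definitions move to an -inl.h so that only its includers pay for the extra dependencies. A single-file sketch of the pattern with a hypothetical Widget:

#include <cstdio>  // stands in for the heavier includes an -inl.h may need

// --- widget.h ---
class Widget {
 public:
  template <typename Visitor>
  void Walk(Visitor&& visitor);  // declaration only; defined in widget-inl.h

 private:
  template <bool kToSpaceOnly, typename Visitor>
  void WalkInternal(Visitor&& visitor);
};

// --- widget-inl.h ---
template <bool kToSpaceOnly, typename Visitor>
void Widget::WalkInternal(Visitor&& visitor) {
  visitor(kToSpaceOnly);
}

template <typename Visitor>
void Widget::Walk(Visitor&& visitor) {
  WalkInternal</*kToSpaceOnly=*/ false>(visitor);
}

// --- caller.cc ---
int main() {
  Widget w;
  w.Walk([](bool to_space_only) { std::printf("to_space_only=%d\n", to_space_only); });
  return 0;
}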
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index ca3aff47f0..b50859b8e6 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -16,6 +16,8 @@
#include "space_test.h"
+#include "rosalloc_space.h"
+
namespace art {
namespace gc {
namespace space {
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index a78623e593..5e7ced6e23 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -16,6 +16,8 @@
#include "space_test.h"
+#include "rosalloc_space.h"
+
namespace art {
namespace gc {
namespace space {
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index c94b666695..5aac21721f 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -22,6 +22,7 @@
#include "base/globals.h"
#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 07725b9a56..4fe8027c2d 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -112,6 +112,8 @@ static bool CollectorDoesAllowOrBroadcast() {
switch (type) {
case CollectorType::kCollectorTypeCMS:
case CollectorType::kCollectorTypeCC:
+ case CollectorType::kCollectorTypeSS:
+ case CollectorType::kCollectorTypeGSS:
return true;
default:
@@ -143,7 +145,7 @@ TEST_F(SystemWeakTest, Keep) {
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -164,7 +166,7 @@ TEST_F(SystemWeakTest, Discard) {
cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -188,7 +190,7 @@ TEST_F(SystemWeakTest, Remove) {
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -203,7 +205,7 @@ TEST_F(SystemWeakTest, Remove) {
Runtime::Current()->RemoveSystemWeakHolder(&cswh);
// Trigger another GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expectation: no change in the numbers.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 2c31c65514..47c54bd189 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -87,7 +87,8 @@ void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder,
bool fatal) const {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index d091e7f371..f61c700a36 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -199,7 +199,7 @@ inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(ObjPtr<Mirr
inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
: BaseHandleScope(self->GetTopHandleScope()),
self_(self) {
- current_scope_ = new LocalScopeType(/*link*/ nullptr);
+ current_scope_ = new LocalScopeType(/*link=*/ nullptr);
self_->PushHandleScope(this);
}
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 580224e439..c16e7f347e 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -242,9 +242,9 @@ inline Action GetMemberAction(T* member,
AccessMethod access_method)
REQUIRES_SHARED(Locks::mutator_lock_) {
bool is_caller_trusted =
- detail::IsCallerTrusted(/* caller */ nullptr, caller_class_loader, caller_dex_cache);
+ detail::IsCallerTrusted(/* caller= */ nullptr, caller_class_loader, caller_dex_cache);
return GetMemberAction(member,
- /* thread */ nullptr,
+ /* thread= */ nullptr,
[is_caller_trusted] (Thread*) { return is_caller_trusted; },
access_method);
}
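The GetMemberAction overload above precomputes the trust decision once and wraps it in a capturing lambda, so the general overload only ever deals with a callable predicate. A minimal sketch of that pattern (names hypothetical, not ART code):

#include <cassert>

template <typename Fn>
int Act(Fn fn) {
  return fn(/*thread=*/ nullptr) ? 1 : 0;
}

int main() {
  bool is_caller_trusted = true;  // computed up front, as in the hunk above
  int r = Act([is_caller_trusted](void* /*thread*/) { return is_caller_trusted; });
  assert(r == 1);
  return 0;
}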
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index f696e2558c..832bacbb3d 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -41,6 +41,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/array_ref.h"
+#include "base/file_utils.h"
#include "base/globals.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -761,13 +762,13 @@ class Hprof : public SingleRootVisitor {
// Where exactly are we writing to?
int out_fd;
if (fd_ >= 0) {
- out_fd = dup(fd_);
+ out_fd = DupCloexec(fd_);
if (out_fd < 0) {
ThrowRuntimeException("Couldn't dump heap; dup(%d) failed: %s", fd_, strerror(errno));
return false;
}
} else {
- out_fd = open(filename_.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0644);
+ out_fd = open(filename_.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (out_fd < 0) {
ThrowRuntimeException("Couldn't dump heap; open(\"%s\") failed: %s", filename_.c_str(),
strerror(errno));
@@ -1252,7 +1253,7 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
__ AddU1(HPROF_CLASS_DUMP);
__ AddClassId(LookupClassId(klass));
__ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(klass));
- __ AddClassId(LookupClassId(klass->GetSuperClass()));
+ __ AddClassId(LookupClassId(klass->GetSuperClass().Ptr()));
__ AddObjectId(klass->GetClassLoader());
__ AddObjectId(nullptr); // no signer
__ AddObjectId(nullptr); // no prot domain
@@ -1543,7 +1544,7 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj,
reinterpret_cast<uintptr_t>(obj) + kObjectAlignment / 2);
__ AddObjectId(fake_object_array);
}
- klass = klass->GetSuperClass();
+ klass = klass->GetSuperClass().Ptr();
} while (klass != nullptr);
// Patch the instance field length.
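Both descriptor hunks above close the same leak: file descriptors created without the close-on-exec flag stay open across exec() in forked children. A plain-POSIX sketch of the two idioms involved (DupCloexec is an ART helper; the fcntl call below is presumably its core):

#include <fcntl.h>
#include <unistd.h>

int OpenForDump(const char* path) {
  // O_CLOEXEC at open time, instead of a bare O_WRONLY|O_CREAT|O_TRUNC.
  return open(path, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
}

int DupForDump(int fd) {
  // F_DUPFD_CLOEXEC instead of dup(), so the copy also carries the flag.
  return fcntl(fd, F_DUPFD_CLOEXEC, 0);
}

int main() {
  int fd = OpenForDump("/tmp/example.hprof");
  if (fd < 0) return 1;
  int copy = DupForDump(fd);
  if (copy >= 0) close(copy);
  close(fd);
  return 0;
}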
diff --git a/runtime/image.cc b/runtime/image.cc
index a4351d021b..e7f44864b2 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' }; // Remove PIC flags.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '5', '\0' }; // Remove relocation section.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index bd8bc28e0c..0dec5f71ab 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -237,7 +237,6 @@ class PACKED(4) ImageHeader {
kSectionClassTable,
kSectionStringReferenceOffsets,
kSectionImageBitmap,
- kSectionImageRelocations,
kSectionCount, // Number of elements in enum.
};
@@ -290,18 +289,14 @@ class PACKED(4) ImageHeader {
return GetImageSection(kSectionClassTable);
}
- const ImageSection& GetImageBitmapSection() const {
- return GetImageSection(kSectionImageBitmap);
- }
-
- const ImageSection& GetImageRelocationsSection() const {
- return GetImageSection(kSectionImageRelocations);
- }
-
const ImageSection& GetImageStringReferenceOffsetsSection() const {
return GetImageSection(kSectionStringReferenceOffsets);
}
+ const ImageSection& GetImageBitmapSection() const {
+ return GetImageSection(kSectionImageBitmap);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Object> GetImageRoot(ImageRoot image_root) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -447,11 +442,27 @@ class PACKED(4) ImageHeader {
};
/*
- * Tags the last bit. Used by AppImage logic to differentiate between managed
- * and native references.
+ * This type holds the information necessary to fix up AppImage string
+ * references.
+ *
+ * The first element of the pair is an offset into the image space. If the
+ * offset is tagged (testable using HasDexCacheNativeRefTag), it indicates the location
+ * of a DexCache object that has one or more native references to managed
+ * strings that need to be fixed up. In this case the second element has no
+ * meaningful value.
+ *
+ * If the first element isn't tagged then it indicates the location of a
+ * managed object with a field that needs fixing up. In this case the second
+ * element of the pair is an object-relative offset to the field in question.
+ */
+typedef std::pair<uint32_t, uint32_t> AppImageReferenceOffsetInfo;
+
+/*
+ * Tags the last bit. Used by AppImage logic to differentiate between pointers
+ * to managed objects and pointers to native reference arrays.
*/
template<typename T>
-T SetNativeRefTag(T val) {
+T SetDexCacheNativeRefTag(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
return val | 1u;
@@ -459,10 +470,11 @@ T SetNativeRefTag(T val) {
/*
* Retrieves the value of the last bit. Used by AppImage logic to
- * differentiate between managed and native references.
+ * differentiate between pointers to managed objects and pointers to native
+ * reference arrays.
*/
template<typename T>
-bool HasNativeRefTag(T val) {
+bool HasDexCacheNativeRefTag(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
return (val & 1u) == 1u;
@@ -470,10 +482,11 @@ bool HasNativeRefTag(T val) {
/*
* Sets the last bit of the value to 0. Used by AppImage logic to
- * differentiate between managed and native references.
+ * differentiate between pointers to managed objects and pointers to native
+ * reference arrays.
*/
template<typename T>
-T ClearNativeRefTag(T val) {
+T ClearDexCacheNativeRefTag(T val) {
static_assert(std::is_integral<T>::value, "Expected integral type.");
return val & ~1u;
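The renamed helpers above implement classic low-bit tagging: image offsets are at least two-byte aligned, so bit 0 is free to distinguish a DexCache with native references from an ordinary managed object. A freestanding sketch of the scheme (plain functions, not the ART templates):

#include <cassert>
#include <cstdint>

uint32_t SetTag(uint32_t v)   { return v | 1u; }
bool     HasTag(uint32_t v)   { return (v & 1u) == 1u; }
uint32_t ClearTag(uint32_t v) { return v & ~1u; }

int main() {
  uint32_t offset = 0x1000;            // aligned, so bit 0 is unused
  uint32_t tagged = SetTag(offset);
  assert(HasTag(tagged) && !HasTag(offset));
  assert(ClearTag(tagged) == offset);  // the tag round-trips losslessly
  return 0;
}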
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d20522574b..6db47903b2 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,10 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +223,10 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
const size_t table_bytes = new_size * sizeof(IrtEntry);
MemMap new_map = MemMap::MapAnonymous("indirect ref table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* is_low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!new_map.IsValid()) {
return false;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4937132b26..cbcaaef260 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -43,6 +43,7 @@
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
+#include "runtime-inl.h"
#include "thread.h"
#include "thread_list.h"
@@ -536,7 +537,7 @@ static void PotentiallyAddListenerTo(Instrumentation::InstrumentationEvent event
} else {
list.push_back(listener);
}
- *has_listener = true;
+ Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
}
void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
@@ -614,11 +615,11 @@ static void PotentiallyRemoveListenerFrom(Instrumentation::InstrumentationEvent
// Check if the list contains any non-null listener, and update 'has_listener'.
for (InstrumentationListener* l : list) {
if (l != nullptr) {
- *has_listener = true;
+ Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
return;
}
}
- *has_listener = false;
+ Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = false; });
}
void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
@@ -1359,38 +1360,56 @@ struct RuntimeMethodShortyVisitor : public StackVisitor {
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
shorty('V') {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m != nullptr && !m->IsRuntimeMethod()) {
- // The first Java method.
- if (m->IsNative()) {
- // Use JNI method's shorty for the jni stub.
- shorty = m->GetShorty()[0];
- return false;
+ static uint16_t GetMethodIndexOfInvoke(ArtMethod* caller,
+ const Instruction& inst,
+ uint32_t dex_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ switch (inst.Opcode()) {
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
+ CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+ return method_idx;
}
- if (m->IsProxyMethod()) {
- // Proxy method just invokes its proxied method via
- // art_quick_proxy_invoke_handler.
- shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
- return false;
+ default: {
+ return inst.VRegB();
}
+ }
+ }
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ return true;
+ }
+ // The first Java method.
+ if (m->IsNative()) {
+ // Use JNI method's shorty for the jni stub.
+ shorty = m->GetShorty()[0];
+ } else if (m->IsProxyMethod()) {
+ // Proxy method just invokes its proxied method via
+ // art_quick_proxy_invoke_handler.
+ shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
+ } else {
const Instruction& instr = m->DexInstructions().InstructionAt(GetDexPc());
if (instr.IsInvoke()) {
+ uint16_t method_index = GetMethodIndexOfInvoke(m, instr, GetDexPc());
const DexFile* dex_file = m->GetDexFile();
- if (interpreter::IsStringInit(dex_file, instr.VRegB())) {
+ if (interpreter::IsStringInit(dex_file, method_index)) {
// Invoking string init constructor is turned into invoking
// StringFactory.newStringFromChars() which returns a string.
shorty = 'L';
- return false;
+ } else {
+ shorty = dex_file->GetMethodShorty(method_index)[0];
}
- // A regular invoke, use callee's shorty.
- uint32_t method_idx = instr.VRegB();
- shorty = dex_file->GetMethodShorty(method_idx)[0];
+ } else {
+ // It could be that a non-invoke opcode invokes a stub, which in turn
+ // invokes Java code. In such cases, we should never expect a return
+ // value from the stub.
}
- // Stop stack walking since we've seen a Java frame.
- return false;
}
- return true;
+ // Stop stack walking since we've seen a Java frame.
+ return false;
}
char shorty;
@@ -1494,8 +1513,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
self->PushDeoptimizationContext(return_value,
return_shorty == 'L' || return_shorty == '[',
- nullptr /* no pending exception */,
- false /* from_code */,
+ /* exception= */ nullptr,
+ /* from_code= */ false,
deopt_method_type);
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
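The rewritten visitor above leans on the dex "shorty" convention: the first character of a method shorty encodes the return type ('V' for void, 'L' for a reference, 'I' for int, and so on), which is all PopInstrumentationStackFrame needs to decide whether the pending return value is a reference. A minimal sketch (not ART code):

#include <cassert>

bool ReturnsReference(const char* shorty) {
  // Mirrors the return_shorty == 'L' || return_shorty == '[' test above.
  return shorty[0] == 'L' || shorty[0] == '[';
}

int main() {
  assert(ReturnsReference("LIL"));   // e.g. String foo(int, Object)
  assert(!ReturnsReference("VL"));   // e.g. void bar(Object)
  return 0;
}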
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 31cfeb6af5..d97368931a 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -509,9 +509,9 @@ TEST_F(InstrumentationTest, MethodEntryEvent) {
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodEntered,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ true);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, MethodExitObjectEvent) {
@@ -529,9 +529,9 @@ TEST_F(InstrumentationTest, MethodExitObjectEvent) {
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodExited,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ true);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, MethodExitPrimEvent) {
@@ -548,9 +548,9 @@ TEST_F(InstrumentationTest, MethodExitPrimEvent) {
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodExited,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ false);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ false);
}
TEST_F(InstrumentationTest, MethodUnwindEvent) {
@@ -582,9 +582,9 @@ TEST_F(InstrumentationTest, FieldWriteObjectEvent) {
ASSERT_TRUE(field != nullptr);
TestEvent(instrumentation::Instrumentation::kFieldWritten,
- /*event_method*/ nullptr,
- /*event_field*/ field,
- /*with_object*/ true);
+ /*event_method=*/ nullptr,
+ /*event_field=*/ field,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, FieldWritePrimEvent) {
@@ -600,9 +600,9 @@ TEST_F(InstrumentationTest, FieldWritePrimEvent) {
ASSERT_TRUE(field != nullptr);
TestEvent(instrumentation::Instrumentation::kFieldWritten,
- /*event_method*/ nullptr,
- /*event_field*/ field,
- /*with_object*/ false);
+ /*event_method=*/ nullptr,
+ /*event_field=*/ field,
+ /*with_object=*/ false);
}
TEST_F(InstrumentationTest, ExceptionHandledEvent) {
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
index d8e5da8209..8c7fb42952 100644
--- a/runtime/intern_table-inl.h
+++ b/runtime/intern_table-inl.h
@@ -19,6 +19,9 @@
#include "intern_table.h"
+// Required for ToModifiedUtf8 below.
+#include "mirror/string-inl.h"
+
namespace art {
template <typename Visitor>
@@ -48,9 +51,12 @@ inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr, const Visitor&
inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings) {
static constexpr bool kCheckDuplicates = kIsDebugBuild;
if (kCheckDuplicates) {
+ // Avoid doing read barriers since the space might not yet be added to the heap.
+ // See b/117803941
for (GcRoot<mirror::String>& string : intern_strings) {
- CHECK(Find(string.Read()) == nullptr)
- << "Already found " << string.Read()->ToModifiedUtf8() << " in the intern table";
+ CHECK(Find(string.Read<kWithoutReadBarrier>()) == nullptr)
+ << "Already found " << string.Read<kWithoutReadBarrier>()->ToModifiedUtf8()
+ << " in the intern table";
}
}
// Insert at the front since we add new interns into the back.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2ae95dcc41..b37a2781b5 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -321,7 +321,7 @@ static inline JValue Execute(
} else {
while (true) {
// Mterp does not support all instrumentation/debugging.
- if (MterpShouldSwitchInterpreters() != 0) {
+ if (!self->UseMterp()) {
return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
false);
}
@@ -587,8 +587,8 @@ void EnterInterpreterFromDeoptimize(Thread* self,
accessor,
*shadow_frame,
value,
- /* stay_in_interpreter */ true,
- /* from_deoptimize */ true);
+ /* stay_in_interpreter= */ true,
+ /* from_deoptimize= */ true);
}
ShadowFrame* old_frame = shadow_frame;
shadow_frame = shadow_frame->GetLink();
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index b4966fd615..355058f4f6 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -25,25 +25,29 @@
namespace art {
-class Instruction;
class Thread;
// Small fast thread-local cache for the interpreter.
-// The key for the cache is the dex instruction pointer.
-// The interpretation of the value depends on the opcode.
-// Presence of entry might imply some performance pre-conditions.
+// It can hold arbitrary pointer-sized key-value pairs.
+// The interpretation of the value depends on the key.
+// Presence of an entry might imply some pre-conditions.
// All operations must be done from the owning thread,
// or at a point when the owning thread is suspended.
//
-// The values stored for opcodes in the cache currently are:
+// The key-value pairs stored in the cache currently are:
// iget/iput: The field offset. The field must be non-volatile.
// sget/sput: The ArtField* pointer. The field must be non-volitile.
+// invoke: The ArtMethod* pointer (before vtable indirection, etc).
+// ArtMethod*: The ImtIndex of the method.
+//
+// We ensure consistency of the cache by clearing it
+// whenever any dex file is unloaded.
//
// Aligned to 16-bytes to make it easier to get the address of the cache
// from assembly (it ensures that the offset is valid immediate value).
class ALIGNED(16) InterpreterCache {
// Aligned since we load the whole entry in single assembly instruction.
- typedef std::pair<const Instruction*, size_t> Entry ALIGNED(2 * sizeof(size_t));
+ typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
public:
// 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
@@ -59,7 +63,7 @@ class ALIGNED(16) InterpreterCache {
// Clear the whole cache. It requires the owning thread for DCHECKs.
void Clear(Thread* owning_thread);
- ALWAYS_INLINE bool Get(const Instruction* key, /* out */ size_t* value) {
+ ALWAYS_INLINE bool Get(const void* key, /* out */ size_t* value) {
DCHECK(IsCalledFromOwningThread());
Entry& entry = data_[IndexOf(key)];
if (LIKELY(entry.first == key)) {
@@ -69,7 +73,7 @@ class ALIGNED(16) InterpreterCache {
return false;
}
- ALWAYS_INLINE void Set(const Instruction* key, size_t value) {
+ ALWAYS_INLINE void Set(const void* key, size_t value) {
DCHECK(IsCalledFromOwningThread());
data_[IndexOf(key)] = Entry{key, value};
}
@@ -77,7 +81,7 @@ class ALIGNED(16) InterpreterCache {
private:
bool IsCalledFromOwningThread();
- static ALWAYS_INLINE size_t IndexOf(const Instruction* key) {
+ static ALWAYS_INLINE size_t IndexOf(const void* key) {
static_assert(IsPowerOfTwo(kSize), "Size must be power of two");
size_t index = (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
DCHECK_LT(index, kSize);
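With the key widened from const Instruction* to const void*, the same cache can memoize per-method data (the new ArtMethod* -> ImtIndex pairs) alongside per-instruction data. A hypothetical usage sketch, assuming a Thread accessor like GetInterpreterCache() that is not shown in this patch (the field-offset call matches the DCHECK usage later in this commit):

    size_t offset;
    InterpreterCache* cache = self->GetInterpreterCache();
    if (!cache->Get(field, &offset)) {
      offset = field->GetOffset().SizeValue();  // Slow path: compute once.
      cache->Set(field, offset);                // Subsequent lookups hit.
    }

Note that IndexOf() shifts the key right by two before masking, discarding low bits that are zero for aligned pointers, so neighboring keys spread across distinct slots.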
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b17023208e..72a1330536 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -50,6 +50,17 @@ void ThrowNullPointerExceptionFromInterpreter() {
ThrowNullPointerExceptionFromDexPC();
}
+bool CheckStackOverflow(Thread* self, size_t frame_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ uint8_t* stack_end = self->GetStackEndForInterpreter(implicit_check);
+ if (UNLIKELY(__builtin_frame_address(0) < stack_end + frame_size)) {
+ ThrowStackOverflowError(self);
+ return false;
+ }
+ return true;
+}
+
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
@@ -714,12 +725,12 @@ bool DoMethodHandleInvokeExact(Thread* self,
if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
static const bool kIsRange = false;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
} else {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
static const bool kIsRange = true;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
}
}
@@ -731,12 +742,12 @@ bool DoMethodHandleInvoke(Thread* self,
if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
static const bool kIsRange = false;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
} else {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
static const bool kIsRange = true;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
}
}
@@ -1071,7 +1082,7 @@ static bool PackCollectorArrayForBootstrapMethod(Thread* self,
return true;
#define COLLECT_REFERENCE_ARRAY(T, Type) \
- Handle<mirror::ObjectArray<T>> array = \
+ Handle<mirror::ObjectArray<T>> array = /* NOLINT */ \
hs.NewHandle(mirror::ObjectArray<T>::Alloc(self, \
array_type, \
array_length)); \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 1e4239edfd..96588c8738 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -42,6 +42,8 @@
#include "dex/dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
+#include "interpreter_mterp_impl.h"
+#include "interpreter_switch_impl.h"
#include "jit/jit.h"
#include "mirror/call_site.h"
#include "mirror/class-inl.h"
@@ -51,6 +53,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "mterp/mterp.h"
#include "obj_ptr.h"
#include "stack.h"
#include "thread.h"
@@ -121,9 +124,46 @@ template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
+template<InvokeType type>
+static ALWAYS_INLINE bool UseInterpreterToInterpreterFastPath(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ if (!runtime->IsStarted()) {
+ return false;
+ }
+ const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
+ if (!runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
+ return false;
+ }
+ if (!method->SkipAccessChecks() || method->IsNative() || method->IsProxyMethod()) {
+ return false;
+ }
+ if (method->GetDeclaringClass()->IsStringClass() && method->IsConstructor()) {
+ return false;
+ }
+ if (type == kStatic && !method->GetDeclaringClass()->IsInitialized()) {
+ return false;
+ }
+ if (runtime->IsActiveTransaction() || runtime->GetInstrumentation()->HasMethodEntryListeners()) {
+ return false;
+ }
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
+ if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
+ return false;
+ }
+ if (runtime->GetJit() != nullptr && runtime->GetJit()->JitAtFirstUse()) {
+ return false;
+ }
+ return true;
+}
+
+// Throws an exception if we are getting close to the end of the stack.
+NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check, bool fast_invoke = false>
+template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -163,35 +203,96 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
(type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* const called_method = FindMethodToCall<type, do_access_check>(
method_idx, resolved_method, &receiver, sf_method, self);
-
- // The shadow frame should already be pushed, so we don't need to update it.
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
- } else if (UNLIKELY(!called_method->IsInvokable())) {
+ }
+ if (UNLIKELY(!called_method->IsInvokable())) {
called_method->ThrowInvocationTimeError();
result->SetJ(0);
return false;
- } else {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && (type == kVirtual || type == kInterface)) {
- jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && (type == kVirtual || type == kInterface)) {
+ jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+
+ if (is_mterp && !is_range && called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ if (jit != nullptr && sf_method != nullptr) {
+ jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
+ }
+ return !self->IsExceptionPending();
+ }
+ }
+
+ if (is_mterp && UseInterpreterToInterpreterFastPath<type>(called_method)) {
+ const uint16_t number_of_inputs =
+ (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ CodeItemDataAccessor accessor(called_method->DexInstructionData());
+ uint32_t num_regs = accessor.RegistersSize();
+ DCHECK_EQ(number_of_inputs, accessor.InsSize());
+ DCHECK_GE(num_regs, number_of_inputs);
+ size_t first_dest_reg = num_regs - number_of_inputs;
+
+ if (UNLIKELY(!CheckStackOverflow(self, ShadowFrame::ComputeSize(num_regs)))) {
+ return false;
}
- // The fast invoke is used from mterp for some invoke variants.
- // The non-fast version is used from switch interpreter and it might not support intrinsics.
- // TODO: Unify both paths.
- if (fast_invoke) {
- if (called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- return !self->IsExceptionPending();
+
+      // Create a shadow frame on the stack.
+ const char* old_cause = self->StartAssertNoThreadSuspension("DoFastInvoke");
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+ ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+ if (is_range) {
+ size_t src = vregC;
+ for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst, ++src) {
+ *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(src);
+ *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(src);
+ }
+ } else {
+ uint32_t arg[Instruction::kMaxVarArgRegs];
+ inst->GetVarArgs(arg, inst_data);
+ for (size_t i = 0, dst = first_dest_reg; i < number_of_inputs; ++i, ++dst) {
+ *new_shadow_frame->GetVRegAddr(dst) = *shadow_frame.GetVRegAddr(arg[i]);
+ *new_shadow_frame->GetShadowRefAddr(dst) = *shadow_frame.GetShadowRefAddr(arg[i]);
+ }
+ }
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ if (jit != nullptr) {
+      jit->AddSamples(self, called_method, 1, /* with_backedges= */ false);
+ }
+
+ self->PushShadowFrame(new_shadow_frame);
+ DCheckStaticState(self, called_method);
+ while (true) {
+ // Mterp does not support all instrumentation/debugging.
+ if (!self->UseMterp()) {
+ *result =
+ ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, false);
+ break;
+ }
+ if (ExecuteMterpImpl(self, accessor.Insns(), new_shadow_frame, result)) {
+ break;
+ } else {
+ // Mterp didn't like that instruction. Single-step it with the reference interpreter.
+ *result = ExecuteSwitchImpl<false, false>(self, accessor, *new_shadow_frame, *result, true);
+ if (new_shadow_frame->GetDexPC() == dex::kDexNoIndex) {
+ break; // Single-stepped a return or an exception not handled locally.
}
}
}
- return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
+ self->PopShadowFrame();
+
+ return !self->IsExceptionPending();
}
+
+ return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
+ result);
}
static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
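A worked example of the argument-window arithmetic in the fast path above: for invoke-virtual {v7, v8} into a callee whose code item declares registers_size = 5 and ins_size = 2,

    number_of_inputs = 2
    num_regs         = 5
    first_dest_reg   = 5 - 2 = 3

so caller v7 (the receiver) lands in callee v3 and caller v8 in callee v4, leaving callee v0..v2 as its purely local registers, per the standard dex convention of passing arguments in the highest-numbered registers of the new frame.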
@@ -289,7 +390,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
if (jit != nullptr) {
jit->InvokeVirtualOrInterface(
receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
- jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
+ jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/false);
}
// No need to check since we've been quickened.
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl-inl.h
index cb64ff402a..4774d6994e 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
+
#include "interpreter_switch_impl.h"
#include "base/enums.h"
@@ -167,7 +170,7 @@ namespace interpreter {
#define HOTNESS_UPDATE() \
do { \
if (jit != nullptr) { \
- jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/ true); \
+ jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/ true); \
} \
} while (false)
@@ -1669,70 +1672,70 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
}
case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(
+ bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(
+ bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_SUPER: {
PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(
+ bool success = DoInvoke<kSuper, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(
+ bool success = DoInvoke<kSuper, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(
+ bool success = DoInvoke<kDirect, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(
+ bool success = DoInvoke<kDirect, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(
+ bool success = DoInvoke<kInterface, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(
+ bool success = DoInvoke<kInterface, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(
+ bool success = DoInvoke<kStatic, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(
+ bool success = DoInvoke<kStatic, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
@@ -1754,7 +1757,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
case Instruction::INVOKE_POLYMORPHIC: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokePolymorphic<false /* is_range */>(
+ bool success = DoInvokePolymorphic</* is_range= */ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
@@ -1762,7 +1765,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
case Instruction::INVOKE_POLYMORPHIC_RANGE: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokePolymorphic<true /* is_range */>(
+ bool success = DoInvokePolymorphic</* is_range= */ true>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
@@ -1770,7 +1773,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
case Instruction::INVOKE_CUSTOM: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokeCustom<false /* is_range */>(
+ bool success = DoInvokeCustom</* is_range= */ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
@@ -1778,7 +1781,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
case Instruction::INVOKE_CUSTOM_RANGE: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokeCustom<true /* is_range */>(
+ bool success = DoInvokeCustom</* is_range= */ true>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
@@ -2571,15 +2574,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx)
return;
} // NOLINT(readability/fn_size)
-// Explicit definitions of ExecuteSwitchImplCpp.
-template HOT_ATTR
-void ExecuteSwitchImplCpp<true, false>(SwitchImplContext* ctx);
-template HOT_ATTR
-void ExecuteSwitchImplCpp<false, false>(SwitchImplContext* ctx);
-template
-void ExecuteSwitchImplCpp<true, true>(SwitchImplContext* ctx);
-template
-void ExecuteSwitchImplCpp<false, true>(SwitchImplContext* ctx);
-
} // namespace interpreter
} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_INL_H_
diff --git a/runtime/interpreter/interpreter_switch_impl0.cc b/runtime/interpreter/interpreter_switch_impl0.cc
new file mode 100644
index 0000000000..00159ecd3f
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl0.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template HOT_ATTR
+void ExecuteSwitchImplCpp<false, false>(SwitchImplContext* ctx);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl1.cc b/runtime/interpreter/interpreter_switch_impl1.cc
new file mode 100644
index 0000000000..3a86765c68
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl1.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template
+void ExecuteSwitchImplCpp<false, true>(SwitchImplContext* ctx);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl2.cc b/runtime/interpreter/interpreter_switch_impl2.cc
new file mode 100644
index 0000000000..c2739c13ae
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl2.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template HOT_ATTR
+void ExecuteSwitchImplCpp<true, false>(SwitchImplContext* ctx);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl3.cc b/runtime/interpreter/interpreter_switch_impl3.cc
new file mode 100644
index 0000000000..808e4bc9c5
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl3.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The interpreter function takes considerable time to compile and link.
+// We compile the explicit definitions separately to speed up the build.
+
+#include "interpreter_switch_impl-inl.h"
+
+namespace art {
+namespace interpreter {
+
+// Explicit definition of ExecuteSwitchImplCpp.
+template
+void ExecuteSwitchImplCpp<true, true>(SwitchImplContext* ctx);
+
+} // namespace interpreter
+} // namespace art
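The four single-instantiation files above apply a general build-speed technique: keep the heavy template definition in an -inl.h header and give each explicit instantiation its own translation unit, so the expensive expansions compile in parallel and recompile independently. A generic, hedged illustration (all names here are invented for the example):

    // big_switch-inl.h: full template definition, huge after expansion.
    template <bool kAccessChecks, bool kTransaction>
    void RunBigSwitch(Context* ctx) { /* ...thousands of lines... */ }

    // big_switch0.cc: exactly one explicit instantiation per .cc file.
    #include "big_switch-inl.h"
    template void RunBigSwitch<false, false>(Context*);

Note that HOT_ATTR is kept on the same two instantiations that carried it in the list removed from interpreter_switch_impl.cc.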
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
index b6ede54f86..6bf54e8de0 100644
--- a/runtime/interpreter/mterp/arm/floating_point.S
+++ b/runtime/interpreter/mterp/arm/floating_point.S
@@ -326,9 +326,9 @@
%def op_double_to_long():
% unopWide(instr="bl d2l_doconv")
-% add_helper("helper_code", op_double_to_long_helper_code)
+% add_helper(op_double_to_long_helper)
-%def op_double_to_long_helper_code():
+%def op_double_to_long_helper():
/*
* Convert the double in r0/r1 to a long in r0/r1.
*
@@ -368,9 +368,9 @@ d2l_maybeNaN:
%def op_float_to_long():
% unopWider(instr="bl f2l_doconv")
-% add_helper("helper_code", op_float_to_long_helper_code)
+% add_helper(op_float_to_long_helper)
-%def op_float_to_long_helper_code():
+%def op_float_to_long_helper():
/*
* Convert the float in r0 to a long in r0/r1.
*
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
index 8693d3b8ca..08fd1bb9ff 100644
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -14,9 +14,9 @@
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
+ ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
cmp r0, #0
- bne MterpFallback
+ beq MterpFallback
GET_INST_OPCODE ip
GOTO_OPCODE ip
@@ -37,9 +37,9 @@
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
+ ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
cmp r0, #0
- bne MterpFallback
+ beq MterpFallback
GET_INST_OPCODE ip
GOTO_OPCODE ip
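The call-out to MterpShouldSwitchInterpreters is replaced by a single load of a thread-local flag, removing a C++ call from every invoke's hot path. A rough sketch of the C++ side this pairs with (the field name and type are assumptions inferred from THREAD_USE_MTERP_OFFSET and Thread::UseMterp(); the real declaration may differ):

    class Thread {
     public:
      bool UseMterp() const { return tls32_.use_mterp != 0; }
     private:
      struct {
        // Assumed 32-bit so the arm handler above can test it with one
        // word load; exported to assembly as THREAD_USE_MTERP_OFFSET.
        uint32_t use_mterp;
      } tls32_;
    };

The DCHECK added in MterpCheckBefore later in this commit states the invariant: the per-thread flag must agree with CanUseMterp(), with updates performed under the thread list lock.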
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index 62c38bfd96..a9cffe77a7 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -404,21 +404,20 @@ ENTRY ExecuteMterpImpl
// cfi info continues, and covers the whole mterp implementation.
END ExecuteMterpImpl
-%def alt_stub():
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
+%def dchecks_before_helper():
+  // Call C++ to do debug checks and return to the handler using a tail call.
.extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_${opcode}
- sub lr, lr, #(.L_ALT_${opcode} - .L_${opcode}) @ Addr of primary handler.
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rPC
b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ bl Mterp_dchecks_before_helper
+ #endif
+
%def fallback():
/* Transfer stub to alternate interpreter */
b MterpFallback
@@ -532,9 +531,9 @@ MterpException:
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
/* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
+ ldr r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
cmp r0, #0
- bne MterpFallback
+ beq MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
@@ -733,13 +732,6 @@ MterpProfileActive:
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
-%def instruction_end_alt():
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
%def instruction_start():
.type artMterpAsmInstructionStart, #object
@@ -748,14 +740,6 @@ artMterpAsmAltInstructionEnd:
artMterpAsmInstructionStart = .L_op_nop
.text
-%def instruction_start_alt():
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
%def opcode_start():
ENTRY Mterp_${opcode}
%def opcode_end():
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
index 13009eaefc..092aa9ef4e 100644
--- a/runtime/interpreter/mterp/arm/object.S
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -43,7 +43,7 @@
mov r2, rINST, lsr #12 @ B
GET_VREG r2, r2 @ object we're operating on
cmp r0, rPC
-% slow_path_label = add_helper("slow_path", lambda: field(helper))
+% slow_path_label = add_helper(lambda: field(helper))
bne ${slow_path_label} @ cache miss
cmp r2, #0
beq common_errNullObject @ null object
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
index 03ac316392..4844213414 100644
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -13,8 +13,8 @@
bl $helper
cbz w0, MterpException
FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
+ ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+ cbz w0, MterpFallback
GET_INST_OPCODE ip
GOTO_OPCODE ip
@@ -34,8 +34,8 @@
bl $helper
cbz w0, MterpException
FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
+ ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+ cbz w0, MterpFallback
GET_INST_OPCODE ip
GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index f248265579..858cb38697 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -429,20 +429,20 @@ ENTRY ExecuteMterpImpl
// cfi info continues, and covers the whole mterp implementation.
END ExecuteMterpImpl
-%def alt_stub():
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
+%def dchecks_before_helper():
+  // Call C++ to do debug checks and return to the handler using a tail call.
.extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (${opnum} * 128) // Addr of primary handler.
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
mov x2, xPC
b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ bl Mterp_dchecks_before_helper
+ #endif
+
%def footer():
.cfi_endproc
END MterpHelpers
@@ -553,8 +553,8 @@ MterpException:
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
/* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
+ ldr w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+ cbz w0, MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
@@ -766,13 +766,6 @@ MterpProfileActive:
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
-%def instruction_end_alt():
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
%def instruction_start():
.type artMterpAsmInstructionStart, #object
@@ -781,14 +774,6 @@ artMterpAsmAltInstructionEnd:
artMterpAsmInstructionStart = .L_op_nop
.text
-%def instruction_start_alt():
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
%def opcode_start():
ENTRY Mterp_${opcode}
%def opcode_end():
diff --git a/runtime/interpreter/mterp/arm64/object.S b/runtime/interpreter/mterp/arm64/object.S
index 3d44731cce..3cc688e5e2 100644
--- a/runtime/interpreter/mterp/arm64/object.S
+++ b/runtime/interpreter/mterp/arm64/object.S
@@ -41,7 +41,7 @@
lsr w2, wINST, #12 // B
GET_VREG w2, w2 // object we're operating on
cmp x0, xPC
-% slow_path_label = add_helper("slow_path", lambda: field(helper))
+% slow_path_label = add_helper(lambda: field(helper))
b.ne ${slow_path_label} // cache miss
cbz w2, common_errNullObject // null object
% if is_wide:
diff --git a/runtime/interpreter/mterp/common/gen_setup.py b/runtime/interpreter/mterp/common/gen_setup.py
index a693383a78..cfa5e2e2ef 100644
--- a/runtime/interpreter/mterp/common/gen_setup.py
+++ b/runtime/interpreter/mterp/common/gen_setup.py
@@ -22,8 +22,8 @@ import sys, re
from cStringIO import StringIO
out = StringIO() # File-like in-memory buffer.
-handler_size_bytes = "128"
-handler_size_bits = "7"
+handler_size_bytes = "MTERP_HANDLER_SIZE"
+handler_size_bits = "MTERP_HANDLER_SIZE_LOG2"
opcode = ""
opnum = ""
@@ -33,38 +33,34 @@ def write_line(line):
def balign():
write_line(" .balign {}".format(handler_size_bytes))
-def write_opcode(num, name, write_method, is_alt):
+def write_opcode(num, name, write_method):
global opnum, opcode
opnum, opcode = str(num), name
- if is_alt:
- name = "ALT_" + name
write_line("/* ------------------------------ */")
balign()
write_line(".L_{1}: /* {0:#04x} */".format(num, name))
- if is_alt:
- alt_stub()
- else:
- opcode_start()
- write_method()
- opcode_end()
+ opcode_start()
+ opcode_pre()
+ write_method()
+ opcode_end()
write_line("")
opnum, opcode = None, None
-generated_helpers = list()
+generated_helpers = {}
# This method generates a helper using the provided writer method.
# The output is temporarily redirected to in-memory buffer.
-# It returns the symbol which can be used to jump to the helper.
-def add_helper(name_suffix, write_helper):
+def add_helper(write_helper, name=None):
+  if name is None:
+    name = "Mterp_" + opcode + "_helper"
global out
old_out = out
out = StringIO()
- name = "Mterp_" + opcode + "_" + name_suffix
helper_start(name)
write_helper()
helper_end(name)
out.seek(0)
- generated_helpers.append(out.read())
+ generated_helpers[name] = out.read()
out = old_out
return name
@@ -76,19 +72,14 @@ def generate(output_filename):
entry()
instruction_start()
- opcodes(is_alt = False)
+ opcodes()
balign()
instruction_end()
- for helper in generated_helpers:
+ for name, helper in sorted(generated_helpers.items()):
out.write(helper)
helpers()
- instruction_start_alt()
- opcodes(is_alt = True)
- balign()
- instruction_end_alt()
-
footer()
out.seek(0)
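The generator now memoizes helpers by symbol name instead of appending to a list, so a helper registered from many opcodes (such as Mterp_dchecks_before_helper above) is emitted exactly once, and sorted() keeps the output deterministic. A self-contained sketch of the redirect-and-memoize pattern (simplified; the real code also wraps the body in helper_start/helper_end):

    from io import StringIO  # the generator itself uses cStringIO (Python 2)

    out = StringIO()
    generated_helpers = {}
    opcode = "op_example"

    def add_helper(write_helper, name=None):
      global out
      if name is None:
        name = "Mterp_" + opcode + "_helper"
      old_out, out = out, StringIO()  # Redirect writes to a fresh buffer.
      write_helper()                  # The helper body is captured here.
      generated_helpers[name] = out.getvalue()  # Re-adding overwrites with
      out = old_out                             # identical text: a no-op.
      return name  # Symbol the opcode handler can branch to.

    # Usage: callers may pass a lambda to parameterize the body, as the
    # arm/object.S change above does with add_helper(lambda: field(helper)).
    label = add_helper(lambda: out.write("  idivl %ecx\n"))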
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index ad6e836177..5d25955701 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -58,9 +58,9 @@ def generate_script(output_filename, input_filenames):
script = StringIO() # File-like in-memory buffer.
script.write("# DO NOT EDIT: This file was generated by gen-mterp.py.\n")
script.write(open(SCRIPT_SETUP_CODE, "r").read())
- script.write("def opcodes(is_alt):\n")
+ script.write("def opcodes():\n")
for i, opcode in enumerate(getOpcodeList()):
- script.write(' write_opcode({0}, "{1}", {1}, is_alt)\n'.format(i, opcode))
+ script.write(' write_opcode({0}, "{1}", {1})\n'.format(i, opcode))
# Read all template files and translate them into python code.
for input_filename in sorted(input_filenames):
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index c385fb9417..4b6f430e65 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -34,11 +34,11 @@ namespace interpreter {
void CheckMterpAsmConstants() {
/*
* If we're using computed goto instruction transitions, make sure
- * none of the handlers overflows the 128-byte limit. This won't tell
+ * none of the handlers overflows the per-handler size limit. This won't tell
* which one did, but if any one is too big the total size will
* overflow.
*/
- const int width = 128;
+ const int width = kMterpHandlerSize;
int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
(uintptr_t) artMterpAsmInstructionStart;
if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
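With kMterpHandlerSize now shared between C++ and the generator, the size check stays self-consistent. For instance, assuming the usual 256 (0x100) packed dex opcodes:

    expected size = kNumPackedOpcodes * kMterpHandlerSize
                  = 256 * 128
                  = 32768 bytes (32 KiB)

and after dropping the alt tables, only this one handler table has to satisfy it.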
@@ -48,11 +48,7 @@ void CheckMterpAsmConstants() {
}
void InitMterpTls(Thread* self) {
- self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
- self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
- self->SetMterpCurrentIBase((kTraceExecutionEnabled || kTestExportPC) ?
- artMterpAsmAltInstructionStart :
- artMterpAsmInstructionStart);
+ self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
}
/*
@@ -146,27 +142,19 @@ extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testV
return entries[index];
}
-extern "C" size_t MterpShouldSwitchInterpreters()
+bool CanUseMterp()
REQUIRES_SHARED(Locks::mutator_lock_) {
const Runtime* const runtime = Runtime::Current();
- const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
- return instrumentation->NonJitProfilingActive() ||
- Dbg::IsDebuggerActive() ||
+ return
+ !Dbg::IsDebuggerActive() &&
+ !runtime->GetInstrumentation()->NonJitProfilingActive() &&
// mterp only knows how to deal with the normal exits. It cannot handle any of the
// non-standard force-returns.
- // TODO We really only need to switch interpreters if a PopFrame has actually happened. We
- // should check this here.
- UNLIKELY(runtime->AreNonStandardExitsEnabled()) ||
+ !runtime->AreNonStandardExitsEnabled() &&
// An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
// know how to deal with these, so we could end up never handling one if we are in an
- // infinite loop. Since this can be called in a tight loop and getting the current thread
- // requires a TLS read we instead first check a short-circuit runtime flag that will only be
- // set if something tries to set an async exception. This will make this function faster in
- // the common case where no async exception has ever been sent. We don't need to worry about
- // synchronization on the runtime flag since it is only set in a checkpoint which will either
- // take place on the current thread or act as a synchronization point.
- (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
- Thread::Current()->IsAsyncExceptionPending());
+ // infinite loop.
+ !runtime->AreAsyncExceptionsThrown();
}
@@ -177,7 +165,7 @@ extern "C" size_t MterpInvokeVirtual(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kVirtual, /*is_range*/ false, /*access_check*/ false, /*fast_invoke*/ true>(
+ return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -188,7 +176,7 @@ extern "C" size_t MterpInvokeSuper(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kSuper, /*is_range*/ false, /*access_check*/ false>(
+ return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -199,7 +187,7 @@ extern "C" size_t MterpInvokeInterface(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kInterface, /*is_range*/ false, /*access_check*/ false>(
+ return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -210,7 +198,7 @@ extern "C" size_t MterpInvokeDirect(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kDirect, /*is_range*/ false, /*access_check*/ false, /*fast_invoke*/ true>(
+ return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -221,7 +209,7 @@ extern "C" size_t MterpInvokeStatic(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kStatic, /*is_range*/ false, /*access_check*/ false, /*fast_invoke*/ true>(
+ return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -232,7 +220,7 @@ extern "C" size_t MterpInvokeCustom(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom<false /* is_range */>(
+ return DoInvokeCustom</* is_range= */ false>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -243,7 +231,7 @@ extern "C" size_t MterpInvokePolymorphic(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic<false /* is_range */>(
+ return DoInvokePolymorphic</* is_range= */ false>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -254,7 +242,7 @@ extern "C" size_t MterpInvokeVirtualRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kVirtual, /*is_range*/ true, /*access_check*/ false>(
+ return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -265,7 +253,7 @@ extern "C" size_t MterpInvokeSuperRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kSuper, /*is_range*/ true, /*access_check*/ false>(
+ return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -276,7 +264,7 @@ extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kInterface, /*is_range*/ true, /*access_check*/ false>(
+ return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -287,7 +275,7 @@ extern "C" size_t MterpInvokeDirectRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kDirect, /*is_range*/ true, /*access_check*/ false>(
+ return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -298,7 +286,7 @@ extern "C" size_t MterpInvokeStaticRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kStatic, /*is_range*/ true, /*access_check*/ false>(
+ return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -309,7 +297,7 @@ extern "C" size_t MterpInvokeCustomRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
+ return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
}
extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
@@ -319,7 +307,7 @@ extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic<true /* is_range */>(
+ return DoInvokePolymorphic</* is_range= */ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -387,8 +375,8 @@ extern "C" size_t MterpConstClass(uint32_t index,
ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
shadow_frame->GetMethod(),
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (UNLIKELY(c == nullptr)) {
return true;
}
@@ -475,8 +463,8 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint
ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
shadow_frame->GetMethod(),
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (LIKELY(c != nullptr)) {
if (UNLIKELY(c->IsStringClass())) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -566,6 +554,12 @@ extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Check that we are using the right interpreter.
+ if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
+    // The flag might be in the middle of being updated on all threads. Retry with the lock held.
+ MutexLock tll_mu(self, *Locks::thread_list_lock_);
+ DCHECK_EQ(self->UseMterp(), CanUseMterp());
+ }
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t inst_data = inst->Fetch16(0);
if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -665,7 +659,7 @@ extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame,
extern "C" size_t MterpSuspendCheck(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
self->AllowThreadSuspension();
- return MterpShouldSwitchInterpreters();
+ return !self->UseMterp();
}
// Execute single field access instruction (get/put, static/instance).
@@ -688,8 +682,8 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
if (kIsPrimitive) {
if (kIsRead) {
PrimType value = UNLIKELY(is_volatile)
- ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset)
- : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset);
+ ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
+ : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
if (sizeof(PrimType) == sizeof(uint64_t)) {
shadow_frame->SetVRegLong(vRegA, value); // Set two consecutive registers.
} else {
@@ -700,9 +694,9 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
? shadow_frame->GetVRegLong(vRegA)
: shadow_frame->GetVReg(vRegA);
if (UNLIKELY(is_volatile)) {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value);
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
} else {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value);
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
}
}
} else { // Object.
@@ -714,9 +708,9 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
} else { // Write.
ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
if (UNLIKELY(is_volatile)) {
- obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value);
+ obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
} else {
- obj->SetFieldObject</*kTransactionActive*/ false>(offset, value);
+ obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
}
}
}
@@ -735,7 +729,7 @@ NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
ArtMethod* referrer = shadow_frame->GetMethod();
uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, referrer, self, sizeof(PrimType));
if (UNLIKELY(field == nullptr)) {
DCHECK(self->IsExceptionPending());
@@ -776,7 +770,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
: tls_value;
if (kIsDebugBuild) {
uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
DCHECK_EQ(offset, field->GetOffset().SizeValue());
}
@@ -785,7 +779,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
: MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
if (LIKELY(obj != nullptr)) {
MterpFieldAccess<PrimType, kAccessType>(
- inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile */ false);
+ inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
return true;
}
}
@@ -804,7 +798,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
if (LIKELY(field != nullptr)) {
bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
if (LIKELY(initialized)) {
- DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, referrer, self, sizeof(PrimType))));
ObjPtr<mirror::Object> obj = kIsStatic
? field->GetDeclaringClass().Ptr()
@@ -936,7 +930,7 @@ extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
- jit->AddSamples(self, method, count, /*with_backedges*/ true);
+ jit->AddSamples(self, method, count, /*with_backedges=*/ true);
}
return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}
@@ -961,7 +955,7 @@ extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
if (offset <= 0) {
// Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
- jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
+ jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
}
did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
}
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 1a56d26813..af52758bbc 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -25,8 +25,6 @@
*/
extern "C" void* artMterpAsmInstructionStart[];
extern "C" void* artMterpAsmInstructionEnd[];
-extern "C" void* artMterpAsmAltInstructionStart[];
-extern "C" void* artMterpAsmAltInstructionEnd[];
namespace art {
@@ -36,12 +34,7 @@ namespace interpreter {
void InitMterpTls(Thread* self);
void CheckMterpAsmConstants();
-
-// The return type should be 'bool' but our assembly stubs expect 'bool'
-// to be zero-extended to the whole register and that's broken on x86-64
-// as a 'bool' is returned in 'al' and the rest of 'rax' is garbage.
-// TODO: Fix mterp and stubs and revert this workaround. http://b/30232671
-extern "C" size_t MterpShouldSwitchInterpreters();
+bool CanUseMterp();
// Poison value for TestExportPC. If we segfault with this value, it means that a mterp
// handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
@@ -50,6 +43,8 @@ constexpr uintptr_t kExportPCPoison = 0xdead00ff;
// Set true to enable poison testing of ExportPC. Uses Alt interpreter.
constexpr bool kTestExportPC = false;
+constexpr size_t kMterpHandlerSize = 128;
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86/arithmetic.S
index a9fa0fc68f..3b5f0beb89 100644
--- a/runtime/interpreter/mterp/x86/arithmetic.S
+++ b/runtime/interpreter/mterp/x86/arithmetic.S
@@ -25,6 +25,9 @@
jne .L${opcode}_32
movl $special, $result
jmp .L${opcode}_finish
+% add_helper(lambda: bindiv_helper(result, rem))
+
+%def bindiv_helper(result, rem):
.L${opcode}_32:
cltd
idivl %ecx
@@ -69,7 +72,9 @@
SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+% add_helper(lambda: bindiv2addr_helper(result))
+%def bindiv2addr_helper(result):
.L${opcode}_continue_div2addr:
cltd
idivl %ecx
@@ -137,7 +142,7 @@
%def binop(result="%eax", instr=""):
/*
* Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * specifies an instruction that performs "result = eax op VREG_ADDRESS(%ecx)".
* This could be an x86 instruction or a function call. (If the result
* comes back in a register other than eax, you can override "result".)
*
@@ -148,7 +153,7 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
GET_VREG %eax, %eax # eax <- vBB
- $instr # ex: addl (rFP,%ecx,4),%eax
+ $instr # ex: addl VREG_ADDRESS(%ecx),%eax
SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -182,7 +187,7 @@
sarl $$4, rINST # rINST <- B
GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
- $instr # for ex: addl %eax,(rFP,%ecx,4)
+ $instr # for ex: addl %eax,VREG_ADDRESS(%ecx)
CLEAR_REF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -235,8 +240,8 @@
movl rIBASE, LOCAL0(%esp) # save rIBASE
GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- $instr1 # ex: addl (rFP,%ecx,4),rIBASE
- $instr2 # ex: adcl 4(rFP,%ecx,4),%eax
+ $instr1 # ex: addl VREG_ADDRESS(%ecx),rIBASE
+ $instr2 # ex: adcl VREG_HIGH_ADDRESS(%ecx),%eax
SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
movl LOCAL0(%esp), rIBASE # restore rIBASE
SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
@@ -302,7 +307,9 @@
mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+% add_helper(lambda: cvtfp_int_helper(tgtlong))
+%def cvtfp_int_helper(tgtlong):
.L${opcode}_special_case:
fnstsw %ax
sahf
@@ -348,10 +355,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl (rFP,%ecx,4), %eax")
+% binop(instr="addl VREG_ADDRESS(%ecx), %eax")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, (rFP,%ecx,4)")
+% binop2addr(instr="addl %eax, VREG_ADDRESS(%ecx)")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -360,16 +367,16 @@
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr1="addl (rFP,%ecx,4), rIBASE", instr2="adcl 4(rFP,%ecx,4), %eax")
+% binopWide(instr1="addl VREG_ADDRESS(%ecx), rIBASE", instr2="adcl VREG_HIGH_ADDRESS(%ecx), %eax")
%def op_add_long_2addr():
% binopWide2addr(instr1="addl %eax, (rFP,rINST,4)", instr2="adcl %ecx, 4(rFP,rINST,4)")
%def op_and_int():
-% binop(instr="andl (rFP,%ecx,4), %eax")
+% binop(instr="andl VREG_ADDRESS(%ecx), %eax")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, (rFP,%ecx,4)")
+% binop2addr(instr="andl %eax, VREG_ADDRESS(%ecx)")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -378,7 +385,7 @@
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr1="andl (rFP,%ecx,4), rIBASE", instr2="andl 4(rFP,%ecx,4), %eax")
+% binopWide(instr1="andl VREG_ADDRESS(%ecx), rIBASE", instr2="andl VREG_HIGH_ADDRESS(%ecx), %eax")
%def op_and_long_2addr():
% binopWide2addr(instr1="andl %eax, (rFP,rINST,4)", instr2="andl %ecx, 4(rFP,rINST,4)")
@@ -510,7 +517,7 @@
movzbl 3(rPC), %ecx # ecx <- CC
GET_VREG %eax, %eax # eax <- vBB
mov rIBASE, LOCAL0(%esp)
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
+ imull VREG_ADDRESS(%ecx), %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -522,7 +529,7 @@
GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
movl rIBASE, rINST
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
+ imull VREG_ADDRESS(%ecx), %eax # trashes rIBASE/edx
movl rINST, rIBASE
SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -571,7 +578,7 @@
mov rFP, LOCAL1(%esp) # save FP
mov rIBASE, LOCAL2(%esp) # save rIBASE
leal (rFP,%eax,4), %esi # esi <- &v[B]
- leal (rFP,%ecx,4), rFP # rFP <- &v[C]
+ leal VREG_ADDRESS(%ecx), rFP # rFP <- &v[C]
movl 4(%esi), %ecx # ecx <- Bmsw
imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
movl 4(rFP), %eax # eax <- Cmsw
@@ -659,10 +666,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_or_int():
-% binop(instr="orl (rFP,%ecx,4), %eax")
+% binop(instr="orl VREG_ADDRESS(%ecx), %eax")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, (rFP,%ecx,4)")
+% binop2addr(instr="orl %eax, VREG_ADDRESS(%ecx)")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -671,7 +678,7 @@
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr1="orl (rFP,%ecx,4), rIBASE", instr2="orl 4(rFP,%ecx,4), %eax")
+% binopWide(instr1="orl VREG_ADDRESS(%ecx), rIBASE", instr2="orl VREG_HIGH_ADDRESS(%ecx), %eax")
%def op_or_long_2addr():
% binopWide2addr(instr1="orl %eax, (rFP,rINST,4)", instr2="orl %ecx, 4(rFP,rINST,4)")
@@ -838,13 +845,13 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_sub_int():
-% binop(instr="subl (rFP,%ecx,4), %eax")
+% binop(instr="subl VREG_ADDRESS(%ecx), %eax")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, (rFP,%ecx,4)")
+% binop2addr(instr="subl %eax, VREG_ADDRESS(%ecx)")
%def op_sub_long():
-% binopWide(instr1="subl (rFP,%ecx,4), rIBASE", instr2="sbbl 4(rFP,%ecx,4), %eax")
+% binopWide(instr1="subl VREG_ADDRESS(%ecx), rIBASE", instr2="sbbl VREG_HIGH_ADDRESS(%ecx), %eax")
%def op_sub_long_2addr():
% binopWide2addr(instr1="subl %eax, (rFP,rINST,4)", instr2="sbbl %ecx, 4(rFP,rINST,4)")
@@ -918,10 +925,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_xor_int():
-% binop(instr="xorl (rFP,%ecx,4), %eax")
+% binop(instr="xorl VREG_ADDRESS(%ecx), %eax")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, (rFP,%ecx,4)")
+% binop2addr(instr="xorl %eax, VREG_ADDRESS(%ecx)")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -930,7 +937,7 @@
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr1="xorl (rFP,%ecx,4), rIBASE", instr2="xorl 4(rFP,%ecx,4), %eax")
+% binopWide(instr1="xorl VREG_ADDRESS(%ecx), rIBASE", instr2="xorl VREG_HIGH_ADDRESS(%ecx), %eax")
%def op_xor_long_2addr():
% binopWide2addr(instr1="xorl %eax, (rFP,rINST,4)", instr2="xorl %ecx, 4(rFP,rINST,4)")
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index 587c4cfd63..cfb9c7c719 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -17,9 +17,10 @@
testb %al, %al
jz MterpException
ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movl rSELF, %eax
+ movb THREAD_USE_MTERP_OFFSET(%eax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
RESTORE_IBASE
FETCH_INST
GOTO_NEXT
@@ -43,9 +44,10 @@
testb %al, %al
jz MterpException
ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movl rSELF, %eax
+ movb THREAD_USE_MTERP_OFFSET(%eax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
RESTORE_IBASE
FETCH_INST
GOTO_NEXT
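Both invoke hunks swap a call to MterpShouldSwitchInterpreters for a direct byte load of the thread's use-mterp flag, and the branch sense flips with it: the helper answered "should we leave mterp?", while the field answers "may we stay?". A hedged C++ sketch of the new check (the field name is invented; only THREAD_USE_MTERP_OFFSET appears in the diff):

    struct ThreadSketch {
      bool use_mterp;  // the byte THREAD_USE_MTERP_OFFSET points at
    };

    // Old: call MterpShouldSwitchInterpreters; jnz MterpFallback.
    // New: movb THREAD_USE_MTERP_OFFSET(self); jz  MterpFallback.
    inline bool ShouldFallBack(const ThreadSketch* self) {
      return !self->use_mterp;  // fall back when the flag is clear
    }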
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index b44f168686..b233f2c522 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -273,47 +273,47 @@ unspecified registers or condition codes.
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
+ movl VREG_ADDRESS(\_vreg), \_reg
.endm
/* Read wide value to xmm. */
.macro GET_WIDE_FP_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
+ movq VREG_ADDRESS(\_vreg), \_reg
.endm
.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm
/* Write wide value from xmm. xmm is clobbered. */
.macro SET_WIDE_FP_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
+ movq \_reg, VREG_ADDRESS(\_vreg)
pxor \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
+ movq \_reg, VREG_REF_ADDRESS(\_vreg)
.endm
.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl \_reg, VREG_REF_ADDRESS(\_vreg)
.endm
.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
+ movl VREG_HIGH_ADDRESS(\_vreg), \_reg
.endm
.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+ movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm
.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
/*
@@ -410,26 +410,24 @@ ENTRY ExecuteMterpImpl
// cfi info continues, and covers the whole mterp implementation.
END ExecuteMterpImpl
-%def alt_stub():
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
+%def dchecks_before_helper():
+ // Call C++ to do debug checks and return to the handler using a tail call.
.extern MterpCheckBefore
+ popl %eax # Return address (the instruction handler).
movl rSELF, %ecx
movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+ pushl %eax # Return address for the tail call.
+ jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ call SYMBOL(Mterp_dchecks_before_helper)
REFRESH_IBASE
- jmp .L_op_nop+(${opnum}*${handler_size_bytes})
+ #endif
%def fallback():
/* Transfer stub to alternate interpreter */
@@ -562,9 +560,10 @@ MterpException:
lea (%eax, %ecx, 2), rPC
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movl rSELF, %eax
+ movb THREAD_USE_MTERP_OFFSET(%eax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
FETCH_INST
@@ -773,13 +772,6 @@ MRestoreFrame:
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
-%def instruction_end_alt():
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
%def instruction_start():
OBJECT_TYPE(artMterpAsmInstructionStart)
@@ -788,14 +780,6 @@ SYMBOL(artMterpAsmAltInstructionEnd):
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
-%def instruction_start_alt():
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
%def opcode_start():
ENTRY Mterp_${opcode}
%def opcode_end():
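The alt_stub machinery (a full duplicate handler table used to run MterpCheckBefore between instructions) is gone; opcode_pre() now emits a call to one shared helper, and only in debug builds. A sketch of the resulting shape, assuming NDEBUG carries its usual meaning of a release build:

    #include <cstdio>

    void MterpCheckBeforeSketch() {  // stands in for MterpCheckBefore
      std::puts("pre-opcode debug checks");
    }

    inline void OpcodePre() {
    #if !defined(NDEBUG)
      MterpCheckBeforeSketch();  // compiled out entirely when NDEBUG is set
    #endif
    }

Dropping the alternate table is also what removes the instruction_start_alt and instruction_end_alt symbols above, and it motivates the oat version bump later in this change.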
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
index ffe2008993..263f82b9f6 100644
--- a/runtime/interpreter/mterp/x86_64/arithmetic.S
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -137,7 +137,7 @@
movzbq 2(rPC), %rax # rax <- BB
movzbq 3(rPC), %rcx # rcx <- CC
GET_VREG %eax, %rax # eax <- vBB
- $instr # ex: addl (rFP,%rcx,4),%eax
+ $instr # ex: addl VREG_ADDRESS(%rcx),%eax
SET_VREG $result, rINSTq
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -228,7 +228,7 @@
movzbq 2(rPC), %rax # eax <- BB
movzbq 3(rPC), %rcx # ecx <- CC
GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- $instr # ex: addq (rFP,%rcx,4),%rax
+ $instr # ex: addq VREG_ADDRESS(%rcx),%rax
SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -241,7 +241,7 @@
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
GET_WIDE_VREG %rax, rINSTq # rax <- vB
- $instr # for ex: addq %rax,(rFP,%rcx,4)
+ $instr # for ex: addq %rax,VREG_ADDRESS(%rcx)
CLEAR_WIDE_REF %rcx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -317,10 +317,10 @@ $instr
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl (rFP,%rcx,4), %eax")
+% binop(instr="addl VREG_ADDRESS(%rcx), %eax")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, (rFP,%rcx,4)")
+% binop2addr(instr="addl %eax, VREG_ADDRESS(%rcx)")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -329,16 +329,16 @@ $instr
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr="addq (rFP,%rcx,4), %rax")
+% binopWide(instr="addq VREG_ADDRESS(%rcx), %rax")
%def op_add_long_2addr():
-% binopWide2addr(instr="addq %rax, (rFP,%rcx,4)")
+% binopWide2addr(instr="addq %rax, VREG_ADDRESS(%rcx)")
%def op_and_int():
-% binop(instr="andl (rFP,%rcx,4), %eax")
+% binop(instr="andl VREG_ADDRESS(%rcx), %eax")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, (rFP,%rcx,4)")
+% binop2addr(instr="andl %eax, VREG_ADDRESS(%rcx)")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -347,10 +347,10 @@ $instr
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr="andq (rFP,%rcx,4), %rax")
+% binopWide(instr="andq VREG_ADDRESS(%rcx), %rax")
%def op_and_long_2addr():
-% binopWide2addr(instr="andq %rax, (rFP,%rcx,4)")
+% binopWide2addr(instr="andq %rax, VREG_ADDRESS(%rcx)")
%def op_cmp_long():
/*
@@ -413,7 +413,7 @@ $instr
% op_move()
%def op_mul_int():
-% binop(instr="imull (rFP,%rcx,4), %eax")
+% binop(instr="imull VREG_ADDRESS(%rcx), %eax")
%def op_mul_int_2addr():
/* mul vA, vB */
@@ -432,7 +432,7 @@ $instr
% binopLit8(instr="imull %ecx, %eax")
%def op_mul_long():
-% binopWide(instr="imulq (rFP,%rcx,4), %rax")
+% binopWide(instr="imulq VREG_ADDRESS(%rcx), %rax")
%def op_mul_long_2addr():
/* mul vA, vB */
@@ -457,10 +457,10 @@ $instr
% unop(instr=" notq %rax", wide="1")
%def op_or_int():
-% binop(instr="orl (rFP,%rcx,4), %eax")
+% binop(instr="orl VREG_ADDRESS(%rcx), %eax")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, (rFP,%rcx,4)")
+% binop2addr(instr="orl %eax, VREG_ADDRESS(%rcx)")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -469,10 +469,10 @@ $instr
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr="orq (rFP,%rcx,4), %rax")
+% binopWide(instr="orq VREG_ADDRESS(%rcx), %rax")
%def op_or_long_2addr():
-% binopWide2addr(instr="orq %rax, (rFP,%rcx,4)")
+% binopWide2addr(instr="orq %rax, VREG_ADDRESS(%rcx)")
%def op_rem_int():
% bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
@@ -530,16 +530,16 @@ $instr
% shop2addr(instr="sarq %cl, %rax", wide="1")
%def op_sub_int():
-% binop(instr="subl (rFP,%rcx,4), %eax")
+% binop(instr="subl VREG_ADDRESS(%rcx), %eax")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, (rFP,%rcx,4)")
+% binop2addr(instr="subl %eax, VREG_ADDRESS(%rcx)")
%def op_sub_long():
-% binopWide(instr="subq (rFP,%rcx,4), %rax")
+% binopWide(instr="subq VREG_ADDRESS(%rcx), %rax")
%def op_sub_long_2addr():
-% binopWide2addr(instr="subq %rax, (rFP,%rcx,4)")
+% binopWide2addr(instr="subq %rax, VREG_ADDRESS(%rcx)")
%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")
@@ -557,10 +557,10 @@ $instr
% shop2addr(instr="shrq %cl, %rax", wide="1")
%def op_xor_int():
-% binop(instr="xorl (rFP,%rcx,4), %eax")
+% binop(instr="xorl VREG_ADDRESS(%rcx), %eax")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, (rFP,%rcx,4)")
+% binop2addr(instr="xorl %eax, VREG_ADDRESS(%rcx)")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -569,7 +569,7 @@ $instr
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr="xorq (rFP,%rcx,4), %rax")
+% binopWide(instr="xorq VREG_ADDRESS(%rcx), %rax")
%def op_xor_long_2addr():
-% binopWide2addr(instr="xorq %rax, (rFP,%rcx,4)")
+% binopWide2addr(instr="xorq %rax, VREG_ADDRESS(%rcx)")
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index 63c233c4eb..f727915265 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -15,9 +15,10 @@
testb %al, %al
jz MterpException
ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movq rSELF, %rax
+ movb THREAD_USE_MTERP_OFFSET(%rax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
FETCH_INST
GOTO_NEXT
@@ -38,9 +39,10 @@
testb %al, %al
jz MterpException
ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movq rSELF, %rax
+ movb THREAD_USE_MTERP_OFFSET(%rax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
FETCH_INST
GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index 900923da6d..75eb00c461 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -256,50 +256,52 @@ unspecified registers or condition codes.
* Get/set the 32-bit value from a Dalvik register.
*/
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
+ movl VREG_ADDRESS(\_vreg), \_reg
.endm
/* Read wide value. */
.macro GET_WIDE_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
+ movq VREG_ADDRESS(\_vreg), \_reg
.endm
.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm
/* Write wide value. reg is clobbered. */
.macro SET_WIDE_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
+ movq \_reg, VREG_ADDRESS(\_vreg)
xorq \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
+ movq \_reg, VREG_REF_ADDRESS(\_vreg)
.endm
.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl \_reg, VREG_REF_ADDRESS(\_vreg)
.endm
.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
+ movl VREG_HIGH_ADDRESS(\_vreg), \_reg
.endm
.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+ movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm
.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
/*
@@ -393,24 +395,22 @@ ENTRY ExecuteMterpImpl
// cfi info continues, and covers the whole mterp implementation.
END ExecuteMterpImpl
-%def alt_stub():
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
+%def dchecks_before_helper():
+ // Call C++ to do debug checks and return to the handler using a tail call.
.extern MterpCheckBefore
+ popq %rax # Return address (the instruction handler).
REFRESH_IBASE
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(${opnum}*${handler_size_bytes})
+ pushq %rax # Return address for the tail call.
+ jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ call SYMBOL(Mterp_dchecks_before_helper)
+ #endif
%def fallback():
/* Transfer stub to alternate interpreter */
@@ -526,9 +526,10 @@ MterpException:
leaq (%rax, %rcx, 2), rPC
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
+ movq rSELF, %rax
+ movb THREAD_USE_MTERP_OFFSET(%rax), %al
testb %al, %al
- jnz MterpFallback
+ jz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
FETCH_INST
@@ -726,13 +727,6 @@ MRestoreFrame:
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
-%def instruction_end_alt():
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
%def instruction_start():
OBJECT_TYPE(artMterpAsmInstructionStart)
@@ -741,14 +735,6 @@ SYMBOL(artMterpAsmAltInstructionEnd):
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
-%def instruction_start_alt():
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
%def opcode_start():
ENTRY Mterp_${opcode}
%def opcode_end():
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 38ecc5a53d..07afba4cce 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -865,10 +865,10 @@ void UnstartedRuntime::UnstartedSystemArraycopy(
// checking version, however, does.
if (Runtime::Current()->IsActiveTransaction()) {
dst->AssignableCheckingMemcpy<true>(
- dst_pos, src, src_pos, length, true /* throw_exception */);
+ dst_pos, src, src_pos, length, /* throw_exception= */ true);
} else {
dst->AssignableCheckingMemcpy<false>(
- dst_pos, src, src_pos, length, true /* throw_exception */);
+ dst_pos, src, src_pos, length, /* throw_exception= */ true);
}
}
} else if (src_type->IsPrimitiveByte()) {
@@ -1478,9 +1478,9 @@ void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
ReadBarrier::Barrier<
mirror::Object,
- /* kIsVolatile */ false,
+ /* kIsVolatile= */ false,
kWithReadBarrier,
- /* kAlwaysUpdateField */ true>(
+ /* kAlwaysUpdateField= */ true>(
obj,
MemberOffset(offset),
field_addr);
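The pattern running through this and most of the remaining files is the argument-comment rewrite from "value /* name */" or "/* name */ value" to "/* name= */ value". Our reading (not stated in the diff) is that this matches the form clang-tidy's bugprone-argument-comment check verifies against the declared parameter name, so a renamed or reordered parameter becomes a lint error instead of a stale comment. A small sketch:

    // Hypothetical function; the parameter names are what the tool checks against.
    void Copy(int dst_pos, int src_pos, int length, bool throw_exception) {}

    void Caller() {
      // Each comment must match the parameter it annotates.
      Copy(/*dst_pos=*/0, /*src_pos=*/0, /*length=*/16, /*throw_exception=*/true);
    }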
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index bd2705d530..3fafc31e21 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -695,7 +695,7 @@ TEST_F(UnstartedRuntimeTest, Ceil) {
{ ld2, ld2 }
};
- TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+ TestCeilFloor(/* ceil= */ true, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, Floor) {
@@ -722,7 +722,7 @@ TEST_F(UnstartedRuntimeTest, Floor) {
{ ld2, ld2 }
};
- TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+ TestCeilFloor(/* ceil= */ false, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0a54e38698..d31f166869 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1344,13 +1344,14 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
VLOG(jdwp) << StringPrintf(" --> event requestId=%#x", requestId);
/* add it to the list */
+ // TODO: RegisterEvent() should take std::unique_ptr<>.
JdwpError err = state->RegisterEvent(pEvent.get());
if (err != ERR_NONE) {
/* registration failed, probably because event is bogus */
LOG(WARNING) << "WARNING: event request rejected";
return err;
}
- pEvent.release();
+ pEvent.release(); // NOLINT b/117926937
return ERR_NONE;
}
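The NOLINT on pEvent.release() suppresses a lint complaint about the ignored return value: on success RegisterEvent keeps the raw pointer, so the unique_ptr must give up ownership without deleting. A hedged sketch of that handoff (names invented; the TODO above suggests the long-term fix is passing std::unique_ptr directly):

    #include <memory>

    struct Event {};
    // Takes ownership of 'e' only when it returns true.
    bool RegisterEventSketch(Event* e) { return e != nullptr; }

    bool SetEvent() {
      auto event = std::make_unique<Event>();
      if (!RegisterEventSketch(event.get())) {
        return false;   // registration failed: unique_ptr still owns and frees
      }
      event.release();  // registry owns the pointer now  // NOLINT
      return true;
    }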
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c1f69b8712..ef893eec30 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -599,12 +599,12 @@ class JitCompileTask final : public Task {
void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
} else if (kind_ == kCompileOsr) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
} else {
DCHECK(kind_ == kAllocateProfile);
- if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+ if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
}
}
@@ -673,7 +673,7 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_
if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
if ((new_count >= WarmMethodThreshold()) &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
- bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
+ bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
}
@@ -741,7 +741,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
if (np_method->IsCompilable()) {
if (!np_method->IsNative()) {
// The compiler requires a ProfilingInfo object for non-native methods.
- ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+ ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
}
JitCompileTask compile_task(method, JitCompileTask::kCompile);
// Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
@@ -761,7 +761,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, profiling_info->GetSavedEntryPoint());
} else {
- AddSamples(thread, method, 1, /* with_backedges */false);
+ AddSamples(thread, method, 1, /* with_backedges= */false);
}
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 63cb6a4593..8239602b50 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -39,6 +39,7 @@
#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
+#include "gc/allocator/dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
@@ -221,7 +222,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
unique_fd mem_fd;
// Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
if (mem_fd.get() < 0) {
VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
<< strerror(errno);
@@ -281,8 +282,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
kProtRW,
base_flags,
mem_fd,
- /* start */ 0,
- /* low_4gb */ true,
+ /* start= */ 0,
+ /* low_4gb= */ true,
"data-code-cache",
&error_str);
} else {
@@ -303,12 +304,12 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
"data-code-cache",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
data_capacity + exec_capacity,
kProtRW,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
+ /* reservation= */ nullptr,
&error_str);
}
@@ -347,8 +348,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
kProtR,
base_flags,
mem_fd,
- /* start */ data_capacity,
- /* low_4GB */ false,
+ /* start= */ data_capacity,
+ /* low_4gb= */ false,
"jit-code-cache-rw",
&error_str);
if (!non_exec_pages.IsValid()) {
@@ -1008,7 +1009,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
// Simply discard the compiled code. Clear the counter so that it may be recompiled later.
// Hopefully the class hierarchy will be more stable when compilation is retried.
single_impl_still_valid = false;
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
break;
}
}
@@ -1156,7 +1157,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
// method. The compiled code for the method (if there is any) must not be in any threads call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
- RemoveMethodLocked(method, /* release_memory */ true);
+ RemoveMethodLocked(method, /* release_memory= */ true);
}
// This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -1314,7 +1315,7 @@ class MarkCodeClosure final : public Closure {
// its stack frame, it is not the method owning return_pc_. We just pass null to
// LookupMethodHeader: the method is only checked against in debug builds.
OatQuickMethodHeader* method_header =
- code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
+ code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
if (method_header != nullptr) {
const void* code = method_header->GetCode();
CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1438,7 +1439,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
<< PrettySize(CodeCacheSize())
<< ", data=" << PrettySize(DataCacheSize());
- DoCollection(self, /* collect_profiling_info */ do_full_collection);
+ DoCollection(self, /* collect_profiling_info= */ do_full_collection);
VLOG(jit) << "After code cache collection, code="
<< PrettySize(CodeCacheSize())
@@ -1551,7 +1552,7 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
info->SetSavedEntryPoint(nullptr);
// We are going to move this method back to interpreter. Clear the counter now to
// give it a chance to be hot again.
- ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
+ ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
}
}
} else if (kIsDebugBuild) {
@@ -1933,7 +1934,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr
VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not hit the
// threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
return false;
}
@@ -2009,7 +2010,7 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
// and clear the counter to get the method Jitted again.
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
- ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
+ ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
} else {
MutexLock mu(Thread::Current(), lock_);
auto it = osr_code_map_.find(method);
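The memfd hunks above belong to the dual-view JIT cache: the cache is backed by a memfd file that can be mapped twice, one writable view and one executable view, with a plain anonymous mapping as the fallback the comment describes for kernels without memfd_create. A rough sketch under those assumptions, using raw syscalls rather than ART's MemMap API:

    #include <cstddef>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int TryMemfd(const char* name) {
    #ifdef __NR_memfd_create
      return syscall(__NR_memfd_create, name, 0);  // may fail on old kernels
    #else
      return -1;
    #endif
    }

    void* MapDataView(size_t size) {
      int fd = TryMemfd("jit-cache");
      if (fd >= 0) {
        // File-backed: a second, executable view of the same pages can be
        // mapped from the same fd later.
        return mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      }
      // Fallback: single anonymous mapping, no dual view.
      return mmap(nullptr, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }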
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 9043f267fb..e3248eaf24 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -129,7 +129,7 @@ void ProfileSaver::Run() {
}
total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
}
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ true);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
// When we save without waiting for JIT notifications we use a simple
@@ -183,7 +183,7 @@ void ProfileSaver::Run() {
uint16_t number_of_new_methods = 0;
uint64_t start_work = NanoTime();
- bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
+ bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods);
// Update the notification counter based on the result. Note that there might be contention on this
// but we don't care about being 100% precise.
if (!profile_saved_to_disk) {
@@ -501,7 +501,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
// We only need to do this once, not once per dex location.
// TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms.
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ false);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);
for (const auto& it : tracked_locations) {
if (!force_save && ShuttingDown(Thread::Current())) {
@@ -521,7 +521,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
}
{
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+ if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
LOG(WARNING) << "Could not forcefully load profile " << filename;
continue;
}
@@ -607,9 +607,9 @@ void* ProfileSaver::RunProfileSaverThread(void* arg) {
Runtime* runtime = Runtime::Current();
bool attached = runtime->AttachCurrentThread("Profile Saver",
- /*as_daemon*/true,
+ /*as_daemon=*/true,
runtime->GetSystemThreadGroup(),
- /*create_peer*/true);
+ /*create_peer=*/true);
if (!attached) {
CHECK(runtime->IsShuttingDown(Thread::Current()));
return nullptr;
@@ -751,7 +751,7 @@ void ProfileSaver::Stop(bool dump_info) {
// Force save everything before destroying the thread since we want profiler_pthread_ to remain
// valid.
- instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
// Wait for the saver thread to stop.
CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -838,7 +838,7 @@ void ProfileSaver::ForceProcessProfiles() {
// but we only use this in testing when we know this won't happen.
// Refactor the way we handle the instance so that we don't end up in this situation.
if (saver != nullptr) {
- saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
}
}
@@ -846,7 +846,7 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile, bool hot, MethodRef
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(profile, /*clear_if_invalid*/false)) {
+ if (!info.Load(profile, /*clear_if_invalid=*/false)) {
return false;
}
ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a3dae8330a..f6139bb82b 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -125,7 +125,7 @@ class ProfilingInfo {
}
bool IsInUseByCompiler() const {
- return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+ return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
(current_inline_uses_ > 0);
}
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 6f61f5e37c..48f99815fd 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -286,7 +286,7 @@ bool CheckAttachedThread(const char* function_name) {
// to get reasonable stacks and environment, rather than relying on
// tombstoned.
JNIEnv* env;
- Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thread_args */ nullptr);
+ Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thr_args= */ nullptr);
std::string tmp = android::base::StringPrintf(
"a thread (tid %" PRId64 " is making JNI calls without being attached",
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 5200607e9b..52509fde66 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -82,7 +82,7 @@ namespace art {
static constexpr bool kWarnJniAbort = false;
static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
- return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames */ 1));
+ return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames= */ 1));
}
template<typename T>
@@ -106,9 +106,9 @@ static void NotifySetObjectField(ArtField* field, jobject obj, jobject jval)
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -133,9 +133,9 @@ static void NotifySetPrimitiveField(ArtField* field, jobject obj, JValue val)
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -157,9 +157,9 @@ static void NotifyGetField(ArtField* field, jobject obj)
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 4ad4c14e0b..57346b7f78 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -962,11 +962,11 @@ TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
// Make sure we can actually use it.
jstring s = env_->NewStringUTF("poop");
if (mirror::kUseStringCompression) {
- ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible= */ true),
env_->GetIntField(s, fid2));
// Create incompressible string
jstring s_16 = env_->NewStringUTF("\u0444\u0444");
- ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible= */ false),
env_->GetIntField(s_16, fid2));
} else {
ASSERT_EQ(4, env_->GetIntField(s, fid2));
@@ -1485,7 +1485,7 @@ TEST_F(JniInternalTest, StaleWeakGlobal) {
ASSERT_NE(weak_global, nullptr);
env_->DeleteLocalRef(local_ref);
// GC should clear the weak global.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
jobject new_global_ref = env_->NewGlobalRef(weak_global);
EXPECT_EQ(new_global_ref, nullptr);
jobject new_local_ref = env_->NewLocalRef(weak_global);
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 570fc48272..7d889c0694 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -22,6 +22,7 @@
#include "common_dex_operations.h"
#include "interpreter/shadow_frame-inl.h"
#include "jvalue-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/emulated_stack_frame.h"
#include "mirror/method_handle_impl-inl.h"
#include "mirror/method_type.h"
@@ -745,7 +746,7 @@ bool DoInvokePolymorphicMethod(Thread* self,
callee_type,
self,
shadow_frame,
- method_handle /* receiver */,
+ /* receiver= */ method_handle,
operands,
result);
} else {
@@ -1103,7 +1104,7 @@ static inline bool MethodHandleInvokeInternal(Thread* self,
if (IsInvokeVarHandle(handle_kind)) {
return DoVarHandleInvokeTranslation(self,
shadow_frame,
- /*invokeExact*/ false,
+ /*invokeExact=*/ false,
method_handle,
callsite_type,
operands,
@@ -1155,7 +1156,7 @@ static inline bool MethodHandleInvokeExactInternal(
} else if (IsInvokeVarHandle(handle_kind)) {
return DoVarHandleInvokeTranslation(self,
shadow_frame,
- /*invokeExact*/ true,
+ /*invokeExact=*/ true,
method_handle,
callsite_type,
operands,
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 704fb118e3..d806e40005 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,6 +27,7 @@
#include "class.h"
#include "gc/heap-inl.h"
#include "obj_ptr-inl.h"
+#include "runtime.h"
#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index df70fab21f..50b1b903ab 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -57,14 +57,14 @@ inline uint32_t Class::GetObjectSizeAllocFastPath() {
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline Class* Class::GetSuperClass() {
+inline ObjPtr<Class> Class::GetSuperClass() {
// Can only get super class for loaded classes (hack for when runtime is
// initializing)
DCHECK(IsLoaded<kVerifyFlags>() ||
IsErroneous<kVerifyFlags>() ||
!Runtime::Current()->IsStarted()) << IsLoaded();
- return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
- OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+ return ObjPtr<Class>(GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
+ OFFSET_OF_OBJECT_MEMBER(Class, super_class_)));
}
inline void Class::SetSuperClass(ObjPtr<Class> new_super_class) {
@@ -1073,8 +1073,8 @@ inline void Class::FixupNativePointer(
T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size);
T new_value = visitor(old_value, address);
if (old_value != new_value) {
- dest->SetFieldPtrWithSize</* kTransactionActive */ false,
- /* kCheckTransaction */ true,
+ dest->SetFieldPtrWithSize</* kTransactionActive= */ false,
+ /* kCheckTransaction= */ true,
kVerifyNone>(member_offset, new_value, pointer_size);
}
}
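GetSuperClass now returns ObjPtr<Class> instead of a raw Class* (the matching declaration change follows below). ObjPtr is a by-value wrapper around a managed pointer, so the signature change is free at runtime while giving debug builds a place to validate the reference. A mock of the shape; ART's real ObjPtr does pointer verification this sketch omits:

    template <typename T>
    class ObjPtrSketch {
     public:
      explicit ObjPtrSketch(T* ptr) : ptr_(ptr) {}
      T* Ptr() const { return ptr_; }  // real ObjPtr can assert validity here
     private:
      T* ptr_;
    };

    struct ClassSketch { ClassSketch* super_class_; };

    ObjPtrSketch<ClassSketch> GetSuperClassSketch(ClassSketch* klass) {
      return ObjPtrSketch<ClassSketch>(klass->super_class_);
    }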
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 26dba024c6..6a378f0ca5 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -83,7 +83,7 @@ ObjPtr<mirror::Class> Class::GetPrimitiveClass(ObjPtr<mirror::String> name) {
Thread* self = Thread::Current();
if (name == nullptr) {
// Note: ThrowNullPointerException() requires a message which we deliberately want to omit.
- self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg */ nullptr);
+ self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg= */ nullptr);
} else {
self->ThrowNewException("Ljava/lang/ClassNotFoundException;", name->ToModifiedUtf8().c_str());
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index eddc84bb2c..cbe377e61b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -624,7 +624,7 @@ class MANAGED Class final : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjPtr<Class> GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
// Get first common super class. It will never return null.
// `This` and `klass` must be classes.
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 6efb7474e0..13eaf3da45 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,12 +27,12 @@
#include "base/enums.h"
#include "class_linker.h"
#include "dex/dex_file.h"
-#include "gc/heap-inl.h"
#include "gc_root-inl.h"
#include "mirror/call_site.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "obj_ptr.h"
+#include "object-inl.h"
#include "runtime.h"
#include "write_barrier-inl.h"
@@ -42,6 +42,27 @@ namespace art {
namespace mirror {
template <typename T>
+inline DexCachePair<T>::DexCachePair(ObjPtr<T> object, uint32_t index)
+ : object(object), index(index) {}
+
+template <typename T>
+inline void DexCachePair<T>::Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
+ DexCachePair<T> first_elem;
+ first_elem.object = GcRoot<T>(nullptr);
+ first_elem.index = InvalidIndexForSlot(0);
+ dex_cache[0].store(first_elem, std::memory_order_relaxed);
+}
+
+template <typename T>
+inline T* DexCachePair<T>::GetObjectForIndex(uint32_t idx) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(!object.IsNull());
+ return object.Read();
+}
+
+template <typename T>
inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache,
PointerSize pointer_size) {
NativeDexCachePair<T> first_elem;
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index da1cd3fc2b..6149f9c0be 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -21,7 +21,7 @@
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "dex/dex_file_types.h"
-#include "gc_root-inl.h"
+#include "gc_root.h" // Note: must not use -inl here to avoid circular dependency.
#include "object.h"
#include "object_array.h"
@@ -67,19 +67,12 @@ template <typename T> struct PACKED(8) DexCachePair {
// it's always non-null if the id branch succeeds (except for the 0th id).
// Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
// the lookup id == stored id branch.
- DexCachePair(ObjPtr<T> object, uint32_t index)
- : object(object),
- index(index) {}
+ DexCachePair(ObjPtr<T> object, uint32_t index);
DexCachePair() : index(0) {}
DexCachePair(const DexCachePair<T>&) = default;
DexCachePair& operator=(const DexCachePair<T>&) = default;
- static void Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
- DexCachePair<T> first_elem;
- first_elem.object = GcRoot<T>(nullptr);
- first_elem.index = InvalidIndexForSlot(0);
- dex_cache[0].store(first_elem, std::memory_order_relaxed);
- }
+ static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);
static uint32_t InvalidIndexForSlot(uint32_t slot) {
// Since the cache size is a power of two, 0 will always map to slot 0.
@@ -87,13 +80,7 @@ template <typename T> struct PACKED(8) DexCachePair {
return (slot == 0) ? 1u : 0u;
}
- T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (idx != index) {
- return nullptr;
- }
- DCHECK(!object.IsNull());
- return object.Read();
- }
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};
template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
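The DexCachePair hunks are a textbook header split: the template bodies move from dex_cache.h into dex_cache-inl.h so the main header can include gc_root.h instead of gc_root-inl.h, breaking the circular dependency the new comment calls out. The pattern, with invented names:

    // --- pair.h: declarations only, no -inl includes ---
    template <typename T>
    struct PairSketch {
      static void Initialize(T* storage);  // body lives in pair-inl.h
    };

    // --- pair-inl.h: definitions may pull in heavy headers ---
    template <typename T>
    inline void PairSketch<T>::Initialize(T* storage) {
      storage[0] = T();  // default-initialize slot 0
    }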
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e9e7ca8688..36c5ae2ab9 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -108,7 +108,7 @@ TEST_F(DexCacheTest, TestResolvedFieldAccess) {
EXPECT_NE(klass1->NumStaticFields(), 0u);
for (ArtField& field : klass2->GetSFields()) {
EXPECT_FALSE(
- klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false>(
+ klass1->ResolvedFieldAccessTest</*throw_on_failure=*/ false>(
klass2.Get(),
&field,
klass1->GetDexCache(),
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index fbe002a9f0..8ae79a8c66 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -80,11 +80,11 @@ inline uint32_t Object::GetLockOwnerThreadId() {
}
inline mirror::Object* Object::MonitorEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this, /*trylock*/false);
+ return Monitor::MonitorEnter(self, this, /*trylock=*/false);
}
inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this, /*trylock*/true);
+ return Monitor::MonitorEnter(self, this, /*trylock=*/true);
}
inline bool Object::MonitorExit(Thread* self) {
@@ -738,7 +738,7 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer
inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
ObjPtr<Object> new_value) {
VerifyTransaction<kTransactionActive, kCheckTransaction>();
- VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr);
+ VerifyCAS<kVerifyFlags>(new_value, /*old_value=*/ nullptr);
uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 8689e4dadb..ee84997fe6 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -131,7 +131,7 @@ inline uint32_t Object::GetReadBarrierState() {
UNREACHABLE();
}
DCHECK(kUseBakerReadBarrier);
- LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
+ LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 025c10bc2a..9b38576fc5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -27,7 +27,7 @@
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
-#include "gc/heap.h"
+#include "gc/heap-inl.h"
#include "handle_scope-inl.h"
#include "iftable-inl.h"
#include "monitor.h"
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 8fa2c6cf7f..3752d6dde9 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -237,7 +237,7 @@ inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag,
template <bool kIsInstrumented>
inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
- const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true);
+ const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
SetStringCountVisitor visitor(length_with_flag);
return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 02aa1a823a..0f0a378142 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -289,7 +289,7 @@ struct NthCallerWithDexPcVisitor final : public StackVisitor {
// Is this the requested frame?
if (current_frame_number_ == wanted_frame_number_) {
method_ = m;
- dex_pc_ = GetDexPc(false /* abort_on_error*/);
+ dex_pc_ = GetDexPc(/* abort_on_failure=*/ false);
return false;
}
@@ -385,7 +385,7 @@ bool Monitor::TryLockLocked(Thread* self) {
} else {
return false;
}
- AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+ AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
return true;
}
@@ -777,7 +777,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
AtraceMonitorUnlock(); // For the implicit Unlock() just above. This will only end the deepest
// nesting, but that is enough for the visualization, and corresponds to
// the single Lock() we do afterwards.
- AtraceMonitorLock(self, GetObject(), true /* is_wait */);
+ AtraceMonitorLock(self, GetObject(), /* is_wait= */ true);
bool was_interrupted = false;
bool timed_out = false;
@@ -1042,7 +1042,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
// No ordering required for preceding lockword read, since we retest.
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
}
continue; // Go again.
@@ -1060,8 +1060,8 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
// Only this thread pays attention to the count. Thus there is no need for stronger
// than relaxed memory ordering.
if (!kUseReadBarrier) {
- h_obj->SetLockWord(thin_locked, false /* volatile */);
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
@@ -1069,7 +1069,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
thin_locked,
CASMode::kWeak,
std::memory_order_relaxed)) {
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
}
}
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 74623dab31..19e1f3d2c4 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -43,7 +43,7 @@ void Monitor::LogContentionEvent(Thread* self,
// Emit the process name, <= 37 bytes.
{
- int fd = open("/proc/self/cmdline", O_RDONLY);
+ int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
char procName[33];
memset(procName, 0, sizeof(procName));
read(fd, procName, sizeof(procName) - 1);
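Adding O_CLOEXEC here is a small hardening: the descriptor is closed automatically across exec(), so a short-lived read like this cannot leak an open fd into forked children. The same idiom, self-contained:

    #include <fcntl.h>
    #include <unistd.h>

    ssize_t ReadCmdline(char* buf, size_t len) {
      int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
      if (fd < 0) {
        return -1;
      }
      ssize_t n = read(fd, buf, len - 1);
      close(fd);
      return n;
    }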
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 0b168f86f4..8610899b9b 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -361,7 +361,7 @@ TEST_F(MonitorTest, TestTryLock) {
thread_pool.AddTask(self, new TryLockTask(obj1));
thread_pool.StartWorkers(self);
ScopedThreadSuspension sts(self, kSuspended);
- thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+ thread_pool.Wait(Thread::Current(), /*do_work=*/false, /*may_hold_locks=*/false);
}
// Test that the trylock actually locks the object.
{
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 36f9b1aaeb..f9f87d83f6 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -35,6 +35,7 @@
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "handle_scope-inl.h"
#include "jit/debugger_interface.h"
#include "jni/jni_internal.h"
#include "mirror/class_loader.h"
@@ -112,7 +113,7 @@ static jlongArray ConvertDexFilesToJavaArray(JNIEnv* env,
// Now release all the unique_ptrs.
for (auto& dex_file : vec) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
return long_array;
@@ -174,10 +175,10 @@ static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
std::string error_message;
size_t length = static_cast<size_t>(end - start);
MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_message);
if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
@@ -196,8 +197,8 @@ static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) {
std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
0,
std::move(dex_mem_map),
- /* verify */ true,
- /* verify_location */ true,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_message));
if (dex_file == nullptr) {
ScopedObjectAccess soa(env);
@@ -295,7 +296,7 @@ static jobject DexFile_openDexFileNative(JNIEnv* env,
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
if (linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
}
}
@@ -551,7 +552,7 @@ static jstring DexFile_getDexFileStatus(JNIEnv* env,
}
OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set,
- false /* load_executable */);
+ /* load_executable= */ false);
return env->NewStringUTF(oat_file_assistant.GetStatusDump().c_str());
}
@@ -774,7 +775,7 @@ static jobjectArray DexFile_getDexFileOutputPaths(JNIEnv* env,
OatFileAssistant oat_file_assistant(filename.c_str(),
target_instruction_set,
- false /* load_executable */);
+ /* load_executable= */ false);
std::unique_ptr<OatFile> best_oat_file = oat_file_assistant.GetBestOatFile();
if (best_oat_file == nullptr) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6f98a6d381..24c8d14a96 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -23,6 +23,7 @@
#include "nativehelper/jni_macros.h"
+#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/time_utils.h"
#include "class_linker.h"
@@ -113,7 +114,7 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env,
return;
}
- int fd = dup(originalFd);
+ int fd = DupCloexec(originalFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
@@ -366,7 +367,7 @@ static jobjectArray VMDebug_getInstancesOfClasses(JNIEnv* env,
VariableSizedHandleScope hs2(soa.Self());
std::vector<Handle<mirror::Object>> raw_instances;
- heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+ heap->GetInstances(hs2, h_class, includeAssignable, /* max_count= */ 0, raw_instances);
jobjectArray array = env->NewObjectArray(raw_instances.size(),
WellKnownClasses::java_lang_Object,
nullptr);
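dup() becomes DupCloexec(), presumably so the duplicated tracing fd also carries the close-on-exec flag. The diff shows only the call site; a typical implementation of such a helper (our assumption, not ART's verified source) is the atomic F_DUPFD_CLOEXEC form:

    #include <fcntl.h>

    int DupCloexecSketch(int fd) {
      // Duplicates and sets close-on-exec in one step, avoiding the race
      // window of dup() followed by fcntl(FD_CLOEXEC).
      return fcntl(fd, F_DUPFD_CLOEXEC, /*lowest_fd=*/0);
    }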
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 861d1db880..2a3ea46225 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -404,7 +404,7 @@ static void PreloadDexCachesResolveField(ObjPtr<mirror::DexCache> dex_cache,
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
- field_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+ field_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
if (klass == nullptr) {
return;
}
@@ -432,12 +432,12 @@ static void PreloadDexCachesResolveMethod(ObjPtr<mirror::DexCache> dex_cache, ui
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType(
- method_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+ method_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
if (klass == nullptr) {
return;
}
// Call FindResolvedMethod to populate the dex cache.
- class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader */ nullptr, method_idx);
+ class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader= */ nullptr, method_idx);
}
struct DexCacheStats {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index e3932df5c0..32733a8409 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -59,7 +59,7 @@ static ResultT GetThreadStack(const ScopedFastNativeObjectAccess& soa,
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
Thread* thread = thread_list->SuspendThreadByPeer(peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kInternal,
&timed_out);
if (thread != nullptr) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 72dae4791c..f54bf87216 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -152,7 +152,8 @@ static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) {
// Drop the shared mutator lock.
ScopedThreadSuspension sts(self, art::ThreadState::kNative);
// Get exclusive mutator lock with suspend all.
- ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!",
+ /*long_suspend=*/false);
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
}
@@ -399,7 +400,7 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
env,
is_system_server,
Runtime::NativeBridgeAction::kUnload,
- /*isa*/ nullptr,
+ /*isa=*/ nullptr,
profile_system_server);
}
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index f5039d1929..6d94fa1922 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -647,7 +647,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis)
ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
annotation_array_class,
- /* length */ 0);
+ /* length= */ 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
}
return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass));
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b7f0a7aabc..67ad0a47b8 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -147,7 +147,7 @@ static void Thread_setNativeName(JNIEnv* env, jobject peer, jstring java_name) {
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
Thread* thread = thread_list->SuspendThreadByPeer(peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kInternal,
&timed_out);
if (thread != nullptr) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 1ad233a6b2..46162c1989 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -21,6 +21,7 @@
#include "dex/descriptors_names.h"
#include "dex/dex_file_loader.h"
#include "dex/utf.h"
+#include "handle_scope-inl.h"
#include "jni/jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
index 1f2bf09f0e..0b26bd7c4a 100644
--- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -48,7 +48,7 @@ static jobject MethodHandleImpl_getMemberInternal(JNIEnv* env, jobject thiz) {
if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) {
ArtField* const field = handle->GetTargetField();
h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
- soa.Self(), field, false /* force_resolve */));
+ soa.Self(), field, /* force_resolve= */ false));
} else {
ArtMethod* const method = handle->GetTargetMethod();
if (method->IsConstructor()) {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 46444808d7..e021b77dae 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -74,8 +74,8 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb
mirror::HeapReference<mirror::Object>* field_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier,
- /* kAlwaysUpdateField */ true>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier,
+ /* kAlwaysUpdateField= */ true>(
obj.Ptr(),
MemberOffset(offset),
field_addr);
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index f42a2d6755..8b6c2edc66 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -16,6 +16,7 @@
#include "non_debuggable_classes.h"
+#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "nativehelper/scoped_local_ref.h"
diff --git a/runtime/oat.h b/runtime/oat.h
index 963725a050..2a6d738178 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -23,7 +23,6 @@
#include "base/macros.h"
#include "base/safe_map.h"
#include "compiler_filter.h"
-#include "dex/dex_file.h"
namespace art {
@@ -32,8 +31,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Remove PIC option from oat files.
- static constexpr uint8_t kOatVersion[] = { '1', '6', '2', '\0' };
+ // Last oat version changed reason: Remove interpreter alt tables.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '3', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
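The version bump above is what forces stale oat files to be rejected and regenerated: the runtime compares the header's version bytes byte-for-byte, so any format change (here, the removal of the interpreter alt tables) must be accompanied by a new kOatVersion. A minimal sketch of that gate, using a hypothetical FakeOatHeader stand-in rather than the real OatHeader (which carries many more fields and checks):

    #include <cstdint>
    #include <cstring>

    // Hypothetical, trimmed-down header for illustration only.
    struct FakeOatHeader {
      uint8_t magic[4];    // expected: { 'o', 'a', 't', '\n' }
      uint8_t version[4];  // expected: { '1', '6', '3', '\0' }
    };

    // Any byte-wise mismatch rejects the file, triggering re-compilation.
    bool IsCompatible(const FakeOatHeader& header) {
      static constexpr uint8_t kMagic[4] = { 'o', 'a', 't', '\n' };
      static constexpr uint8_t kVersion[4] = { '1', '6', '3', '\0' };
      return std::memcmp(header.magic, kMagic, sizeof(kMagic)) == 0 &&
             std::memcmp(header.version, kVersion, sizeof(kVersion)) == 0;
    }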
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 5b965090d2..7c320d8101 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -101,7 +101,6 @@ class OatFileBase : public OatFile {
const std::string& vdex_filename,
const std::string& elf_filename,
const std::string& location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -115,7 +114,6 @@ class OatFileBase : public OatFile {
int oat_fd,
const std::string& vdex_filename,
const std::string& oat_filename,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -156,9 +154,7 @@ class OatFileBase : public OatFile {
/*inout*/MemMap* reservation, // Where to load if not null.
/*out*/std::string* error_msg) = 0;
- bool ComputeFields(uint8_t* requested_base,
- const std::string& file_path,
- std::string* error_msg);
+ bool ComputeFields(const std::string& file_path, std::string* error_msg);
virtual void PreSetup(const std::string& elf_filename) = 0;
@@ -187,7 +183,6 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
const std::string& vdex_filename,
const std::string& elf_filename,
const std::string& location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -207,7 +202,7 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
return nullptr;
}
- if (!ret->ComputeFields(requested_base, elf_filename, error_msg)) {
+ if (!ret->ComputeFields(elf_filename, error_msg)) {
return nullptr;
}
@@ -230,7 +225,6 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
int oat_fd,
const std::string& vdex_location,
const std::string& oat_location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -248,7 +242,7 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
return nullptr;
}
- if (!ret->ComputeFields(requested_base, oat_location, error_msg)) {
+ if (!ret->ComputeFields(oat_location, error_msg)) {
return nullptr;
}
@@ -271,11 +265,11 @@ bool OatFileBase::LoadVdex(const std::string& vdex_filename,
std::string* error_msg) {
vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
vdex_end_ - vdex_begin_,
- vdex_begin_ != nullptr /* mmap_reuse */,
+ /*mmap_reuse=*/ vdex_begin_ != nullptr,
vdex_filename,
writable,
low_4gb,
- /* unquicken*/ false,
+ /* unquicken=*/ false,
error_msg);
if (vdex_.get() == nullptr) {
*error_msg = StringPrintf("Failed to load vdex file '%s' %s",
@@ -299,13 +293,13 @@ bool OatFileBase::LoadVdex(int vdex_fd,
} else {
vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
vdex_end_ - vdex_begin_,
- vdex_begin_ != nullptr /* mmap_reuse */,
+ /*mmap_reuse=*/ vdex_begin_ != nullptr,
vdex_fd,
s.st_size,
vdex_filename,
writable,
low_4gb,
- false /* unquicken */,
+ /*unquicken=*/ false,
error_msg);
if (vdex_.get() == nullptr) {
*error_msg = "Failed opening vdex file.";
@@ -316,9 +310,7 @@ bool OatFileBase::LoadVdex(int vdex_fd,
return true;
}
-bool OatFileBase::ComputeFields(uint8_t* requested_base,
- const std::string& file_path,
- std::string* error_msg) {
+bool OatFileBase::ComputeFields(const std::string& file_path, std::string* error_msg) {
std::string symbol_error_msg;
begin_ = FindDynamicSymbolAddress("oatdata", &symbol_error_msg);
if (begin_ == nullptr) {
@@ -327,16 +319,6 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
symbol_error_msg.c_str());
return false;
}
- if (requested_base != nullptr && begin_ != requested_base) {
- // Host can fail this check. Do not dump there to avoid polluting the output.
- if (kIsTargetBuild && (kIsDebugBuild || VLOG_IS_ON(oat))) {
- PrintFileToLog("/proc/self/maps", android::base::LogSeverity::WARNING);
- }
- *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
- "oatdata=%p != expected=%p. See process maps in the log.",
- begin_, requested_base);
- return false;
- }
end_ = FindDynamicSymbolAddress("oatlastword", &symbol_error_msg);
if (end_ == nullptr) {
*error_msg = StringPrintf("Failed to find oatlastword symbol in '%s' %s",
@@ -649,15 +631,15 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e
if (zip_fd != -1) {
loaded = dex_file_loader.OpenZip(zip_fd,
dex_file_location,
- /* verify */ false,
- /* verify_checksum */ false,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg,
uncompressed_dex_files_.get());
} else {
loaded = dex_file_loader.Open(dex_file_location.c_str(),
dex_file_location,
- /* verify */ false,
- /* verify_checksum */ false,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg,
uncompressed_dex_files_.get());
}
@@ -1323,7 +1305,7 @@ ElfOatFile* ElfOatFile::OpenElfFile(int zip_fd,
}
// Complete the setup.
- if (!oat_file->ComputeFields(/* requested_base */ nullptr, file->GetPath(), error_msg)) {
+ if (!oat_file->ComputeFields(file->GetPath(), error_msg)) {
return nullptr;
}
@@ -1407,10 +1389,9 @@ bool ElfOatFile::ElfFileOpen(File* file,
/*inout*/MemMap* reservation,
/*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- // TODO: rename requested_base to oat_data_begin
elf_file_.reset(ElfFile::Open(file,
writable,
- /*program_header_only*/true,
+ /*program_header_only=*/ true,
low_4gb,
error_msg));
if (elf_file_ == nullptr) {
@@ -1458,7 +1439,7 @@ OatFile* OatFile::OpenWithElfFile(int zip_fd,
const std::string& location,
const char* abs_dex_location,
std::string* error_msg) {
- std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, false /* executable */));
+ std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, /*executable=*/ false));
return oat_file->InitializeFromElfFile(zip_fd, elf_file, vdex_file, abs_dex_location, error_msg)
? oat_file.release()
: nullptr;
@@ -1467,7 +1448,6 @@ OatFile* OatFile::OpenWithElfFile(int zip_fd,
OatFile* OatFile::Open(int zip_fd,
const std::string& oat_filename,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -1494,8 +1474,7 @@ OatFile* OatFile::Open(int zip_fd,
vdex_filename,
oat_filename,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1524,8 +1503,7 @@ OatFile* OatFile::Open(int zip_fd,
vdex_filename,
oat_filename,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1538,7 +1516,6 @@ OatFile* OatFile::Open(int zip_fd,
int vdex_fd,
int oat_fd,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -1553,8 +1530,7 @@ OatFile* OatFile::Open(int zip_fd,
oat_fd,
vdex_location,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1572,11 +1548,11 @@ OatFile* OatFile::OpenWritable(int zip_fd,
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- /* writable */ true,
- /* executable */ false,
- /*low_4gb*/false,
+ /*writable=*/ true,
+ /*executable=*/ false,
+ /*low_4gb=*/false,
abs_dex_location,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
error_msg);
}
@@ -1589,11 +1565,11 @@ OatFile* OatFile::OpenReadable(int zip_fd,
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- /* writable */ false,
- /* executable */ false,
- /*low_4gb*/false,
+ /*writable=*/ false,
+ /*executable=*/ false,
+ /*low_4gb=*/false,
abs_dex_location,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
error_msg);
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b3736e6514..4294baf23a 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -85,7 +85,6 @@ class OatFile {
static OatFile* Open(int zip_fd,
const std::string& filename,
const std::string& location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -99,7 +98,6 @@ class OatFile {
int vdex_fd,
int oat_fd,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -228,12 +226,12 @@ class OatFile {
// A representation of an invalid OatClass, used when an OatClass can't be found.
// See FindOatClass().
static OatClass Invalid() {
- return OatClass(/* oat_file */ nullptr,
+ return OatClass(/* oat_file= */ nullptr,
ClassStatus::kErrorUnresolved,
kOatClassNoneCompiled,
- /* bitmap_size */ 0,
- /* bitmap_pointer */ nullptr,
- /* methods_pointer */ nullptr);
+ /* bitmap_size= */ 0,
+ /* bitmap_pointer= */ nullptr,
+ /* methods_pointer= */ nullptr);
}
private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 009abdb85c..a06be4c719 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -76,9 +76,9 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
isa,
load_executable,
only_load_system_executable,
- -1 /* vdex_fd */,
- -1 /* oat_fd */,
- -1 /* zip_fd */) {}
+ /*vdex_fd=*/ -1,
+ /*oat_fd=*/ -1,
+ /*zip_fd=*/ -1) {}
OatFileAssistant::OatFileAssistant(const char* dex_location,
@@ -91,8 +91,8 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
: isa_(isa),
load_executable_(load_executable),
only_load_system_executable_(only_load_system_executable),
- odex_(this, /*is_oat_location*/ false),
- oat_(this, /*is_oat_location*/ true),
+ odex_(this, /*is_oat_location=*/ false),
+ oat_(this, /*is_oat_location=*/ true),
zip_fd_(zip_fd) {
CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
@@ -124,7 +124,7 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
// Get the oat filename.
std::string oat_file_name;
if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
- oat_.Reset(oat_file_name, false /* use_fd */);
+ oat_.Reset(oat_file_name, /*use_fd=*/ false);
} else {
LOG(WARNING) << "Failed to determine oat file name for dex location "
<< dex_location_ << ": " << error_msg;
@@ -575,7 +575,6 @@ OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string
}
info->oat_checksum = image_header->GetOatChecksum();
- info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
info->patch_delta = image_header->GetPatchDelta();
return info;
}
@@ -693,17 +692,17 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() {
vdex = VdexFile::Open(vdex_fd_,
s.st_size,
vdex_filename,
- false /*writable*/,
- false /*low_4gb*/,
- false /* unquicken */,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg);
}
}
} else {
vdex = VdexFile::Open(vdex_filename,
- false /*writeable*/,
- false /*low_4gb*/,
- false /*unquicken*/,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg);
}
if (vdex == nullptr) {
@@ -779,22 +778,20 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
vdex_fd_,
oat_fd_,
filename_.c_str(),
- /* requested_base */ nullptr,
executable,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
oat_file_assistant_->dex_location_.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
}
} else {
- file_.reset(OatFile::Open(/* zip_fd */ -1,
+ file_.reset(OatFile::Open(/*zip_fd=*/ -1,
filename_.c_str(),
filename_.c_str(),
- /* requested_base */ nullptr,
executable,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
oat_file_assistant_->dex_location_.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
}
if (file_.get() == nullptr) {
@@ -924,7 +921,7 @@ void OatFileAssistant::GetOptimizationStatus(
std::string* out_compilation_reason) {
// It may not be possible to load an oat file executable (e.g., selinux restrictions). Load
// non-executable and check the status manually.
- OatFileAssistant oat_file_assistant(filename.c_str(), isa, false /* load_executable */);
+ OatFileAssistant oat_file_assistant(filename.c_str(), isa, /*load_executable=*/ false);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
if (oat_file == nullptr) {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3da1a221ae..590ae2254c 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -247,7 +247,6 @@ class OatFileAssistant {
private:
struct ImageInfo {
uint32_t oat_checksum = 0;
- uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 3a974df386..521e419d2f 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -182,8 +182,8 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(
CompilerFilter::kDefaultCompilerFilter,
- /* downgrade */ false,
- /* profile_changed */ false,
+ /* profile_changed= */ false,
+ /* downgrade= */ false,
relative_context.get()));
}
@@ -336,11 +336,11 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
- android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
- android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
- android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+ android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+ android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
@@ -375,17 +375,17 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
- android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
- android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+ android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
false,
vdex_fd.get(),
- -1 /* oat_fd */,
+ /* oat_fd= */ -1,
zip_fd.get());
EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -408,16 +408,16 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
- android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
- android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+ android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
false,
- -1 /* vdex_fd */,
+ /* vdex_fd= */ -1,
odex_fd.get(),
zip_fd.get());
@@ -436,13 +436,13 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
Copy(GetDexSrc1(), dex_location);
- android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
false,
- -1 /* vdex_fd */,
- -1 /* oat_fd */,
+ /* vdex_fd= */ -1,
+ /* oat_fd= */ -1,
zip_fd);
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -637,7 +637,7 @@ TEST_F(OatFileAssistantTest, StrippedMultiDexNonMainOutOfDate) {
// Strip the dex file.
Copy(GetStrippedDexSrc1(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable*/false);
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable=*/false);
// Because the dex file is stripped, the odex file is considered the source
// of truth for the dex checksums. The oat file should be considered
@@ -730,7 +730,7 @@ TEST_F(OatFileAssistantTest, OatImageOutOfDate) {
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ true);
+ /* with_alternate_image= */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -765,7 +765,7 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) {
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kExtract,
- /* with_alternate_image */ true);
+ /* with_alternate_image= */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -1167,7 +1167,7 @@ class RaceGenerateTask : public Task {
dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
dex_location_.c_str(),
Runtime::Current()->GetSystemClassLoader(),
- /*dex_elements*/nullptr,
+ /*dex_elements=*/nullptr,
&oat_file,
&error_msgs);
CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
@@ -1213,7 +1213,7 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
tasks.push_back(std::move(task));
}
thread_pool.StartWorkers(self);
- thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
+ thread_pool.Wait(self, /* do_work= */ true, /* may_hold_locks= */ false);
// Verify that tasks which got an oat file got a unique one.
std::set<const OatFile*> oat_files;
@@ -1335,8 +1335,8 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(
CompilerFilter::kDefaultCompilerFilter,
- /* downgrade */ false,
- /* profile_changed */ false,
+ /* profile_changed= */ false,
+ /* downgrade= */ false,
updated_context.get()));
}
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index a9ef9a3fa9..7ac1ab40a2 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -84,7 +84,7 @@ void OatFileManager::UnRegisterAndDeleteOatFile(const OatFile* oat_file) {
auto it = oat_files_.find(compare);
CHECK(it != oat_files_.end());
oat_files_.erase(it);
- compare.release();
+ compare.release(); // NOLINT b/117926937
}
const OatFile* OatFileManager::FindOpenedOatFileFromDexLocation(
@@ -181,7 +181,7 @@ class TypeIndexInfo {
private:
static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
- BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+ BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator());
for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
uint16_t type_idx = class_def.class_idx_.index_;
@@ -302,12 +302,12 @@ static bool CheckClassCollision(const OatFile* oat_file,
std::priority_queue<DexFileAndClassPair> queue;
for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
- queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+ queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat=*/true);
}
}
for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
- queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+ queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat=*/false);
}
}
@@ -385,8 +385,8 @@ OatFileManager::CheckCollisionResult OatFileManager::CheckCollision(
// the oat file without additional checks
ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch(
oat_file->GetClassLoaderContext(),
- /*verify_names*/ true,
- /*verify_checksums*/ true);
+ /*verify_names=*/ true,
+ /*verify_checksums=*/ true);
switch (result) {
case ClassLoaderContext::VerificationResult::kForcedToSkipChecks:
return CheckCollisionResult::kSkippedClassLoaderContextSharedLibrary;
@@ -567,7 +567,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (added_image_space) {
// Successfully added image space to heap, release the map so that it does not get
// freed.
- image_space.release();
+ image_space.release(); // NOLINT b/117926937
// Register for tracking.
for (const auto& dex_file : dex_files) {
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 51d8fca6c5..b54711322b 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -74,14 +74,13 @@ TEST_F(OatFileTest, LoadOat) {
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr);
@@ -102,14 +101,13 @@ TEST_F(OatFileTest, ChangingMultiDexUncompressed) {
// Ensure we can load that file. Just a precondition.
{
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
@@ -119,14 +117,13 @@ TEST_F(OatFileTest, ChangingMultiDexUncompressed) {
Copy(GetTestDexFileName("MainUncompressed"), dex_location);
// And try to load again.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location,
oat_location,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
EXPECT_TRUE(odex_file == nullptr);
EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h
index 411dc7af82..23e536d27e 100644
--- a/runtime/proxy_test.h
+++ b/runtime/proxy_test.h
@@ -47,7 +47,7 @@ ObjPtr<mirror::Class> GenerateProxyClass(ScopedObjectAccess& soa,
// Builds the interfaces array.
jobjectArray proxyClassInterfaces =
- soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr);
+ soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement= */ nullptr);
soa.Self()->AssertNoPendingException();
for (size_t i = 0; i < interfaces.size(); ++i) {
soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i,
@@ -62,7 +62,7 @@ ObjPtr<mirror::Class> GenerateProxyClass(ScopedObjectAccess& soa,
jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
methods_count,
soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()),
- /* initialElement */ nullptr);
+ /* initialElement= */ nullptr);
soa.Self()->AssertNoPendingException();
jsize array_index = 0;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 36a6b7fc47..afdfefaffa 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -126,7 +126,7 @@ class CatchBlockStackVisitor final : public StackVisitor {
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
- method, found_dex_pc, /* is_catch_handler */ true));
+ method, found_dex_pc, /* is_for_catch_handler= */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
return false; // End stack walk.
@@ -218,7 +218,10 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
}
// Walk the stack to find catch handler.
- CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this, /*skip*/already_popped);
+ CatchBlockStackVisitor visitor(self_, context_,
+ &exception_ref,
+ this,
+ /*skip_frames=*/already_popped);
visitor.WalkStack(true);
uint32_t new_pop_count = handler_frame_depth_;
DCHECK_GE(new_pop_count, already_popped);
@@ -606,7 +609,7 @@ void QuickExceptionHandler::DeoptimizeSingleFrame(DeoptimizationKind kind) {
<< deopt_method->PrettyMethod()
<< " due to "
<< GetDeoptimizationKindName(kind);
- DumpFramesWithType(self_, /* details */ true);
+ DumpFramesWithType(self_, /* details= */ true);
}
if (Runtime::Current()->UseJitCompilation()) {
Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index bde0d11c6f..e6cc471ae6 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -25,7 +25,9 @@
#include "base/casts.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "gc_root-inl.h"
+#include "interpreter/mterp/mterp.h"
#include "obj_ptr-inl.h"
+#include "thread_list.h"
namespace art {
@@ -86,6 +88,15 @@ inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
}
+template<typename Action>
+void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) {
+ MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
+ lambda();
+ Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+ thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+ }, nullptr);
+}
+
} // namespace art
#endif // ART_RUNTIME_RUNTIME_INL_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b3a2bdd936..7fa5607582 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -165,6 +165,11 @@
#include <android/set_abort_message.h>
#endif
+// Static asserts to check the values of generated assembly-support macros.
+#define ASM_DEFINE(NAME, EXPR) static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
+#include "asm_defines.def"
+#undef ASM_DEFINE
+
namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
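The new block in the hunk above is an X-macro cross-check: asm_defines.def holds one ASM_DEFINE(name, expr) entry per assembly-support constant, and defining the macro as a static_assert before including the file verifies every entry against its C++ definition at compile time. A self-contained sketch of the same pattern, with the .def contents inlined and a hypothetical constant name, since the real generated list is not reproduced here:

    // What a shared defs file would contain, one line per constant:
    //   ASM_DEFINE(ART_EXAMPLE_OFFSET, 8)

    // Consumer 1: produce usable C++ constants from the list.
    #define ASM_DEFINE(NAME, EXPR) constexpr int NAME = (EXPR);
    ASM_DEFINE(ART_EXAMPLE_OFFSET, 8)   // normally: #include "defs.def"
    #undef ASM_DEFINE

    // Consumer 2: re-expand the same list as compile-time checks,
    // exactly the shape used in runtime.cc above.
    #define ASM_DEFINE(NAME, EXPR) \
      static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
    ASM_DEFINE(ART_EXAMPLE_OFFSET, 8)   // normally: #include "defs.def"
    #undef ASM_DEFINE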
@@ -300,15 +305,15 @@ Runtime::~Runtime() {
// Very few things are actually capable of distinguishing between the peer & peerless states so
// this should be fine.
bool thread_attached = AttachCurrentThread("Shutdown thread",
- /* as_daemon */ false,
+ /* as_daemon= */ false,
GetSystemThreadGroup(),
- /* Create peer */ IsStarted());
+ /* create_peer= */ IsStarted());
if (UNLIKELY(!thread_attached)) {
LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
- /* as_daemon */ false,
- /* thread_group*/ nullptr,
- /* Create peer */ false));
+ /* as_daemon= */ false,
+ /* thread_group=*/ nullptr,
+ /* create_peer= */ false));
}
self = Thread::Current();
} else {
@@ -609,7 +614,7 @@ bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
Locks::Init();
- InitLogging(/* argv */ nullptr, Abort); // Calls Locks::Init() as a side effect.
+ InitLogging(/* argv= */ nullptr, Abort); // Calls Locks::Init() as a side effect.
bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
if (!parsed) {
LOG(ERROR) << "Failed to parse options";
@@ -810,7 +815,7 @@ bool Runtime::Start() {
? NativeBridgeAction::kInitialize
: NativeBridgeAction::kUnload;
InitNonZygoteOrPostFork(self->GetJniEnv(),
- /* is_system_server */ false,
+ /* is_system_server= */ false,
action,
GetInstructionSetString(kRuntimeISA));
}
@@ -997,9 +1002,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
std::string error_msg;
std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
- false /* writable */,
- false /* low_4gb */,
- false, /* unquicken */
+ /* writable= */ false,
+ /* low_4gb= */ false,
+ /* unquicken= */ false,
&error_msg));
if (vdex_file.get() == nullptr) {
return false;
@@ -1010,15 +1015,15 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
return false;
}
std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
- false /* writable */,
- false /* program_header_only */,
- false /* low_4gb */,
+ /* writable= */ false,
+ /* program_header_only= */ false,
+ /* low_4gb= */ false,
&error_msg));
if (elf_file.get() == nullptr) {
return false;
}
std::unique_ptr<const OatFile> oat_file(
- OatFile::OpenWithElfFile(/* zip_fd */ -1,
+ OatFile::OpenWithElfFile(/* zip_fd= */ -1,
elf_file.release(),
vdex_file.release(),
oat_location,
@@ -1112,7 +1117,7 @@ static inline void CreatePreAllocatedException(Thread* self,
CHECK(klass != nullptr);
gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
- klass->Alloc</* kIsInstrumented */ true>(self, allocator_type));
+ klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
CHECK(exception_object != nullptr);
*exception = GcRoot<mirror::Throwable>(exception_object);
// Initialize the "detailMessage" field.
@@ -1122,7 +1127,7 @@ static inline void CreatePreAllocatedException(Thread* self,
ArtField* detailMessageField =
throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
CHECK(detailMessageField != nullptr);
- detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message);
+ detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
}
bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
@@ -1155,8 +1160,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
reinterpret_cast<uint8_t*>(kSentinelAddr),
kPageSize,
PROT_NONE,
- /* low_4g */ true,
- /* error_msg */ nullptr);
+ /* low_4gb= */ true,
+ /* error_msg= */ nullptr);
if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
} else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
@@ -1366,13 +1371,13 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
arena_pool_.reset(new MallocArenaPool());
jit_arena_pool_.reset(new MallocArenaPool());
} else {
- arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false));
- jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata"));
+ arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false));
+ jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata"));
}
if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
- low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true));
+ low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true));
}
linear_alloc_.reset(CreateLinearAlloc());
@@ -2143,7 +2148,7 @@ ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
}
// Create empty conflict table.
- method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+ method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc),
pointer_size);
return method;
}
@@ -2275,7 +2280,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
return;
}
- if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) {
+ if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
return;
}
@@ -2514,12 +2519,12 @@ void Runtime::FixupConflictTables() {
const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_unimplemented_method_->SetImtConflictTable(
- ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
pointer_size);
}
if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_conflict_method_->SetImtConflictTable(
- ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
pointer_size);
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 398a48d935..e27c87d616 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -659,7 +659,7 @@ class Runtime {
}
void SetNonStandardExitsEnabled() {
- non_standard_exits_enabled_ = true;
+ DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
}
bool AreAsyncExceptionsThrown() const {
@@ -667,9 +667,20 @@ class Runtime {
}
void SetAsyncExceptionsThrown() {
- async_exceptions_thrown_ = true;
+ DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
}
+ // Change state and re-check which interpreter should be used.
+ //
+ // This must be called whenever there is an event that forces
+ // us to use a different interpreter (e.g. the debugger is attached).
+ //
+ // Changing the state using the lambda gives us some multithreading safety.
+ // It ensures that two calls do not interfere with each other and
+ // it makes it possible to DCHECK that the thread-local flag is correct.
+ template<typename Action>
+ static void DoAndMaybeSwitchInterpreter(Action lambda);
+
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
std::string GetFingerprint() {
return fingerprint_;
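The comment block above describes the contract; the helper itself (defined in runtime-inl.h earlier in this change) takes the thread list lock, runs the state-changing lambda, then recomputes every thread's cached interpreter flag. A simplified, self-contained sketch of that pattern, with FakeThread, all_threads, and CanUseMterp as illustrative stand-ins for the real thread list and interpreter::CanUseMterp():

    #include <atomic>
    #include <mutex>
    #include <vector>

    struct FakeThread { std::atomic<bool> use_mterp{true}; };
    std::mutex thread_list_lock;          // stands in for Locks::thread_list_lock_
    std::vector<FakeThread*> all_threads; // stands in for the runtime thread list
    bool non_standard_exits_enabled = false;

    bool CanUseMterp() {
      // The real predicate also consults debugger/instrumentation state.
      return !non_standard_exits_enabled;
    }

    // Mutate state and refresh every thread's cached choice under one lock,
    // so concurrent calls cannot leave a thread with a stale flag.
    template <typename Action>
    void DoAndMaybeSwitchInterpreter(Action lambda) {
      std::lock_guard<std::mutex> lock(thread_list_lock);
      lambda();
      for (FakeThread* thread : all_threads) {
        thread->use_mterp.store(CanUseMterp());
      }
    }

This is why SetNonStandardExitsEnabled() above becomes DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; }): the write and the re-evaluation of whether mterp is still usable happen atomically with respect to other such calls.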
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 4bd3b3ae3a..55ba293f52 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -30,8 +30,8 @@ void HandleUnexpectedSignalAndroid(int signal_number, siginfo_t* info, void* raw
HandleUnexpectedSignalCommon(signal_number,
info,
raw_context,
- /* handle_timeout_signal */ false,
- /* dump_on_stderr */ false);
+ /* handle_timeout_signal= */ false,
+ /* dump_on_stderr= */ false);
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
@@ -44,7 +44,7 @@ void Runtime::InitPlatformSignalHandlers() {
if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
&old_action,
- /* handle_timeout_signal */ false);
+ /* handle_timeout_signal= */ false);
}
}
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 89f312457a..20b33277b3 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,10 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
128 * kPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(stack.IsValid()) << error_msg;
@@ -505,10 +505,10 @@ TEST_F(MonitorWaitCallbacksTest, WaitUnlocked) {
self,
// Just a random class
soa.Decode<mirror::Class>(WellKnownClasses::java_util_Collections).Ptr(),
- /*ms*/0,
- /*ns*/0,
- /*interruptShouldThrow*/false,
- /*why*/kWaiting);
+ /*ms=*/0,
+ /*ns=*/0,
+ /*interruptShouldThrow=*/false,
+ /*why=*/kWaiting);
}
}
ASSERT_TRUE(cb_.saw_wait_start_);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 6313553255..cfa8ea6342 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -31,8 +31,8 @@ void HandleUnexpectedSignalLinux(int signal_number, siginfo_t* info, void* raw_c
HandleUnexpectedSignalCommon(signal_number,
info,
raw_context,
- /* handle_timeout_signal */ true,
- /* dump_on_stderr */ true);
+ /* handle_timeout_signal= */ true,
+ /* dump_on_stderr= */ true);
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
pid_t tid = GetTid();
@@ -77,7 +77,7 @@ void Runtime::InitPlatformSignalHandlers() {
// On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
nullptr,
- /* handle_timeout_signal */ true);
+ /* handle_timeout_signal= */ true);
}
} // namespace art
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index f4a27b8397..38ea9cc3b8 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -118,7 +118,7 @@ void SignalCatcher::Output(const std::string& s) {
ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput);
- std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */));
+ std::unique_ptr<File> file(new File(output_fd.release(), /* check_usage= */ true));
bool success = file->WriteFully(s.data(), s.size());
if (success) {
success = file->FlushCloseOrErase() == 0;
@@ -169,7 +169,7 @@ void SignalCatcher::HandleSigQuit() {
void SignalCatcher::HandleSigUsr1() {
LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
ProfileSaver::ForceProcessProfiles();
}
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 5f44286089..811e23b2d2 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -358,7 +358,7 @@ class CodeInfo {
ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
if (stack_map.HasDexRegisterMap()) {
DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
- DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map);
+ DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register= */ 0, &map);
return map;
}
return DexRegisterMap(0, DexRegisterLocation::None());
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index aac547eb78..106c7f1f75 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -237,7 +237,7 @@ struct SubtypeCheck {
static SubtypeCheckInfo::State EnsureInitialized(ClassPtr klass)
REQUIRES(Locks::subtype_check_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return InitializeOrAssign(klass, /*assign*/false).GetState();
+ return InitializeOrAssign(klass, /*assign=*/false).GetState();
}
// Force this class's SubtypeCheckInfo state into Assigned|Overflowed.
@@ -250,7 +250,7 @@ struct SubtypeCheck {
static SubtypeCheckInfo::State EnsureAssigned(ClassPtr klass)
REQUIRES(Locks::subtype_check_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return InitializeOrAssign(klass, /*assign*/true).GetState();
+ return InitializeOrAssign(klass, /*assign=*/true).GetState();
}
// Resets the SubtypeCheckInfo into the Uninitialized state.
@@ -398,7 +398,7 @@ struct SubtypeCheck {
// Force all ancestors to Assigned | Overflowed.
ClassPtr parent_klass = GetParentClass(klass);
- size_t parent_depth = InitializeOrAssign(parent_klass, /*assign*/true).GetDepth();
+ size_t parent_depth = InitializeOrAssign(parent_klass, /*assign=*/true).GetDepth();
if (kIsDebugBuild) {
SubtypeCheckInfo::State parent_state = GetSubtypeCheckInfo(parent_klass).GetState();
DCHECK(parent_state == SubtypeCheckInfo::kAssigned ||
@@ -542,17 +542,17 @@ struct SubtypeCheck {
int32_t new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
- return klass->template CasField32</*kTransactionActive*/true>(offset,
- old_value,
- new_value,
- CASMode::kWeak,
- std::memory_order_seq_cst);
- } else {
- return klass->template CasField32</*kTransactionActive*/false>(offset,
+ return klass->template CasField32</*kTransactionActive=*/true>(offset,
old_value,
new_value,
CASMode::kWeak,
std::memory_order_seq_cst);
+ } else {
+ return klass->template CasField32</*kTransactionActive=*/false>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
}
}
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 462f203978..23d8ac371e 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -56,9 +56,9 @@ namespace art {
*
* See subtype_check.h and subtype_check_info.h for more details.
*/
-BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
- BitStructField<BitString, /*lsb*/ 0> bitstring_;
- BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
+BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u)
+ BitStructField<BitString, /*lsb=*/ 0> bitstring_;
+ BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_;
BITSTRUCT_DEFINE_END(SubtypeCheckBits);
} // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 321a723985..eec6e21832 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -68,11 +68,11 @@ static constexpr size_t NonNumericBitSizeOf() {
static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
- BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+ BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_;
BitStructField<ClassStatus,
- /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
- /*width*/ kClassStatusBitSize> status_;
- BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
+ /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
+ /*width=*/ kClassStatusBitSize> status_;
+ BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
// Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 9bd135e4c2..44a2a6933e 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -87,7 +87,7 @@ BitString SetBitStringCharAt(BitString bit_string, size_t i, size_t val) {
struct SubtypeCheckInfoTest : public ::testing::Test {
protected:
void SetUp() override {
- android::base::InitLogging(/*argv*/nullptr);
+ android::base::InitLogging(/*argv=*/nullptr);
}
void TearDown() override {
@@ -158,33 +158,33 @@ TEST_F(SubtypeCheckInfoTest, IllegalValues) {
// Illegal values during construction would cause a DCHECK failure and crash.
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/0u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/0u),
GetExpectedMessageForDeathTest("Path was too long for the depth"));
ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({1u, 1u}),
- /*overflow*/false,
- /*depth*/0u),
+ /*overflow=*/false,
+ /*depth=*/0u),
GetExpectedMessageForDeathTest("Bitstring too long for depth"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/1u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/1u),
GetExpectedMessageForDeathTest("Expected \\(Assigned\\|Initialized\\) "
"state to have >0 Next value"));
ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({0u, 2u, 1u}),
- /*overflow*/false,
- /*depth*/2u),
+ /*overflow=*/false,
+ /*depth=*/2u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 2u}),
- /*next*/MakeBitStringChar(1u),
- /*overflow*/false,
- /*depth*/2u),
+ /*next=*/MakeBitStringChar(1u),
+ /*overflow=*/false,
+ /*depth=*/2u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 1u, 1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/3u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/3u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
// These are really slow (~1sec per death test on host),
@@ -194,62 +194,62 @@ TEST_F(SubtypeCheckInfoTest, IllegalValues) {
TEST_F(SubtypeCheckInfoTest, States) {
EXPECT_EQ(SubtypeCheckInfo::kUninitialized, MakeSubtypeCheckInfo().GetState());
EXPECT_EQ(SubtypeCheckInfo::kInitialized,
- MakeSubtypeCheckInfo(/*path*/{}, /*next*/MakeBitStringChar(1)).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/{}, /*next=*/MakeBitStringChar(1)).GetState());
EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
- MakeSubtypeCheckInfo(/*path*/{},
- /*next*/MakeBitStringChar(1),
- /*overflow*/true,
- /*depth*/1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/{},
+ /*next=*/MakeBitStringChar(1),
+ /*overflow=*/true,
+ /*depth=*/1u).GetState());
EXPECT_EQ(SubtypeCheckInfo::kAssigned,
- MakeSubtypeCheckInfo(/*path*/MakeBitString({1u}),
- /*next*/MakeBitStringChar(1),
- /*overflow*/false,
- /*depth*/1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitString({1u}),
+ /*next=*/MakeBitStringChar(1),
+ /*overflow=*/false,
+ /*depth=*/1u).GetState());
// Test edge conditions: depth == BitString::kCapacity (No Next value).
EXPECT_EQ(SubtypeCheckInfo::kAssigned,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/BitString::kCapacity).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity).GetState());
EXPECT_EQ(SubtypeCheckInfo::kInitialized,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax<BitString::kCapacity - 1u>(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/BitString::kCapacity).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax<BitString::kCapacity - 1u>(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity).GetState());
// Test edge conditions: depth > BitString::kCapacity (Must overflow).
EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/true,
- /*depth*/BitString::kCapacity + 1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/true,
+ /*depth=*/BitString::kCapacity + 1u).GetState());
}
TEST_F(SubtypeCheckInfoTest, NextValue) {
// Validate "Next" is correctly aliased as the Bitstring[Depth] character.
EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/0u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/0u).GetNext());
EXPECT_EQ(MakeBitStringChar(2u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/1u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/1u).GetNext());
EXPECT_EQ(MakeBitStringChar(3u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/2u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/2u).GetNext());
EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({0u, 2u, 1u}),
- /*overflow*/false,
- /*depth*/2u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/2u).GetNext());
// Test edge conditions: depth == BitString::kCapacity (No Next value).
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity)));
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity)));
// Anything with depth >= BitString::kCapacity has no next value.
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity + 1u)));
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity + 1u)));
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax(),
- /*overflow*/false,
- /*depth*/std::numeric_limits<size_t>::max())));
+ /*overflow=*/false,
+ /*depth=*/std::numeric_limits<size_t>::max())));
}
template <size_t kPos = BitString::kCapacity>
@@ -259,10 +259,10 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
using StorageType = BitString::StorageType;
SubtypeCheckInfo sci =
- MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
- /*next*/BitStringChar{},
- /*overflow*/false,
- /*depth*/BitString::kCapacity);
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/BitStringChar{},
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity);
// 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
@@ -275,8 +275,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
SubtypeCheckInfo sci2 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity);
#define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
(((pos0) << 0) | \
@@ -290,8 +290,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
SubtypeCheckInfo sci3 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity - 1u);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity - 1u);
EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
sci3.GetEncodedPathToRoot());
@@ -300,8 +300,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
SubtypeCheckInfo sci4 =
MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
- /*overflow*/false,
- /*depth*/BitString::kCapacity - 2u);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity - 2u);
EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
@@ -320,7 +320,7 @@ TEST_F(SubtypeCheckInfoTest, CopyCleared) {
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -331,7 +331,7 @@ TEST_F(SubtypeCheckInfoTest, CopyCleared) {
// CopyCleared is just a thin wrapper around value-init and providing the depth.
SubtypeCheckInfo cleared_copy_value =
- SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth*/1u);
+ SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth=*/1u);
EXPECT_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy_value.GetState());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(cleared_copy_value));
}
@@ -340,7 +340,7 @@ TEST_F(SubtypeCheckInfoTest, NewForChild2) {
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -350,17 +350,17 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) {
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childA = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo childA = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, childA.GetState());
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize.
EXPECT_EQ(MakeBitString({}), GetPathToRoot(childA));
- SubtypeCheckInfo childB = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo childB = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, childB.GetState());
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize.
EXPECT_EQ(MakeBitString({}), GetPathToRoot(childB));
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -369,19 +369,19 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) {
size_t cur_depth = 1u;
SubtypeCheckInfo latest_child = childC;
while (cur_depth != BitString::kCapacity) {
- latest_child = latest_child.CreateChild(/*assign*/true);
+ latest_child = latest_child.CreateChild(/*assign_next=*/true);
ASSERT_EQ(SubtypeCheckInfo::kAssigned, latest_child.GetState());
ASSERT_EQ(cur_depth + 1u, GetPathToRoot(latest_child).Length());
cur_depth++;
}
// Future assignments will result in a too-deep overflow.
- SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep.GetState());
EXPECT_EQ(GetPathToRoot(latest_child), GetPathToRoot(child_of_deep));
// Assignment of too-deep overflow also causes overflow.
- SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep_2.GetState());
EXPECT_EQ(GetPathToRoot(child_of_deep), GetPathToRoot(child_of_deep_2));
}
@@ -393,7 +393,7 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) {
break;
}
- SubtypeCheckInfo child = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/true);
ASSERT_EQ(SubtypeCheckInfo::kAssigned, child.GetState());
ASSERT_EQ(MakeBitStringChar(cur_next+1u), root.GetNext());
ASSERT_EQ(MakeBitString({cur_next}), GetPathToRoot(child));
@@ -403,20 +403,20 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) {
// Now the root will be in a state that further assigns will be too-wide overflow.
// Initialization still succeeds.
- SubtypeCheckInfo child = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, child.GetState());
EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(child));
// Assignment goes to too-wide Overflow.
- SubtypeCheckInfo child_of = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of.GetState());
EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(child_of));
// Assignment of overflowed child still succeeds.
// The path to root is the same.
- SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of2.GetState());
EXPECT_EQ(GetPathToRoot(child_of), GetPathToRoot(child_of2));
}
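
The `/*name=*/value` spelling adopted throughout this change is the argument-comment convention recognized by clang tooling: clang-tidy's bugprone-argument-comment check can verify that the name before the `=` matches the declared parameter and flag drift, which the older `/*name*/value` form never guaranteed. A minimal sketch of what the check enforces, using a hypothetical declaration rather than anything from this patch:

// Hypothetical declaration, for illustration only.
void CreateChild(bool assign_next);

void Caller() {
  CreateChild(/*assign_next=*/true);  // OK: comment matches the parameter.
  CreateChild(/*assign=*/true);       // Flagged: no parameter named 'assign'.
}
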
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 9aa30325c2..719e5d917c 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -302,7 +302,7 @@ struct MockScopedLockMutator {
struct SubtypeCheckTest : public ::testing::Test {
protected:
void SetUp() override {
- android::base::InitLogging(/*argv*/nullptr);
+ android::base::InitLogging(/*argv=*/nullptr);
CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u);
}
@@ -312,8 +312,8 @@ struct SubtypeCheckTest : public ::testing::Test {
void CreateRootedTree(size_t width, size_t height) {
all_classes_.clear();
- root_ = CreateClassFor(/*parent*/nullptr, /*x*/0, /*y*/0);
- CreateTreeFor(root_, /*width*/width, /*depth*/height);
+ root_ = CreateClassFor(/*parent=*/nullptr, /*x=*/0, /*y=*/0);
+ CreateTreeFor(root_, /*width=*/width, /*levels=*/height);
}
MockClass* CreateClassFor(MockClass* parent, size_t x, size_t y) {
@@ -681,7 +681,7 @@ void EnsureStateChangedTest(
const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
ASSERT_EQ(depth, transitions.size());
- EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
+ EnsureStateChangedTestRecursive(root, /*cur_depth=*/0u, depth, transitions);
}
TEST_F(SubtypeCheckTest, EnsureInitialized_NoOverflow) {
@@ -869,8 +869,8 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide) {
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
@@ -914,15 +914,15 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooWide) {
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()) << *child;
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
// Create too-wide children for a too-wide parent.
- MockClass* child_subchild = child->FindChildAt(/*x*/0, kTargetDepth);
- CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*depth*/1);
+ MockClass* child_subchild = child->FindChildAt(/*x=*/0, kTargetDepth);
+ CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOffSub*2, child_subchild->GetNumberOfChildren()) << *child_subchild;
ASSERT_TRUE(IsTooWide(child_subchild->GetMaxChild())) << *(child_subchild->GetMaxChild());
}
@@ -1035,8 +1035,8 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooDeep) {
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
@@ -1045,7 +1045,7 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooDeep) {
MockClass* child_subchild = child->GetMaxChild();
ASSERT_TRUE(child_subchild != nullptr);
ASSERT_EQ(0u, child_subchild->GetNumberOfChildren()) << *child_subchild;
- CreateTreeFor(child_subchild, /*width*/1, /*levels*/kTooDeepTargetDepth);
+ CreateTreeFor(child_subchild, /*width=*/1, /*levels=*/kTooDeepTargetDepth);
MockClass* too_deep_child = child_subchild->FindChildAt(0, kTooDeepTargetDepth + 2);
ASSERT_TRUE(too_deep_child != nullptr) << child_subchild->ToDotGraph();
ASSERT_TRUE(IsTooWide(too_deep_child)) << *(too_deep_child);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 51775c69a2..a3de4e2215 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -72,6 +72,7 @@
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "interpreter/interpreter.h"
+#include "interpreter/mterp/mterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
@@ -93,6 +94,7 @@
#include "quick_exception_handler.h"
#include "read_barrier-inl.h"
#include "reflection.h"
+#include "runtime-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
@@ -150,7 +152,7 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
CHECK(kUseReadBarrier);
tls32_.is_gc_marking = is_marking;
- UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
+ UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
ResetQuickAllocEntryPointsForThread(is_marking);
}
@@ -577,7 +579,7 @@ void Thread::InstallImplicitProtection() {
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
- if (ProtectStack(/* fatal_on_error */ false)) {
+ if (ProtectStack(/* fatal_on_error= */ false)) {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
uint32_t unwanted_size = stack_top - pregion - kPageSize;
@@ -646,7 +648,7 @@ void Thread::InstallImplicitProtection() {
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
// Protect the bottom of the stack to prevent read/write to it.
- ProtectStack(/* fatal_on_error */ true);
+ ProtectStack(/* fatal_on_error= */ true);
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
@@ -729,7 +731,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
// JNIEnvExt we created.
// Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
// between the threads.
- child_jni_env_ext.release();
+ child_jni_env_ext.release(); // NOLINT pthreads API.
return;
}
}
@@ -2012,13 +2014,13 @@ void Thread::DumpStack(std::ostream& os,
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method =
GetCurrentMethod(nullptr,
- /*check_suspended*/ !force_dump_stack,
- /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
+ /*check_suspended=*/ !force_dump_stack,
+ /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
DumpJavaStack(os,
- /*check_suspended*/ !force_dump_stack,
- /*dump_locks*/ !force_dump_stack);
+ /*check_suspended=*/ !force_dump_stack,
+ /*dump_locks=*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
@@ -2141,6 +2143,11 @@ Thread::Thread(bool daemon)
tlsPtr_.flip_function = nullptr;
tlsPtr_.thread_local_mark_stack = nullptr;
tls32_.is_transitioning_to_runnable = false;
+ tls32_.use_mterp = false;
+}
+
+void Thread::NotifyInThreadList() {
+ tls32_.use_mterp = interpreter::CanUseMterp();
}
bool Thread::CanLoadClasses() const {
@@ -2904,8 +2911,8 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
// Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 .
class_linker->EnsureInitialized(soa.Self(),
h_aste_class,
- /* can_init_fields */ true,
- /* can_init_parents */ true);
+ /* can_init_fields= */ true,
+ /* can_init_parents= */ true);
if (soa.Self()->IsExceptionPending()) {
// This should not fail in a healthy runtime.
return nullptr;
@@ -3422,9 +3429,9 @@ void Thread::QuickDeliverException() {
}
PushDeoptimizationContext(
JValue(),
- false /* is_reference */,
+ /* is_reference= */ false,
(force_deopt ? nullptr : exception),
- false /* from_code */,
+ /* from_code= */ false,
method_type);
artDeoptimize(this);
UNREACHABLE();
@@ -3550,7 +3557,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
// Mark lock count map required for structured locking checks.
- shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg */ -1, this);
+ shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
}
private:
@@ -3566,7 +3573,7 @@ class ReferenceMapVisitor : public StackVisitor {
if (kVerifyImageObjectsMarked) {
gc::Heap* const heap = Runtime::Current()->GetHeap();
gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
- /*fail_ok*/true);
+ /*fail_ok=*/true);
if (space != nullptr && space->IsImageSpace()) {
bool failed = false;
if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
@@ -3588,7 +3595,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
mirror::Object* new_ref = klass.Ptr();
- visitor_(&new_ref, /* vreg */ -1, this);
+ visitor_(&new_ref, /* vreg= */ -1, this);
if (new_ref != klass) {
method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
}
@@ -3661,7 +3668,7 @@ class ReferenceMapVisitor : public StackVisitor {
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
mirror::Object* new_ref = ref;
- visitor_(&new_ref, /* vreg */ -1, this);
+ visitor_(&new_ref, /* vreg= */ -1, this);
if (ref != new_ref) {
ref_addr->Assign(new_ref);
}
@@ -3854,9 +3861,9 @@ void Thread::VisitRoots(RootVisitor* visitor) {
void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
- VisitRoots</* kPrecise */ true>(visitor);
+ VisitRoots</* kPrecise= */ true>(visitor);
} else {
- VisitRoots</* kPrecise */ false>(visitor);
+ VisitRoots</* kPrecise= */ false>(visitor);
}
}
@@ -4071,7 +4078,7 @@ mirror::Object* Thread::GetPeerFromOtherThread() const {
void Thread::SetReadBarrierEntrypoints() {
// Make sure entrypoints aren't null.
- UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
+ UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ true);
}
void Thread::ClearAllInterpreterCaches() {
diff --git a/runtime/thread.h b/runtime/thread.h
index 47a3af2564..941867ce2d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -19,6 +19,7 @@
#include <setjmp.h>
+#include <atomic>
#include <bitset>
#include <deque>
#include <iosfwd>
@@ -672,6 +673,13 @@ class Thread {
}
template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
+ }
+
+ template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -743,18 +751,6 @@ class Thread {
}
template<PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
- }
-
- template<PointerSize pointer_size>
- static constexpr ThreadOffset<pointer_size> MterpAltIBaseOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
- }
-
- template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
@@ -1125,6 +1121,10 @@ class Thread {
tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
}
+ bool UseMterp() const {
+ return tls32_.use_mterp.load();
+ }
+
void ResetQuickAllocEntryPointsForThread(bool is_marking);
// Returns the remaining space in the TLAB.
@@ -1199,30 +1199,14 @@ class Thread {
bool ProtectStack(bool fatal_on_error = true);
bool UnprotectStack();
- void SetMterpDefaultIBase(void* ibase) {
- tlsPtr_.mterp_default_ibase = ibase;
- }
-
void SetMterpCurrentIBase(void* ibase) {
tlsPtr_.mterp_current_ibase = ibase;
}
- void SetMterpAltIBase(void* ibase) {
- tlsPtr_.mterp_alt_ibase = ibase;
- }
-
- const void* GetMterpDefaultIBase() const {
- return tlsPtr_.mterp_default_ibase;
- }
-
const void* GetMterpCurrentIBase() const {
return tlsPtr_.mterp_current_ibase;
}
- const void* GetMterpAltIBase() const {
- return tlsPtr_.mterp_alt_ibase;
- }
-
bool HandlingSignal() const {
return tls32_.handling_signal_;
}
@@ -1311,6 +1295,9 @@ class Thread {
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
void Destroy();
+ void NotifyInThreadList()
+ REQUIRES_SHARED(Locks::thread_list_lock_);
+
// Attaches the calling native thread to the runtime, returning the new native peer.
// Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
template <typename PeerAction>
@@ -1575,6 +1562,10 @@ class Thread {
// This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
+
+ // True if everything is in the ideal state for fast interpretation.
+ // False if we need to switch to the C++ interpreter to handle special cases.
+ std::atomic<bool32_t> use_mterp;
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
@@ -1599,8 +1590,7 @@ class Thread {
last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
thread_local_limit(nullptr),
- thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
- mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
+ thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr),
flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
async_exception(nullptr) {
@@ -1737,10 +1727,8 @@ class Thread {
JniEntryPoints jni_entrypoints;
QuickEntryPoints quick_entrypoints;
- // Mterp jump table bases.
+ // Mterp jump table base.
void* mterp_current_ibase;
- void* mterp_default_ibase;
- void* mterp_alt_ibase;
// There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
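
Replacing the three mterp ibase pointers with a single `mterp_current_ibase`, plus the new `use_mterp` flag in `tls32_`, keeps the interpreter fast path to one constant-offset load off the Thread pointer; `UseMterpOffset()` above is what hands that constant to the assembly interpreter. A simplified, self-contained sketch of the offset computation, with plain C++ stand-ins for ART's macros and types:

#include <cstddef>
#include <cstdint>

// Stand-ins for ART's Thread/tls_32bit_sized_values; OFFSETOF_MEMBER
// reduces to this kind of offsetof arithmetic.
struct Tls32 {
  uint32_t is_gc_marking;
  uint32_t use_mterp;  // Read directly by the assembly interpreter.
};

struct Thread {
  Tls32 tls32_;
  static constexpr size_t UseMterpOffset() {
    return offsetof(Thread, tls32_) + offsetof(Tls32, use_mterp);
  }
};

// The flag sits at a fixed, compile-time-known offset from the Thread base.
static_assert(Thread::UseMterpOffset() == 4, "known constant offset");
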
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ec40716296..d21b600566 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -438,7 +438,7 @@ void ThreadList::RunEmptyCheckpoint() {
// Wake up the threads blocking for weak ref access so that they will respond to the empty
// checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
- Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+ Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
uint64_t total_wait_time = 0;
@@ -491,9 +491,9 @@ void ThreadList::RunEmptyCheckpoint() {
// Found a runnable thread that hasn't responded to the empty checkpoint request.
// Assume it's stuck and safe to dump its stack.
thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
- /*dump_native_stack*/ true,
- /*backtrace_map*/ nullptr,
- /*force_dump_stack*/ true);
+ /*dump_native_stack=*/ true,
+ /*backtrace_map=*/ nullptr,
+ /*force_dump_stack=*/ true);
}
}
}
@@ -1431,6 +1431,7 @@ void ThreadList::Register(Thread* self) {
}
self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
}
+ self->NotifyInThreadList();
}
void ThreadList::Unregister(Thread* self) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index f1c808bb35..a245f659d7 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,10 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
stack_size += kPageSize;
std::string error_msg;
stack_ = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
stack_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
CHECK_ALIGNED(stack_.Begin(), kPageSize);
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 2600f55695..9e7c44a078 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -119,7 +119,7 @@ TEST_F(ThreadPoolTest, StopWait) {
// Drain the task list. Note: we have to restart here, as no tasks will be finished when
// the pool is stopped.
thread_pool.StartWorkers(self);
- thread_pool.Wait(self, /* do_work */ true, false);
+ thread_pool.Wait(self, /* do_work= */ true, false);
}
class TreeTask : public Task {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4ee983db21..ad58c2ea99 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -435,7 +435,7 @@ void Trace::Start(std::unique_ptr<File>&& trace_file_in,
// want to use the trampolines anyway since it is faster. It makes the story with disabling
// jit-gc more complex though.
runtime->GetInstrumentation()->EnableMethodTracing(
- kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable());
+ kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable());
}
}
}
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 1e5b2bbd4c..e4bf447a6f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/card_table-inl.h"
#include "gc_root-inl.h"
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 452cd8e359..bd59e73192 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -150,11 +150,11 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
unquicken ? MAP_PRIVATE : MAP_SHARED,
file_fd,
- /* start */ 0u,
+ /* start= */ 0u,
low_4gb,
vdex_filename.c_str(),
mmap_reuse,
- /* reservation */ nullptr,
+ /* reservation= */ nullptr,
error_msg);
if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
@@ -173,7 +173,7 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
return nullptr;
}
vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
- /* decompile_return_instruction */ false);
+ /* decompile_return_instruction= */ false);
// Update the quickening info size to pretend there isn't any.
size_t offset = vdex->GetDexSectionHeaderOffset();
reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
@@ -213,13 +213,13 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_
std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection(
dex_file_start,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
GetLocationChecksum(i),
- nullptr /*oat_dex_file*/,
- false /*verify*/,
- false /*verify_checksum*/,
+ /*oat_dex_file=*/ nullptr,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg));
if (dex == nullptr) {
return false;
diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc
index ced6e28577..9d92b42140 100644
--- a/runtime/vdex_file_test.cc
+++ b/runtime/vdex_file_test.cc
@@ -34,14 +34,14 @@ TEST_F(VdexFileTest, OpenEmptyVdex) {
std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(),
0,
tmp.GetFilename(),
- /*writable*/false,
- /*low_4gb*/false,
- /*quicken*/false,
+ /*writable=*/false,
+ /*low_4gb=*/false,
+ /*unquicken=*/false,
&error_msg);
EXPECT_TRUE(vdex == nullptr);
vdex = VdexFile::Open(
- tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, /*quicken*/ false, &error_msg);
+ tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false, /*unquicken=*/ false, &error_msg);
EXPECT_TRUE(vdex == nullptr);
}
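
Worth noting in the vdex_file_test.cc hunk above: the old comments said `/*quicken*/` for a parameter actually named `unquicken`, and nothing caught the mismatch. The `=` form is machine-checked, so the same drift would now be flagged. Restated outside the diff, reusing the test's variables:

// Before: unchecked comment naming a parameter that does not exist.
//   VdexFile::Open(tmp.GetFilename(), /*writable*/false, /*low_4gb*/false,
//                  /*quicken*/false, &error_msg);
// After: bugprone-argument-comment verifies the name against the declaration.
std::unique_ptr<VdexFile> vdex = VdexFile::Open(
    tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false,
    /*unquicken=*/false, &error_msg);
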
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5fce892ee6..7b07389057 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -242,7 +242,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
*previous_idx = method_idx;
const InvokeType type = method.GetInvokeType(class_def.access_flags_);
ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- method_idx, dex_cache, class_loader, /* referrer */ nullptr, type);
+ method_idx, dex_cache, class_loader, /* referrer= */ nullptr, type);
if (resolved_method == nullptr) {
DCHECK(self->IsExceptionPending());
// We couldn't resolve the method, but continue regardless.
@@ -263,7 +263,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
callbacks,
allow_soft_failures,
log_level,
- /*need_precise_constants*/ false,
+ /*need_precise_constants=*/ false,
api_level,
&hard_failure_msg);
if (result.kind == FailureKind::kHardFailure) {
@@ -340,11 +340,11 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
method_idx,
method,
method_access_flags,
- true /* can_load_classes */,
+ /* can_load_classes= */ true,
allow_soft_failures,
need_precise_constants,
- false /* verify to dump */,
- true /* allow_thread_suspension */,
+ /* verify_to_dump= */ false,
+ /* allow_thread_suspension= */ true,
api_level);
if (verifier.Verify()) {
// Verification completed, however failures may be pending that didn't cause the verification
@@ -475,11 +475,11 @@ MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self,
dex_method_idx,
method,
method_access_flags,
- true /* can_load_classes */,
- true /* allow_soft_failures */,
- true /* need_precise_constants */,
- true /* verify_to_dump */,
- true /* allow_thread_suspension */,
+ /* can_load_classes= */ true,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ true,
+ /* verify_to_dump= */ true,
+ /* allow_thread_suspension= */ true,
api_level);
verifier->Verify();
verifier->DumpFailures(vios->Stream());
@@ -570,11 +570,11 @@ void MethodVerifier::FindLocksAtDexPc(
m->GetDexMethodIndex(),
m,
m->GetAccessFlags(),
- false /* can_load_classes */,
- true /* allow_soft_failures */,
- false /* need_precise_constants */,
- false /* verify_to_dump */,
- false /* allow_thread_suspension */,
+ /* can_load_classes= */ false,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ false,
+ /* verify_to_dump= */ false,
+ /* allow_thread_suspension= */ false,
api_level);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index cedc583986..7519257cae 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -42,7 +42,7 @@ class MethodVerifierTest : public CommonRuntimeTest {
// Verify the class
std::string error_msg;
FailureKind failure = MethodVerifier::VerifyClass(
- self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg);
+ self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level= */ 0u, &error_msg);
if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
ASSERT_TRUE(failure == FailureKind::kSoftFailure ||
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 4a3f9e6365..91be00d34a 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -756,13 +756,13 @@ const RegType& RegType::Merge(const RegType& incoming_type,
VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
join_class,
GetClass(),
- /* strict */ true,
- /* is_assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
join_class,
incoming_type.GetClass(),
- /* strict */ true,
- /* is_assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
}
if (GetClass() == join_class && !IsPreciseReference()) {
return *this;
@@ -771,7 +771,7 @@ const RegType& RegType::Merge(const RegType& incoming_type,
} else {
std::string temp;
const char* descriptor = join_class->GetDescriptor(&temp);
- return reg_types->FromClass(descriptor, join_class, /* precise */ false);
+ return reg_types->FromClass(descriptor, join_class, /* precise= */ false);
}
}
} else {
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9f87adfa31..f62e8b6f54 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -126,7 +126,7 @@ inline const ImpreciseConstType& RegTypeCache::PosShortConstant() {
inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
const RegType* result = &FromClass("Ljava/lang/Class;",
GetClassRoot<mirror::Class>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -135,7 +135,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangString() {
// String is final and therefore always precise.
const RegType* result = &FromClass("Ljava/lang/String;",
GetClassRoot<mirror::String>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -143,7 +143,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangString() {
inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;",
GetClassRoot<mirror::MethodHandle>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -151,7 +151,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() {
const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;",
GetClassRoot<mirror::MethodType>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index f1f3488a3c..ceba7484dd 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -438,14 +438,14 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left,
// Is the resolved part a primitive array?
if (resolved_merged_is_array && !resolved_parts_merged.IsObjectArrayTypes()) {
- return JavaLangObject(false /* precise */);
+ return JavaLangObject(/* precise= */ false);
}
// Is any part not an array (but exists)?
if ((!left_unresolved_is_array && left_resolved != &left) ||
(!right_unresolved_is_array && right_resolved != &right) ||
!resolved_merged_is_array) {
- return JavaLangObject(false /* precise */);
+ return JavaLangObject(/* precise= */ false);
}
}
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 0430d205af..32243857f8 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -80,8 +80,8 @@ TEST_F(RegTypeTest, Pairs) {
EXPECT_FALSE(precise_lo.CheckWidePair(precise_const));
EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi));
// Test Merging.
- EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes());
- EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes());
+ EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier= */ nullptr)).IsLongTypes());
+ EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier= */ nullptr)).IsLongHighTypes());
}
TEST_F(RegTypeTest, Primitives) {
@@ -429,7 +429,7 @@ TEST_F(RegTypeReferenceTest, Dump) {
const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
const RegType& unresolved_merged = cache.FromUnresolvedMerge(
- unresolved_ref, unresolved_ref_another, /* verifier */ nullptr);
+ unresolved_ref, unresolved_ref_another, /* verifier= */ nullptr);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -490,14 +490,14 @@ TEST_F(RegTypeReferenceTest, Merging) {
RegTypeCache cache_new(true, allocator);
const RegType& string = cache_new.JavaLangString();
const RegType& Object = cache_new.JavaLangObject(true);
- EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject());
+ EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject());
// Merge two unresolved types.
const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
RegType& merged_nonconst = const_cast<RegType&>(merged);
@@ -520,22 +520,22 @@ TEST_F(RegTypeTest, MergingFloat) {
const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -556,46 +556,46 @@ TEST_F(RegTypeTest, MergingLong) {
const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
const RegType& merged = long_lo_type.Merge(
- imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
const RegType& merged = imprecise_cst_lo.Merge(
- long_lo_type, &cache_new, /* verifier */ nullptr);
+ long_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
const RegType& merged = long_hi_type.Merge(
- imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
const RegType& merged = imprecise_cst_hi.Merge(
- long_hi_type, &cache_new, /* verifier */ nullptr);
+ long_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -617,49 +617,49 @@ TEST_F(RegTypeTest, MergingDouble) {
{
// lo MERGE precise cst lo => lo.
const RegType& merged = double_lo_type.Merge(
- precise_cst_lo, &cache_new, /* verifier */ nullptr);
+ precise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
const RegType& merged = precise_cst_lo.Merge(
- double_lo_type, &cache_new, /* verifier */ nullptr);
+ double_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
const RegType& merged = double_lo_type.Merge(
- imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
const RegType& merged = imprecise_cst_lo.Merge(
- double_lo_type, &cache_new, /* verifier */ nullptr);
+ double_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
const RegType& merged = double_hi_type.Merge(
- precise_cst_hi, &cache_new, /* verifier */ nullptr);
+ precise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
const RegType& merged = precise_cst_hi.Merge(
- double_hi_type, &cache_new, /* verifier */ nullptr);
+ double_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
const RegType& merged = double_hi_type.Merge(
- imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
const RegType& merged = imprecise_cst_hi.Merge(
- double_hi_type, &cache_new, /* verifier */ nullptr);
+ double_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index fb91976781..b666c1582c 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -43,7 +43,7 @@ VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, bool ou
}
VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files)
- : VerifierDeps(dex_files, /*output_only*/ true) {}
+ : VerifierDeps(dex_files, /*output_only=*/ true) {}
void VerifierDeps::MergeWith(const VerifierDeps& other,
const std::vector<const DexFile*>& dex_files) {
@@ -439,7 +439,7 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file,
AddAssignability(dex_file,
destination_component,
source_component,
- /* is_strict */ true,
+ /* is_strict= */ true,
is_assignable);
return;
}
@@ -707,7 +707,7 @@ void VerifierDeps::Encode(const std::vector<const DexFile*>& dex_files,
VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files,
ArrayRef<const uint8_t> data)
- : VerifierDeps(dex_files, /*output_only*/ false) {
+ : VerifierDeps(dex_files, /*output_only=*/ false) {
if (data.empty()) {
// Return eagerly, as the first thing we expect from VerifierDeps data is
// the number of created strings, even if there is no dependency.
@@ -1089,9 +1089,9 @@ bool VerifierDeps::VerifyDexFile(Handle<mirror::ClassLoader> class_loader,
const DexFileDeps& deps,
Thread* self) const {
bool result = VerifyAssignability(
- class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self);
+ class_loader, dex_file, deps.assignable_types_, /* expected_assignability= */ true, self);
result = result && VerifyAssignability(
- class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self);
+ class_loader, dex_file, deps.unassignable_types_, /* expected_assignability= */ false, self);
result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);
diff --git a/test/1957-error-ext/expected.txt b/test/1957-error-ext/expected.txt
new file mode 100644
index 0000000000..bfe7033c67
--- /dev/null
+++ b/test/1957-error-ext/expected.txt
@@ -0,0 +1,4 @@
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
+Got class java.lang.Exception: Failed to redefine class <Lart/Test1957$Transform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED
+LastError is: FAILURE TO REDEFINE Unable to perform redefinition of 'Lart/Test1957$Transform;': Total number of declared methods changed from 2 to 1
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
diff --git a/test/1957-error-ext/info.txt b/test/1957-error-ext/info.txt
new file mode 100644
index 0000000000..ef772d9ae8
--- /dev/null
+++ b/test/1957-error-ext/info.txt
@@ -0,0 +1 @@
+Test for the get_last_error_message extension function.
diff --git a/test/1957-error-ext/lasterror.cc b/test/1957-error-ext/lasterror.cc
new file mode 100644
index 0000000000..5aa3fbe9fb
--- /dev/null
+++ b/test/1957-error-ext/lasterror.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1957ErrorExt {
+
+using GetLastError = jvmtiError(*)(jvmtiEnv* env, char** msg);
+using ClearLastError = jvmtiError(*)(jvmtiEnv* env);
+
+template <typename T>
+static void Dealloc(T* t) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename ...Rest>
+static void Dealloc(T* t, Rest... rs) {
+ Dealloc(t);
+ Dealloc(rs...);
+}
+
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+ for (jint i = 0; i < n_params; i++) {
+ Dealloc(params[i].name);
+ }
+}
+
+static jvmtiExtensionFunction FindExtensionMethod(JNIEnv* env, const std::string& name) {
+ jint n_ext;
+ jvmtiExtensionFunctionInfo* infos;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+ return nullptr;
+ }
+ jvmtiExtensionFunction res = nullptr;
+ for (jint i = 0; i < n_ext; i++) {
+ jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+ if (strcmp(name.c_str(), cur_info->id) == 0) {
+ res = cur_info->func;
+ }
+ // Cleanup the cur_info
+ DeallocParams(cur_info->params, cur_info->param_count);
+ Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+ }
+ // Cleanup the array.
+ Dealloc(infos);
+ if (res == nullptr) {
+ ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+ env->ThrowNew(rt_exception.get(), (name + " extensions not found").c_str());
+ return nullptr;
+ }
+ return res;
+}
+
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test1957_getLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ GetLastError get_last_error = reinterpret_cast<GetLastError>(
+ FindExtensionMethod(env, "com.android.art.misc.get_last_error_message"));
+ if (get_last_error == nullptr) {
+ return nullptr;
+ }
+ char* msg;
+ if (JvmtiErrorToException(env, jvmti_env, get_last_error(jvmti_env, &msg))) {
+ return nullptr;
+ }
+
+ return env->NewStringUTF(msg);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1957_clearLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ ClearLastError clear_last_error = reinterpret_cast<ClearLastError>(
+ FindExtensionMethod(env, "com.android.art.misc.clear_last_error_message"));
+ if (clear_last_error == nullptr) {
+ return;
+ }
+ JvmtiErrorToException(env, jvmti_env, clear_last_error(jvmti_env));
+}
+
+} // namespace Test1957ErrorExt
+} // namespace art
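
lasterror.cc above is a complete example of the JVMTI extension-function protocol: enumerate `GetExtensionFunctions`, match on the string `id`, deallocate everything JVMTI returned, then cast the function pointer to its documented signature. A condensed sketch of a call site, reusing `GetLastError`, `FindExtensionMethod`, and `Dealloc` as defined above:

// Condensed call-site sketch (types and helpers from lasterror.cc above).
char* msg = nullptr;
GetLastError get_last_error = reinterpret_cast<GetLastError>(
    FindExtensionMethod(env, "com.android.art.misc.get_last_error_message"));
if (get_last_error != nullptr &&
    get_last_error(jvmti_env, &msg) == JVMTI_ERROR_NONE) {
  LOG(INFO) << "last error: " << msg;
  Dealloc(msg);  // Output strings are JVMTI-allocated; the caller frees them.
}
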
diff --git a/test/1957-error-ext/run b/test/1957-error-ext/run
new file mode 100755
index 0000000000..8be0ed4aed
--- /dev/null
+++ b/test/1957-error-ext/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+./default-run "$@" --jvmti
diff --git a/test/1957-error-ext/src/Main.java b/test/1957-error-ext/src/Main.java
new file mode 100644
index 0000000000..7e5e075fd1
--- /dev/null
+++ b/test/1957-error-ext/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1957.run();
+ }
+}
diff --git a/test/1957-error-ext/src/art/Redefinition.java b/test/1957-error-ext/src/art/Redefinition.java
new file mode 100644
index 0000000000..56d2938a01
--- /dev/null
+++ b/test/1957-error-ext/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+ // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1957-error-ext/src/art/Test1957.java b/test/1957-error-ext/src/art/Test1957.java
new file mode 100644
index 0000000000..ffb68be478
--- /dev/null
+++ b/test/1957-error-ext/src/art/Test1957.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1957 {
+
+ static class Transform {
+ public void sayHi() {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+ // the actual printed String, which was making the test inacurately passing
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADUAEQoAAwAKBwAMBwAPAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1iZXJU" +
+ "YWJsZQEAClNvdXJjZUZpbGUBAA1UZXN0MTk1Ny5qYXZhDAAEAAUHABABABZhcnQvVGVzdDE5NTck" +
+ "VHJhbnNmb3JtAQAJVHJhbnNmb3JtAQAMSW5uZXJDbGFzc2VzAQAQamF2YS9sYW5nL09iamVjdAEA" +
+ "DGFydC9UZXN0MTk1NwAgAAIAAwAAAAAAAQAAAAQABQABAAYAAAAdAAEAAQAAAAUqtwABsQAAAAEA" +
+ "BwAAAAYAAQAAAAYAAgAIAAAAAgAJAA4AAAAKAAEAAgALAA0ACA==");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAQiK+oahCb4T18bDge0pSvp7rka4UQ2AY0AwAAcAAAAHhWNBIAAAAAAAAAAIgCAAAN" +
+ "AAAAcAAAAAYAAACkAAAAAQAAALwAAAAAAAAAAAAAAAIAAADIAAAAAQAAANgAAAA8AgAA+AAAABQB" +
+ "AAAcAQAANgEAAEYBAABqAQAAigEAAJ4BAACtAQAAuAEAALsBAADIAQAAzgEAANUBAAABAAAAAgAA" +
+ "AAMAAAAEAAAABQAAAAgAAAAIAAAABQAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAA" +
+ "AAAAAAYAAAB4AgAAWwIAAAAAAAABAAEAAQAAABABAAAEAAAAcBABAAAADgAGAA4ABjxpbml0PgAY" +
+ "TGFydC9UZXN0MTk1NyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTU3OwAiTGRhbHZpay9hbm5vdGF0" +
+ "aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2" +
+ "YS9sYW5nL09iamVjdDsADVRlc3QxOTU3LmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MA" +
+ "BG5hbWUABXZhbHVlAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjox" +
+ "LCJzaGEtMSI6Ijg0NjI2ZDE0MmRiMmY4NzVhY2E2YjVlOWVmYWU3OThjYWQ5ZDlhNTAiLCJ2ZXJz" +
+ "aW9uIjoiMS40LjItZGV2In0AAgIBCxgBAgMCCQQIChcHAAABAACAgAT4AQAAAAAAAAACAAAATAIA" +
+ "AFICAABsAgAAAAAAAAAAAAAAAAAADgAAAAAAAAABAAAAAAAAAAEAAAANAAAAcAAAAAIAAAAGAAAA" +
+ "pAAAAAMAAAABAAAAvAAAAAUAAAACAAAAyAAAAAYAAAABAAAA2AAAAAEgAAABAAAA+AAAAAMgAAAB" +
+ "AAAAEAEAAAIgAAANAAAAFAEAAAQgAAACAAAATAIAAAAgAAABAAAAWwIAAAMQAAACAAAAaAIAAAYg" +
+ "AAABAAAAeAIAAAAQAAABAAAAiAIAAA==");
+
+ public static void run() {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ Transform t = new Transform();
+ System.out.println("LastError is: " + getLastErrorOrException());
+ try {
+ Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ } catch (Throwable e) {
+ System.out.println("Got " + e.getClass().toString() + ": " + e.getMessage());
+ }
+ System.out.println("LastError is: " + getLastErrorOrException());
+ clearLastError();
+ System.out.println("LastError is: " + getLastErrorOrException());
+ }
+
+ public static String getLastErrorOrException() {
+ try {
+ return getLastError();
+ } catch (Throwable t) {
+ return "<call returned error: " + t.getClass().toString() + ": " + t.getMessage() + ">";
+ }
+ }
+ public static native String getLastError();
+ public static native void clearLastError();
+}
diff --git a/test/602-deoptimizeable/info.txt b/test/602-deoptimizeable/info.txt
index 4b6147f7f1..d0952f903b 100644
--- a/test/602-deoptimizeable/info.txt
+++ b/test/602-deoptimizeable/info.txt
@@ -1,8 +1 @@
Test various cases for full/partial-fragment deoptimization.
-
-TODO: we should remove this test as its expectations at point of
-writing was that debuggable apps could run un-deoptimizeable frames
-from the boot image. Today, we deoptimize the boot image as soon as
-we see the app being debuggable. Test 685-deoptimizeable is the proper
-version of this test, but we currently keep the 602 version around to
-try diagnosing a gcstress issue.
diff --git a/test/602-deoptimizeable/src/Main.java b/test/602-deoptimizeable/src/Main.java
index 7a3285d793..46584b0847 100644
--- a/test/602-deoptimizeable/src/Main.java
+++ b/test/602-deoptimizeable/src/Main.java
@@ -33,10 +33,7 @@ class DummyObject {
public int hashCode() {
sHashCodeInvoked = true;
- Main.assertIsManaged();
Main.deoptimizeAll();
- Main.assertIsInterpreted();
- Main.assertCallerIsManaged(); // Caller is from framework code HashMap.
return i % 64;
}
}
@@ -46,13 +43,6 @@ public class Main {
public static native void deoptimizeAll();
public static native void undeoptimizeAll();
- public static native void assertIsInterpreted();
- public static native void assertIsManaged();
- public static native void assertCallerIsInterpreted();
- public static native void assertCallerIsManaged();
- public static native void disableStackFrameAsserts();
- public static native boolean hasOatFile();
- public static native boolean isInterpreted();
public static void execute(Runnable runnable) throws Exception {
Thread t = new Thread(runnable);
@@ -62,19 +52,13 @@ public class Main {
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
- // TODO: Stack frame assertions are irrelevant in this test as we now
- // always run JIT with debuggable. 685-deoptimizeable is the proper version
- // of this test, but we keep this version around to diagnose a gcstress issue.
- disableStackFrameAsserts();
final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
// Single-frame deoptimization that covers partial fragment.
execute(new Runnable() {
public void run() {
int[] arr = new int[3];
- assertIsManaged();
int res = $noinline$run1(arr);
- assertIsManaged(); // Only single frame is deoptimized.
if (res != 79) {
System.out.println("Failure 1!");
System.exit(0);
@@ -87,13 +71,11 @@ public class Main {
public void run() {
try {
int[] arr = new int[3];
- assertIsManaged();
// Use reflection to call $noinline$run2 so that it does
// full-fragment deoptimization since that is an upcall.
Class<?> cls = Class.forName("Main");
Method method = cls.getDeclaredMethod("$noinline$run2", int[].class);
double res = (double)method.invoke(Main.class, arr);
- assertIsManaged(); // Only single frame is deoptimized.
if (res != 79.3d) {
System.out.println("Failure 2!");
System.exit(0);
@@ -107,9 +89,7 @@ public class Main {
// Full-fragment deoptimization.
execute(new Runnable() {
public void run() {
- assertIsManaged();
float res = $noinline$run3B();
- assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
if (res != 0.034f) {
System.out.println("Failure 3!");
System.exit(0);
@@ -123,9 +103,7 @@ public class Main {
execute(new Runnable() {
public void run() {
try {
- assertIsManaged();
map.put(new DummyObject(10), Long.valueOf(100));
- assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
if (map.get(new DummyObject(10)) == null) {
System.out.println("Expected map to contain DummyObject(10)");
}
@@ -147,7 +125,6 @@ public class Main {
}
public static int $noinline$run1(int[] arr) {
- assertIsManaged();
// Prevent inlining.
if (sFlag) {
throw new Error();
@@ -161,18 +138,15 @@ public class Main {
// This causes AIOOBE and triggers deoptimization from compiled code.
arr[3] = 1;
} catch (ArrayIndexOutOfBoundsException e) {
- assertIsInterpreted(); // Single-frame deoptimization triggered.
caught = true;
}
if (!caught) {
System.out.println("Expected exception");
}
- assertIsInterpreted();
return 79;
}
public static double $noinline$run2(int[] arr) {
- assertIsManaged();
// Prevent inlining.
if (sFlag) {
throw new Error();
@@ -186,37 +160,30 @@ public class Main {
// This causes AIOOBE and triggers deoptimization from compiled code.
arr[3] = 1;
} catch (ArrayIndexOutOfBoundsException e) {
- assertIsInterpreted(); // Single-frame deoptimization triggered.
caught = true;
}
if (!caught) {
System.out.println("Expected exception");
}
- assertIsInterpreted();
return 79.3d;
}
public static float $noinline$run3A() {
- assertIsManaged();
// Prevent inlining.
if (sFlag) {
throw new Error();
}
// Deoptimize callers.
deoptimizeAll();
- assertIsInterpreted();
- assertCallerIsInterpreted(); // $noinline$run3B is deoptimizeable.
return 0.034f;
}
public static float $noinline$run3B() {
- assertIsManaged();
// Prevent inlining.
if (sFlag) {
throw new Error();
}
float res = $noinline$run3A();
- assertIsInterpreted();
return res;
}
}
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index 96ef2665cb..52367c7731 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,7 @@
*/
#include "jni.h"
+#include "handle_scope-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
diff --git a/test/687-deopt/expected.txt b/test/687-deopt/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/687-deopt/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/687-deopt/info.txt b/test/687-deopt/info.txt
new file mode 100644
index 0000000000..ef56f51504
--- /dev/null
+++ b/test/687-deopt/info.txt
@@ -0,0 +1,2 @@
+Regression test for instrumentation deopt, which previously did not expect a
+quickened instruction when returning from the instrumentation stub.
diff --git a/test/687-deopt/src/Main.java b/test/687-deopt/src/Main.java
new file mode 100644
index 0000000000..afe90d67f7
--- /dev/null
+++ b/test/687-deopt/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ // JIT-compile the HashMap.hash method, so that the instrumentation
+ // stubs will deoptimize it.
+ ensureJitCompiled(HashMap.class, "hash");
+
+ Main key = new Main();
+ Integer value = new Integer(10);
+ HashMap<Main, Integer> map = new HashMap<>();
+ map.put(key, value);
+ Integer res = map.get(key);
+ if (!value.equals(res)) {
+ throw new Error("Expected 10, got " + res);
+ }
+ }
+
+ public int hashCode() {
+ // The call stack at this point is:
+ // Main.main
+ // HashMap.put
+ // HashMap.hash
+ // Main.hashCode
+ //
+ // The opcode at HashMap.hash is invoke-virtual-quick, which the
+ // instrumentation code did not expect, so it used to fetch the wrong
+ // method index for it.
+ deoptimizeAll();
+ return 42;
+ }
+
+ public static native void deoptimizeAll();
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/905-object-free/src/art/Test905.java b/test/905-object-free/src/art/Test905.java
index 62b6e62669..dddd1aa69f 100644
--- a/test/905-object-free/src/art/Test905.java
+++ b/test/905-object-free/src/art/Test905.java
@@ -16,10 +16,53 @@
package art;
+import java.lang.ref.PhantomReference;
+import java.lang.ref.ReferenceQueue;
import java.util.ArrayList;
import java.util.Arrays;
public class Test905 {
+ // Taken from jdwp tests.
+ public static class MarkerObj {
+ public static int cnt = 0;
+ public void finalize() { cnt++; }
+ }
+ public static class GcMarker {
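+ // Tracks referents via phantom references so waitForGc() can tell
+ // when the GC has actually reclaimed them.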
+ private final ReferenceQueue mQueue;
+ private final ArrayList<PhantomReference> mList;
+ public GcMarker() {
+ mQueue = new ReferenceQueue();
+ mList = new ArrayList<PhantomReference>(3);
+ }
+ public void add(Object referent) {
+ mList.add(new PhantomReference(referent, mQueue));
+ }
+ public void waitForGc() {
+ waitForGc(mList.size());
+ }
+ public void waitForGc(int numberOfExpectedFinalizations) {
+ if (numberOfExpectedFinalizations > mList.size()) {
+ throw new IllegalArgumentException("wait condition will never be met");
+ }
+ // Request finalization of objects, and subsequent reference enqueueing.
+ // Repeat until reference queue reaches expected size.
+ do {
+ System.runFinalization();
+ Runtime.getRuntime().gc();
+ try { Thread.sleep(10); } catch (Exception e) {}
+ } while (isLive(numberOfExpectedFinalizations));
+ }
+ private boolean isLive(int numberOfExpectedFinalizations) {
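+ // Returns true while fewer than the expected number of references
+ // have been enqueued, i.e. while some referents may still be live.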
+ int numberFinalized = 0;
+ for (int i = 0, n = mList.size(); i < n; i++) {
+ if (mList.get(i).isEnqueued()) {
+ numberFinalized++;
+ }
+ }
+ return numberFinalized < numberOfExpectedFinalizations;
+ }
+ }
+
public static void run() throws Exception {
doTest();
}
@@ -44,7 +87,7 @@ public class Test905 {
allocate(l, 1);
l.clear();
- Runtime.getRuntime().gc();
+ gcAndWait();
getAndPrintTags();
System.out.println("---");
@@ -56,12 +99,12 @@ public class Test905 {
}
l.clear();
- Runtime.getRuntime().gc();
+ gcAndWait();
getAndPrintTags();
System.out.println("---");
- Runtime.getRuntime().gc();
+ gcAndWait();
getAndPrintTags();
System.out.println("---");
@@ -80,7 +123,7 @@ public class Test905 {
for (int i = 1; i <= 100000; ++i) {
stressAllocate(i);
}
- Runtime.getRuntime().gc();
+ gcAndWait();
long[] freedTags1 = getCollectedTags(0);
long[] freedTags2 = getCollectedTags(1);
System.out.println("Free counts " + freedTags1.length + " " + freedTags2.length);
@@ -103,6 +146,17 @@ public class Test905 {
System.out.println(Arrays.toString(freedTags));
}
+ private static GcMarker getMarker() {
+ GcMarker m = new GcMarker();
+ m.add(new MarkerObj());
+ return m;
+ }
+
+ private static void gcAndWait() {
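+ // Unlike a bare Runtime.gc() call, this blocks until a freshly
+ // allocated marker object has been collected, so freed-object data
+ // is stable by the time the tags are queried.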
+ GcMarker marker = getMarker();
+ marker.waitForGc();
+ }
+
private static native void setupObjectFreeCallback();
private static native void enableFreeTracking(boolean enable);
private static native long[] getCollectedTags(int index);
diff --git a/test/Android.bp b/test/Android.bp
index 8c1c1bf32e..561f95eb47 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -292,6 +292,7 @@ art_cc_defaults {
"1950-unprepared-transform/unprepared_transform.cc",
"1951-monitor-enter-no-suspend/raw_monitor.cc",
"1953-pop-frame/pop_frame.cc",
+ "1957-error-ext/lasterror.cc",
],
// Use NDK-compatible headers for ctstiagent.
header_libs: [
diff --git a/test/StringLiterals/StringLiterals.java b/test/StringLiterals/StringLiterals.java
new file mode 100644
index 0000000000..9ab37ca3de
--- /dev/null
+++ b/test/StringLiterals/StringLiterals.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class StringLiterals {
+ static class StartupClass {
+ static {
+ System.out.println("Startup init");
+ }
+ }
+
+ static class OtherClass {
+ static {
+ System.out.println("Other class init");
+ }
+ }
+
+ void startUpMethod() {
+ String resource = "abcd.apk";
+ System.out.println("Starting up");
+ System.out.println("Loading " + resource);
+ }
+
+ void otherMethod() {
+ System.out.println("Unexpected error");
+ System.out.println("Shutting down!");
+ }
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index d769b48bfd..7b401609c3 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -948,6 +948,7 @@
"685-deoptimizeable",
"685-shifts",
"686-get-this",
+ "687-deopt",
"706-checker-scheduler",
"707-checker-invalid-profile",
"714-invoke-custom-lambda-metafactory",
@@ -1027,7 +1028,8 @@
"679-locks",
"999-redefine-hiddenapi",
"1000-non-moving-space-stress",
- "1951-monitor-enter-no-suspend"],
+ "1951-monitor-enter-no-suspend",
+ "1957-error-ext"],
"variant": "jvm",
"description": ["Doesn't run on RI."]
},
@@ -1114,11 +1116,5 @@
"variant": "target & gcstress & debug",
"bug": "b/117597114",
"description": ["Looks timing dependent"]
- },
- {
- "tests": ["920-objects"],
- "variant": "jit-on-first-use",
- "bug": "b/117638896",
- "description": ["SIGSEGVs on jit-on-first-use configuration."]
}
]
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 32e5b5525f..dcd910597d 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -104,9 +104,17 @@ $(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(proguard_d
$(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
cp $(PRIVATE_AHAT_SOURCE_PROGUARD_MAP) $@
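+# Select the dalvikvm binary and --32/--64 flag that match the host's
+# preferred bitness.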
+ifeq (true,$(HOST_PREFER_32_BIT))
+ AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm32
+ AHAT_TEST_DALVIKVM_ARG := --32
+else
+ AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm64
+ AHAT_TEST_DALVIKVM_ARG := --64
+endif
+
# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
- $(HOST_OUT_EXECUTABLES)/dalvikvm64 \
+ $(AHAT_TEST_DALVIKVM_DEP) \
$(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
$(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
@@ -114,20 +122,24 @@ AHAT_TEST_DUMP_DEPENDENCIES := \
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+ $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+ -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA)
+$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
$(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+ $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+ -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
# --- ahat-ri-test-dump.jar -------
include $(CLEAR_VARS)
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index b805b307a3..57ccbdc54e 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -77,6 +77,9 @@ public class AnnotationVisitor extends EmptyVisitor {
mStatus.debug("Member has annotation %s for which we have a handler",
a.getAnnotationType());
mAnnotationHandlers.get(a.getAnnotationType()).handleAnnotation(a, context);
+ } else {
+ mStatus.debug("Member has annotation %s for which we do not have a handler",
+ a.getAnnotationType());
}
}
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 53157a323e..870f85a2c3 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -17,7 +17,9 @@
package com.android.class2greylist;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
@@ -41,6 +43,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.function.Predicate;
/**
* Build time tool for extracting a list of members from jar files that have the @UsedByApps
@@ -48,16 +51,20 @@ import java.util.Set;
*/
public class Class2Greylist {
- private static final String GREYLIST_ANNOTATION = "Landroid/annotation/UnsupportedAppUsage;";
+ private static final Set<String> GREYLIST_ANNOTATIONS =
+ ImmutableSet.of(
+ "Landroid/annotation/UnsupportedAppUsage;",
+ "Ldalvik/annotation/compat/UnsupportedAppUsage;");
private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
private final Status mStatus;
private final String mPublicApiListFile;
private final String[] mPerSdkOutputFiles;
private final String mWhitelistFile;
+ private final String mCsvMetadataFile;
private final String[] mJarFiles;
private final GreylistConsumer mOutput;
- private final Set<Integer> mAllowedSdkVersions;
+ private final Predicate<Integer> mAllowedSdkVersions;
private final Set<String> mPublicApis;
@@ -95,10 +102,17 @@ public class Class2Greylist {
.hasArgs(0)
.create('m'));
options.addOption(OptionBuilder
+ .withLongOpt("write-metadata-csv")
+ .hasArgs(1)
+ .withDescription("Specify a file to write API metaadata to. This is a CSV file " +
+ "containing any annotation properties for all members. Do not use in " +
+ "conjunction with --write-greylist or --write-whitelist.")
+ .create('c'));
+ options.addOption(OptionBuilder
.withLongOpt("help")
.hasArgs(0)
.withDescription("Show this help")
- .create("h"));
+ .create('h'));
CommandLineParser parser = new GnuParser();
CommandLine cmd;
@@ -132,6 +146,7 @@ public class Class2Greylist {
cmd.getOptionValue('p', null),
cmd.getOptionValues('g'),
cmd.getOptionValue('w', null),
+ cmd.getOptionValue('c', null),
jarFiles);
c2gl.main();
} catch (IOException e) {
@@ -149,22 +164,33 @@ public class Class2Greylist {
@VisibleForTesting
Class2Greylist(Status status, String publicApiListFile, String[] perSdkLevelOutputFiles,
- String whitelistOutputFile, String[] jarFiles) throws IOException {
+ String whitelistOutputFile, String csvMetadataFile, String[] jarFiles)
+ throws IOException {
mStatus = status;
mPublicApiListFile = publicApiListFile;
mPerSdkOutputFiles = perSdkLevelOutputFiles;
mWhitelistFile = whitelistOutputFile;
+ mCsvMetadataFile = csvMetadataFile;
mJarFiles = jarFiles;
- if (mPerSdkOutputFiles != null) {
+ if (mCsvMetadataFile != null) {
+ mOutput = new CsvGreylistConsumer(mStatus, mCsvMetadataFile);
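+ // CSV metadata output records all members, so accept any
+ // maxTargetSdk value.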
+ mAllowedSdkVersions = x -> true;
+ } else {
Map<Integer, String> outputFiles = readGreylistMap(mStatus, mPerSdkOutputFiles);
mOutput = new FileWritingGreylistConsumer(mStatus, outputFiles, mWhitelistFile);
- mAllowedSdkVersions = outputFiles.keySet();
- } else {
- // TODO remove this once per-SDK greylist support integrated into the build.
- // Right now, mPerSdkOutputFiles is always null as the build never passes the
- // corresponding command lind flags. Once the build is updated, can remove this.
- mOutput = new SystemOutGreylistConsumer();
- mAllowedSdkVersions = new HashSet<>(Arrays.asList(null, 26, 28));
+ mAllowedSdkVersions = new Predicate<Integer>(){
+ @Override
+ public boolean test(Integer i) {
+ return outputFiles.keySet().contains(i);
+ }
+
+ @Override
+ public String toString() {
+ // We rely on this toString() behaviour for readable error messages
+ // in GreylistAnnotationHandler.
+ return Joiner.on(",").join(outputFiles.keySet());
+ }
+ };
}
if (mPublicApiListFile != null) {
@@ -176,10 +202,11 @@ public class Class2Greylist {
}
private Map<String, AnnotationHandler> createAnnotationHandlers() {
- return ImmutableMap.<String, AnnotationHandler>builder()
- .put(GreylistAnnotationHandler.ANNOTATION_NAME,
- new GreylistAnnotationHandler(
- mStatus, mOutput, mPublicApis, mAllowedSdkVersions))
+ Builder<String, AnnotationHandler> builder = ImmutableMap.builder();
+ GreylistAnnotationHandler greylistAnnotationHandler = new GreylistAnnotationHandler(
+ mStatus, mOutput, mPublicApis, mAllowedSdkVersions);
+ GREYLIST_ANNOTATIONS.forEach(a -> builder.put(a, greylistAnnotationHandler));
+ return builder
.put(CovariantReturnTypeHandler.ANNOTATION_NAME,
new CovariantReturnTypeHandler(mOutput, mPublicApis))
.put(CovariantReturnTypeMultiHandler.ANNOTATION_NAME,
diff --git a/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java
new file mode 100644
index 0000000000..7d28b317f0
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java
@@ -0,0 +1,35 @@
+package com.android.class2greylist;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.Map;
+
+public class CsvGreylistConsumer implements GreylistConsumer {
+
+ private final Status mStatus;
+ private final CsvWriter mCsvWriter;
+
+ public CsvGreylistConsumer(Status status, String csvMetadataFile) throws FileNotFoundException {
+ mStatus = status;
+ mCsvWriter = new CsvWriter(
+ new PrintStream(new FileOutputStream(new File(csvMetadataFile))));
+ }
+
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk,
+ Map<String, String> annotationProperties) {
+ annotationProperties.put("signature", signature);
+ mCsvWriter.addRow(annotationProperties);
+ }
+
+ @Override
+ public void whitelistEntry(String signature) {
+ }
+
+ @Override
+ public void close() {
+ mCsvWriter.close();
+ }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java b/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java
new file mode 100644
index 0000000000..3cfec30f23
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java
@@ -0,0 +1,49 @@
+package com.android.class2greylist;
+
+import com.google.common.base.Joiner;
+
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Helper class for writing data to a CSV file.
+ *
+ * This class does not write anything to its output until it is closed, so it can gather a set of
+ * all columns before writing the header row.
+ */
+public class CsvWriter {
+
+ private final PrintStream mOutput;
+ private final ArrayList<Map<String, String>> mContents;
+ private final Set<String> mColumns;
+
+ public CsvWriter(PrintStream out) {
+ mOutput = out;
+ mContents = new ArrayList<>();
+ mColumns = new HashSet<>();
+ }
+
+ public void addRow(Map<String, String> values) {
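+ // Buffer the row and remember any new column names; nothing is
+ // written until close().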
+ mColumns.addAll(values.keySet());
+ mContents.add(values);
+ }
+
+ public void close() {
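+ // Write the sorted header row, then each buffered row, leaving
+ // blanks for columns a row does not define.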
+ List<String> columns = new ArrayList<>(mColumns);
+ columns.sort(Comparator.naturalOrder());
+ mOutput.println(columns.stream().collect(Collectors.joining(",")));
+ for (Map<String, String> row : mContents) {
+ mOutput.println(columns.stream().map(column -> row.getOrDefault(column, "")).collect(
+ Collectors.joining(",")));
+ }
+ mOutput.close();
+ }
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
index bfd23102b9..b3ed1b16f9 100644
--- a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
@@ -44,7 +44,8 @@ public class FileWritingGreylistConsumer implements GreylistConsumer {
}
@Override
- public void greylistEntry(String signature, Integer maxTargetSdk) {
+ public void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationProperties) {
PrintStream p = mSdkToPrintStreamMap.get(maxTargetSdk);
if (p == null) {
mStatus.error("No output file for signature %s with maxTargetSdk of %d", signature,
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
index 460f2c3c22..72c0ea4206 100644
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
@@ -11,6 +11,8 @@ import org.apache.bcel.classfile.FieldOrMethod;
import org.apache.bcel.classfile.Method;
import org.apache.bcel.classfile.SimpleElementValue;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
@@ -27,8 +29,6 @@ import java.util.function.Predicate;
*/
public class GreylistAnnotationHandler implements AnnotationHandler {
- public static final String ANNOTATION_NAME = "Landroid/annotation/UnsupportedAppUsage;";
-
// properties of greylist annotations:
private static final String EXPECTED_SIGNATURE = "expectedSignature";
private static final String MAX_TARGET_SDK = "maxTargetSdk";
@@ -36,7 +36,7 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
private final Status mStatus;
private final Predicate<GreylistMember> mGreylistFilter;
private final GreylistConsumer mGreylistConsumer;
- private final Set<Integer> mValidMaxTargetSdkValues;
+ private final Predicate<Integer> mValidMaxTargetSdkValues;
/**
* Represents a member of a class file (a field or method).
@@ -73,7 +73,7 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
Status status,
GreylistConsumer greylistConsumer,
Set<String> publicApis,
- Set<Integer> validMaxTargetSdkValues) {
+ Predicate<Integer> validMaxTargetSdkValues) {
this(status, greylistConsumer,
member -> !(member.bridge && publicApis.contains(member.signature)),
validMaxTargetSdkValues);
@@ -84,7 +84,7 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
Status status,
GreylistConsumer greylistConsumer,
Predicate<GreylistMember> greylistFilter,
- Set<Integer> validMaxTargetSdkValues) {
+ Predicate<Integer> validMaxTargetSdkValues) {
mStatus = status;
mGreylistConsumer = greylistConsumer;
mGreylistFilter = greylistFilter;
@@ -101,6 +101,7 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
}
String signature = context.getMemberDescriptor();
Integer maxTargetSdk = null;
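+ // Collect all annotation properties as strings so consumers such as
+ // the CSV writer can record them verbatim.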
+ Map<String, String> allValues = new HashMap<String, String>();
for (ElementValuePair property : annotation.getElementValuePairs()) {
switch (property.getNameString()) {
case EXPECTED_SIGNATURE:
@@ -110,9 +111,10 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
maxTargetSdk = verifyAndGetMaxTargetSdk(context, property);
break;
}
+ allValues.put(property.getNameString(), property.getValue().stringifyValue());
}
if (mGreylistFilter.test(new GreylistMember(signature, bridge, maxTargetSdk))) {
- mGreylistConsumer.greylistEntry(signature, maxTargetSdk);
+ mGreylistConsumer.greylistEntry(signature, maxTargetSdk, allValues);
}
}
@@ -131,13 +133,14 @@ public class GreylistAnnotationHandler implements AnnotationHandler {
if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
context.reportError("Expected property %s to be of type int; got %d",
property.getNameString(), property.getValue().getElementValueType());
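+ // Bail out rather than attempt to read an int from a non-int value.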
+ return null;
}
int value = ((SimpleElementValue) property.getValue()).getValueInt();
- if (!mValidMaxTargetSdkValues.contains(value)) {
+ if (!mValidMaxTargetSdkValues.test(value)) {
context.reportError("Invalid value for %s: got %d, expected one of [%s]",
property.getNameString(),
value,
- Joiner.on(",").join(mValidMaxTargetSdkValues));
+ mValidMaxTargetSdkValues);
return null;
}
return value;
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
index fd855e88ed..afded37e66 100644
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
@@ -1,5 +1,7 @@
package com.android.class2greylist;
+import java.util.Map;
+
public interface GreylistConsumer {
/**
* Handle a new greylist entry.
@@ -7,7 +9,8 @@ public interface GreylistConsumer {
* @param signature Signature of the member.
* @param maxTargetSdk maxTargetSdk value from the annotation, or null if none set.
*/
- void greylistEntry(String signature, Integer maxTargetSdk);
+ void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationProperties);
/**
* Handle a new whitelist entry.
diff --git a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
index ad5ad705b4..f86ac6ec68 100644
--- a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
@@ -1,8 +1,11 @@
package com.android.class2greylist;
+import java.util.Map;
+
public class SystemOutGreylistConsumer implements GreylistConsumer {
@Override
- public void greylistEntry(String signature, Integer maxTargetSdk) {
+ public void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationValues) {
System.out.println(signature);
}
diff --git a/tools/class2greylist/test/Android.mk b/tools/class2greylist/test/Android.mk
index 23f4156f6d..f35e74c63d 100644
--- a/tools/class2greylist/test/Android.mk
+++ b/tools/class2greylist/test/Android.mk
@@ -21,7 +21,7 @@ LOCAL_SRC_FILES := $(call all-java-files-under, src)
LOCAL_MODULE := class2greylisttest
-LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host
+LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host objenesis
# tag this module as a cts test artifact
LOCAL_COMPATIBILITY_SUITE := general-tests
@@ -29,4 +29,4 @@ LOCAL_COMPATIBILITY_SUITE := general-tests
include $(BUILD_HOST_JAVA_LIBRARY)
# Build the test APKs using their own makefiles
-include $(call all-makefiles-under,$(LOCAL_PATH)) \ No newline at end of file
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
index 1a4bfb8283..edf2ecd84d 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
@@ -60,7 +60,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
Predicate<GreylistAnnotationHandler.GreylistMember> greylistFilter,
Set<Integer> validMaxTargetSdkValues) {
return new GreylistAnnotationHandler(
- mStatus, mConsumer, greylistFilter, validMaxTargetSdkValues);
+ mStatus, mConsumer, greylistFilter, x -> validMaxTargetSdkValues.contains(x));
}
@Test
@@ -80,7 +80,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -101,7 +101,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
}
@@ -122,7 +122,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
}
@@ -143,7 +143,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -184,7 +184,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
}
@@ -202,7 +202,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
).visit();
assertNoErrors();
- verify(mConsumer, never()).greylistEntry(any(String.class), any());
+ verify(mConsumer, never()).greylistEntry(any(String.class), any(), any());
}
@Test
@@ -222,7 +222,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -252,7 +252,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -284,7 +284,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -322,7 +322,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Base;->method(Ljava/lang/Object;)V");
@@ -355,14 +355,14 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
mStatus,
mConsumer,
publicApis,
- emptySet()));
+ x -> false));
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// The bridge method generated for the above, is a public API so should be excluded
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -384,7 +384,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
}
@@ -423,7 +423,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
- verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture(), any());
assertThat(maxTargetSdk.getValue()).isEqualTo(1);
}
@@ -445,7 +445,7 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
- verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture(), any());
assertThat(maxTargetSdk.getValue()).isEqualTo(null);
}
@@ -468,4 +468,37 @@ public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
verify(mStatus, times(1)).error(any(), any());
}
+ @Test
+ public void testAnnotationPropertiesIntoMap() throws IOException {
+ mJavac.addSource("annotation.Anno2", Joiner.on('\n').join(
+ "package annotation;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
+ "@Retention(CLASS)",
+ "public @interface Anno2 {",
+ " String expectedSignature() default \"\";",
+ " int maxTargetSdk() default Integer.MAX_VALUE;",
+ " long trackingBug() default 0;",
+ "}"));
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno2;",
+ "public class Class {",
+ " @Anno2(maxTargetSdk=2, trackingBug=123456789)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of("Lannotation/Anno2;", createGreylistHandler(x -> true,
+ ImmutableSet.of(2)))
+ ).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<Map<String, String>> properties = ArgumentCaptor.forClass(Map.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), any(), properties.capture());
+ assertThat(properties.getValue()).containsExactly(
+ "maxTargetSdk", "2",
+ "trackingBug", "123456789");
+ }
+
}
diff --git a/tools/cpp-define-generator/asm_defines.cc b/tools/cpp-define-generator/asm_defines.cc
index c105c1a7ce..b79e1ae26e 100644
--- a/tools/cpp-define-generator/asm_defines.cc
+++ b/tools/cpp-define-generator/asm_defines.cc
@@ -31,6 +31,6 @@
#define ASM_DEFINE(NAME, EXPR) \
void AsmDefineHelperFor_##NAME() { \
asm volatile("\n.ascii \">>" #NAME " %0 %1<<\"" \
- :: "i" (static_cast<int64_t>(EXPR)), "i" (EXPR < 0 ? 1 : 0)); \
+ :: "i" (static_cast<int64_t>(EXPR)), "i" ((EXPR) < 0 ? 1 : 0)); \
}
#include "asm_defines.def"
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 2324f5168e..6443a0c517 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -22,6 +22,7 @@
#include "dex/modifiers.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
+#include "interpreter/mterp/mterp.h"
#include "jit/jit.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
@@ -50,6 +51,10 @@ ASM_DEFINE(JIT_HOTNESS_DISABLE,
art::jit::kJitHotnessDisabled)
ASM_DEFINE(MIN_LARGE_OBJECT_THRESHOLD,
art::gc::Heap::kMinLargeObjectThreshold)
+ASM_DEFINE(MTERP_HANDLER_SIZE,
+ art::interpreter::kMterpHandlerSize)
+ASM_DEFINE(MTERP_HANDLER_SIZE_LOG2,
+ art::WhichPowerOf2(art::interpreter::kMterpHandlerSize))
ASM_DEFINE(OBJECT_ALIGNMENT_MASK,
art::kObjectAlignment - 1)
ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED,
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 2dd90fae3f..8c91dc8a87 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -18,16 +18,12 @@
#include "thread.h"
#endif
-ASM_DEFINE(THREAD_ALT_IBASE_OFFSET,
- art::Thread::MterpAltIBaseOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_CARD_TABLE_OFFSET,
art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_CHECKPOINT_REQUEST,
art::kCheckpointRequest)
ASM_DEFINE(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<art::kRuntimePointerSize>().Int32Value())
-ASM_DEFINE(THREAD_DEFAULT_IBASE_OFFSET,
- art::Thread::MterpDefaultIBaseOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_EMPTY_CHECKPOINT_REQUEST,
art::kEmptyCheckpointRequest)
ASM_DEFINE(THREAD_EXCEPTION_OFFSET,
@@ -60,5 +56,7 @@ ASM_DEFINE(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST,
art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
ASM_DEFINE(THREAD_SUSPEND_REQUEST,
art::kSuspendRequest)
+ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
+ art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
diff --git a/tools/jfuzz/jfuzz.cc b/tools/jfuzz/jfuzz.cc
index a97a99ce4b..b8a646d8fb 100644
--- a/tools/jfuzz/jfuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -562,11 +562,11 @@ class JFuzz {
case 1:
if (emitArrayVariable(tp))
return;
- // FALL-THROUGH
+ [[fallthrough]];
case 2:
if (emitLocalVariable(tp))
return;
- // FALL-THROUGH
+ [[fallthrough]];
default:
emitFieldVariable(tp);
break;
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
index 00ef6563a9..d02e549963 100644
--- a/tools/ti-fast/tifast.cc
+++ b/tools/ti-fast/tifast.cc
@@ -574,7 +574,7 @@ static jvmtiEventCallbacks kLogCallbacks {
static std::string EventToName(jvmtiEvent desired_event) {
#define CHECK_NAME(name, event, args, argnames) \
- if (desired_event == event) { \
+ if (desired_event == (event)) { \
return #name; \
}
FOR_ALL_SUPPORTED_EVENTS(CHECK_NAME);
diff --git a/tools/titrace/instruction_decoder.cc b/tools/titrace/instruction_decoder.cc
index 7f8b296bc3..89904b32cd 100644
--- a/tools/titrace/instruction_decoder.cc
+++ b/tools/titrace/instruction_decoder.cc
@@ -484,7 +484,7 @@ class DexInstructionDecoder : public InstructionDecoder {
public:
enum Opcode {
#define MAKE_ENUM_DEFINITION(opcode, instruction_code, name, format, index, flags, extended_flags, verifier_flags) \
- instruction_code = opcode,
+ instruction_code = opcode, /* NOLINT */
DEX_INSTRUCTION_LIST(MAKE_ENUM_DEFINITION)
#undef MAKE_ENUM_DEFINITION
};
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index e925e1da17..1fca7e1ae7 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -162,7 +162,7 @@ int VeriFlowAnalysis::GetBranchFlags(const Instruction& instruction) const {
case Instruction::IF_##cond##Z: { \
RegisterValue val = GetRegister(instruction.VRegA()); \
if (val.IsConstant()) { \
- if (val.GetConstant() op 0) { \
+ if (val.GetConstant() op 0) { /* NOLINT */ \
return Instruction::kBranch; \
} else { \
return Instruction::kContinue; \