-rw-r--r-- build/Android.oat.mk | 4
-rw-r--r-- compiler/Android.bp | 1
-rw-r--r-- compiler/common_compiler_test.cc | 4
-rw-r--r-- compiler/dex/dex_to_dex_decompiler.cc | 2
-rw-r--r-- compiler/driver/compiler_driver.cc | 92
-rw-r--r-- compiler/driver/compiler_driver_test.cc | 5
-rw-r--r-- compiler/intrinsics_list.h | 7
-rw-r--r-- compiler/oat_test.cc | 23
-rw-r--r-- compiler/oat_writer.cc | 41
-rw-r--r-- compiler/optimizing/code_generator.cc | 5
-rw-r--r-- compiler/optimizing/code_generator_arm64.cc | 4
-rw-r--r-- compiler/optimizing/code_generator_mips.cc | 90
-rw-r--r-- compiler/optimizing/code_generator_mips.h | 1
-rw-r--r-- compiler/optimizing/code_generator_mips64.cc | 185
-rw-r--r-- compiler/optimizing/code_sinking.cc | 403
-rw-r--r-- compiler/optimizing/code_sinking.h | 48
-rw-r--r-- compiler/optimizing/common_dominator.h | 6
-rw-r--r-- compiler/optimizing/induction_var_range.cc | 107
-rw-r--r-- compiler/optimizing/induction_var_range.h | 16
-rw-r--r-- compiler/optimizing/induction_var_range_test.cc | 112
-rw-r--r-- compiler/optimizing/inliner.cc | 28
-rw-r--r-- compiler/optimizing/inliner.h | 7
-rw-r--r-- compiler/optimizing/intrinsics.cc | 109
-rw-r--r-- compiler/optimizing/intrinsics.h | 33
-rw-r--r-- compiler/optimizing/intrinsics_arm.cc | 70
-rw-r--r-- compiler/optimizing/intrinsics_arm.h | 1
-rw-r--r-- compiler/optimizing/intrinsics_arm64.cc | 73
-rw-r--r-- compiler/optimizing/intrinsics_arm64.h | 4
-rw-r--r-- compiler/optimizing/intrinsics_arm_vixl.cc | 72
-rw-r--r-- compiler/optimizing/intrinsics_arm_vixl.h | 1
-rw-r--r-- compiler/optimizing/intrinsics_mips.cc | 62
-rw-r--r-- compiler/optimizing/intrinsics_mips64.cc | 61
-rw-r--r-- compiler/optimizing/intrinsics_x86.cc | 59
-rw-r--r-- compiler/optimizing/intrinsics_x86_64.cc | 60
-rw-r--r-- compiler/optimizing/load_store_elimination.cc | 109
-rw-r--r-- compiler/optimizing/loop_optimization.cc | 3
-rw-r--r-- compiler/optimizing/loop_optimization.h | 9
-rw-r--r-- compiler/optimizing/loop_optimization_test.cc | 2
-rw-r--r-- compiler/optimizing/nodes.h | 3
-rw-r--r-- compiler/optimizing/optimizing_compiler.cc | 25
-rw-r--r-- compiler/optimizing/optimizing_compiler_stats.h | 29
-rw-r--r-- compiler/optimizing/side_effects_analysis.h | 6
-rw-r--r-- compiler/utils/arm/assembler_arm_vixl.h | 10
-rw-r--r-- compiler/utils/mips/assembler_mips.cc | 4
-rw-r--r-- compiler/utils/mips/assembler_mips.h | 36
-rw-r--r-- compiler/utils/mips64/assembler_mips64.cc | 161
-rw-r--r-- compiler/utils/mips64/assembler_mips64.h | 224
-rw-r--r-- compiler/utils/mips64/assembler_mips64_test.cc | 18
-rw-r--r-- compiler/utils/x86/assembler_x86.cc | 8
-rw-r--r-- compiler/utils/x86/assembler_x86.h | 1
-rw-r--r-- compiler/utils/x86/assembler_x86_test.cc | 8
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64.cc | 9
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64.h | 1
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64_test.cc | 4
-rw-r--r-- dex2oat/dex2oat.cc | 19
-rw-r--r-- dex2oat/dex2oat_test.cc | 202
-rw-r--r-- dexlayout/dex_ir.cc | 31
-rw-r--r-- dexlayout/dexlayout.cc | 115
-rw-r--r-- dexlayout/dexlayout.h | 5
-rw-r--r-- dexlayout/dexlayout_test.cc | 208
-rw-r--r-- profman/profile_assistant_test.cc | 219
-rw-r--r-- profman/profman.cc | 242
-rw-r--r-- runtime/Android.bp | 2
-rw-r--r-- runtime/arch/arm64/instruction_set_features_arm64.cc | 1
-rw-r--r-- runtime/arch/mips/quick_entrypoints_mips.S | 3
-rw-r--r-- runtime/arch/mips64/asm_support_mips64.S | 6
-rw-r--r-- runtime/art_method-inl.h | 14
-rw-r--r-- runtime/art_method.h | 2
-rw-r--r-- runtime/asm_support.h | 2
-rw-r--r-- runtime/base/arena_allocator.h | 1
-rw-r--r-- runtime/base/bit_utils.h | 5
-rw-r--r-- runtime/base/unix_file/fd_file.cc | 2
-rw-r--r-- runtime/bytecode_utils.h (renamed from compiler/optimizing/bytecode_utils.h) | 6
-rw-r--r-- runtime/class_linker.cc | 85
-rw-r--r-- runtime/class_linker.h | 3
-rw-r--r-- runtime/class_linker_test.cc | 2
-rw-r--r-- runtime/dex_file.cc | 8
-rw-r--r-- runtime/gc/accounting/read_barrier_table.h | 2
-rw-r--r-- runtime/gc/collector/concurrent_copying.cc | 27
-rw-r--r-- runtime/gc/collector/semi_space.cc | 4
-rw-r--r-- runtime/gc/collector_type.h | 2
-rw-r--r-- runtime/gc/gc_cause.cc | 1
-rw-r--r-- runtime/gc/gc_cause.h | 4
-rw-r--r-- runtime/gc/heap.cc | 65
-rw-r--r-- runtime/gc/space/region_space-inl.h | 16
-rw-r--r-- runtime/gc/space/region_space.cc | 103
-rw-r--r-- runtime/gc/space/region_space.h | 50
-rw-r--r-- runtime/hprof/hprof.cc | 4
-rw-r--r-- runtime/image.cc | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sget_wide.S | 4
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm/op_sput_wide.S | 10
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sget_wide.S | 4
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/arm64/op_sput_wide.S | 10
-rw-r--r-- runtime/interpreter/mterp/config_arm | 4
-rw-r--r-- runtime/interpreter/mterp/config_arm64 | 4
-rw-r--r-- runtime/interpreter/mterp/config_mips | 4
-rw-r--r-- runtime/interpreter/mterp/config_mips64 | 4
-rw-r--r-- runtime/interpreter/mterp/config_x86 | 4
-rw-r--r-- runtime/interpreter/mterp/config_x86_64 | 4
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sget_wide.S | 4
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips/op_sput_wide.S | 10
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sget_wide.S | 4
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/mips64/op_sput_wide.S | 10
-rw-r--r-- runtime/interpreter/mterp/mterp.cc | 206
-rw-r--r-- runtime/interpreter/mterp/out/mterp_arm.S | 72
-rw-r--r-- runtime/interpreter/mterp/out/mterp_arm64.S | 72
-rw-r--r-- runtime/interpreter/mterp/out/mterp_mips.S | 74
-rw-r--r-- runtime/interpreter/mterp/out/mterp_mips64.S | 80
-rw-r--r-- runtime/interpreter/mterp/out/mterp_x86.S | 78
-rw-r--r-- runtime/interpreter/mterp/out/mterp_x86_64.S | 76
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sget_wide.S | 4
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86/op_sput_wide.S | 10
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_object.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sget_wide.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput_boolean.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput_byte.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput_char.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput_short.S | 2
-rw-r--r-- runtime/interpreter/mterp/x86_64/op_sput_wide.S | 8
-rw-r--r-- runtime/jit/jit.cc | 8
-rw-r--r-- runtime/jit/jit.h | 8
-rw-r--r-- runtime/jit/jit_code_cache.cc | 13
-rw-r--r-- runtime/jit/jit_code_cache.h | 10
-rw-r--r-- runtime/jit/profile_compilation_info.cc | 137
-rw-r--r-- runtime/jit/profile_compilation_info.h | 40
-rw-r--r-- runtime/jit/profile_compilation_info_test.cc | 139
-rw-r--r-- runtime/jit/profile_saver.cc | 158
-rw-r--r-- runtime/jit/profile_saver.h | 25
-rw-r--r-- runtime/mem_map.cc | 48
-rw-r--r-- runtime/mem_map.h | 7
-rw-r--r-- runtime/mem_map_test.cc | 104
-rw-r--r-- runtime/mirror/string.h | 2
-rw-r--r-- runtime/native/dalvik_system_VMRuntime.cc | 24
-rw-r--r-- runtime/native/dalvik_system_ZygoteHooks.cc | 87
-rw-r--r-- runtime/native/java_lang_Object.cc | 7
-rw-r--r-- runtime/native/java_lang_System.cc | 10
-rw-r--r-- runtime/native/java_lang_Void.cc | 40
-rw-r--r-- runtime/native/java_lang_Void.h | 28
-rw-r--r-- runtime/non_debuggable_classes.cc | 42
-rw-r--r-- runtime/non_debuggable_classes.h | 42
-rw-r--r-- runtime/oat_file_assistant.cc | 110
-rw-r--r-- runtime/oat_file_assistant.h | 9
-rw-r--r-- runtime/oat_file_manager.cc | 3
-rw-r--r-- runtime/openjdkjvmti/OpenjdkJvmTi.cc | 338
-rw-r--r-- runtime/openjdkjvmti/art_jvmti.h | 11
-rw-r--r-- runtime/openjdkjvmti/ti_field.cc | 1
-rw-r--r-- runtime/openjdkjvmti/ti_heap.cc | 661
-rw-r--r-- runtime/openjdkjvmti/ti_phase.cc | 1
-rw-r--r-- runtime/openjdkjvmti/ti_redefine.cc | 277
-rw-r--r-- runtime/openjdkjvmti/ti_redefine.h | 14
-rw-r--r-- runtime/openjdkjvmti/ti_search.cc | 1
-rw-r--r-- runtime/openjdkjvmti/transform.cc | 13
-rw-r--r-- runtime/openjdkjvmti/transform.h | 8
-rw-r--r-- runtime/runtime.cc | 19
-rw-r--r-- runtime/runtime.h | 5
-rw-r--r-- test/152-dead-large-object/expected.txt (renamed from test/577-profile-foreign-dex/expected.txt) | 0
-rw-r--r-- test/152-dead-large-object/info.txt | 1
-rw-r--r-- test/152-dead-large-object/src/Main.java (renamed from test/577-profile-foreign-dex/src-ex/OtherDex.java) | 13
-rw-r--r-- test/154-gc-loop/src/Main.java | 2
-rw-r--r-- test/157-void-class/expected.txt | 2
-rw-r--r-- test/157-void-class/info.txt | 0
-rwxr-xr-x[-rw-r--r--] test/157-void-class/run (renamed from test/577-profile-foreign-dex/run) | 14
-rw-r--r-- test/157-void-class/src/Main.java | 57
-rw-r--r-- test/530-checker-lse/src/Main.java | 85
-rw-r--r-- test/532-checker-nonnull-arrayset/src/Main.java | 4
-rw-r--r-- test/536-checker-intrinsic-optimization/src/Main.java | 31
-rw-r--r-- test/577-profile-foreign-dex/info.txt | 1
-rw-r--r-- test/577-profile-foreign-dex/src/Main.java | 175
-rw-r--r-- test/595-profile-saving/src/Main.java | 12
-rw-r--r-- test/623-checker-loop-regressions/src/Main.java | 24
-rw-r--r-- test/639-checker-code-sinking/expected.txt | 3
-rw-r--r-- test/639-checker-code-sinking/info.txt | 1
-rw-r--r-- test/639-checker-code-sinking/src/Main.java | 355
-rw-r--r-- test/640-checker-integer-valueof/expected.txt | 0
-rw-r--r-- test/640-checker-integer-valueof/info.txt | 1
-rw-r--r-- test/640-checker-integer-valueof/src/Main.java | 93
-rw-r--r-- test/903-hello-tagging/expected.txt | 1
-rw-r--r-- test/903-hello-tagging/src/Main.java | 8
-rw-r--r-- test/903-hello-tagging/tagging.cc | 56
-rw-r--r-- test/906-iterate-heap/expected.txt | 30
-rw-r--r-- test/906-iterate-heap/iterate_heap.cc | 87
-rw-r--r-- test/906-iterate-heap/src/Main.java | 80
-rw-r--r-- test/913-heaps/expected.txt | 275
-rw-r--r-- test/913-heaps/heaps.cc | 98
-rwxr-xr-x test/913-heaps/run | 2
-rw-r--r-- test/913-heaps/src/Main.java | 120
-rw-r--r-- test/921-hello-failure/expected.txt | 3
-rw-r--r-- test/921-hello-failure/src/Main.java | 1
-rw-r--r-- test/921-hello-failure/src/Unmodifiable.java | 52
-rw-r--r-- test/Android.run-test.mk | 14
-rw-r--r-- test/ProfileTestMultiDex/Main.java | 42
-rw-r--r-- test/ProfileTestMultiDex/Second.java | 5
-rw-r--r-- test/ProfileTestMultiDex/main.jpp | 22
-rw-r--r-- test/ProfileTestMultiDex/main.list | 6
-rw-r--r-- test/common/runtime_state.cc | 3
-rwxr-xr-x test/etc/default-build | 6
-rwxr-xr-x test/etc/run-test-jar | 22
-rw-r--r-- test/knownfailures.json | 18
-rwxr-xr-x test/run-test | 24
-rw-r--r-- test/testrunner/env.py | 2
-rwxr-xr-x test/testrunner/testrunner.py | 22
-rw-r--r-- tools/libcore_failures.txt | 50
-rwxr-xr-x tools/setup-buildbot-device.sh | 24
264 files changed, 7169 insertions, 2051 deletions
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index f53740e6e0..c733febd06 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -109,7 +109,7 @@ $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
$$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
- --host --android-root=$$(HOST_OUT) --include-patch-information \
+ --host --android-root=$$(HOST_OUT) \
--generate-debug-info --generate-build-id --compile-pic \
$$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
@@ -212,7 +212,7 @@ $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
--instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --android-root=$$(PRODUCT_OUT)/system --include-patch-information \
+ --android-root=$$(PRODUCT_OUT)/system \
--generate-debug-info --generate-build-id --compile-pic \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index f5589cd7a3..1ee2a21b18 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -52,6 +52,7 @@ art_cc_defaults {
"optimizing/cha_guard_optimization.cc",
"optimizing/code_generator.cc",
"optimizing/code_generator_utils.cc",
+ "optimizing/code_sinking.cc",
"optimizing/constant_folding.cc",
"optimizing/dead_code_elimination.cc",
"optimizing/escape.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index d89cdbabf8..9a45379a05 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -52,10 +52,10 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
}
- if (compiled_method != nullptr) {
+ // If the code size is 0 it means the method was skipped due to profile guided compilation.
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) {
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t code_size = code.size();
- CHECK_NE(0u, code_size);
ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
uint32_t vmap_table_offset = vmap_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size();
diff --git a/compiler/dex/dex_to_dex_decompiler.cc b/compiler/dex/dex_to_dex_decompiler.cc
index bfd485d126..53601033da 100644
--- a/compiler/dex/dex_to_dex_decompiler.cc
+++ b/compiler/dex/dex_to_dex_decompiler.cc
@@ -20,7 +20,7 @@
#include "base/mutex.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "optimizing/bytecode_utils.h"
+#include "bytecode_utils.h"
namespace art {
namespace optimizer {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7e91453741..a5e4cb0877 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2283,7 +2283,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
ATRACE_CALL();
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2343,23 +2343,32 @@ class InitializeClassVisitor : public CompilationVisitor {
// mode which prevents the GC from visiting objects modified during the transaction.
// Ensure GC is not run so don't access freed objects when aborting transaction.
- ScopedAssertNoThreadSuspension ants("Transaction end");
- runtime->ExitTransactionMode();
+ {
+ ScopedAssertNoThreadSuspension ants("Transaction end");
+ runtime->ExitTransactionMode();
+
+ if (!success) {
+ CHECK(soa.Self()->IsExceptionPending());
+ mirror::Throwable* exception = soa.Self()->GetException();
+ VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+ << exception->Dump();
+ std::ostream* file_log = manager_->GetCompiler()->
+ GetCompilerOptions().GetInitFailureOutput();
+ if (file_log != nullptr) {
+ *file_log << descriptor << "\n";
+ *file_log << exception->Dump() << "\n";
+ }
+ soa.Self()->ClearException();
+ transaction.Rollback();
+ CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ }
+ }
if (!success) {
- CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException();
- VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
- << exception->Dump();
- std::ostream* file_log = manager_->GetCompiler()->
- GetCompilerOptions().GetInitFailureOutput();
- if (file_log != nullptr) {
- *file_log << descriptor << "\n";
- *file_log << exception->Dump() << "\n";
- }
- soa.Self()->ClearException();
- transaction.Rollback();
- CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ // On failure, still intern strings of static fields and seen in <clinit>, as these
+ // will be created in the zygote. This is separated from the transaction code just
+ // above as we will allocate strings, so must be allowed to suspend.
+ InternStrings(klass, class_loader);
}
}
}
@@ -2375,6 +2384,57 @@ class InitializeClassVisitor : public CompilationVisitor {
}
private:
+ void InternStrings(Handle<mirror::Class> klass, Handle<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage());
+ DCHECK(klass->IsVerified());
+ DCHECK(!klass->IsInitialized());
+
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = manager_->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ ClassLinker* class_linker = manager_->GetClassLinker();
+
+ // Check encoded final field values for strings and intern.
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ &class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == annotations::RuntimeEncodedStaticFieldValueIterator::kString) {
+ // Resolve the string. This will intern the string.
+ art::ObjPtr<mirror::String> resolved = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(value_it.GetJavaValue().i), h_dex_cache);
+ CHECK(resolved != nullptr);
+ }
+ }
+
+ // Intern strings seen in <clinit>.
+ ArtMethod* clinit = klass->FindClassInitializer(class_linker->GetImagePointerSize());
+ if (clinit != nullptr) {
+ const DexFile::CodeItem* code_item = clinit->GetCodeItem();
+ DCHECK(code_item != nullptr);
+ const Instruction* inst = Instruction::At(code_item->insns_);
+
+ const uint32_t insns_size = code_item->insns_size_in_code_units_;
+ for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
+ if (inst->Opcode() == Instruction::CONST_STRING) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_21c()), h_dex_cache);
+ CHECK(s != nullptr);
+ } else if (inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
+ ObjPtr<mirror::String> s = class_linker->ResolveString(
+ *dex_file, dex::StringIndex(inst->VRegB_31c()), h_dex_cache);
+ CHECK(s != nullptr);
+ }
+ dex_pc += inst->SizeInCodeUnits();
+ inst = inst->Next();
+ }
+ }
+ }
+
const ParallelCompilationManager* const manager_;
};
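
Editorial aside on the transaction hunk above: the change wraps ScopedAssertNoThreadSuspension in a nested block so the guard is destroyed before InternStrings() runs, since interning allocates and must therefore be allowed to suspend. A minimal standalone sketch of that RAII-scoping idea, with simplified stand-in types (not ART code):

    #include <cassert>

    struct ScopedNoSuspend {
      explicit ScopedNoSuspend(bool* flag) : flag_(flag) { *flag_ = true; }
      ~ScopedNoSuspend() { *flag_ = false; }
      bool* flag_;
    };

    void WorkThatMaySuspend(bool no_suspend_active) {
      assert(!no_suspend_active);  // must not run while the guard is alive
    }

    void EndTransaction(bool success) {
      bool no_suspend_active = false;
      {
        ScopedNoSuspend guard(&no_suspend_active);
        // ... exit transaction mode, roll back on failure ...
      }  // guard destroyed here, suspension allowed again
      if (!success) {
        WorkThatMaySuspend(no_suspend_active);  // safe: outside the guarded block
      }
    }

    int main() { EndTransaction(/*success=*/false); return 0; }
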
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 97954f3c29..562f97b3ae 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -240,9 +240,8 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation());
- profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 1);
- profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 2);
+ profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 1);
+ profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 2);
}
return &profile_info_;
}
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 9bd25d8484..63c23cb074 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -24,6 +24,10 @@
// Note: adding a new intrinsic requires an art image version change,
// as the modifiers flag for some ArtMethods will need to be changed.
+// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME.
+// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
+// (kNoSideEffects), and it is also OK to remove it if it's unused.
+
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -149,7 +153,8 @@
V(UnsafeLoadFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "loadFence", "()V") \
V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \
V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
- V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;")
+ V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
+ V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;")
#endif // ART_COMPILER_INTRINSICS_LIST_H_
#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 66111f6e23..e2233e4bbd 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -265,6 +265,7 @@ class OatTest : public CommonCompilerTest {
void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
void TestZipFileInput(bool verify);
+ void TestZipFileInputWithEmptyDex();
std::unique_ptr<const InstructionSetFeatures> insn_features_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -821,6 +822,28 @@ TEST_F(OatTest, ZipFileInputCheckVerifier) {
TestZipFileInput(true);
}
+void OatTest::TestZipFileInputWithEmptyDex() {
+ ScratchFile zip_file;
+ ZipBuilder zip_builder(zip_file.GetFile());
+ bool success = zip_builder.AddFile("classes.dex", nullptr, 0);
+ ASSERT_TRUE(success);
+ success = zip_builder.Finish();
+ ASSERT_TRUE(success) << strerror(errno);
+
+ SafeMap<std::string, std::string> key_value_store;
+ key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
+ std::vector<const char*> input_filenames { zip_file.GetFilename().c_str() }; // NOLINT [readability/braces] [4]
+ ScratchFile oat_file, vdex_file(oat_file, ".vdex");
+ std::unique_ptr<ProfileCompilationInfo> profile_compilation_info(new ProfileCompilationInfo());
+ success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
+ key_value_store, /*verify*/false, profile_compilation_info.get());
+ ASSERT_FALSE(success);
+}
+
+TEST_F(OatTest, ZipFileInputWithEmptyDex) {
+ TestZipFileInputWithEmptyDex();
+}
+
TEST_F(OatTest, UpdateChecksum) {
InstructionSet insn_set = kX86;
std::string error_msg;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0ea11255a8..8ab44d2c19 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -104,6 +104,13 @@ inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod&
// Defines the location of the raw dex file to write.
class OatWriter::DexFileSource {
public:
+ enum Type {
+ kNone,
+ kZipEntry,
+ kRawFile,
+ kRawData,
+ };
+
explicit DexFileSource(ZipEntry* zip_entry)
: type_(kZipEntry), source_(zip_entry) {
DCHECK(source_ != nullptr);
@@ -119,6 +126,7 @@ class OatWriter::DexFileSource {
DCHECK(source_ != nullptr);
}
+ Type GetType() const { return type_; }
bool IsZipEntry() const { return type_ == kZipEntry; }
bool IsRawFile() const { return type_ == kRawFile; }
bool IsRawData() const { return type_ == kRawData; }
@@ -147,13 +155,6 @@ class OatWriter::DexFileSource {
}
private:
- enum Type {
- kNone,
- kZipEntry,
- kRawFile,
- kRawData,
- };
-
Type type_;
const void* source_;
};
@@ -2259,16 +2260,38 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
std::unique_ptr<MemMap> mem_map(
zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+ if (mem_map == nullptr) {
+ LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
+ return false;
+ }
dex_file = DexFile::Open(location,
zip_entry->GetCrc32(),
std::move(mem_map),
/* verify */ true,
/* verify_checksum */ true,
&error_msg);
- } else {
- DCHECK(oat_dex_file->source_.IsRawFile());
+ } else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
+ } else {
+ // The source data is a vdex file.
+ CHECK(oat_dex_file->source_.IsRawData())
+ << static_cast<size_t>(oat_dex_file->source_.GetType());
+ const uint8_t* raw_dex_file = oat_dex_file->source_.GetRawData();
+ // Note: The raw data has already been checked to contain the header
+ // and all the data that the header specifies as the file size.
+ DCHECK(raw_dex_file != nullptr);
+ DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation()));
+ const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
+ // Since the source may have had its layout changed, don't verify the checksum.
+ dex_file = DexFile::Open(raw_dex_file,
+ header->file_size_,
+ location,
+ oat_dex_file->dex_file_location_checksum_,
+ nullptr,
+ /* verify */ true,
+ /* verify_checksum */ false,
+ &error_msg);
}
if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8dd423fcbb..424b8507fb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -861,8 +861,11 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
size_t count = stack_map_stream_.GetNumberOfStackMaps();
+ if (count == 0) {
+ return false;
+ }
CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
- return (count > 0) && (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
+ return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
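
Editorial note on the HasStackMapAtCurrentPc() fix above: size_t is unsigned, and the old code called GetStackMap(count - 1) before the short-circuiting (count > 0) test, so with count == 0 the index wrapped to SIZE_MAX. A minimal standalone illustration of the hazard and the guard (plain C++, not ART code):

    #include <cstdint>
    #include <vector>

    bool LastEntryEndsAt(const std::vector<uint32_t>& native_pcs, uint32_t pc) {
      size_t count = native_pcs.size();
      if (count == 0) {
        return false;  // without this, native_pcs[count - 1] indexes element SIZE_MAX
      }
      return native_pcs[count - 1] == pc;
    }

    int main() {
      std::vector<uint32_t> empty;
      return LastEntryEndsAt(empty, 0u) ? 1 : 0;  // returns 0: the guard short-circuits
    }
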
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index edccbd4904..18c95b3c41 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4094,7 +4094,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4107,7 +4107,7 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c9dde7cc55..51dd898a81 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1899,9 +1899,9 @@ void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
}
}
-auto InstructionCodeGeneratorMIPS::GetImplicitNullChecker(HInstruction* instruction) {
- auto null_checker = [this, instruction]() {
- this->codegen_->MaybeRecordImplicitNullCheck(instruction);
+static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
+ auto null_checker = [codegen, instruction]() {
+ codegen->MaybeRecordImplicitNullCheck(instruction);
};
return null_checker;
}
@@ -1911,7 +1911,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Primitive::Type type = instruction->GetType();
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
@@ -2073,6 +2073,11 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
+
+ if (type == Primitive::kPrimNot) {
+ Register out = locations->Out().AsRegister<Register>();
+ __ MaybeUnpoisonHeapReference(out);
+ }
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
@@ -2143,7 +2148,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Register base_reg = index.IsConstant() ? obj : TMP;
switch (value_type) {
@@ -2200,7 +2205,31 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
DCHECK(!needs_write_barrier);
} else {
Register value = value_location.AsRegister<Register>();
- __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Use Sw() instead of StoreToOffset() in order to be able to
+ // hold the poisoned reference in AT and thus avoid allocating
+ // yet another temporary register.
+ if (index.IsConstant()) {
+ if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
+ int16_t low = Low16Bits(data_offset);
+ uint32_t high = data_offset - low;
+ __ Addiu32(TMP, obj, high);
+ base_reg = TMP;
+ data_offset = low;
+ }
+ } else {
+ DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
+ }
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, base_reg, data_offset);
+ null_checker();
+ } else {
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ }
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
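
Editorial note on the Addiu32/Sw split above: the low half is interpreted as a signed 16-bit immediate, and high = data_offset - low compensates for the sign extension, so base + low reconstructs the original offset even when low is negative. A standalone check of that arithmetic (plain C++ mirroring the hunk's Low16Bits logic, not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t data_offset = 0x18000u;                  // does not fit a signed 16-bit immediate
      int16_t low = static_cast<int16_t>(data_offset);  // 0x8000 reinterpreted as -32768
      uint32_t high = data_offset - low;                // 0x18000 - (-0x8000) = 0x20000
      // Addiu32(TMP, obj, high), then Sw with immediate `low`: base + low == data_offset.
      assert(high + static_cast<int32_t>(low) == data_offset);
      return 0;
    }
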
@@ -2208,6 +2237,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Note: if heap poisoning is enabled, pAputObject takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
@@ -2322,6 +2353,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
__ Beqz(obj, slow_path->GetExitLabel());
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(obj_cls);
__ Bne(obj_cls, cls, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -4891,7 +4923,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
LoadOperandType load_type = kLoadUnsignedByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- auto null_checker = GetImplicitNullChecker(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (type) {
case Primitive::kPrimBoolean:
@@ -4958,6 +4990,9 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
dst = locations->Out().AsRegister<Register>();
}
__ LoadFromOffset(load_type, dst, obj, offset, null_checker);
+ if (type == Primitive::kPrimNot) {
+ __ MaybeUnpoisonHeapReference(dst);
+ }
} else {
DCHECK(locations->Out().IsFpuRegister());
FRegister dst = locations->Out().AsFpuRegister<FRegister>();
@@ -5016,7 +5051,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
StoreOperandType store_type = kStoreByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- auto null_checker = GetImplicitNullChecker(instruction);
+ bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (type) {
case Primitive::kPrimBoolean:
@@ -5089,7 +5125,16 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
} else {
src = value_location.AsRegister<Register>();
}
- __ StoreToOffset(store_type, src, obj, offset, null_checker);
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ __ PoisonHeapReference(TMP, src);
+ __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
+ } else {
+ __ StoreToOffset(store_type, src, obj, offset, null_checker);
+ }
} else {
FRegister src = value_location.AsFpuRegister<FRegister>();
if (type == Primitive::kPrimFloat) {
@@ -5101,7 +5146,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
}
// TODO: memory barriers?
- if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ if (needs_write_barrier) {
Register src = value_location.AsRegister<Register>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
}
@@ -5173,6 +5218,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(out);
if (instruction->IsExactCheck()) {
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
@@ -5239,6 +5285,14 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
__ LoadFromOffset(kLoadWord, temp, temp,
mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
@@ -5562,6 +5616,14 @@ void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location tem
// temp = object->GetClass();
__ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -5692,7 +5754,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
__ SetReorder(reordering);
generate_null_check = true;
break;
@@ -5837,7 +5899,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
bool reordering = __ SetReorder(false);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg);
- __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
__ SetReorder(reordering);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
@@ -6059,6 +6121,8 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
@@ -6076,6 +6140,8 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 47eba50248..0ccd80ab93 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -297,7 +297,6 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
- auto GetImplicitNullChecker(HInstruction* instruction);
void GenPackedSwitchWithCompares(Register value_reg,
int32_t lower_bound,
uint32_t num_entries,
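
Editorial note on the GetImplicitNullChecker() refactor above: a member function with a deduced auto return type has to be declared in this header, which is awkward for a closure type; moving it to a file-local static function in the .cc (taking the code generator as an explicit parameter instead of capturing this) removes the declaration just deleted. A hedged sketch of the pattern with simplified stand-in types (not the real codegen API):

    struct Codegen {
      void MaybeRecordImplicitNullCheck(int instruction_id) {
        last_checked_ = instruction_id;  // record the faulting-access candidate
      }
      int last_checked_ = -1;
    };

    // File-local: the deduced closure type never has to be named or declared.
    static auto GetNullChecker(int instruction_id, Codegen* codegen) {
      return [codegen, instruction_id]() {
        codegen->MaybeRecordImplicitNullCheck(instruction_id);
      };
    }

    int main() {
      Codegen cg;
      auto null_checker = GetNullChecker(/*instruction_id=*/42, &cg);
      // ... emit a load or store that may implicitly null-check ...
      null_checker();  // record the check at the pc of the emitted access
      return cg.last_checked_ == 42 ? 0 : 1;
    }
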
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5be0da4011..138ebe6a25 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1483,11 +1483,19 @@ void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
}
}
+static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS64* codegen) {
+ auto null_checker = [codegen, instruction]() {
+ codegen->MaybeRecordImplicitNullCheck(instruction);
+ };
+ return null_checker;
+}
+
void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
Location index = locations->InAt(1);
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
Primitive::Type type = instruction->GetType();
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
@@ -1498,10 +1506,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1511,10 +1519,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1524,11 +1532,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1537,8 +1545,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
if (maybe_compressed_char_at) {
uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
- __ LoadFromOffset(kLoadWord, TMP, obj, count_offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
__ Dext(TMP, TMP, 0, 1);
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
@@ -1563,7 +1570,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
__ LoadFromOffset(kLoadUnsignedHalfword,
out,
obj,
- data_offset + (const_index << TIMES_2));
+ data_offset + (const_index << TIMES_2),
+ null_checker);
}
} else {
GpuRegister index_reg = index.AsRegister<GpuRegister>();
@@ -1581,7 +1589,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
} else {
__ Dsll(TMP, index_reg, TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
}
}
break;
@@ -1595,11 +1603,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ LoadFromOffset(load_type, out, obj, offset);
+ __ LoadFromOffset(load_type, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(load_type, out, TMP, data_offset);
+ __ LoadFromOffset(load_type, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1609,11 +1617,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1623,11 +1631,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
+ __ LoadFpuFromOffset(kLoadWord, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
+ __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1637,11 +1645,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
+ __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
}
break;
}
@@ -1650,8 +1658,10 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
- if (!maybe_compressed_char_at) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+
+ if (type == Primitive::kPrimNot) {
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ __ MaybeUnpoisonHeapReference(out);
}
}
@@ -1703,6 +1713,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -1712,10 +1723,10 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ StoreToOffset(kStoreByte, value, obj, offset);
+ __ StoreToOffset(kStoreByte, value, obj, offset, null_checker);
} else {
__ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
- __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1727,11 +1738,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1740,24 +1751,59 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ GpuRegister base_reg;
GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, value, obj, offset);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ base_reg = obj;
} else {
DCHECK(index.IsRegister()) << index;
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ base_reg = TMP;
+ }
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Use Sw() instead of StoreToOffset() in order to be able to
+ // hold the poisoned reference in AT and thus avoid allocating
+ // yet another temporary register.
+ if (index.IsConstant()) {
+ if (!IsInt<16>(static_cast<int32_t>(data_offset))) {
+ int16_t low16 = Low16Bits(data_offset);
+ // For consistency with StoreToOffset() and such treat data_offset as int32_t.
+ uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16;
+ int16_t upper16 = High16Bits(high48);
+ // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a
+ // compensatory 64KB added, which may push `high48` above 2GB and require
+ // the dahi instruction.
+ int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 1 : 0);
+ __ Daui(TMP, obj, upper16);
+ if (higher16 != 0) {
+ __ Dahi(TMP, higher16);
+ }
+ base_reg = TMP;
+ data_offset = low16;
+ }
+ } else {
+ DCHECK(IsInt<16>(static_cast<int32_t>(data_offset)));
+ }
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, base_reg, data_offset);
+ null_checker();
+ } else {
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
+ // Note: if heap poisoning is enabled, pAputObject takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
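
Editorial note on the Daui/Dahi split above: Daui adds upper16 << 16 sign-extended to 64 bits, so a negative upper16 pulls the address down; the +1 carried into higher16 (applied by Dahi as a << 32 immediate) restores the intended value. A standalone verification of the split using the same bit manipulations (plain C++, not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t data_offset = 0x7fff8000u;  // exercises the dahi compensation path
      int16_t low16 = static_cast<int16_t>(data_offset);                       // -32768
      uint64_t high48 =
          static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16;    // 0x80000000
      int16_t upper16 = static_cast<int16_t>(high48 >> 16);                    // -32768 (negative)
      int16_t higher16 = static_cast<int16_t>(high48 >> 32) + ((upper16 < 0) ? 1 : 0);  // 0 + 1
      // Daui adds upper16 << 16 (sign-extended); Dahi adds higher16 << 32.
      int64_t rebuilt = (static_cast<int64_t>(higher16) << 32) +
                        (static_cast<int64_t>(upper16) << 16) +
                        low16;
      assert(rebuilt == static_cast<int32_t>(data_offset));  // 0x7fff8000 reconstructed
      return 0;
    }
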
@@ -1770,11 +1816,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1786,11 +1832,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreFpuToOffset(kStoreWord, value, obj, offset);
+ __ StoreFpuToOffset(kStoreWord, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
__ Daddu(TMP, obj, TMP);
- __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
+ __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1802,11 +1848,11 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
+ __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset, null_checker);
} else {
__ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
__ Daddu(TMP, obj, TMP);
- __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
+ __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
}
break;
}
@@ -1815,11 +1861,6 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
-
- // Ints and objects are handled in the switch.
- if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- }
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -1871,6 +1912,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
__ Beqzc(obj, slow_path->GetExitLabel());
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(obj_cls);
__ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -3086,6 +3128,9 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
LoadOperandType load_type = kLoadUnsignedByte;
+ uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
+
switch (type) {
case Primitive::kPrimBoolean:
load_type = kLoadUnsignedByte;
@@ -3117,15 +3162,18 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
if (!Primitive::IsFloatingPointType(type)) {
DCHECK(locations->Out().IsRegister());
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
} else {
DCHECK(locations->Out().IsFpuRegister());
FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
- __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ __ LoadFpuFromOffset(load_type, dst, obj, offset, null_checker);
}
-
- codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barrier?
+
+ if (type == Primitive::kPrimNot) {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ __ MaybeUnpoisonHeapReference(dst);
+ }
}
void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
@@ -3147,6 +3195,10 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
LocationSummary* locations = instruction->GetLocations();
GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
StoreOperandType store_type = kStoreByte;
+ uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
+ auto null_checker = GetImplicitNullChecker(instruction, codegen_);
+
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -3172,16 +3224,24 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
if (!Primitive::IsFloatingPointType(type)) {
DCHECK(locations->InAt(1).IsRegister());
GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
- __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ if (kPoisonHeapReferences && needs_write_barrier) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(type, Primitive::kPrimNot);
+ __ PoisonHeapReference(TMP, src);
+ __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
+ } else {
+ __ StoreToOffset(store_type, src, obj, offset, null_checker);
+ }
} else {
DCHECK(locations->InAt(1).IsFpuRegister());
FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
- __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ __ StoreFpuToOffset(store_type, src, obj, offset, null_checker);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
// TODO: memory barriers?
- if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ if (needs_write_barrier) {
DCHECK(locations->InAt(1).IsRegister());
GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
codegen_->MarkGCCard(obj, src, value_can_be_null);
@@ -3247,6 +3307,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ __ MaybeUnpoisonHeapReference(out);
if (instruction->IsExactCheck()) {
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
@@ -3325,6 +3386,14 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+  // intact/accessible until the end of the marking phase (though
+  // it may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
__ LoadFromOffset(kLoadDoubleword, temp, temp,
mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
@@ -3567,6 +3636,14 @@ void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
// temp = object->GetClass();
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+  // intact/accessible until the end of the marking phase (though
+  // it may not in the future).
+ __ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -3666,8 +3743,8 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
generate_null_check = true;
break;
}
@@ -3773,8 +3850,8 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -3944,6 +4021,8 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
@@ -3961,6 +4040,8 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
+ // Note: if heap poisoning is enabled, the entry point takes care
+ // of poisoning the reference.
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
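Heap poisoning, referenced throughout the MIPS64 hunks above, stores compressed
heap references in a scrambled form so that a stray direct dereference of an
unpoisoned value faults. A minimal standalone model of the transformation,
assuming the negation encoding used by ART's kPoisonHeapReferences builds:

    #include <cstdint>

    // Poisoning negates the 32-bit compressed reference; null (0) maps to
    // itself, which is why a known-null value needs no poisoning step.
    uint32_t Poison(uint32_t ref)   { return -ref; }
    // Negation is its own inverse, so unpoisoning is the same operation.
    uint32_t Unpoison(uint32_t ref) { return -ref; }

This is why the hunks pair every heap reference load with MaybeUnpoisonHeapReference
and every reference store with PoisonHeapReference into a scratch register.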
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
new file mode 100644
index 0000000000..dc3d378e75
--- /dev/null
+++ b/compiler/optimizing/code_sinking.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_sinking.h"
+
+#include "common_dominator.h"
+#include "nodes.h"
+
+namespace art {
+
+void CodeSinking::Run() {
+ HBasicBlock* exit = graph_->GetExitBlock();
+ if (exit == nullptr) {
+ // Infinite loop, just bail.
+ return;
+ }
+ // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
+ // as an indicator of an uncommon branch.
+ for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
+ if (exit_predecessor->GetLastInstruction()->IsThrow()) {
+ SinkCodeToUncommonBranch(exit_predecessor);
+ }
+ }
+}
+
+static bool IsInterestingInstruction(HInstruction* instruction) {
+  // Instructions from the entry block (for example constants) are never interesting to move.
+ if (instruction->GetBlock() == instruction->GetBlock()->GetGraph()->GetEntryBlock()) {
+ return false;
+ }
+ // We want to move moveable instructions that cannot throw, as well as
+ // heap stores and allocations.
+
+ // Volatile stores cannot be moved.
+ if (instruction->IsInstanceFieldSet()) {
+ if (instruction->AsInstanceFieldSet()->IsVolatile()) {
+ return false;
+ }
+ }
+
+ // Check allocations first, as they can throw, but it is safe to move them.
+ if (instruction->IsNewInstance() || instruction->IsNewArray()) {
+ return true;
+ }
+
+ // All other instructions that can throw cannot be moved.
+ if (instruction->CanThrow()) {
+ return false;
+ }
+
+ // We can only store on local allocations. Other heap references can
+ // be escaping. Note that allocations can escape too, but we only move
+  // allocations if their users can move too, or are in the list of
+ // post dominated blocks.
+ if (instruction->IsInstanceFieldSet()) {
+ if (!instruction->InputAt(0)->IsNewInstance()) {
+ return false;
+ }
+ }
+
+ if (instruction->IsArraySet()) {
+ if (!instruction->InputAt(0)->IsNewArray()) {
+ return false;
+ }
+ }
+
+  // Heap accesses cannot go past instructions that have memory side effects, which
+  // we are not tracking here. Note that the load/store elimination optimization
+  // runs before this optimization, and should have removed the interesting ones.
+ // In theory, we could handle loads of local allocations, but this is currently
+ // hard to test, as LSE removes them.
+ if (instruction->IsStaticFieldGet() ||
+ instruction->IsInstanceFieldGet() ||
+ instruction->IsArrayGet()) {
+ return false;
+ }
+
+ if (instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->CanBeMoved()) {
+ return true;
+ }
+ return false;
+}
+
+static void AddInstruction(HInstruction* instruction,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ // Add to the work list if the instruction is not in the list of blocks
+  // to discard, hasn't already been processed, and is of interest.
+ if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
+ !processed_instructions.IsBitSet(instruction->GetId()) &&
+ IsInterestingInstruction(instruction)) {
+ worklist->push_back(instruction);
+ }
+}
+
+static void AddInputs(HInstruction* instruction,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ for (HInstruction* input : instruction->GetInputs()) {
+ AddInstruction(input, processed_instructions, discard_blocks, worklist);
+ }
+}
+
+static void AddInputs(HBasicBlock* block,
+ const ArenaBitVector& processed_instructions,
+ const ArenaBitVector& discard_blocks,
+ ArenaVector<HInstruction*>* worklist) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
+ }
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
+ }
+}
+
+static bool ShouldFilterUse(HInstruction* instruction,
+ HInstruction* user,
+ const ArenaBitVector& post_dominated) {
+ if (instruction->IsNewInstance()) {
+ return user->IsInstanceFieldSet() &&
+ (user->InputAt(0) == instruction) &&
+ !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
+ } else if (instruction->IsNewArray()) {
+ return user->IsArraySet() &&
+ (user->InputAt(0) == instruction) &&
+ !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
+ }
+ return false;
+}
+
+
+// Find the ideal position for moving `instruction`. If `filter` is true,
+// we filter out stores into that instruction, which are processed
+// first in step (3) of the sinking algorithm.
+// This method is tailored to the sinking algorithm, unlike
+// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
+static HInstruction* FindIdealPosition(HInstruction* instruction,
+ const ArenaBitVector& post_dominated,
+ bool filter = false) {
+ DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
+
+ // Find the target block.
+ CommonDominator finder(/* start_block */ nullptr);
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ HInstruction* user = use.GetUser();
+ if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
+ finder.Update(user->IsPhi()
+ ? user->GetBlock()->GetPredecessors()[use.GetIndex()]
+ : user->GetBlock());
+ }
+ }
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ DCHECK(!use.GetUser()->GetHolder()->IsPhi());
+ DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
+ finder.Update(use.GetUser()->GetHolder()->GetBlock());
+ }
+ HBasicBlock* target_block = finder.Get();
+ if (target_block == nullptr) {
+    // No user we can go next to? Likely an LSE or DCE limitation.
+ return nullptr;
+ }
+
+ // Move to the first dominator not in a loop, if we can.
+ while (target_block->IsInLoop()) {
+ if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) {
+ break;
+ }
+ target_block = target_block->GetDominator();
+ DCHECK(target_block != nullptr);
+ }
+
+ // Find insertion position. No need to filter anymore, as we have found a
+ // target block.
+ HInstruction* insert_pos = nullptr;
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == target_block &&
+ (insert_pos == nullptr || use.GetUser()->StrictlyDominates(insert_pos))) {
+ insert_pos = use.GetUser();
+ }
+ }
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HInstruction* user = use.GetUser()->GetHolder();
+ if (user->GetBlock() == target_block &&
+ (insert_pos == nullptr || user->StrictlyDominates(insert_pos))) {
+ insert_pos = user;
+ }
+ }
+ if (insert_pos == nullptr) {
+ // No user in `target_block`, insert before the control flow instruction.
+ insert_pos = target_block->GetLastInstruction();
+ DCHECK(insert_pos->IsControlFlow());
+ // Avoid splitting HCondition from HIf to prevent unnecessary materialization.
+ if (insert_pos->IsIf()) {
+ HInstruction* if_input = insert_pos->AsIf()->InputAt(0);
+ if (if_input == insert_pos->GetPrevious()) {
+ insert_pos = if_input;
+ }
+ }
+ }
+ DCHECK(!insert_pos->IsPhi());
+ return insert_pos;
+}
+
+
+void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
+  // Use a local allocator so the data structures created below are discarded
+  // at the end of this optimization.
+ ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+
+ size_t number_of_instructions = graph_->GetCurrentInstructionId();
+ ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+ ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ ArenaBitVector instructions_that_can_move(
+ &allocator, number_of_instructions, /* expandable */ false);
+ ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+
+ // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
+  // TODO(ngeoffray): Getting the full set of post-dominated blocks should be done by
+  // computing the post dominator tree, but that could be too time consuming. Also,
+ // we should start the analysis from blocks dominated by an uncommon branch, but we
+ // don't profile branches yet.
+ bool found_block = false;
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
+ if (block == end_block) {
+ found_block = true;
+ post_dominated.SetBit(block->GetBlockId());
+ } else if (found_block) {
+ bool is_post_dominated = true;
+ if (block->GetSuccessors().empty()) {
+ // We currently bail for loops.
+ is_post_dominated = false;
+ } else {
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ if (!post_dominated.IsBitSet(successor->GetBlockId())) {
+ is_post_dominated = false;
+ break;
+ }
+ }
+ }
+ if (is_post_dominated) {
+ post_dominated.SetBit(block->GetBlockId());
+ }
+ }
+ }
+
+ // Now that we have found a subset of post-dominated blocks, add to the worklist all inputs
+ // of instructions in these blocks that are not themselves in these blocks.
+  // Also find the common dominator of the found post-dominated blocks, to help filter
+  // out un-movable uses in step (2).
+ CommonDominator finder(end_block);
+ for (size_t i = 0, e = graph_->GetBlocks().size(); i < e; ++i) {
+ if (post_dominated.IsBitSet(i)) {
+ finder.Update(graph_->GetBlocks()[i]);
+ AddInputs(graph_->GetBlocks()[i], processed_instructions, post_dominated, &worklist);
+ }
+ }
+ HBasicBlock* common_dominator = finder.Get();
+
+ // Step (2): iterate over the worklist to find sinking candidates.
+ while (!worklist.empty()) {
+ HInstruction* instruction = worklist.back();
+ if (processed_instructions.IsBitSet(instruction->GetId())) {
+ // The instruction has already been processed, continue. This happens
+ // when the instruction is the input/user of multiple instructions.
+ worklist.pop_back();
+ continue;
+ }
+ bool all_users_in_post_dominated_blocks = true;
+ bool can_move = true;
+ // Check users of the instruction.
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ HInstruction* user = use.GetUser();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId()) &&
+ !instructions_that_can_move.IsBitSet(user->GetId())) {
+ all_users_in_post_dominated_blocks = false;
+ // If we've already processed this user, or the user cannot be moved, or
+ // is not dominating the post dominated blocks, bail.
+ // TODO(ngeoffray): The domination check is an approximation. We should
+ // instead check if the dominated blocks post dominate the user's block,
+ // but we do not have post dominance information here.
+ if (processed_instructions.IsBitSet(user->GetId()) ||
+ !IsInterestingInstruction(user) ||
+ !user->GetBlock()->Dominates(common_dominator)) {
+ can_move = false;
+ break;
+ }
+ }
+ }
+
+ // Check environment users of the instruction. Some of these users require
+ // the instruction not to move.
+ if (all_users_in_post_dominated_blocks) {
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HEnvironment* environment = use.GetUser();
+ HInstruction* user = environment->GetHolder();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
+ if (graph_->IsDebuggable() ||
+ user->IsDeoptimize() ||
+ user->CanThrowIntoCatchBlock() ||
+ (user->IsSuspendCheck() && graph_->IsCompilingOsr())) {
+ can_move = false;
+ break;
+ }
+ }
+ }
+ }
+ if (!can_move) {
+ // Instruction cannot be moved, mark it as processed and remove it from the work
+ // list.
+ processed_instructions.SetBit(instruction->GetId());
+ worklist.pop_back();
+ } else if (all_users_in_post_dominated_blocks) {
+ // Instruction is a candidate for being sunk. Mark it as such, remove it from the
+ // work list, and add its inputs to the work list.
+ instructions_that_can_move.SetBit(instruction->GetId());
+ move_in_order.push_back(instruction);
+ processed_instructions.SetBit(instruction->GetId());
+ worklist.pop_back();
+ AddInputs(instruction, processed_instructions, post_dominated, &worklist);
+      // Drop the environment uses not in the list of post-dominated blocks. This is
+ // to help step (3) of this optimization, when we start moving instructions
+ // closer to their use.
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HEnvironment* environment = use.GetUser();
+ HInstruction* user = environment->GetHolder();
+ if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
+ environment->RemoveAsUserOfInput(use.GetIndex());
+ environment->SetRawEnvAt(use.GetIndex(), nullptr);
+ }
+ }
+ } else {
+ // The information we have on the users was not enough to decide whether the
+ // instruction could be moved.
+ // Add the users to the work list, and keep the instruction in the work list
+ // to process it again once all users have been processed.
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ AddInstruction(use.GetUser(), processed_instructions, post_dominated, &worklist);
+ }
+ }
+ }
+
+ // Make sure we process instructions in dominated order. This is required for heap
+ // stores.
+ std::sort(move_in_order.begin(), move_in_order.end(), [](HInstruction* a, HInstruction* b) {
+ return b->StrictlyDominates(a);
+ });
+
+ // Step (3): Try to move sinking candidates.
+ for (HInstruction* instruction : move_in_order) {
+ HInstruction* position = nullptr;
+ if (instruction->IsArraySet() || instruction->IsInstanceFieldSet()) {
+ if (!instructions_that_can_move.IsBitSet(instruction->InputAt(0)->GetId())) {
+ // A store can trivially move, but it can safely do so only if the heap
+ // location it stores to can also move.
+ // TODO(ngeoffray): Handle allocation/store cycles by pruning these instructions
+ // from the set and all their inputs.
+ continue;
+ }
+ // Find the position of the instruction we're storing into, filtering out this
+ // store and all other stores to that instruction.
+ position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+
+ // The position needs to be dominated by the store, in order for the store to move there.
+ if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
+ continue;
+ }
+ } else {
+ // Find the ideal position within the post dominated blocks.
+ position = FindIdealPosition(instruction, post_dominated);
+ if (position == nullptr) {
+ continue;
+ }
+ }
+ // Bail if we could not find a position in the post dominated blocks (for example,
+ // if there are multiple users whose common dominator is not in the list of
+ // post dominated blocks).
+ if (!post_dominated.IsBitSet(position->GetBlock()->GetBlockId())) {
+ continue;
+ }
+ MaybeRecordStat(MethodCompilationStat::kInstructionSunk);
+ instruction->MoveBefore(position, /* ensure_safety */ false);
+ }
+}
+
+} // namespace art
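To make the effect of the pass concrete, here is a hypothetical source-level
analogue (ordinary C++ standing in for the compiled code; the function and its
names are invented for illustration). Before sinking, the fast path pays for an
allocation that is only consumed on the uncommon, throwing branch:

    #include <stdexcept>
    #include <string>

    int Compute(int x) {
      // Constructed unconditionally, but only used on the throw path.
      std::string msg = "negative input: " + std::to_string(x);
      if (x < 0) {
        throw std::invalid_argument(msg);  // uncommon branch ending in a throw
      }
      return x * 2;
    }

After sinking, the construction of `msg` moves inside the `if (x < 0)` block, so
the common path no longer performs the allocation. Steps (1)-(3) above perform
the equivalent move on HNewInstance/HNewArray and the stores feeding them, for
exit predecessors that end in a throw.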
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
new file mode 100644
index 0000000000..59cda52a8c
--- /dev/null
+++ b/compiler/optimizing/code_sinking.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
+#define ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+/**
+ * Optimization pass to move instructions into uncommon branches,
+ * when it is safe to do so.
+ */
+class CodeSinking : public HOptimization {
+ public:
+ CodeSinking(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kCodeSinkingPassName, stats) {}
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kCodeSinkingPassName = "code_sinking";
+
+ private:
+  // Try to move code only used by `end_block` and all its post-dominated /
+  // dominated blocks into those blocks.
+ void SinkCodeToUncommonBranch(HBasicBlock* end_block);
+
+ DISALLOW_COPY_AND_ASSIGN(CodeSinking);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_SINKING_H_
diff --git a/compiler/optimizing/common_dominator.h b/compiler/optimizing/common_dominator.h
index b459d24d7c..9f012cfbb2 100644
--- a/compiler/optimizing/common_dominator.h
+++ b/compiler/optimizing/common_dominator.h
@@ -36,12 +36,16 @@ class CommonDominator {
// Create a finder starting with a given block.
explicit CommonDominator(HBasicBlock* block)
: dominator_(block), chain_length_(ChainLength(block)) {
- DCHECK(block != nullptr);
}
// Update the common dominator with another block.
void Update(HBasicBlock* block) {
DCHECK(block != nullptr);
+ if (dominator_ == nullptr) {
+ dominator_ = block;
+ chain_length_ = ChainLength(block);
+ return;
+ }
HBasicBlock* block2 = dominator_;
DCHECK(block2 != nullptr);
if (block == block2) {
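The nullptr start permitted above lets a caller fold an arbitrary set of blocks
without designating a first element, which is what FindIdealPosition in
code_sinking.cc relies on. A minimal usage sketch (`blocks` is a hypothetical
container of HBasicBlock*):

    CommonDominator finder(/* start_block */ nullptr);
    for (HBasicBlock* block : blocks) {
      finder.Update(block);  // the first call seeds the dominator
    }
    HBasicBlock* dom = finder.Get();  // nullptr if Update() was never called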
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 5539413aad..3dfe17647c 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -57,21 +57,27 @@ static bool IsIntAndGet(HInstruction* instruction, int64_t* value) {
return false;
}
-/** Returns b^e for b,e >= 1. Sets overflow if arithmetic wrap-around occurred. */
+/** Computes a * b for a,b > 0, setting overflow if the multiplication wraps around. */
+static int64_t SafeMul(int64_t a, int64_t b, /*out*/ bool* overflow) {
+ if (a > 0 && b > 0 && a > (std::numeric_limits<int64_t>::max() / b)) {
+ *overflow = true;
+ }
+ return a * b;
+}
+
+/** Returns b^e for b,e > 0. Sets overflow if arithmetic wrap-around occurred. */
static int64_t IntPow(int64_t b, int64_t e, /*out*/ bool* overflow) {
- DCHECK_GE(b, 1);
- DCHECK_GE(e, 1);
+ DCHECK_LT(0, b);
+ DCHECK_LT(0, e);
int64_t pow = 1;
while (e) {
if (e & 1) {
- int64_t oldpow = pow;
- pow *= b;
- if (pow < oldpow) {
- *overflow = true;
- }
+ pow = SafeMul(pow, b, overflow);
}
e >>= 1;
- b *= b;
+ if (e) {
+ b = SafeMul(b, b, overflow);
+ }
}
return pow;
}
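For readers outside the tree, a self-contained sketch of the same
square-and-multiply scheme; unlike the in-tree version it saturates on overflow
so the demo itself stays free of signed wrap-around:

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Multiplies two positive values, flagging (and saturating on) overflow.
    int64_t SafeMul(int64_t a, int64_t b, bool* overflow) {
      if (a > std::numeric_limits<int64_t>::max() / b) {
        *overflow = true;
        return std::numeric_limits<int64_t>::max();
      }
      return a * b;
    }

    // Returns b^e for b,e > 0 in O(log e) multiplications.
    int64_t IntPow(int64_t b, int64_t e, bool* overflow) {
      int64_t pow = 1;
      while (e) {
        if (e & 1) pow = SafeMul(pow, b, overflow);
        e >>= 1;
        // Guarded squaring, as in the patch: once e reaches zero the squared
        // base is never used, so skipping it avoids a spurious overflow report.
        if (e) b = SafeMul(b, b, overflow);
      }
      return pow;
    }

    int main() {
      bool overflow = false;
      std::cout << IntPow(3, 39, &overflow) << ' ' << overflow << '\n';  // fits: 0
      std::cout << IntPow(2, 64, &overflow) << ' ' << overflow << '\n';  // wraps: 1
    }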
@@ -377,6 +383,53 @@ bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) co
return false;
}
+bool InductionVarRange::IsUnitStride(HInstruction* instruction,
+ /*out*/ HInstruction** offset) const {
+ HLoopInformation* loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* info = nullptr;
+ HInductionVarAnalysis::InductionInfo* trip = nullptr;
+ if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) {
+ if (info->induction_class == HInductionVarAnalysis::kLinear &&
+ info->op_b->operation == HInductionVarAnalysis::kFetch) {
+ int64_t stride_value = 0;
+ if (IsConstant(info->op_a, kExact, &stride_value) && stride_value == 1) {
+ int64_t off_value = 0;
+ if (IsConstant(info->op_b, kExact, &off_value) && off_value == 0) {
+ *offset = nullptr;
+ } else {
+ *offset = info->op_b->fetch;
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop,
+ HGraph* graph,
+ HBasicBlock* block) {
+ HInductionVarAnalysis::InductionInfo *trip =
+ induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ HInstruction* taken_test = nullptr;
+ HInstruction* trip_expr = nullptr;
+ if (IsBodyTripCount(trip)) {
+ if (!GenerateCode(trip->op_b, nullptr, graph, block, &taken_test, false, false)) {
+ return nullptr;
+ }
+ }
+ if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
+ if (taken_test != nullptr) {
+ HInstruction* zero = graph->GetConstant(trip->type, 0);
+        trip_expr = Insert(
+            block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ }
+ return trip_expr;
+ }
+ }
+ return nullptr;
+}
+
//
// Private class methods.
//
@@ -1157,12 +1210,15 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
HInstruction* opb = nullptr;
switch (info->induction_class) {
case HInductionVarAnalysis::kInvariant:
- // Invariants (note that even though is_min does not impact code generation for
- // invariants, some effort is made to keep this parameter consistent).
+ // Invariants (note that since invariants only have other invariants as
+ // sub expressions, viz. no induction, there is no need to adjust is_min).
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- case HInductionVarAnalysis::kRem: // no proper is_min for second arg
- case HInductionVarAnalysis::kXor: // no proper is_min for second arg
+ case HInductionVarAnalysis::kSub:
+ case HInductionVarAnalysis::kMul:
+ case HInductionVarAnalysis::kDiv:
+ case HInductionVarAnalysis::kRem:
+ case HInductionVarAnalysis::kXor:
case HInductionVarAnalysis::kLT:
case HInductionVarAnalysis::kLE:
case HInductionVarAnalysis::kGT:
@@ -1174,6 +1230,12 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ case HInductionVarAnalysis::kSub:
+ operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ case HInductionVarAnalysis::kMul:
+ operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ case HInductionVarAnalysis::kDiv:
+ operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
@@ -1194,16 +1256,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
return true;
}
break;
- case HInductionVarAnalysis::kSub: // second reversed!
- if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
- GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
- if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb));
- }
- return true;
- }
- break;
- case HInductionVarAnalysis::kNeg: // reversed!
+ case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
*result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
@@ -1240,9 +1293,9 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
}
break;
- default:
- break;
- }
+ case HInductionVarAnalysis::kNop:
+ LOG(FATAL) << "unexpected invariant nop";
+ } // switch invariant operation
break;
case HInductionVarAnalysis::kLinear: {
// Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should
@@ -1293,7 +1346,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
break;
}
- }
+ } // switch induction class
}
return false;
}
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 6c424b78b9..0858d73982 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -24,7 +24,8 @@ namespace art {
/**
* This class implements range analysis on expressions within loops. It takes the results
* of induction variable analysis in the constructor and provides a public API to obtain
- * a conservative lower and upper bound value on each instruction in the HIR.
+ * a conservative lower and upper bound value or last value on each instruction in the HIR.
+ * The public API also provides a few general-purpose utility methods related to induction.
*
* The range analysis is done with a combination of symbolic and partial integral evaluation
* of expressions. The analysis avoids complications with wrap-around arithmetic on the integral
@@ -154,6 +155,19 @@ class InductionVarRange {
*/
bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
+ /**
+   * Checks if the instruction is a unit-stride induction inside the closest enveloping loop.
+   * Returns the invariant offset through `offset` on success (nullptr denotes offset zero).
+ */
+ bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const;
+
+ /**
+   * Generates the trip count expression for the given loop. Code is generated in the given
+   * block and graph. The expression is guarded by a taken-test if needed. Returns the trip
+   * count expression on success or null otherwise.
+ */
+ HInstruction* GenerateTripCount(HLoopInformation* loop, HGraph* graph, HBasicBlock* block);
+
private:
/*
* Enum used in IsConstant() request.
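A sketch of how a client pass might consume the two new entry points (the
variable names are hypothetical; error handling is elided):

    InductionVarRange range(induction_analysis);
    HInstruction* offset = nullptr;
    if (range.IsUnitStride(index_phi, &offset)) {
      // index == offset + i for i = 0, 1, 2, ... (offset == nullptr means 0).
      HInstruction* tc =
          range.GenerateTripCount(loop, graph, loop->GetPreHeader());
      if (tc != nullptr) {
        // tc is materialized in the preheader, already guarded by the
        // taken-test when the body may execute zero times.
      }
    }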
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index d81817fb09..fcdf8eb7dc 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -48,6 +48,11 @@ class InductionVarRangeTest : public CommonCompilerTest {
EXPECT_EQ(v1.is_known, v2.is_known);
}
+ void ExpectInt(int32_t value, HInstruction* i) {
+ ASSERT_TRUE(i->IsIntConstant());
+ EXPECT_EQ(value, i->AsIntConstant()->GetValue());
+ }
+
//
// Construction methods.
//
@@ -757,10 +762,20 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) {
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsAdd());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
+ ExpectInt(0, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ExpectInt(1000, tce);
}
TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
@@ -799,15 +814,27 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
// Last value (unsimplified).
HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, last->InputAt(0));
ASSERT_TRUE(last->InputAt(1)->IsNeg());
last = last->InputAt(1)->InputAt(0);
ASSERT_TRUE(last->IsSub());
- ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(1000, tc);
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ ASSERT_TRUE(tce->IsNeg());
+ last = tce->InputAt(0);
+ EXPECT_TRUE(last->IsSub());
+ ExpectInt(0, last->InputAt(0));
+ ExpectInt(1000, last->InputAt(1));
}
TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
@@ -851,27 +878,22 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
// Verify lower is 0+0.
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsAdd());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(0, lower->InputAt(0));
+ ExpectInt(0, lower->InputAt(1));
// Verify upper is (V-1)+0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsAdd());
ASSERT_TRUE(upper->InputAt(0)->IsSub());
EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue());
- ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, upper->InputAt(0)->InputAt(1));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 0<V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsLessThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(0, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -880,6 +902,21 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(y_, 1, 0), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(offset == nullptr);
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
+ EXPECT_TRUE(tce->InputAt(2)->IsLessThan());
}
TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
@@ -923,32 +960,26 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
// Verify lower is 1000-((1000-V)-1).
ASSERT_TRUE(lower != nullptr);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
lower = lower->InputAt(1);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
- EXPECT_EQ(1, lower->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1, lower->InputAt(1));
lower = lower->InputAt(0);
ASSERT_TRUE(lower->IsSub());
- ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, lower->InputAt(0));
EXPECT_TRUE(lower->InputAt(1)->IsParameterValue());
// Verify upper is 1000-0.
ASSERT_TRUE(upper != nullptr);
ASSERT_TRUE(upper->IsSub());
- ASSERT_TRUE(upper->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, upper->InputAt(0)->AsIntConstant()->GetValue());
- ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
- EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+ ExpectInt(1000, upper->InputAt(0));
+ ExpectInt(0, upper->InputAt(1));
// Verify taken-test is 1000>V.
HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsGreaterThan());
- ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
- EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue());
+ ExpectInt(1000, taken->InputAt(0));
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
// Replacement.
@@ -957,6 +988,23 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(y_, 1, 0), v1);
ExpectEqual(Value(999), v2);
+
+ // Loop logic.
+ int64_t tc = 0;
+ EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
+ EXPECT_EQ(0, tc); // unknown
+ HInstruction* offset = nullptr;
+ EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ HInstruction* tce = range_.GenerateTripCount(
+ loop_header_->GetLoopInformation(), graph_, loop_preheader_);
+ ASSERT_TRUE(tce != nullptr);
+ EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test
+ ExpectInt(0, tce->InputAt(0));
+ EXPECT_TRUE(tce->InputAt(1)->IsSub());
+ EXPECT_TRUE(tce->InputAt(2)->IsGreaterThan());
+ tce = tce->InputAt(1);
+  ExpectInt(1000, tce->InputAt(0));
+  EXPECT_TRUE(tce->InputAt(1)->IsParameterValue());
}
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8c73f1d036..3e340908bf 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1272,12 +1272,19 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
- // When they are needed, allocate `inline_stats` on the heap instead
+ // When they are needed, allocate `inline_stats_` on the Arena instead
// of on the stack, as Clang might produce a stack frame too large
// for this function, that would not fit the requirements of the
// `-Wframe-larger-than` option.
- std::unique_ptr<OptimizingCompilerStats> inline_stats =
- (stats_ == nullptr) ? nullptr : MakeUnique<OptimizingCompilerStats>();
+ if (stats_ != nullptr) {
+ // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
+ if (inline_stats_ == nullptr) {
+ void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ inline_stats_ = new (storage) OptimizingCompilerStats;
+ } else {
+ inline_stats_->Reset();
+ }
+ }
HGraphBuilder builder(callee_graph,
&dex_compilation_unit,
&outer_compilation_unit_,
@@ -1285,7 +1292,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
*code_item,
compiler_driver_,
codegen_,
- inline_stats.get(),
+ inline_stats_,
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
dex_cache,
handles_);
@@ -1468,6 +1475,11 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
<< "No instructions can be added to the inner graph during inlining into the outer graph";
+ if (stats_ != nullptr) {
+ DCHECK(inline_stats_ != nullptr);
+ inline_stats_->AddTo(stats_);
+ }
+
return true;
}
@@ -1476,11 +1488,11 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
- HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
+ HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
- InstructionSimplifier simplify(callee_graph, stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, stats_);
+ InstructionSimplifier simplify(callee_graph, inline_stats_);
+ IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
&intrinsics,
@@ -1504,7 +1516,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
dex_compilation_unit,
compiler_driver_,
handles_,
- stats_,
+ inline_stats_,
total_number_of_dex_registers_ + code_item->registers_size_,
depth_ + 1);
inliner.Run();
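The arena placement-new used above is a recurring idiom in the optimizing
compiler: construct into arena-owned storage so the object is reclaimed with
the graph rather than through its own deleter. A generic sketch of the pattern
(the Arena type and Alloc signature here are assumed for illustration):

    #include <new>

    template <typename T, typename Arena>
    T* NewInArena(Arena* arena) {
      void* storage = arena->Alloc(sizeof(T));  // raw, arena-owned storage
      return new (storage) T();                 // placement-new: no delete needed
    }

Reusing one OptimizingCompilerStats per caller and merging it into the outer
stats with AddTo() keeps arena growth bounded across repeated inline attempts.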
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 11aacab802..75d025ae41 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -51,7 +51,8 @@ class HInliner : public HOptimization {
total_number_of_dex_registers_(total_number_of_dex_registers),
depth_(depth),
number_of_inlined_instructions_(0),
- handles_(handles) {}
+ handles_(handles),
+ inline_stats_(nullptr) {}
void Run() OVERRIDE;
@@ -218,6 +219,10 @@ class HInliner : public HOptimization {
size_t number_of_inlined_instructions_;
VariableSizedHandleScope* const handles_;
+ // Used to record stats about optimizations on the inlined graph.
+ // If the inlining is successful, these stats are merged to the caller graph's stats.
+ OptimizingCompilerStats* inline_stats_;
+
DISALLOW_COPY_AND_ASSIGN(HInliner);
};
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 17d683f357..8df80adc9f 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -19,6 +19,7 @@
#include "art_method.h"
#include "class_linker.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "invoke_type.h"
#include "mirror/dex_cache-inl.h"
#include "nodes.h"
@@ -178,4 +179,112 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
return os;
}
+void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ Location return_location,
+ Location first_argument_location) {
+ if (Runtime::Current()->IsAotCompiler()) {
+ if (codegen->GetCompilerOptions().IsBootImage() ||
+ codegen->GetCompilerOptions().GetCompilePic()) {
+ // TODO(ngeoffray): Support boot image compilation.
+ return;
+ }
+ }
+
+ IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
+
+ // Most common case is that we have found all we needed (classes are initialized
+ // and in the boot image). Bail if not.
+ if (info.integer_cache == nullptr ||
+ info.integer == nullptr ||
+ info.cache == nullptr ||
+ info.value_offset == 0 ||
+ // low and high cannot be 0, per the spec.
+ info.low == 0 ||
+ info.high == 0) {
+ LOG(INFO) << "Integer.valueOf will not be optimized";
+ return;
+ }
+
+  // The intrinsic will call the runtime if it needs to allocate a j.l.Integer.
+ LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
+ if (!invoke->InputAt(0)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+ locations->AddTemp(first_argument_location);
+ locations->SetOut(return_location);
+}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+  // Note that we could cache all of the data looked up here, but there's no good
+  // location for it. We don't want to add it to WellKnownClasses, to avoid creating global
+  // JNI values. Adding it as state to the compiler singleton seems like the wrong
+  // separation of concerns.
+ // The need for this data should be pretty rare though.
+
+ // The most common case is that the classes are in the boot image and initialized,
+ // which is easy to generate code for. We bail if not.
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ gc::Heap* heap = runtime->GetHeap();
+ IntegerValueOfInfo info;
+ info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;");
+ if (info.integer_cache == nullptr) {
+ self->ClearException();
+ return info;
+ }
+ if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) {
+ // Optimization only works if the class is initialized and in the boot image.
+ return info;
+ }
+ info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;");
+ if (info.integer == nullptr) {
+ self->ClearException();
+ return info;
+ }
+ if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) {
+ // Optimization only works if the class is initialized and in the boot image.
+ return info;
+ }
+
+ ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ if (field == nullptr) {
+ return info;
+ }
+ info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
+ field->GetObject(info.integer_cache).Ptr());
+ if (info.cache == nullptr) {
+ return info;
+ }
+
+ if (!heap->ObjectIsInBootImageSpace(info.cache)) {
+ // Optimization only works if the object is in the boot image.
+ return info;
+ }
+
+ field = info.integer->FindDeclaredInstanceField("value", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.value_offset = field->GetOffset().Int32Value();
+
+ field = info.integer_cache->FindDeclaredStaticField("low", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.low = field->GetInt(info.integer_cache);
+
+ field = info.integer_cache->FindDeclaredStaticField("high", "I");
+ if (field == nullptr) {
+ return info;
+ }
+ info.high = field->GetInt(info.integer_cache);
+
+ DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
+ return info;
+}
+
} // namespace art
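The invariant all of this checking establishes is the one Integer.valueOf
itself guarantees: values in [low, high] box to shared cache objects. A
standalone C++ model of the fast/slow split the per-architecture intrinsics
below emit (plain stand-in types, not ART code):

    #include <cstdint>
    #include <vector>

    struct Integer { int32_t value; };

    struct CacheModel {
      int32_t low = -128;
      int32_t high = 127;
      std::vector<Integer*> cache;  // length high - low + 1, as DCHECKed above
    };

    Integer* ValueOf(int32_t v, const CacheModel& m) {
      if (v >= m.low && v <= m.high) {
        return m.cache[v - m.low];  // cache hit: shared object, no allocation
      }
      return new Integer{v};        // slow path: fresh allocation (a runtime
                                    // call plus a field store in the real code)
    }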
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 6425e1313f..9da5a7fa3b 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -113,6 +113,39 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+ static void ComputeIntegerValueOfLocations(HInvoke* invoke,
+ CodeGenerator* codegen,
+ Location return_location,
+ Location first_argument_location);
+
+ // Temporary data structure for holding Integer.valueOf useful data. We only
+ // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
+ // mirror::Class pointers in this structure.
+ struct IntegerValueOfInfo {
+ IntegerValueOfInfo()
+ : integer_cache(nullptr),
+ integer(nullptr),
+ cache(nullptr),
+ low(0),
+ high(0),
+ value_offset(0) {}
+
+    // The java.lang.Integer$IntegerCache class.
+    mirror::Class* integer_cache;
+    // The java.lang.Integer class.
+    mirror::Class* integer;
+    // Value of java.lang.Integer$IntegerCache#cache.
+    mirror::ObjectArray<mirror::Object>* cache;
+    // Value of java.lang.Integer$IntegerCache#low.
+    int32_t low;
+    // Value of java.lang.Integer$IntegerCache#high.
+    int32_t high;
+    // The offset of the java.lang.Integer.value field.
+    int32_t value_offset;
+ };
+
+ static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index c262cf983d..86000e9356 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -129,6 +129,7 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode {
IntrinsicLocationsBuilderARM::IntrinsicLocationsBuilderARM(CodeGeneratorARM* codegen)
: arena_(codegen->GetGraph()->GetArena()),
+ codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2644,6 +2645,75 @@ void IntrinsicCodeGeneratorARM::VisitReferenceGetReferent(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(R0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ ArmAssembler* const assembler = GetAssembler();
+
+ Register out = locations->Out().AsRegister<Register>();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+      mirror::Object* boxed = info.cache->Get(value - info.low);
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ // Check bounds of our cache.
+ __ AddConstant(out, in, -info.low);
+ __ CmpConstant(out, info.high - info.low + 1);
+ Label allocate, done;
+ __ b(&allocate, HS);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ LoadLiteral(IP, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), IP, out);
+ __ MaybeUnpoisonHeapReference(out);
+ __ b(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 7f20ea4b1f..2840863632 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -51,6 +51,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
private:
ArenaAllocator* arena_;
+ CodeGenerator* codegen_;
ArmAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 86e54294ae..6c3938c1a9 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2924,6 +2924,79 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ calling_convention.GetReturnLocation(Primitive::kPrimNot),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ MacroAssembler* masm = GetVIXLAssembler();
+
+ Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot);
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireW();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+      mirror::Object* boxed = info.cache->Get(value - info.low);
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ Mov(temp.W(), value);
+ __ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt);
+ // Check bounds of our cache.
+ __ Add(out.W(), in.W(), -info.low);
+ __ Cmp(out.W(), info.high - info.low + 1);
+ vixl::aarch64::Label allocate, done;
+ __ B(&allocate, hs);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ MemOperand source = HeapOperand(
+ temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot));
+ codegen_->Load(Primitive::kPrimNot, out, source);
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
+ __ B(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
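The fast path emitted above is easier to follow in source form. Below is a minimal C++ sketch of what the generated code computes, assuming a boot-image cache covering [low, high] (typically [-128, 127]); all names here are illustrative stand-ins, not ART API:

    #include <cstdint>

    struct Integer { int32_t value; };                 // stand-in for the boxed object
    static constexpr int32_t kLow = -128;              // IntegerValueOfInfo::low
    static constexpr int32_t kHigh = 127;              // IntegerValueOfInfo::high
    static Integer g_cache[kHigh - kLow + 1];          // boot-image IntegerCache.cache

    Integer* AllocateInteger(int32_t v) { return new Integer{v}; }  // slow-path stub

    Integer* IntegerValueOf(int32_t v) {
      // Add(out, in, -info.low): bias the value so that one unsigned compare
      // checks both bounds at once (wrapping subtraction, as in the assembly).
      uint32_t index = static_cast<uint32_t>(v) - static_cast<uint32_t>(kLow);
      if (index < static_cast<uint32_t>(kHigh - kLow + 1)) {  // Cmp + B(hs, &allocate)
        return &g_cache[index];                               // load from the cache array
      }
      return AllocateInteger(v);                              // kQuickAllocObjectInitialized
    }

The constant-input case specializes this further: the bounds check folds away at compile time, leaving either a single literal load of the boxed object or an unconditional allocation.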
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 28e41cb086..3c53517b28 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -38,7 +38,8 @@ class CodeGeneratorARM64;
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena) : arena_(arena) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
+ : arena_(arena), codegen_(codegen) {}
// Define visitor methods.
@@ -56,6 +57,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
private:
ArenaAllocator* arena_;
+ CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 70a3d38c13..aa89deae34 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -203,6 +203,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
: arena_(codegen->GetGraph()->GetArena()),
+ codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2988,6 +2989,77 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
__ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ LocationFrom(r0),
+ LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ ArmVIXLAssembler* const assembler = GetAssembler();
+
+ vixl32::Register out = RegisterFrom(locations->Out());
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ vixl32::Register argument = calling_convention.GetRegisterAt(0);
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address =
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ __ Mov(temp, value);
+ assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ }
+ } else {
+ vixl32::Register in = RegisterFrom(locations->InAt(0));
+ // Check bounds of our cache.
+ __ Add(out, in, -info.low);
+ __ Cmp(out, info.high - info.low + 1);
+ vixl32::Label allocate, done;
+ __ B(hs, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out);
+ assembler->MaybeUnpoisonHeapReference(out);
+ __ B(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
+ // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
+ // one.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 6e79cb76a1..023cba1349 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -47,6 +47,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
private:
ArenaAllocator* arena_;
+ CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 64a68403e9..ba006edfa2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1572,6 +1572,10 @@ static void GenUnsafeGet(HInvoke* invoke,
__ Lwr(trg, TMP, 0);
__ Lwl(trg, TMP, 3);
}
+
+ if (type == Primitive::kPrimNot) {
+ __ MaybeUnpoisonHeapReference(trg);
+ }
}
}
@@ -1663,6 +1667,11 @@ static void GenUnsafePut(LocationSummary* locations,
if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
Register value = locations->InAt(3).AsRegister<Register>();
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(AT, value);
+ value = AT;
+ }
+
if (is_R6) {
__ Sw(value, TMP, 0);
} else {
@@ -1852,13 +1861,23 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
codegen->MarkGCCard(base, value, value_can_be_null);
}
+ MipsLabel loop_head, exit_loop;
+ __ Addu(TMP, base, offset_lo);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(expected);
+ // Do not poison `value` if it is the same register as
+ // `expected`, which has just been poisoned.
+ if (value != expected) {
+ __ PoisonHeapReference(value);
+ }
+ }
+
// do {
// tmp_value = [tmp_ptr] - expected;
// } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
// result = tmp_value != 0;
- MipsLabel loop_head, exit_loop;
- __ Addu(TMP, base, offset_lo);
__ Sync(0);
__ Bind(&loop_head);
if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) {
@@ -1868,8 +1887,8 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
__ LlR2(out, TMP);
}
} else {
- LOG(FATAL) << "Unsupported op size " << type;
- UNREACHABLE();
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
}
__ Subu(out, out, expected); // If we didn't get the 'expected'
__ Sltiu(out, out, 1); // value, set 'out' to false, and
@@ -1894,6 +1913,15 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
// cycle atomically then retry.
__ Bind(&exit_loop);
__ Sync(0);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ UnpoisonHeapReference(expected);
+ // Do not unpoison `value` if it is the same register as
+ // `expected`, which has just been unpoisoned.
+ if (value != expected) {
+ __ UnpoisonHeapReference(value);
+ }
+ }
}
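Since the LL/SC loop compares raw heap words, both `expected` and `value` must be in poisoned form for the duration of the loop and restored afterwards. A rough C++ model of the invariant (not ART code; `Poison` mirrors the single Subu negation):

    #include <atomic>
    #include <cstdint>

    inline uint32_t Poison(uint32_t ref) { return 0u - ref; }  // Subu(dst, ZERO, src)

    bool CasPoisonedReference(std::atomic<uint32_t>* slot,
                              uint32_t expected, uint32_t value) {
      // The heap slot holds Poison(ref), so compare and store poisoned values.
      uint32_t poisoned_expected = Poison(expected);
      uint32_t poisoned_value = Poison(value);
      return slot->compare_exchange_strong(poisoned_expected, poisoned_value);
    }

In the emitted code the poisoning happens in place on the registers, which is why the same-register case must be negated exactly once on entry and un-negated once on exit.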
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
@@ -1989,20 +2017,24 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
__ LoadConst32(out, 1);
return;
}
-
- // Check if input is null, return false if it is.
- __ Beqz(arg, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ Beqz(arg, &return_false);
+ }
// Reference equality check, return true if same reference.
__ Beq(str, arg, &return_true);
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
- __ Lw(temp1, str, class_offset);
- __ Lw(temp2, arg, class_offset);
- __ Bne(temp1, temp2, &return_false);
+ if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ Lw(temp1, str, class_offset);
+ __ Lw(temp2, arg, class_offset);
+ __ Bne(temp1, temp2, &return_false);
+ }
// Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
@@ -2682,6 +2714,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf)
+
UNREACHABLE_INTRINSICS(MIPS)
#undef __
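For reference, here is a condensed C++ sketch of the branch structure VisitStringEquals now emits; the two flags are compile-time facts about the argument, and when a flag is set the corresponding runtime check is skipped. The types and trailing helper are illustrative stubs:

    struct Str { const void* klass; };                       // stand-in for mirror::String
    bool CompareCountsAndChars(const Str*, const Str*) { return true; }  // rest of intrinsic

    bool StringEqualsSketch(const Str* str, const Str* arg,
                            bool argument_not_null,          // GetArgumentNotNull()
                            bool argument_is_string) {       // GetArgumentIsString()
      if (!argument_not_null && arg == nullptr) {            // Beqz(arg, &return_false)
        return false;
      }
      if (str == arg) {                                      // Beq(str, arg, &return_true)
        return true;
      }
      if (!argument_is_string && str->klass != arg->klass) { // Lw + Lw + Bne
        return false;
      }
      return CompareCountsAndChars(str, arg);
    }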
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 3888828722..21c5074a1c 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1187,6 +1187,7 @@ static void GenUnsafeGet(HInvoke* invoke,
case Primitive::kPrimNot:
__ Lwu(trg, TMP, 0);
+ __ MaybeUnpoisonHeapReference(trg);
break;
case Primitive::kPrimLong:
@@ -1285,7 +1286,12 @@ static void GenUnsafePut(LocationSummary* locations,
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- __ Sw(value, TMP, 0);
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(AT, value);
+ __ Sw(AT, TMP, 0);
+ } else {
+ __ Sw(value, TMP, 0);
+ }
break;
case Primitive::kPrimLong:
@@ -1454,13 +1460,23 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
codegen->MarkGCCard(base, value, value_can_be_null);
}
+ Mips64Label loop_head, exit_loop;
+ __ Daddu(TMP, base, offset);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ PoisonHeapReference(expected);
+ // Do not poison `value` if it is the same register as
+ // `expected`, which has just been poisoned.
+ if (value != expected) {
+ __ PoisonHeapReference(value);
+ }
+ }
+
// do {
// tmp_value = [tmp_ptr] - expected;
// } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
// result = tmp_value != 0;
- Mips64Label loop_head, exit_loop;
- __ Daddu(TMP, base, offset);
__ Sync(0);
__ Bind(&loop_head);
if (type == Primitive::kPrimLong) {
@@ -1469,6 +1485,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
// Note: We will need a read barrier here, when read barrier
// support is added to the MIPS64 back end.
__ Ll(out, TMP);
+ if (type == Primitive::kPrimNot) {
+ // The LL instruction sign-extends the 32-bit value, but
+ // 32-bit references must be zero-extended. Zero-extend `out`.
+ __ Dext(out, out, 0, 32);
+ }
}
__ Dsubu(out, out, expected); // If we didn't get the 'expected'
__ Sltiu(out, out, 1); // value, set 'out' to false, and
@@ -1487,6 +1508,15 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
// cycle atomically then retry.
__ Bind(&exit_loop);
__ Sync(0);
+
+ if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+ __ UnpoisonHeapReference(expected);
+ // Do not unpoison `value` if it is the same register as
+ // `expected`, which has just been unpoisoned.
+ if (value != expected) {
+ __ UnpoisonHeapReference(value);
+ }
+ }
}
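The Dext above matters whenever bit 31 of the loaded reference is set, which is always the case for poisoned non-null references. A small self-contained check of the arithmetic, assuming LL behaves as a sign-extending 32-bit load:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t ref = 0x80000000u;                       // any value with bit 31 set
      int64_t ll_result = static_cast<int32_t>(ref);    // LL: 0xFFFFFFFF80000000
      // Dext(out, out, 0, 32): keep bits 0-31, clear bits 32-63.
      uint64_t zero_extended = static_cast<uint64_t>(ll_result) & 0xFFFFFFFFu;
      assert(zero_extended == 0x0000000080000000u);     // matches a zero-extended ref
      return 0;
    }

Without it, the following Dsubu against the zero-extended `expected` register would spuriously report a mismatch.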
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
@@ -1593,19 +1623,24 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
return;
}
- // Check if input is null, return false if it is.
- __ Beqzc(arg, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ Beqzc(arg, &return_false);
+ }
// Reference equality check, return true if same reference.
__ Beqc(str, arg, &return_true);
- // Instanceof check for the argument by comparing class fields.
- // All string objects must have the same type since String cannot be subclassed.
- // Receiver must be a string object, so its class field is equal to all strings' class fields.
- // If the argument is a string object, its class field must be equal to receiver's class field.
- __ Lw(temp1, str, class_offset);
- __ Lw(temp2, arg, class_offset);
- __ Bnec(temp1, temp2, &return_false);
+ if (!optimizations.GetArgumentIsString()) {
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ Lw(temp1, str, class_offset);
+ __ Lw(temp2, arg, class_offset);
+ __ Bnec(temp1, temp2, &return_false);
+ }
// Load `count` fields of this and argument strings.
__ Lw(temp1, str, count_offset);
@@ -2075,6 +2110,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
+
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e1b7ea53b4..a671788ff5 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3335,6 +3335,65 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ Bind(intrinsic_slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(EAX),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ X86Assembler* assembler = GetAssembler();
+
+ Register out = locations->Out().AsRegister<Register>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ movl(out, Immediate(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), Immediate(value));
+ }
+ } else {
+ Register in = locations->InAt(0).AsRegister<Register>();
+ // Check bounds of our cache.
+ __ leal(out, Address(in, -info.low));
+ __ cmpl(out, Immediate(info.high - info.low + 1));
+ NearLabel allocate, done;
+ __ j(kAboveEqual, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ movl(out, Address(out, TIMES_4, data_offset + address));
+ __ MaybeUnpoisonHeapReference(out);
+ __ jmp(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), in);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 05d270a4e6..9a6dd985a4 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -39,7 +39,6 @@ IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX8
: arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
}
-
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
}
@@ -2995,6 +2994,65 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
+ InvokeRuntimeCallingConvention calling_convention;
+ IntrinsicVisitor::ComputeIntegerValueOfLocations(
+ invoke,
+ codegen_,
+ Location::RegisterLocation(RAX),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ LocationSummary* locations = invoke->GetLocations();
+ X86_64Assembler* assembler = GetAssembler();
+
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (invoke->InputAt(0)->IsConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (value >= info.low && value <= info.high) {
+ // Just embed the j.l.Integer in the code.
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Object* boxed = info.cache->Get(value + (-info.low));
+ DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
+ __ movl(out, Immediate(address));
+ } else {
+ // Allocate and initialize a new j.l.Integer.
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
+ // JIT object table.
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), Immediate(value));
+ }
+ } else {
+ CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
+ // Check bounds of our cache.
+ __ leal(out, Address(in, -info.low));
+ __ cmpl(out, Immediate(info.high - info.low + 1));
+ NearLabel allocate, done;
+ __ j(kAboveEqual, &allocate);
+ // If the value is within the bounds, load the j.l.Integer directly from the array.
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
+ __ movl(out, Address(out, TIMES_4, data_offset + address));
+ __ MaybeUnpoisonHeapReference(out);
+ __ jmp(&done);
+ __ Bind(&allocate);
+ // Otherwise allocate and initialize a new j.l.Integer.
+ address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
+ codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
+ __ movl(Address(out, info.value_offset), in);
+ __ Bind(&done);
+ }
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2d3c00fb97..46ba048738 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -38,7 +38,8 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
position_(pos),
is_singleton_(true),
is_singleton_and_not_returned_(true),
- is_singleton_and_not_deopt_visible_(true) {
+ is_singleton_and_not_deopt_visible_(true),
+ has_index_aliasing_(false) {
CalculateEscape(reference_,
nullptr,
&is_singleton_,
@@ -68,13 +69,29 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
}
+ bool HasIndexAliasing() {
+ return has_index_aliasing_;
+ }
+
+ void SetHasIndexAliasing(bool has_index_aliasing) {
+ // Only allow setting to true.
+ DCHECK(has_index_aliasing);
+ has_index_aliasing_ = has_index_aliasing;
+ }
+
private:
HInstruction* const reference_;
const size_t position_; // position in HeapLocationCollector's ref_info_array_.
- bool is_singleton_; // can only be referred to by a single name in the method,
- bool is_singleton_and_not_returned_; // and not returned to caller,
- bool is_singleton_and_not_deopt_visible_; // and not used as an environment local of HDeoptimize.
+ // Can only be referred to by a single name in the method.
+ bool is_singleton_;
+ // Is singleton and not returned to caller.
+ bool is_singleton_and_not_returned_;
+ // Is singleton and not used as an environment local of HDeoptimize.
+ bool is_singleton_and_not_deopt_visible_;
+ // Some heap locations whose reference is `reference_` may alias through
+ // array indices, e.g. arr[i] and arr[j] may be the same location.
+ bool has_index_aliasing_;
DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
};
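A two-line example of the code shape SetHasIndexAliasing() guards against, sketched in C++; with non-constant indices the two accesses may or may not hit the same element:

    int StoreThenLoad(int* arr, int i, int j) {
      arr[i] = 1;      // cannot be removed as dead: if i == j, the next store
      arr[j] = 2;      // overwrites it, and otherwise the load below needs it
      return arr[i];   // may observe 1 or 2 depending on whether i == j
    }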
@@ -321,6 +338,12 @@ class HeapLocationCollector : public HGraphVisitor {
// Different constant indices do not alias.
return false;
}
+ ReferenceInfo* ref_info = loc1->GetReferenceInfo();
+ if (ref_info->IsSingleton()) {
+ // This is guaranteed by the CanReferencesAlias() test above.
+ DCHECK_EQ(ref_info, loc2->GetReferenceInfo());
+ ref_info->SetHasIndexAliasing(true);
+ }
}
return true;
}
@@ -497,7 +520,8 @@ class LSEVisitor : public HGraphVisitor {
removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -534,20 +558,24 @@ class LSEVisitor : public HGraphVisitor {
}
// At this point, stores in possibly_removed_stores_ can be safely removed.
- for (size_t i = 0, e = possibly_removed_stores_.size(); i < e; i++) {
- HInstruction* store = possibly_removed_stores_[i];
+ for (HInstruction* store : possibly_removed_stores_) {
DCHECK(store->IsInstanceFieldSet() || store->IsStaticFieldSet() || store->IsArraySet());
store->GetBlock()->RemoveInstruction(store);
}
// Eliminate allocations that are not used.
- for (size_t i = 0, e = singleton_new_instances_.size(); i < e; i++) {
- HInstruction* new_instance = singleton_new_instances_[i];
+ for (HInstruction* new_instance : singleton_new_instances_) {
if (!new_instance->HasNonEnvironmentUses()) {
new_instance->RemoveEnvironmentUsers();
new_instance->GetBlock()->RemoveInstruction(new_instance);
}
}
+ for (HInstruction* new_array : singleton_new_arrays_) {
+ if (!new_array->HasNonEnvironmentUses()) {
+ new_array->RemoveEnvironmentUsers();
+ new_array->GetBlock()->RemoveInstruction(new_array);
+ }
+ }
}
private:
@@ -558,7 +586,7 @@ class LSEVisitor : public HGraphVisitor {
void KeepIfIsStore(HInstruction* heap_value) {
if (heap_value == kDefaultHeapValue ||
heap_value == kUnknownHeapValue ||
- !heap_value->IsInstanceFieldSet()) {
+ !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -734,13 +762,16 @@ class LSEVisitor : public HGraphVisitor {
heap_values[idx] = constant;
return;
}
- if (heap_value != kUnknownHeapValue && heap_value->IsInstanceFieldSet()) {
- HInstruction* store = heap_value;
- // This load must be from a singleton since it's from the same field
- // that a "removed" store puts the value. That store must be to a singleton's field.
- DCHECK(ref_info->IsSingleton());
- // Get the real heap value of the store.
- heap_value = store->InputAt(1);
+ if (heap_value != kUnknownHeapValue) {
+ if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
+ HInstruction* store = heap_value;
+ // This load must be from a singleton since it reads the same
+ // field/element into which a "removed" store put the value. That
+ // store must be to a singleton's field/element.
+ DCHECK(ref_info->IsSingleton());
+ // Get the real heap value of the store.
+ heap_value = heap_value->IsInstanceFieldSet() ? store->InputAt(1) : store->InputAt(2);
+ }
}
if (heap_value == kUnknownHeapValue) {
// Load isn't eliminated. Put the load as the value into the HeapLocation.
@@ -796,19 +827,19 @@ class LSEVisitor : public HGraphVisitor {
if (Equal(heap_value, value)) {
// Store into the heap location with the same value.
same_value = true;
- } else if (index != nullptr) {
- // For array element, don't eliminate stores since it can be easily aliased
- // with non-constant index.
+ } else if (index != nullptr && ref_info->HasIndexAliasing()) {
+ // For array elements, don't eliminate the store if the index may be
+ // aliased.
} else if (ref_info->IsSingletonAndRemovable()) {
- // Store into a field of a singleton that's not returned. The value cannot be
- // killed due to aliasing/invocation. It can be redundant since future loads can
- // directly get the value set by this instruction. The value can still be killed due to
- // merging or loop side effects. Stores whose values are killed due to merging/loop side
- // effects later will be removed from possibly_removed_stores_ when that is detected.
+ // Store into a field/element of a singleton instance/array that's not returned.
+ // The value cannot be killed due to aliasing/invocation. It can be redundant since
+ // future loads can directly get the value set by this instruction. The value can
+ // still be killed due to merging or loop side effects. Stores whose values are
+ // killed due to merging/loop side effects later will be removed from
+ // possibly_removed_stores_ when that is detected.
possibly_redundant = true;
HNewInstance* new_instance = ref_info->GetReference()->AsNewInstance();
- DCHECK(new_instance != nullptr);
- if (new_instance->IsFinalizable()) {
+ if (new_instance != nullptr && new_instance->IsFinalizable()) {
// Finalizable objects escape globally. Need to keep the store.
possibly_redundant = false;
} else {
@@ -834,7 +865,7 @@ class LSEVisitor : public HGraphVisitor {
if (!same_value) {
if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet());
+ DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
// Put the store as the heap value. If the value is loaded from heap
// by a load later, this store isn't really redundant.
heap_values[idx] = instruction;
@@ -995,6 +1026,27 @@ class LSEVisitor : public HGraphVisitor {
}
}
+ void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
+ if (ref_info == nullptr) {
+ // new_array isn't used for array accesses. No need to process it.
+ return;
+ }
+ if (ref_info->IsSingletonAndRemovable()) {
+ singleton_new_arrays_.push_back(new_array);
+ }
+ ArenaVector<HInstruction*>& heap_values =
+ heap_values_for_[new_array->GetBlock()->GetBlockId()];
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
+ HInstruction* ref = location->GetReferenceInfo()->GetReference();
+ if (ref == new_array && location->GetIndex() != nullptr) {
+ // Array elements are set to default heap values.
+ heap_values[i] = kDefaultHeapValue;
+ }
+ }
+ }
+
// Find an instruction's substitute if it should be removed.
// Return the same instruction if it should not be removed.
HInstruction* FindSubstitute(HInstruction* instruction) {
@@ -1023,6 +1075,7 @@ class LSEVisitor : public HGraphVisitor {
ArenaVector<HInstruction*> possibly_removed_stores_;
ArenaVector<HInstruction*> singleton_new_instances_;
+ ArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
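As an illustration of what the new VisitNewArray support enables, consider this C++ analogue of the Java pattern; for a singleton, non-escaping array, loads see the default value, and the stores (and eventually the allocation itself) become removable:

    int Example() {
      int* a = new int[10]();   // singleton allocation, default-initialized
      a[3] = 7;                 // removable: no later load reads index 3
      int x = a[5];             // folds to the default heap value, 0
      delete[] a;               // the allocation goes away if otherwise unused
      return x;                 // constant-folds to 0
    }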
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 26c9ab83c2..318d83bf40 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -16,6 +16,7 @@
#include "loop_optimization.h"
+#include "driver/compiler_driver.h"
#include "linear_order.h"
namespace art {
@@ -57,8 +58,10 @@ static bool IsEarlyExit(HLoopInformation* loop_info) {
//
HLoopOptimization::HLoopOptimization(HGraph* graph,
+ CompilerDriver* compiler_driver,
HInductionVarAnalysis* induction_analysis)
: HOptimization(graph, kLoopOptimizationPassName),
+ compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
top_loop_(nullptr),
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 9ddab4150c..0b798fc7a9 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -23,13 +23,17 @@
namespace art {
+class CompilerDriver;
+
/**
* Loop optimizations. Builds a loop hierarchy and applies optimizations to
* the detected nested loops, such as removal of dead induction and empty loops.
*/
class HLoopOptimization : public HOptimization {
public:
- HLoopOptimization(HGraph* graph, HInductionVarAnalysis* induction_analysis);
+ HLoopOptimization(HGraph* graph,
+ CompilerDriver* compiler_driver,
+ HInductionVarAnalysis* induction_analysis);
void Run() OVERRIDE;
@@ -76,6 +80,9 @@ class HLoopOptimization : public HOptimization {
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
void RemoveDeadInstructions(const HInstructionList& list);
+ // Compiler driver (to query ISA features).
+ const CompilerDriver* compiler_driver_;
+
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 9a6b4935b2..5b9350689e 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -31,7 +31,7 @@ class LoopOptimizationTest : public CommonCompilerTest {
allocator_(&pool_),
graph_(CreateGraph(&allocator_)),
iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, iva_)) {
+ loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8a9e61875a..c39aed2c6a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1914,6 +1914,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual bool IsControlFlow() const { return false; }
+ // Can the instruction throw?
+ // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance)
+ // could throw OOME but are still OK to remove if they are unused.
virtual bool CanThrow() const { return false; }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f72bd6a5a3..607b9433ae 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -56,6 +56,7 @@
#include "builder.h"
#include "cha_guard_optimization.h"
#include "code_generator.h"
+#include "code_sinking.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
@@ -518,9 +519,11 @@ static HOptimization* BuildOptimization(
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
return new (arena) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (arena) HLoopOptimization(graph, most_recent_induction);
+ return new (arena) HLoopOptimization(graph, driver, most_recent_induction);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
return new (arena) CHAGuardOptimization(graph);
+ } else if (opt_name == CodeSinking::kCodeSinkingPassName) {
+ return new (arena) CodeSinking(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
} else if (opt_name == arm::DexCacheArrayFixups::kDexCacheArrayFixupsArmPassName) {
return new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
@@ -770,13 +773,16 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
HConstantFolding* fold2 = new (arena) HConstantFolding(
graph, "constant_folding$after_inlining");
HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
- LICM* licm = new (arena) LICM(graph, *side_effects, stats);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
+ SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
+ graph, "side_effects$before_gvn");
+ SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
+ graph, "side_effects$before_lse");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
+ LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
- HLoopOptimization* loop = new (arena) HLoopOptimization(graph, induction);
+ BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
+ HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction);
+ LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2);
HSharpening* sharpening = new (arena) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
@@ -787,6 +793,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
graph, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
+ CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
HOptimization* optimizations1[] = {
intrinsics,
@@ -806,7 +813,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
fold2, // TODO: if we don't inline we can also skip fold2.
simplify2,
dce2,
- side_effects,
+ side_effects1,
gvn,
licm,
induction,
@@ -814,9 +821,11 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
loop,
fold3, // evaluates code generated by dynamic bce
simplify3,
+ side_effects2,
lse,
cha_guard,
dce3,
+ code_sinking,
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
// HTypeConversion from a type to the same type.
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 203b1ec7ec..ae9a8119a7 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+#include <atomic>
#include <iomanip>
#include <string>
#include <type_traits>
@@ -67,14 +68,18 @@ enum MethodCompilationStat {
kImplicitNullCheckGenerated,
kExplicitNullCheckGenerated,
kSimplifyIf,
+ kInstructionSunk,
kLastStat
};
class OptimizingCompilerStats {
public:
- OptimizingCompilerStats() {}
+ OptimizingCompilerStats() {
+ // The std::atomic<> default constructor leaves values uninitialized, so initialize them now.
+ Reset();
+ }
- void RecordStat(MethodCompilationStat stat, size_t count = 1) {
+ void RecordStat(MethodCompilationStat stat, uint32_t count = 1) {
compile_stats_[stat] += count;
}
@@ -93,7 +98,7 @@ class OptimizingCompilerStats {
<< " methods: " << std::fixed << std::setprecision(2)
<< compiled_percent << "% (" << compile_stats_[kCompiled] << ") compiled.";
- for (int i = 0; i < kLastStat; i++) {
+ for (size_t i = 0; i < kLastStat; i++) {
if (compile_stats_[i] != 0) {
LOG(INFO) << PrintMethodCompilationStat(static_cast<MethodCompilationStat>(i)) << ": "
<< compile_stats_[i];
@@ -102,6 +107,21 @@ class OptimizingCompilerStats {
}
}
+ void AddTo(OptimizingCompilerStats* other_stats) {
+ for (size_t i = 0; i != kLastStat; ++i) {
+ uint32_t count = compile_stats_[i];
+ if (count != 0) {
+ other_stats->RecordStat(static_cast<MethodCompilationStat>(i), count);
+ }
+ }
+ }
+
+ void Reset() {
+ for (size_t i = 0; i != kLastStat; ++i) {
+ compile_stats_[i] = 0u;
+ }
+ }
+
private:
std::string PrintMethodCompilationStat(MethodCompilationStat stat) const {
std::string name;
@@ -147,6 +167,7 @@ class OptimizingCompilerStats {
case kImplicitNullCheckGenerated: name = "ImplicitNullCheckGenerated"; break;
case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
case kSimplifyIf: name = "SimplifyIf"; break;
+ case kInstructionSunk: name = "InstructionSunk"; break;
case kLastStat:
LOG(FATAL) << "invalid stat "
@@ -156,7 +177,7 @@ class OptimizingCompilerStats {
return "OptStat#" + name;
}
- AtomicInteger compile_stats_[kLastStat];
+ std::atomic<uint32_t> compile_stats_[kLastStat];
DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
};
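Here is a minimal sketch of the accumulate-and-reset pattern that AddTo() and Reset() support, e.g. folding per-compilation stats into a process-wide aggregate; the names and array size are illustrative, not the ART types:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kNumStats = 4;

    struct Stats {
      std::atomic<uint32_t> counts[kNumStats];
      Stats() { Reset(); }  // std::atomic<> default-init leaves values indeterminate
      void Record(size_t stat, uint32_t n = 1) { counts[stat] += n; }
      void Reset() {
        for (auto& c : counts) c = 0u;
      }
      void AddTo(Stats* other) {
        for (size_t i = 0; i != kNumStats; ++i) {
          uint32_t n = counts[i];
          if (n != 0) other->Record(i, n);  // skip zero counters, as above
        }
      }
    };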
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index bac6088bf7..fea47e66d9 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -25,8 +25,8 @@ namespace art {
class SideEffectsAnalysis : public HOptimization {
public:
- explicit SideEffectsAnalysis(HGraph* graph)
- : HOptimization(graph, kSideEffectsAnalysisPassName),
+ explicit SideEffectsAnalysis(HGraph* graph, const char* pass_name = kSideEffectsAnalysisPassName)
+ : HOptimization(graph, pass_name),
graph_(graph),
block_effects_(graph->GetBlocks().size(),
graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
@@ -41,7 +41,7 @@ class SideEffectsAnalysis : public HOptimization {
bool HasRun() const { return has_run_; }
- static constexpr const char* kSideEffectsAnalysisPassName = "SideEffects";
+ static constexpr const char* kSideEffectsAnalysisPassName = "side_effects";
private:
void UpdateLoopEffects(HLoopInformation* info, SideEffects effects);
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 322f6c4d70..e81e767575 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -135,6 +135,16 @@ class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
// jumping within 2KB range. For B(cond, label), because the supported branch range is 256
// bytes, we use the far_target hint to try to use the 16-bit T1 encoding for short-range jumps.
void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);
+
+ // Use a literal to generate the double constant if it doesn't fit the VMOV immediate encoding.
+ void Vmov(vixl32::DRegister rd, double imm) {
+ if (vixl::VFP::IsImmFP64(imm)) {
+ MacroAssembler::Vmov(rd, imm);
+ } else {
+ MacroAssembler::Vldr(rd, imm);
+ }
+ }
+ using MacroAssembler::Vmov;
};
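The encodability test the new Vmov() delegates to vixl::VFP::IsImmFP64 can be characterized directly: VFP immediates are exactly the values ±(16+m)/16 × 2^e with m in [0, 15] and e in [-3, 4]. A brute-force C++ sketch of that predicate, stated as an assumption about the encoding rather than the VIXL implementation:

    #include <cmath>

    bool FitsVmovImmediateFP64(double imm) {
      for (int sign = 0; sign <= 1; ++sign) {
        for (int m = 0; m <= 15; ++m) {
          for (int e = -3; e <= 4; ++e) {
            double candidate =
                (sign ? -1.0 : 1.0) * (16 + m) / 16.0 * std::ldexp(1.0, e);
            if (imm == candidate) return true;  // exact: candidates are representable
          }
        }
      }
      return false;  // e.g. 0.5 and -31.0 fit; 0.0, 0.1 and 1e10 fall back to Vldr
    }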
class ArmVIXLAssembler FINAL : public Assembler {
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 5e83e825ed..2e2231b07d 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -3475,8 +3475,8 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberO
CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences && unpoison_reference) {
- Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister());
+ if (unpoison_reference) {
+ MaybeUnpoisonHeapReference(dest.AsCoreRegister());
}
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 2fca185ec3..1a5a23d10b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -501,8 +501,10 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
bool is_float = false);
private:
+ // This will be used as an argument for loads/stores
+ // when there is no need for implicit null checks.
struct NoImplicitNullChecker {
- void operator()() {}
+ void operator()() const {}
};
public:
@@ -727,6 +729,38 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void Pop(Register rd);
void PopAndReturn(Register rd, Register rt);
+ //
+ // Heap poisoning.
+ //
+
+ // Poison a heap reference contained in `src` and store it in `dst`.
+ void PoisonHeapReference(Register dst, Register src) {
+ // dst = -src.
+ Subu(dst, ZERO, src);
+ }
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(Register reg) {
+ // reg = -reg.
+ PoisonHeapReference(reg, reg);
+ }
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(Register reg) {
+ // reg = -reg.
+ Subu(reg, ZERO, reg);
+ }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+ }
+
void Bind(Label* label) OVERRIDE {
Bind(down_cast<MipsLabel*>(label));
}
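The helpers above encode a poisoned reference as the two's-complement negation of the real one, so the transform is an involution and null is a fixed point. A quick check of both properties:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t ref = 0x12345678u;
      uint32_t poisoned = 0u - ref;          // Subu(dst, ZERO, src)
      uint32_t unpoisoned = 0u - poisoned;   // the same instruction undoes it
      assert(unpoisoned == ref);
      assert((0u - 0u) == 0u);               // null references stay null
      return 0;
    }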
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 998f2c709b..39eb5893d8 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -488,6 +488,11 @@ void Mips64Assembler::Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0xf, rs, rt, imm16);
}
+void Mips64Assembler::Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ EmitI(0x1d, rs, rt, imm16);
+}
+
void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
}
@@ -2015,80 +2020,18 @@ void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) {
Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
-void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
+void Mips64Assembler::LoadFromOffset(LoadOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kLoadSignedByte:
- Lb(reg, base, offset);
- break;
- case kLoadUnsignedByte:
- Lbu(reg, base, offset);
- break;
- case kLoadSignedHalfword:
- Lh(reg, base, offset);
- break;
- case kLoadUnsignedHalfword:
- Lhu(reg, base, offset);
- break;
- case kLoadWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lw(reg, base, offset);
- break;
- case kLoadUnsignedWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwu(reg, base, offset);
- break;
- case kLoadDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwu(reg, base, offset);
- Lwu(TMP2, base, offset + kMips64WordSize);
- Dinsu(reg, TMP2, 32, 32);
- } else {
- Ld(reg, base, offset);
- }
- break;
- }
+ LoadFromOffset<>(type, reg, base, offset);
}
-void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
+void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kLoadWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwc1(reg, base, offset);
- break;
- case kLoadDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Lwc1(reg, base, offset);
- Lw(TMP2, base, offset + kMips64WordSize);
- Mthc1(TMP2, reg);
- } else {
- Ldc1(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ LoadFpuFromOffset<>(type, reg, base, offset);
}
void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
@@ -2118,72 +2061,18 @@ void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register,
}
}
-void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
+void Mips64Assembler::StoreToOffset(StoreOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kStoreByte:
- Sb(reg, base, offset);
- break;
- case kStoreHalfword:
- Sh(reg, base, offset);
- break;
- case kStoreWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Sw(reg, base, offset);
- break;
- case kStoreDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Sw(reg, base, offset);
- Dsrl32(TMP2, reg, 0);
- Sw(TMP2, base, offset + kMips64WordSize);
- } else {
- Sd(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ StoreToOffset<>(type, reg, base, offset);
}
-void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
+void Mips64Assembler::StoreFpuToOffset(StoreOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
int32_t offset) {
- if (!IsInt<16>(offset) ||
- (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
- LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
- Daddu(AT, AT, base);
- base = AT;
- offset &= (kMips64DoublewordSize - 1);
- }
-
- switch (type) {
- case kStoreWord:
- CHECK_ALIGNED(offset, kMips64WordSize);
- Swc1(reg, base, offset);
- break;
- case kStoreDoubleword:
- if (!IsAligned<kMips64DoublewordSize>(offset)) {
- CHECK_ALIGNED(offset, kMips64WordSize);
- Mfhc1(TMP2, reg);
- Swc1(reg, base, offset);
- Sw(TMP2, base, offset + kMips64WordSize);
- } else {
- Sdc1(reg, base, offset);
- }
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
+ StoreFpuToOffset<>(type, reg, base, offset);
}
static dwarf::Reg DWARFReg(GpuRegister reg) {
@@ -2367,12 +2256,8 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, Membe
CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
- if (kPoisonHeapReferences && unpoison_reference) {
- // TODO: review
- // Negate the 32-bit ref
- Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
- // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64
- Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 32);
+ if (unpoison_reference) {
+ MaybeUnpoisonHeapReference(dest.AsGpuRegister());
}
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index a0a1db634d..8bbe862d19 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -512,6 +512,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
void Sync(uint32_t stype);
@@ -654,6 +655,44 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value);
void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
+ //
+ // Heap poisoning.
+ //
+
+ // Poison a heap reference contained in `src` and store it in `dst`.
+ void PoisonHeapReference(GpuRegister dst, GpuRegister src) {
+ // dst = -src.
+ // Negate the 32-bit ref.
+ Dsubu(dst, ZERO, src);
+ // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
+ Dext(dst, dst, 0, 32);
+ }
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(GpuRegister reg) {
+ // reg = -reg.
+ PoisonHeapReference(reg, reg);
+ }
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(GpuRegister reg) {
+ // reg = -reg.
+ // Negate the 32-bit ref.
+ Dsubu(reg, ZERO, reg);
+ // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
+ Dext(reg, reg, 0, 32);
+ }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(GpuRegister reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(GpuRegister reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+ }
+
void Bind(Label* label) OVERRIDE {
Bind(down_cast<Mips64Label*>(label));
}
@@ -733,6 +772,191 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Bc1nez(FpuRegister ft, Mips64Label* label);
void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
+
+ private:
+ // This will be used as an argument for loads/stores
+ // when there is no need for implicit null checks.
+ struct NoImplicitNullChecker {
+ void operator()() const {}
+ };
+
+ public:
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadFromOffset(LoadOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kLoadSignedByte:
+ Lb(reg, base, offset);
+ break;
+ case kLoadUnsignedByte:
+ Lbu(reg, base, offset);
+ break;
+ case kLoadSignedHalfword:
+ Lh(reg, base, offset);
+ break;
+ case kLoadUnsignedHalfword:
+ Lhu(reg, base, offset);
+ break;
+ case kLoadWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lw(reg, base, offset);
+ break;
+ case kLoadUnsignedWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwu(reg, base, offset);
+ break;
+ case kLoadDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwu(reg, base, offset);
+ null_checker();
+ Lwu(TMP2, base, offset + kMips64WordSize);
+ Dinsu(reg, TMP2, 32, 32);
+ } else {
+ Ld(reg, base, offset);
+ null_checker();
+ }
+ break;
+ }
+ if (type != kLoadDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void LoadFpuFromOffset(LoadOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kLoadWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwc1(reg, base, offset);
+ null_checker();
+ break;
+ case kLoadDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Lwc1(reg, base, offset);
+ null_checker();
+ Lw(TMP2, base, offset + kMips64WordSize);
+ Mthc1(TMP2, reg);
+ } else {
+ Ldc1(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreToOffset(StoreOperandType type,
+ GpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Sw(reg, base, offset);
+ null_checker();
+ Dsrl32(TMP2, reg, 0);
+ Sw(TMP2, base, offset + kMips64WordSize);
+ } else {
+ Sd(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ if (type != kStoreDoubleword) {
+ null_checker();
+ }
+ }
+
+ template <typename ImplicitNullChecker = NoImplicitNullChecker>
+ void StoreFpuToOffset(StoreOperandType type,
+ FpuRegister reg,
+ GpuRegister base,
+ int32_t offset,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
+ LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1));
+ Daddu(AT, AT, base);
+ base = AT;
+ offset &= (kMips64DoublewordSize - 1);
+ }
+
+ switch (type) {
+ case kStoreWord:
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Swc1(reg, base, offset);
+ null_checker();
+ break;
+ case kStoreDoubleword:
+ if (!IsAligned<kMips64DoublewordSize>(offset)) {
+ CHECK_ALIGNED(offset, kMips64WordSize);
+ Mfhc1(TMP2, reg);
+ Swc1(reg, base, offset);
+ null_checker();
+ Sw(TMP2, base, offset + kMips64WordSize);
+ } else {
+ Sdc1(reg, base, offset);
+ null_checker();
+ }
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ }
+
void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
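All four helpers above share the same offset canonicalization: when the requested offset does not fit the 16-bit signed immediate of a MIPS64 load/store (or a misaligned doubleword access would push offset + 4 out of range), the doubleword-aligned part of the offset is folded into AT and only a small residue reaches the memory instruction. A minimal standalone sketch of that split, assuming two's-complement arithmetic (SplitOffset is a hypothetical name, not ART code):

#include <cassert>
#include <cstdint>

constexpr int32_t kDoublewordSize = 8;  // stands in for kMips64DoublewordSize

// Split 'offset' into a doubleword-aligned adjustment (what the assembler
// materializes into AT via LoadConst32 + Daddu) and a residue in [0, 7] that
// always fits the 16-bit signed immediate of the load/store instruction.
inline void SplitOffset(int32_t offset, int32_t* base_adjust, int32_t* residue) {
  *base_adjust = offset & ~(kDoublewordSize - 1);
  *residue = offset & (kDoublewordSize - 1);
  assert(*base_adjust + *residue == offset);
}

Keeping the residue below 8 guarantees that both halves of a split doubleword access (offset and offset + 4) stay within the immediate field.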
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 74b8f068c1..96a02c46d7 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1269,6 +1269,24 @@ TEST_F(AssemblerMIPS64Test, Lui) {
DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lui, 16, "lui ${reg}, {imm}"), "lui");
}
+TEST_F(AssemblerMIPS64Test, Daui) {
+ std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
+ std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
+ reg2_registers.erase(reg2_registers.begin()); // reg2 can't be ZERO, remove it.
+ std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+ WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
+ std::ostringstream expected;
+ for (mips64::GpuRegister* reg1 : reg1_registers) {
+ for (mips64::GpuRegister* reg2 : reg2_registers) {
+ for (int64_t imm : imms) {
+ __ Daui(*reg1, *reg2, imm);
+ expected << "daui $" << *reg1 << ", $" << *reg2 << ", " << imm << "\n";
+ }
+ }
+ }
+ DriverStr(expected.str(), "daui");
+}
+
TEST_F(AssemblerMIPS64Test, Dahi) {
DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dahi, 16, "dahi ${reg}, ${reg}, {imm}"), "dahi");
}
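For context on the new test: DAUI on MIPS64 Release 6 adds a 16-bit immediate, shifted left by 16, to rs, and rs may not be the zero register, which is why the test erases ZERO from reg2_registers. A scalar model under my reading of the MIPS64R6 manual (DauiModel is a hypothetical name; the sign extension is an assumption, not confirmed by this patch):

#include <cstdint>

// Scalar model of DAUI rt, rs, imm: rt = rs + (sign_extend(imm) << 16).
inline uint64_t DauiModel(uint64_t rs, uint16_t imm) {
  int64_t addend = static_cast<int64_t>(static_cast<int16_t>(imm)) << 16;
  return rs + static_cast<uint64_t>(addend);
}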
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 6eab302dab..6a57f45e42 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -958,6 +958,14 @@ void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
}
+void X86Assembler::cvtdq2ps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5B);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 2999599fc5..e3c123ccaf 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -457,6 +457,7 @@ class X86Assembler FINAL : public Assembler {
void cvttss2si(Register dst, XmmRegister src);
void cvttsd2si(Register dst, XmmRegister src);
+ void cvtdq2ps(XmmRegister dst, XmmRegister src);
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index a74bea207e..110d0dcd05 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -322,6 +322,14 @@ TEST_F(AssemblerX86Test, RollImm) {
DriverStr(RepeatRI(&x86::X86Assembler::roll, 1U, "roll ${imm}, %{reg}"), "rolli");
}
+TEST_F(AssemblerX86Test, Cvtdq2ps) {
+ DriverStr(RepeatFF(&x86::X86Assembler::cvtdq2ps, "cvtdq2ps %{reg2}, %{reg1}"), "cvtdq2ps");
+}
+
+TEST_F(AssemblerX86Test, Cvtdq2pd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::cvtdq2pd, "cvtdq2pd %{reg2}, %{reg1}"), "cvtdq2pd");
+}
+
TEST_F(AssemblerX86Test, ComissAddr) {
GetAssembler()->comiss(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
const char* expected = "comiss 0(%EAX), %xmm0\n";
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 458204aca9..688fdcc37d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1153,6 +1153,15 @@ void X86_64Assembler::cvtsd2ss(XmmRegister dst, const Address& src) {
}
+void X86_64Assembler::cvtdq2ps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5B);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 0dc11d840b..480e7116eb 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -486,6 +486,7 @@ class X86_64Assembler FINAL : public Assembler {
void cvttsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvttsd2si(CpuRegister dst, XmmRegister src, bool is64bit);
+ void cvtdq2ps(XmmRegister dst, XmmRegister src);
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index fe9449720f..ba011c968e 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1205,6 +1205,10 @@ TEST_F(AssemblerX86_64Test, Cvtsd2ss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtsd2ss, "cvtsd2ss %{reg2}, %{reg1}"), "cvtsd2ss");
}
+TEST_F(AssemblerX86_64Test, Cvtdq2ps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtdq2ps, "cvtdq2ps %{reg2}, %{reg1}"), "cvtdq2ps");
+}
+
TEST_F(AssemblerX86_64Test, Cvtdq2pd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtdq2pd, "cvtdq2pd %{reg2}, %{reg1}"), "cvtdq2pd");
}
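The new cvtdq2ps emitters on x86 and x86-64 above encode the unprefixed 0F 5B form, which converts four packed signed 32-bit integers to four single-precision floats. A scalar model of the conversion semantics (Cvtdq2psModel is an illustrative name, not assembler API):

#include <cstdint>

// Scalar model of CVTDQ2PS: four packed signed 32-bit integers to four floats.
// Magnitudes above 2^24 may round, as in the hardware instruction.
inline void Cvtdq2psModel(const int32_t src[4], float dst[4]) {
  for (int i = 0; i < 4; ++i) {
    dst[i] = static_cast<float>(src[i]);
  }
}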
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index be756286fc..f535557e8c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1426,25 +1426,15 @@ class Dex2Oat FINAL {
if (profile_compilation_info_ != nullptr && IsAppImage()) {
Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
- std::set<DexCacheResolvedClasses> resolved_classes(
- profile_compilation_info_->GetResolvedClasses());
-
// Filter out class path classes since we don't want to include these in the image.
std::unordered_set<std::string> dex_files_locations;
for (const DexFile* dex_file : dex_files_) {
dex_files_locations.insert(dex_file->GetLocation());
}
- for (auto it = resolved_classes.begin(); it != resolved_classes.end(); ) {
- if (dex_files_locations.find(it->GetDexLocation()) == dex_files_locations.end()) {
- VLOG(compiler) << "Removed profile samples for non-app dex file " << it->GetDexLocation();
- it = resolved_classes.erase(it);
- } else {
- ++it;
- }
- }
-
+ std::set<DexCacheResolvedClasses> resolved_classes(
+ profile_compilation_info_->GetResolvedClasses(dex_files_locations));
image_classes_.reset(new std::unordered_set<std::string>(
- runtime->GetClassLinker()->GetClassDescriptorsForProfileKeys(resolved_classes)));
+ runtime->GetClassLinker()->GetClassDescriptorsForResolvedClasses(resolved_classes)));
VLOG(compiler) << "Loaded " << image_classes_->size()
<< " image class descriptors from profile";
if (VLOG_IS_ON(compiler)) {
@@ -2821,6 +2811,9 @@ static int CompileImage(Dex2Oat& dex2oat) {
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
+ if (!dex2oat.FlushCloseOutputFiles()) {
+ return EXIT_FAILURE;
+ }
dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
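The dex2oat change above replaces the hand-rolled erase loop with a filtering GetResolvedClasses(dex_files_locations) overload. A minimal sketch of the equivalent filtering, with Entry standing in for DexCacheResolvedClasses (all names hypothetical):

#include <set>
#include <string>
#include <unordered_set>

// Keep only profile entries whose dex location belongs to the app's own
// dex files, dropping class path entries that must not land in the image.
struct Entry {
  std::string dex_location;
  bool operator<(const Entry& o) const { return dex_location < o.dex_location; }
};

std::set<Entry> FilterToLocations(const std::set<Entry>& classes,
                                  const std::unordered_set<std::string>& locations) {
  std::set<Entry> out;
  for (const Entry& e : classes) {
    if (locations.count(e.dex_location) != 0) {
      out.insert(e);
    }
  }
  return out;
}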
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2c0b125fb7..e7277bceae 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -37,6 +37,8 @@
namespace art {
+using android::base::StringPrintf;
+
class Dex2oatTest : public Dex2oatEnvironmentTest {
public:
virtual void TearDown() OVERRIDE {
@@ -52,10 +54,19 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
const std::string& odex_location,
CompilerFilter::Filter filter,
const std::vector<std::string>& extra_args = {},
- bool expect_success = true) {
+ bool expect_success = true,
+ bool use_fd = false) {
+ std::unique_ptr<File> oat_file;
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + odex_location);
+ if (use_fd) {
+ oat_file.reset(OS::CreateEmptyFile(odex_location.c_str()));
+ CHECK(oat_file != nullptr) << odex_location;
+ args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+ args.push_back("--oat-location=" + odex_location);
+ } else {
+ args.push_back("--oat-file=" + odex_location);
+ }
args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
args.push_back("--runtime-arg");
args.push_back("-Xnorelocate");
@@ -64,6 +75,9 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
std::string error_msg;
bool success = Dex2Oat(args, &error_msg);
+ if (oat_file != nullptr) {
+ ASSERT_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file";
+ }
if (expect_success) {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
@@ -570,40 +584,150 @@ class Dex2oatLayoutTest : public Dex2oatTest {
// Emits a profile with a single dex file with the given location and num_classes class indices starting at 1.
void GenerateProfile(const std::string& test_profile,
const std::string& dex_location,
+ size_t num_classes,
uint32_t checksum) {
int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
CHECK_GE(profile_test_fd, 0);
ProfileCompilationInfo info;
std::string profile_key = ProfileCompilationInfo::GetProfileDexFileKey(dex_location);
- info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1));
+ for (size_t i = 0; i < num_classes; ++i) {
+ info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1 + i));
+ }
bool result = info.Save(profile_test_fd);
close(profile_test_fd);
ASSERT_TRUE(result);
}
- void RunTest() {
- std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
- std::string profile_location = GetScratchDir() + "/primary.prof";
- std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
-
- Copy(GetDexSrc2(), dex_location);
+ void CompileProfileOdex(const std::string& dex_location,
+ const std::string& odex_location,
+ const std::string& app_image_file_name,
+ bool use_fd,
+ size_t num_profile_classes,
+ const std::vector<std::string>& extra_args = {}) {
+ const std::string profile_location = GetScratchDir() + "/primary.prof";
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
- GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
+ GenerateProfile(profile_location,
+ dex_location,
+ num_profile_classes,
+ dex_file->GetLocationChecksum());
+ std::vector<std::string> copy(extra_args);
+ copy.push_back("--profile-file=" + profile_location);
+ std::unique_ptr<File> app_image_file;
+ if (!app_image_file_name.empty()) {
+ if (use_fd) {
+ app_image_file.reset(OS::CreateEmptyFile(app_image_file_name.c_str()));
+ copy.push_back("--app-image-fd=" + std::to_string(app_image_file->Fd()));
+ } else {
+ copy.push_back("--app-image-file=" + app_image_file_name);
+ }
+ }
+ GenerateOdexForTest(dex_location,
+ odex_location,
+ CompilerFilter::kSpeedProfile,
+ copy,
+ /* expect_success */ true,
+ use_fd);
+ if (app_image_file != nullptr) {
+ ASSERT_EQ(app_image_file->FlushCloseOrErase(), 0) << "Could not flush and close art file";
+ }
+ }
+
+ uint64_t GetImageSize(const std::string& image_file_name) {
+ EXPECT_FALSE(image_file_name.empty());
+ std::unique_ptr<File> file(OS::OpenFileForReading(image_file_name.c_str()));
+ CHECK(file != nullptr);
+ ImageHeader image_header;
+ const bool success = file->ReadFully(&image_header, sizeof(image_header));
+ CHECK(success);
+ CHECK(image_header.IsValid());
+ ReaderMutexLock mu(Thread::Current(), *Locks::mutator_lock_);
+ return image_header.GetImageSize();
+ }
+
+ void RunTest(bool app_image) {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+ std::string app_image_file = app_image ? (GetOdexDir() + "/DexOdexNoOat.art") : "";
+ Copy(GetDexSrc2(), dex_location);
+
+ uint64_t image_file_empty_profile = 0;
+ if (app_image) {
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file,
+ /* use_fd */ false,
+ /* num_profile_classes */ 0);
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ // Don't check the result since CheckResult relies on the class being in the profile.
+ image_file_empty_profile = GetImageSize(app_image_file);
+ EXPECT_GT(image_file_empty_profile, 0u);
+ }
+
+ // Small profile.
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file,
+ /* use_fd */ false,
+ /* num_profile_classes */ 1);
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ CheckResult(dex_location, odex_location, app_image_file);
+
+ if (app_image) {
+ // Test that the profile made a difference by adding more classes.
+ const uint64_t image_file_small_profile = GetImageSize(app_image_file);
+ CHECK_LT(image_file_empty_profile, image_file_small_profile);
+ }
+ }
- const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location };
- GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile, extra_args);
+ void RunTestVDex() {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+ std::string vdex_location = GetOdexDir() + "/DexOdexNoOat.vdex";
+ std::string app_image_file_name = GetOdexDir() + "/DexOdexNoOat.art";
+ Copy(GetDexSrc2(), dex_location);
+ std::unique_ptr<File> vdex_file1(OS::CreateEmptyFile(vdex_location.c_str()));
+ CHECK(vdex_file1 != nullptr) << vdex_location;
+ ScratchFile vdex_file2;
+ {
+ std::string input_vdex = "--input-vdex-fd=-1";
+ std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd());
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file_name,
+ /* use_fd */ true,
+ /* num_profile_classes */ 1,
+ { input_vdex, output_vdex });
+ EXPECT_GT(vdex_file1->GetLength(), 0u);
+ }
+ {
+ std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
+ std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2.GetFd());
+ CompileProfileOdex(dex_location,
+ odex_location,
+ app_image_file_name,
+ /* use_fd */ true,
+ /* num_profile_classes */ 1,
+ { input_vdex, output_vdex });
+ EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
+ }
+ ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
CheckValidity();
ASSERT_TRUE(success_);
- CheckResult(dex_location, odex_location);
+ CheckResult(dex_location, odex_location, app_image_file_name);
}
- void CheckResult(const std::string& dex_location, const std::string& odex_location) {
+
+ void CheckResult(const std::string& dex_location,
+ const std::string& odex_location,
+ const std::string& app_image_file_name) {
// Host/target independent checks.
std::string error_msg;
std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
@@ -639,29 +763,47 @@ class Dex2oatLayoutTest : public Dex2oatTest {
}
EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
- }
- // Check whether the dex2oat run was really successful.
- void CheckValidity() {
- if (kIsTargetBuild) {
- CheckTargetValidity();
- } else {
- CheckHostValidity();
- }
+ if (!app_image_file_name.empty()) {
+ // Go peek at the image header to make sure it was large enough to contain the class.
+ std::unique_ptr<File> file(OS::OpenFileForReading(app_image_file_name.c_str()));
+ ImageHeader image_header;
+ bool success = file->ReadFully(&image_header, sizeof(image_header));
+ ASSERT_TRUE(success);
+ ASSERT_TRUE(image_header.IsValid());
+ EXPECT_GT(image_header.GetImageSection(ImageHeader::kSectionObjects).Size(), 0u);
}
+ }
- void CheckTargetValidity() {
- // TODO: Ignore for now.
+ // Check whether the dex2oat run was really successful.
+ void CheckValidity() {
+ if (kIsTargetBuild) {
+ CheckTargetValidity();
+ } else {
+ CheckHostValidity();
}
+ }
- // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
- void CheckHostValidity() {
- EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
- }
- };
+ void CheckTargetValidity() {
+ // TODO: Ignore for now.
+ }
+
+ // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
+ void CheckHostValidity() {
+ EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
+ }
+};
TEST_F(Dex2oatLayoutTest, TestLayout) {
- RunTest();
+ RunTest(/* app_image */ false);
+}
+
+TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
+ RunTest(/* app_image */ true);
+}
+
+TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
+ RunTestVDex();
}
class Dex2oatWatchdogTest : public Dex2oatTest {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 609068f41c..34983cf5fb 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -64,23 +64,18 @@ static uint32_t GetCodeItemSize(const DexFile& dex_file, const DexFile::CodeItem
uintptr_t insns_end = reinterpret_cast<uintptr_t>(&disk_code_item.insns_[insns_size]);
return insns_end - code_item_start;
} else {
- uint32_t last_handler_off = 0;
- for (uint32_t i = 0; i < tries_size; ++i) {
- // Iterate over the try items to find the last catch handler.
- const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i);
- uint16_t handler_off = disk_try_item->handler_off_;
- if (handler_off > last_handler_off) {
- last_handler_off = handler_off;
+ // Get the start of the handler data.
+ const uint8_t* handler_data = DexFile::GetCatchHandlerData(disk_code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
+ // Manually read each handler.
+ for (uint32_t i = 0; i < handlers_size; ++i) {
+ int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
+ if (uleb128_count <= 0) {
+ uleb128_count = -uleb128_count + 1;
+ }
+ for (int32_t j = 0; j < uleb128_count; ++j) {
+ DecodeUnsignedLeb128(&handler_data);
}
- }
- // Decode the final handler to see where it ends.
- const uint8_t* handler_data = DexFile::GetCatchHandlerData(disk_code_item, last_handler_off);
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t i = 0; i < uleb128_count; ++i) {
- DecodeUnsignedLeb128(&handler_data);
}
return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
}
@@ -649,11 +644,11 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
}
int32_t size = DecodeSignedLeb128(&handlers_data);
- bool has_catch_all = size < 0;
+ bool has_catch_all = size <= 0;
if (has_catch_all) {
size = -size;
}
- if (already_added == true) {
+ if (already_added) {
for (int32_t i = 0; i < size; i++) {
DecodeUnsignedLeb128(&handlers_data);
DecodeUnsignedLeb128(&handlers_data);
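The new GetCodeItemSize logic walks the entire encoded_catch_handler_list instead of chasing only the handler referenced by the last try item, so handlers that no try item references still count toward the code item's size. A standalone sketch of that walk, using simplified LEB128 readers in place of ART's DecodeUnsignedLeb128/DecodeSignedLeb128 (function names are illustrative, not ART API):

#include <cstdint>

static uint32_t ReadUleb128(const uint8_t** p) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}

static int32_t ReadSleb128(const uint8_t** p) {
  int32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result |= static_cast<int32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  if (shift < 32 && (byte & 0x40)) {
    result |= -(1 << shift);  // sign extend
  }
  return result;
}

// Returns a pointer one past the last byte of the catch handler data.
const uint8_t* SkipCatchHandlerList(const uint8_t* data) {
  uint32_t handlers_size = ReadUleb128(&data);
  for (uint32_t i = 0; i < handlers_size; ++i) {
    int32_t size = ReadSleb128(&data);           // size <= 0 => has catch-all
    int32_t pairs = (size <= 0) ? -size : size;  // (type_idx, addr) pairs
    for (int32_t j = 0; j < 2 * pairs; ++j) {
      ReadUleb128(&data);
    }
    if (size <= 0) {
      ReadUleb128(&data);  // catch_all_addr
    }
  }
  return data;
}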
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 1add6bfede..4aa8b82ec7 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -46,6 +46,8 @@ namespace art {
using android::base::StringPrintf;
+static constexpr uint32_t kDexCodeItemAlignment = 4;
+
/*
* Flags for use with createAccessFlagStr().
*/
@@ -1489,7 +1491,7 @@ void DexLayout::DumpDexFile() {
}
}
-std::vector<dex_ir::ClassDef*> DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
+std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
std::vector<dex_ir::ClassDef*> new_class_def_order;
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
@@ -1505,46 +1507,101 @@ std::vector<dex_ir::ClassDef*> DexLayout::LayoutClassDefsAndClassData(const DexF
}
uint32_t class_defs_offset = header_->GetCollections().ClassDefsOffset();
uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
+ std::unordered_set<dex_ir::ClassData*> visited_class_data;
+ std::vector<dex_ir::ClassData*> new_class_data_order;
for (uint32_t i = 0; i < new_class_def_order.size(); ++i) {
dex_ir::ClassDef* class_def = new_class_def_order[i];
class_def->SetIndex(i);
class_def->SetOffset(class_defs_offset);
class_defs_offset += dex_ir::ClassDef::ItemSize();
- if (class_def->GetClassData() != nullptr) {
- class_def->GetClassData()->SetOffset(class_data_offset);
- class_data_offset += class_def->GetClassData()->GetSize();
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
+ class_data->SetOffset(class_data_offset);
+ class_data_offset += class_data->GetSize();
+ visited_class_data.insert(class_data);
+ new_class_data_order.push_back(class_data);
}
}
- return new_class_def_order;
+ return new_class_data_order;
}
-int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassDef*> new_class_def_order) {
+// Orders code items according to the specified class data ordering.
+// NOTE: If the section following the code items is only byte aligned (not 4-byte aligned), the
+// last code item is left in place to preserve that alignment. Layout needs an overhaul to handle
+// movement of the other sections.
+int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order) {
+ // Do not move code items if the class data section precedes the code item section.
+ // ULEB128 encoding is variable length, so moving code items would change the size of the class
+ // data entries that reference them, making the new code item offsets hard to determine.
+ // TODO: We should swap the order of these sections in the future to avoid this issue.
+ uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset();
+ uint32_t code_item_offset = header_->GetCollections().CodeItemsOffset();
+ if (class_data_offset < code_item_offset) {
+ return 0;
+ }
+
+ // Find the last code item so we can leave it in place if the next section is not 4-byte aligned.
+ std::unordered_set<dex_ir::CodeItem*> visited_code_items;
+ bool is_code_item_aligned = IsNextSectionCodeItemAligned(code_item_offset);
+ if (!is_code_item_aligned) {
+ dex_ir::CodeItem* last_code_item = nullptr;
+ for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
+ std::unique_ptr<dex_ir::CodeItem>& code_item = code_item_pair.second;
+ if (last_code_item == nullptr || last_code_item->GetOffset() < code_item->GetOffset()) {
+ last_code_item = code_item.get();
+ }
+ }
+ // Preserve the last code item by marking it already visited.
+ visited_code_items.insert(last_code_item);
+ }
+
int32_t diff = 0;
- uint32_t offset = header_->GetCollections().CodeItemsOffset();
- for (dex_ir::ClassDef* class_def : new_class_def_order) {
- dex_ir::ClassData* class_data = class_def->GetClassData();
- if (class_data != nullptr) {
- class_data->SetOffset(class_data->GetOffset() + diff);
- for (auto& method : *class_data->DirectMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr) {
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), 4);
- }
+ for (dex_ir::ClassData* class_data : new_class_data_order) {
+ class_data->SetOffset(class_data->GetOffset() + diff);
+ for (auto& method : *class_data->DirectMethods()) {
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
+ visited_code_items.insert(code_item);
+ diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(code_item_offset);
+ code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
}
- for (auto& method : *class_data->VirtualMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr) {
- diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(offset);
- offset += RoundUp(code_item->GetSize(), 4);
- }
+ }
+ for (auto& method : *class_data->VirtualMethods()) {
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
+ visited_code_items.insert(code_item);
+ diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(code_item_offset);
+ code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
}
}
}
+ // Adjust diff to be 4-byte aligned.
+ return RoundUp(diff, kDexCodeItemAlignment);
+}
- return diff;
+bool DexLayout::IsNextSectionCodeItemAligned(uint32_t offset) {
+ dex_ir::Collections& collections = header_->GetCollections();
+ std::set<uint32_t> section_offsets;
+ section_offsets.insert(collections.MapListOffset());
+ section_offsets.insert(collections.TypeListsOffset());
+ section_offsets.insert(collections.AnnotationSetRefListsOffset());
+ section_offsets.insert(collections.AnnotationSetItemsOffset());
+ section_offsets.insert(collections.ClassDatasOffset());
+ section_offsets.insert(collections.CodeItemsOffset());
+ section_offsets.insert(collections.StringDatasOffset());
+ section_offsets.insert(collections.DebugInfoItemsOffset());
+ section_offsets.insert(collections.AnnotationItemsOffset());
+ section_offsets.insert(collections.EncodedArrayItemsOffset());
+ section_offsets.insert(collections.AnnotationsDirectoryItemsOffset());
+
+ auto found = section_offsets.find(offset);
+ if (found != section_offsets.end()) {
+ found++;
+ if (found != section_offsets.end()) {
+ return *found % kDexCodeItemAlignment == 0;
+ }
+ }
+ return false;
}
// Adjust offsets of every item in the specified section by diff bytes.
@@ -1626,10 +1683,8 @@ void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
- std::vector<dex_ir::ClassDef*> new_class_def_order = LayoutClassDefsAndClassData(dex_file);
- int32_t diff = LayoutCodeItems(new_class_def_order);
- // Adjust diff to be 4-byte aligned.
- diff = RoundUp(diff, 4);
+ std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
+ int32_t diff = LayoutCodeItems(new_class_data_order);
// Move sections after ClassData by diff bytes.
FixupSections(header_->GetCollections().ClassDatasOffset(), diff);
// Update file size.
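The diff bookkeeping in LayoutCodeItems exists because a code item's offset is stored as a ULEB128 code_off field inside class_data_item: relocating a code item can change how many bytes its offset needs to encode, which in turn shifts every section after class data. A sketch of the size function driving that accounting, assumed equivalent to ART's UnsignedLeb128Size (the name here is illustrative):

#include <cstddef>
#include <cstdint>

// ULEB128 uses one byte per 7 payload bits, so an offset that crosses a
// 7-bit boundary when a code item moves changes the size of the class_data
// field that stores it; LayoutCodeItems accumulates that delta in 'diff'.
inline size_t UnsignedLeb128SizeSketch(uint32_t value) {
  size_t size = 1;
  while (value >= 0x80) {
    value >>= 7;
    ++size;
  }
  return size;
}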
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index ac1a4a6efb..391870644a 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -105,8 +105,9 @@ class DexLayout {
void DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedValue* init);
void DumpDexFile();
- std::vector<dex_ir::ClassDef*> LayoutClassDefsAndClassData(const DexFile* dex_file);
- int32_t LayoutCodeItems(std::vector<dex_ir::ClassDef*> new_class_def_order);
+ std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
+ int32_t LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order);
+ bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
void FixupSections(uint32_t offset, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 9881e283df..bd6548e65b 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -41,7 +41,7 @@ static const char kDexFileLayoutInputDex[] =
"AAAAdQEAAAAQAAABAAAAjAEAAA==";
static const char kDexFileLayoutInputProfile[] =
- "cHJvADAwMwABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
+ "cHJvADAwNAABCwABAAAAAAD1KW3+Y2xhc3Nlcy5kZXgBAA==";
static const char kDexFileLayoutExpectedOutputDex[] =
"ZGV4CjAzNQD1KW3+B8NAB0f2A/ZVIBJ0aHrGIqcpVTAUAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAH"
@@ -55,6 +55,69 @@ static const char kDexFileLayoutExpectedOutputDex[] =
"qAAAAAYAAAACAAAAwAAAAAEgAAACAAAAAAEAAAIgAAAHAAAAMAEAAAMgAAACAAAAaQEAAAAgAAAC"
"AAAAdQEAAAAQAAABAAAAjAEAAA==";
+// Dex file with catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferencedCatchHandlerInputDex[] =
+ "ZGV4CjAzNQD+exd52Y0f9nY5x5GmInXq5nXrO6Kl2RV4AwAAcAAAAHhWNBIAAAAAAAAAANgCAAAS"
+ "AAAAcAAAAAgAAAC4AAAAAwAAANgAAAABAAAA/AAAAAQAAAAEAQAAAQAAACQBAAA0AgAARAEAANYB"
+ "AADeAQAA5gEAAO4BAAAAAgAADwIAACYCAAA9AgAAUQIAAGUCAAB5AgAAfwIAAIUCAACIAgAAjAIA"
+ "AKECAACnAgAArAIAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAwAAAAOAAAADAAAAAYAAAAAAAAA"
+ "DQAAAAYAAADIAQAADQAAAAYAAADQAQAABQABABAAAAAAAAAAAAAAAAAAAgAPAAAAAQABABEAAAAD"
+ "AAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAMgCAAAAAAAAAQABAAEAAAC1AgAABAAAAHAQ"
+ "AwAAAA4AAwABAAIAAgC6AgAAIQAAAGIAAAAaAQoAbiACABAAYgAAABoBCwBuIAIAEAAOAA0AYgAA"
+ "ABoBAQBuIAIAEAAo8A0AYgAAABoBAgBuIAIAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIBAg8BAhgA"
+ "AQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3QuamF2YQAN"
+ "TEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4Y2VwdGlv"
+ "bjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5"
+ "c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AARtYWluAANvdXQA"
+ "B3ByaW50bG4AAQAHDgAEAQAHDn17AncdHoseAAAAAgAAgYAExAIBCdwCAAANAAAAAAAAAAEAAAAA"
+ "AAAAAQAAABIAAABwAAAAAgAAAAgAAAC4AAAAAwAAAAMAAADYAAAABAAAAAEAAAD8AAAABQAAAAQA"
+ "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
+ "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+
+// Dex file with 0-size (catch all only) catch handler unreferenced by try blocks.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferenced0SizeCatchHandlerInputDex[] =
+ "ZGV4CjAzNQCEbEEvMstSNpQpjPdfMEfUBS48cis2QRJoAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAR"
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAQAAAD8AAAAAQAAABwBAAAsAgAAPAEAAOoB"
+ "AADyAQAABAIAABMCAAAqAgAAPgIAAFICAABmAgAAaQIAAG0CAACCAgAAhgIAAIoCAACQAgAAlQIA"
+ "AJ4CAACiAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACQAAAAcAAAAFAAAAAAAAAAgAAAAFAAAA"
+ "3AEAAAgAAAAFAAAA5AEAAAQAAQANAAAAAAAAAAAAAAAAAAIADAAAAAEAAQAOAAAAAgAAAAAAAAAA"
+ "AAAAAQAAAAIAAAAAAAAAAQAAAAAAAAC5AgAAAAAAAAEAAQABAAAApgIAAAQAAABwEAMAAAAOAAQA"
+ "AQACAAIAqwIAAC8AAABiAAAAGgEPAG4gAgAQAGIAAAAaAQoAbiACABAAYgAAABoBEABuIAIAEABi"
+ "AAAAGgELAG4gAgAQAA4ADQBiAQAAGgIKAG4gAgAhACcADQBiAQAAGgILAG4gAgAhACcAAAAAAAAA"
+ "BwABAA4AAAAHAAEAAgAdACYAAAABAAAAAwAAAAEAAAAGAAY8aW5pdD4AEEhhbmRsZXJUZXN0Lmph"
+ "dmEADUxIYW5kbGVyVGVzdDsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl"
+ "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwABVgACVkwAE1tMamF2"
+ "YS9sYW5nL1N0cmluZzsAAmYxAAJmMgAEbWFpbgADb3V0AAdwcmludGxuAAJ0MQACdDIAAQAHDgAE"
+ "AQAHDnl7eXkCeB2bAAAAAgAAgYAEvAIBCdQCAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAAC"
+ "AAAABwAAALQAAAADAAAAAwAAANAAAAAEAAAAAQAAAPQAAAAFAAAABAAAAPwAAAAGAAAAAQAAABwB"
+ "AAABIAAAAgAAADwBAAABEAAAAgAAANwBAAACIAAAEQAAAOoBAAADIAAAAgAAAKYCAAAAIAAAAQAA"
+ "ALkCAAAAEAAAAQAAAMgCAAA=";
+
+// Dex file with an unreferenced catch handler at end of code item.
+// Constructed by building a dex file with try/catch blocks and hex editing.
+static const char kUnreferencedEndingCatchHandlerInputDex[] =
+ "ZGV4CjAzNQCEflufI6xGTDDRmLpbfYi6ujPrDLIwvYcEBAAAcAAAAHhWNBIAAAAAAAAAAGQDAAAT"
+ "AAAAcAAAAAgAAAC8AAAAAwAAANwAAAABAAAAAAEAAAUAAAAIAQAAAQAAADABAAC0AgAAUAEAAE4C"
+ "AABWAgAAXgIAAGYCAAB4AgAAhwIAAJ4CAAC1AgAAyQIAAN0CAADxAgAA9wIAAP0CAAAAAwAABAMA"
+ "ABkDAAAcAwAAIgMAACcDAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAMAAAADgAAAAwAAAAGAAAA"
+ "AAAAAA0AAAAGAAAAQAIAAA0AAAAGAAAASAIAAAUAAQARAAAAAAAAAAAAAAAAAAAADwAAAAAAAgAQ"
+ "AAAAAQABABIAAAADAAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAFADAAAAAAAAAQABAAEA"
+ "AAAwAwAABAAAAHAQBAAAAA4AAgAAAAIAAgA1AwAAIQAAAGIAAAAaAQoAbiADABAAYgAAABoBCwBu"
+ "IAMAEAAOAA0AYgAAABoBAQBuIAMAEAAo8A0AYgAAABoBAgBuIAMAEAAo7gAAAAAAAAcAAQAHAAAA"
+ "BwABAAIBAg8BAhgAAwABAAIAAgBCAwAAIQAAAGIAAAAaAQoAbiADABAAYgAAABoBCwBuIAMAEAAO"
+ "AA0AYgAAABoBAQBuIAMAEAAo8A0AYgAAABoBAgBuIAMAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIB"
+ "Ag8BAhgAAQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3Qu"
+ "amF2YQANTEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4"
+ "Y2VwdGlvbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9s"
+ "YW5nL1N5c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AAFhAARt"
+ "YWluAANvdXQAB3ByaW50bG4AAQAHDgAEAAcOfHsCeB0eih4AEQEABw59ewJ3HR6LHgAAAAMAAIGA"
+ "BNACAQnoAgEJ1AMAAA0AAAAAAAAAAQAAAAAAAAABAAAAEwAAAHAAAAACAAAACAAAALwAAAADAAAA"
+ "AwAAANwAAAAEAAAAAQAAAAABAAAFAAAABQAAAAgBAAAGAAAAAQAAADABAAABIAAAAwAAAFABAAAB"
+ "EAAAAgAAAEACAAACIAAAEwAAAE4CAAADIAAAAwAAADADAAAAIAAAAQAAAFADAAAAEAAAAQAAAGQD"
+ "AAA=";
+
// Dex file with multiple code items that have the same debug_info_off_. Constructed by a modified
// dexlayout on XandY.
static const char kDexFileDuplicateOffset[] =
@@ -100,25 +163,45 @@ static const char kNullSetRefListElementInputDex[] =
"ASAAAAIAAACEAQAABiAAAAIAAACwAQAAARAAAAIAAADYAQAAAiAAABIAAADoAQAAAyAAAAIAAADw"
"AgAABCAAAAIAAAD8AgAAACAAAAIAAAAIAwAAABAAAAEAAAAgAwAA";
-// Dex file with catch handler unreferenced by try blocks.
-// Constructed by building a dex file with try/catch blocks and hex editing.
-static const char kUnreferencedCatchHandlerInputDex[] =
- "ZGV4CjAzNQD+exd52Y0f9nY5x5GmInXq5nXrO6Kl2RV4AwAAcAAAAHhWNBIAAAAAAAAAANgCAAAS"
- "AAAAcAAAAAgAAAC4AAAAAwAAANgAAAABAAAA/AAAAAQAAAAEAQAAAQAAACQBAAA0AgAARAEAANYB"
- "AADeAQAA5gEAAO4BAAAAAgAADwIAACYCAAA9AgAAUQIAAGUCAAB5AgAAfwIAAIUCAACIAgAAjAIA"
- "AKECAACnAgAArAIAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAwAAAAOAAAADAAAAAYAAAAAAAAA"
- "DQAAAAYAAADIAQAADQAAAAYAAADQAQAABQABABAAAAAAAAAAAAAAAAAAAgAPAAAAAQABABEAAAAD"
- "AAAAAAAAAAAAAAABAAAAAwAAAAAAAAADAAAAAAAAAMgCAAAAAAAAAQABAAEAAAC1AgAABAAAAHAQ"
- "AwAAAA4AAwABAAIAAgC6AgAAIQAAAGIAAAAaAQoAbiACABAAYgAAABoBCwBuIAIAEAAOAA0AYgAA"
- "ABoBAQBuIAIAEAAo8A0AYgAAABoBAgBuIAIAEAAo7gAAAAAAAAcAAQAHAAAABwABAAIBAg8BAhgA"
- "AQAAAAQAAAABAAAABwAGPGluaXQ+AAZDYXRjaDEABkNhdGNoMgAQSGFuZGxlclRlc3QuamF2YQAN"
- "TEhhbmRsZXJUZXN0OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABVMamF2YS9sYW5nL0V4Y2VwdGlv"
- "bjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5"
- "c3RlbTsABFRyeTEABFRyeTIAAVYAAlZMABNbTGphdmEvbGFuZy9TdHJpbmc7AARtYWluAANvdXQA"
- "B3ByaW50bG4AAQAHDgAEAQAHDn17AncdHoseAAAAAgAAgYAExAIBCdwCAAANAAAAAAAAAAEAAAAA"
- "AAAAAQAAABIAAABwAAAAAgAAAAgAAAC4AAAAAwAAAAMAAADYAAAABAAAAAEAAAD8AAAABQAAAAQA"
- "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA"
- "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA";
+// Dex file with shared empty class data item for multiple class defs.
+// Constructed by building a dex file with multiple classes and hex editing.
+static const char kMultiClassDataInputDex[] =
+ "ZGV4CjAzNQALJgF9TtnLq748xVe/+wyxETrT9lTEiW6YAQAAcAAAAHhWNBIAAAAAAAAAADQBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAAAAAAAAAAACAAAAoAAAAAAAAAAAAAAAAgAAALAAAACoAAAA8AAAAPAA"
+ "AAD4AAAAAAEAAAMBAAAIAQAADQEAACEBAAAkAQAAAgAAAAMAAAAEAAAABQAAAAEAAAAGAAAAAgAA"
+ "AAcAAAABAAAAAQYAAAMAAAAAAAAAAAAAAAAAAAAnAQAAAAAAAAIAAAABBgAAAwAAAAAAAAABAAAA"
+ "AAAAACcBAAAAAAAABkEuamF2YQAGQi5qYXZhAAFJAANMQTsAA0xCOwASTGphdmEvbGFuZy9PYmpl"
+ "Y3Q7AAFhAAFiAAAAAAABAAAAARkAAAAIAAAAAAAAAAEAAAAAAAAAAQAAAAgAAABwAAAAAgAAAAQA"
+ "AACQAAAABAAAAAIAAACgAAAABgAAAAIAAACwAAAAAiAAAAgAAADwAAAAACAAAAIAAAAnAQAAABAA"
+ "AAEAAAA0AQAA";
+
+// Dex file with code info followed by a non-4-byte-aligned section.
+// Constructed by building a dex file with code info followed by string data and hex editing.
+static const char kUnalignedCodeInfoInputDex[] =
+ "ZGV4CjAzNQDXJzXNb4iWn2SLhmLydW/8h1K9moERIw7UAQAAcAAAAHhWNBIAAAAAAAAAAEwBAAAG"
+ "AAAAcAAAAAMAAACIAAAAAQAAAJQAAAAAAAAAAAAAAAMAAACgAAAAAQAAALgAAAD8AAAA2AAAAAIB"
+ "AAAKAQAAEgEAABcBAAArAQAALgEAAAIAAAADAAAABAAAAAQAAAACAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAUAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAABAAAAAAAAADsBAAAAAAAAAQABAAEAAAAxAQAA"
+ "BAAAAHAQAgAAAA4AAQABAAAAAAA2AQAAAQAAAA4ABjxpbml0PgAGQS5qYXZhAANMQTsAEkxqYXZh"
+ "L2xhbmcvT2JqZWN0OwABVgABYQABAAcOAAMABw4AAAABAQCBgATYAQEB8AEAAAALAAAAAAAAAAEA"
+ "AAAAAAAAAQAAAAYAAABwAAAAAgAAAAMAAACIAAAAAwAAAAEAAACUAAAABQAAAAMAAACgAAAABgAA"
+ "AAEAAAC4AAAAASAAAAIAAADYAAAAAiAAAAYAAAACAQAAAyAAAAIAAAAxAQAAACAAAAEAAAA7AQAA"
+ "ABAAAAEAAABMAQAA";
+
+// Dex file with class data section preceding code items.
+// Constructed by passing dex file through dexmerger tool and hex editing.
+static const char kClassDataBeforeCodeInputDex[] =
+ "ZGV4CjAzNQCZKmCu3XXn4zvxCh5VH0gZNNobEAcsc49EAgAAcAAAAHhWNBIAAAAAAAAAAAQBAAAJ"
+ "AAAAcAAAAAQAAACUAAAAAgAAAKQAAAAAAAAAAAAAAAUAAAC8AAAAAQAAAOQAAABAAQAABAEAAPgB"
+ "AAAAAgAACAIAAAsCAAAQAgAAJAIAACcCAAAqAgAALQIAAAIAAAADAAAABAAAAAUAAAACAAAAAAAA"
+ "AAAAAAAFAAAAAwAAAAAAAAABAAEAAAAAAAEAAAAGAAAAAQAAAAcAAAABAAAACAAAAAIAAQAAAAAA"
+ "AQAAAAEAAAACAAAAAAAAAAEAAAAAAAAAjAEAAAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAkAAABw"
+ "AAAAAgAAAAQAAACUAAAAAwAAAAIAAACkAAAABQAAAAUAAAC8AAAABgAAAAEAAADkAAAAABAAAAEA"
+ "AAAEAQAAACAAAAEAAACMAQAAASAAAAQAAACkAQAAAiAAAAkAAAD4AQAAAyAAAAQAAAAwAgAAAAAB"
+ "AwCBgASkAwEBvAMBAdADAQHkAwAAAQABAAEAAAAwAgAABAAAAHAQBAAAAA4AAgABAAAAAAA1AgAA"
+ "AgAAABIQDwACAAEAAAAAADoCAAACAAAAEiAPAAIAAQAAAAAAPwIAAAIAAAASMA8ABjxpbml0PgAG"
+ "QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A"
+ "BgAHDgAJAAcOAA==";
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
@@ -257,8 +340,8 @@ class DexLayoutTest : public CommonRuntimeTest {
return true;
}
- // Runs UnreferencedCatchHandlerTest.
- bool UnreferencedCatchHandlerExec(std::string* error_msg) {
+ // Runs UnreferencedCatchHandlerTest & Unreferenced0SizeCatchHandlerTest.
+ bool UnreferencedCatchHandlerExec(std::string* error_msg, const char* filename) {
ScratchFile tmp_file;
std::string tmp_name = tmp_file.GetFilename();
size_t tmp_last_slash = tmp_name.rfind("/");
@@ -266,7 +349,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// Write inputs and expected outputs.
std::string input_dex = tmp_dir + "classes.dex";
- WriteFileBase64(kUnreferencedCatchHandlerInputDex, input_dex.c_str());
+ WriteFileBase64(filename, input_dex.c_str());
std::string output_dex = tmp_dir + "classes.dex.new";
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
@@ -314,6 +397,30 @@ TEST_F(DexLayoutTest, DexFileLayout) {
ASSERT_TRUE(DexFileLayoutExec(&error_msg)) << error_msg;
}
+TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferencedCatchHandlerInputDex)) << error_msg;
+}
+
+TEST_F(DexLayoutTest, Unreferenced0SizeCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferenced0SizeCatchHandlerInputDex)) << error_msg;
+}
+
+TEST_F(DexLayoutTest, UnreferencedEndingCatchHandler) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg,
+ kUnreferencedEndingCatchHandlerInputDex)) << error_msg;
+}
+
TEST_F(DexLayoutTest, DuplicateOffset) {
ScratchFile temp;
WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile());
@@ -351,11 +458,58 @@ TEST_F(DexLayoutTest, NullSetRefListElement) {
}
}
-TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
- // Disable test on target.
- TEST_DISABLED_FOR_TARGET();
+TEST_F(DexLayoutTest, MultiClassData) {
+ ScratchFile temp;
+ WriteBase64ToFile(kMultiClassDataInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
+}
+
+TEST_F(DexLayoutTest, UnalignedCodeInfo) {
+ ScratchFile temp;
+ WriteBase64ToFile(kUnalignedCodeInfoInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
+ std::string error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
+}
+
+TEST_F(DexLayoutTest, ClassDataBeforeCode) {
+ ScratchFile temp;
+ WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile());
+ ScratchFile temp2;
+ WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile());
+ EXPECT_EQ(temp.GetFile()->Flush(), 0);
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() };
std::string error_msg;
- ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg;
+ const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg);
+ EXPECT_TRUE(result);
+ if (!result) {
+ LOG(ERROR) << "Error " << error_msg;
+ }
}
} // namespace art
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index d395c170bf..52f3b52ee2 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,11 +16,14 @@
#include <gtest/gtest.h>
+#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "exec_utils.h"
-#include "profile_assistant.h"
#include "jit/profile_compilation_info.h"
+#include "mirror/class-inl.h"
+#include "profile_assistant.h"
+#include "scoped_thread_state_change-inl.h"
#include "utils.h"
namespace art {
@@ -95,10 +98,12 @@ class ProfileAssistantTest : public CommonRuntimeTest {
return ExecAndReturnCode(argv_str, &error);
}
- bool CreateProfile(std::string class_file_contents, const std::string& filename) {
+ bool CreateProfile(std::string profile_file_contents,
+ const std::string& filename,
+ const std::string& dex_location) {
ScratchFile class_names_file;
File* file = class_names_file.GetFile();
- EXPECT_TRUE(file->WriteFully(class_file_contents.c_str(), class_file_contents.length()));
+ EXPECT_TRUE(file->WriteFully(profile_file_contents.c_str(), profile_file_contents.length()));
EXPECT_EQ(0, file->Flush());
EXPECT_TRUE(file->ResetOffset());
std::string profman_cmd = GetProfmanCmd();
@@ -106,8 +111,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back(profman_cmd);
argv_str.push_back("--create-profile-from=" + class_names_file.GetFilename());
argv_str.push_back("--reference-profile-file=" + filename);
- argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
- argv_str.push_back("--dex-location=classes.dex");
+ argv_str.push_back("--apk=" + dex_location);
+ argv_str.push_back("--dex-location=" + dex_location);
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
return true;
@@ -121,7 +126,7 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back("--dump-classes");
argv_str.push_back("--profile-file=" + filename);
argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
- argv_str.push_back("--dex-location=classes.dex");
+ argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(class_names_file)));
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
@@ -137,11 +142,74 @@ class ProfileAssistantTest : public CommonRuntimeTest {
bool CreateAndDump(const std::string& input_file_contents, std::string* output_file_contents) {
ScratchFile profile_file;
- EXPECT_TRUE(CreateProfile(input_file_contents, profile_file.GetFilename()));
+ EXPECT_TRUE(CreateProfile(input_file_contents,
+ profile_file.GetFilename(),
+ GetLibCoreDexFileNames()[0]));
profile_file.GetFile()->ResetOffset();
EXPECT_TRUE(DumpClasses(profile_file.GetFilename(), output_file_contents));
return true;
}
+
+ mirror::Class* GetClass(jobject class_loader, const std::string& clazz) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> h_loader(
+ hs.NewHandle(self->DecodeJObject(class_loader)->AsClassLoader()));
+ return class_linker->FindClass(self, clazz.c_str(), h_loader);
+ }
+
+ ArtMethod* GetVirtualMethod(jobject class_loader,
+ const std::string& clazz,
+ const std::string& name) {
+ mirror::Class* klass = GetClass(class_loader, clazz);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const auto pointer_size = class_linker->GetImagePointerSize();
+ ArtMethod* method = nullptr;
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ if (name == m.GetName()) {
+ EXPECT_TRUE(method == nullptr);
+ method = &m;
+ }
+ }
+ return method;
+ }
+
+ // Verify that the given method has the expected inline caches and nothing else.
+ void AssertInlineCaches(ArtMethod* method,
+ const std::set<mirror::Class*>& expected_classes,
+ const ProfileCompilationInfo& info,
+ bool is_megamorphic,
+ bool is_missing_types)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ ASSERT_TRUE(info.GetMethod(method->GetDexFile()->GetLocation(),
+ method->GetDexFile()->GetLocationChecksum(),
+ method->GetDexMethodIndex(),
+ &pmi));
+ ASSERT_EQ(pmi.inline_caches.size(), 1u);
+ ProfileCompilationInfo::DexPcData dex_pc_data = pmi.inline_caches.begin()->second;
+
+ ASSERT_EQ(dex_pc_data.is_megamorphic, is_megamorphic);
+ ASSERT_EQ(dex_pc_data.is_missing_types, is_missing_types);
+ ASSERT_EQ(expected_classes.size(), dex_pc_data.classes.size());
+ size_t found = 0;
+ for (mirror::Class* it : expected_classes) {
+ for (const auto& class_ref : dex_pc_data.classes) {
+ ProfileCompilationInfo::DexReference dex_ref =
+ pmi.dex_references[class_ref.dex_profile_index];
+ if (dex_ref.MatchesDex(&(it->GetDexFile())) &&
+ class_ref.type_index == it->GetDexTypeIndex()) {
+ found++;
+ }
+ }
+ }
+
+ ASSERT_EQ(expected_classes.size(), found);
+ }
};
TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -358,25 +426,28 @@ TEST_F(ProfileAssistantTest, TestProfileGeneration) {
TEST_F(ProfileAssistantTest, TestProfileCreationAllMatch) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "java.lang.Comparable",
- "java.lang.Math",
- "java.lang.Object"
+ "Ljava/lang/Comparable;",
+ "Ljava/lang/Math;",
+ "Ljava/lang/Object;"
};
std::string input_file_contents;
+ std::string expected_contents;
for (std::string& class_name : class_names) {
input_file_contents += class_name + std::string("\n");
+ expected_contents += DescriptorToDot(class_name.c_str()) +
+ std::string("\n");
}
std::string output_file_contents;
ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
- ASSERT_EQ(output_file_contents, input_file_contents);
+ ASSERT_EQ(output_file_contents, expected_contents);
}
TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "doesnt.match.this.one",
- "java.lang.Comparable",
- "java.lang.Object"
+ "Ldoesnt/match/this/one;",
+ "Ljava/lang/Comparable;",
+ "Ljava/lang/Object;"
};
std::string input_file_contents;
for (std::string& class_name : class_names) {
@@ -385,16 +456,17 @@ TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
std::string output_file_contents;
ASSERT_TRUE(CreateAndDump(input_file_contents, &output_file_contents));
std::string expected_contents =
- class_names[1] + std::string("\n") + class_names[2] + std::string("\n");
+ DescriptorToDot(class_names[1].c_str()) + std::string("\n") +
+ DescriptorToDot(class_names[2].c_str()) + std::string("\n");
ASSERT_EQ(output_file_contents, expected_contents);
}
TEST_F(ProfileAssistantTest, TestProfileCreationNoneMatched) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
- "doesnt.match.this.one",
- "doesnt.match.this.one.either",
- "nor.this.one"
+ "Ldoesnt/match/this/one;",
+ "Ldoesnt/match/this/one/either;",
+ "Lnor/this/one;"
};
std::string input_file_contents;
for (std::string& class_name : class_names) {
@@ -406,4 +478,115 @@ TEST_F(ProfileAssistantTest, TestProfileCreationNoneMatched) {
ASSERT_EQ(output_file_contents, expected_contents);
}
+TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
+ // Create the profile content.
+ std::vector<std::string> methods = {
+ "LTestInline;->inlineMonomorphic(LSuper;)I+LSubA;",
+ "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;",
+ "LTestInline;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;",
+ "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types",
+ "LTestInline;->noInlineCache(LSuper;)I"
+ };
+ std::string input_file_contents;
+ for (std::string& m : methods) {
+ input_file_contents += m + std::string("\n");
+ }
+
+ // Create the profile and save it to disk.
+ ScratchFile profile_file;
+ ASSERT_TRUE(CreateProfile(input_file_contents,
+ profile_file.GetFilename(),
+ GetTestDexFileName("ProfileTestMultiDex")));
+
+ // Load the profile from disk.
+ ProfileCompilationInfo info;
+ profile_file.GetFile()->ResetOffset();
+ ASSERT_TRUE(info.Load(GetFd(profile_file)));
+
+ // Load the dex files and verify that the profile contains the expected methods info.
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("ProfileTestMultiDex");
+ ASSERT_NE(class_loader, nullptr);
+
+ mirror::Class* sub_a = GetClass(class_loader, "LSubA;");
+ mirror::Class* sub_b = GetClass(class_loader, "LSubB;");
+ mirror::Class* sub_c = GetClass(class_loader, "LSubC;");
+
+ ASSERT_TRUE(sub_a != nullptr);
+ ASSERT_TRUE(sub_b != nullptr);
+ ASSERT_TRUE(sub_c != nullptr);
+
+ {
+ // Verify that method inlineMonomorphic has the expected inline caches and nothing else.
+ ArtMethod* inline_monomorphic = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMonomorphic");
+ ASSERT_TRUE(inline_monomorphic != nullptr);
+ std::set<mirror::Class*> expected_monomorphic;
+ expected_monomorphic.insert(sub_a);
+ AssertInlineCaches(inline_monomorphic,
+ expected_monomorphic,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/false);
+ }
+
+ {
+ // Verify that method inlinePolymorphic has the expected inline caches and nothing else.
+ ArtMethod* inline_polymorphic = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlinePolymorphic");
+ ASSERT_TRUE(inline_polymorphic != nullptr);
+ std::set<mirror::Class*> expected_polymorphic;
+ expected_polymorphic.insert(sub_a);
+ expected_polymorphic.insert(sub_b);
+ expected_polymorphic.insert(sub_c);
+ AssertInlineCaches(inline_polymorphic,
+ expected_polymorphic,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/false);
+ }
+
+ {
+ // Verify that method inlineMegamorphic has the expected inline caches and nothing else.
+ ArtMethod* inline_megamorphic = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMegamorphic");
+ ASSERT_TRUE(inline_megamorphic != nullptr);
+ std::set<mirror::Class*> expected_megamorphic;
+ AssertInlineCaches(inline_megamorphic,
+ expected_megamorphic,
+ info,
+ /*megamorphic*/true,
+ /*missing_types*/false);
+ }
+
+ {
+ // Verify that method inlineMissingTypes has the expected inline caches and nothing else.
+ ArtMethod* inline_missing_types = GetVirtualMethod(class_loader,
+ "LTestInline;",
+ "inlineMissingTypes");
+ ASSERT_TRUE(inline_missing_types != nullptr);
+ std::set<mirror::Class*> expected_missing_types;
+ AssertInlineCaches(inline_missing_types,
+ expected_missing_types,
+ info,
+ /*megamorphic*/false,
+ /*missing_types*/true);
+ }
+
+ {
+ // Verify that method noInlineCache has no inline caches in the profile.
+ ArtMethod* no_inline_cache = GetVirtualMethod(class_loader, "LTestInline;", "noInlineCache");
+ ASSERT_TRUE(no_inline_cache != nullptr);
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_no_inline_cache;
+ ASSERT_TRUE(info.GetMethod(no_inline_cache->GetDexFile()->GetLocation(),
+ no_inline_cache->GetDexFile()->GetLocationChecksum(),
+ no_inline_cache->GetDexMethodIndex(),
+ &pmi_no_inline_cache));
+ ASSERT_TRUE(pmi_no_inline_cache.inline_caches.empty());
+ }
+}
+
} // namespace art
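The tests above and the profman changes below revolve around the human-readable profile line format. A hypothetical standalone parser sketch of the grammar inferred from the tests (ParseProfileLine and ProfileLine are illustrative names, not the profman API):

#include <sstream>
#include <string>
#include <vector>

// Grammar exercised by the tests:
//   "LClass;"                                  -- class only
//   "LClass;->method(LSig;)I"                  -- method, no inline cache
//   "LClass;->method(LSig;)I+LSubA;,LSubB;"    -- method with inline cache
//   "LClass;->method(LSig;)I+missing_types"    -- method with missing types
struct ProfileLine {
  std::string klass;
  std::string method;              // empty for a class-only line
  std::vector<std::string> cache;  // inline cache class descriptors
  bool missing_types = false;
};

inline ProfileLine ParseProfileLine(const std::string& line) {
  ProfileLine out;
  size_t arrow = line.find("->");
  if (arrow == std::string::npos) {
    out.klass = line;
    return out;
  }
  out.klass = line.substr(0, arrow);
  std::string rest = line.substr(arrow + 2);
  size_t plus = rest.find('+');
  out.method = rest.substr(0, plus);  // whole string when plus == npos
  if (plus != std::string::npos) {
    std::string caches = rest.substr(plus + 1);
    if (caches == "missing_types") {
      out.missing_types = true;
    } else {
      std::stringstream ss(caches);
      std::string item;
      while (std::getline(ss, item, ',')) {
        out.cache.push_back(item);
      }
    }
  }
  return out;
}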
diff --git a/profman/profman.cc b/profman/profman.cc
index a42e4f1db1..f7316cc129 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -36,6 +36,7 @@
#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
+#include "bytecode_utils.h"
#include "dex_file.h"
#include "jit/profile_compilation_info.h"
#include "runtime.h"
@@ -136,6 +137,15 @@ static constexpr uint16_t kDefaultTestProfileNumDex = 20;
static constexpr uint16_t kDefaultTestProfileMethodRatio = 5;
static constexpr uint16_t kDefaultTestProfileClassRatio = 5;
+// Separators used when parsing the human-friendly representation of profiles.
+static const std::string kMethodSep = "->";
+static const std::string kMissingTypesMarker = "missing_types";
+static constexpr char kProfileParsingInlineCacheSep = '+';
+static constexpr char kProfileParsingTypeSep = ',';
+static constexpr char kProfileParsingFirstCharInSignature = '(';
+
+// TODO(calin): This class has grown too much from its initial design. Split the functionality
+// into smaller, more contained pieces.
class ProfMan FINAL {
public:
ProfMan() :
@@ -522,6 +532,187 @@ class ProfMan FINAL {
return output.release();
}
+ // Find class klass_descriptor in the given dex_files and store its reference
+ // in the out parameter class_ref.
+ // Return true if the definition of the class was found in any of the dex_files.
+ bool FindClass(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ const std::string& klass_descriptor,
+ /*out*/ProfileMethodInfo::ProfileClassReference* class_ref) {
+ for (const std::unique_ptr<const DexFile>& dex_file_ptr : dex_files) {
+ const DexFile* dex_file = dex_file_ptr.get();
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
+ if (type_id == nullptr) {
+ continue;
+ }
+ dex::TypeIndex type_index = dex_file->GetIndexForTypeId(*type_id);
+ if (dex_file->FindClassDef(type_index) == nullptr) {
+ // Class is only referenced in the current dex file but not defined in it.
+ continue;
+ }
+ class_ref->dex_file = dex_file;
+ class_ref->type_index = type_index;
+ return true;
+ }
+ return false;
+ }
+
+ // Find the method specified by method_spec in the class class_ref. The method
+ // must have a single INVOKE_VIRTUAL in its byte code.
+ // Upon success it returns true and stores the method index and the invoke dex pc
+ // in the output parameters.
+ // The format of the method spec is the name plus signature, e.g. "inlinePolymorphic(LSuper;)I";
+ // any inline cache part has already been stripped by the caller.
+ //
+ // TODO(calin): support INVOKE_INTERFACE and the range variants.
+ bool FindMethodWithSingleInvoke(const ProfileMethodInfo::ProfileClassReference& class_ref,
+ const std::string& method_spec,
+ /*out*/uint16_t* method_index,
+ /*out*/uint32_t* dex_pc) {
+ std::vector<std::string> name_and_signature;
+ Split(method_spec, kProfileParsingFirstCharInSignature, &name_and_signature);
+ if (name_and_signature.size() != 2) {
+ LOG(ERROR) << "Invalid method name and signature: " << method_spec;
+ return false;
+ }
+ const std::string& name = name_and_signature[0];
+ const std::string& signature = kProfileParsingFirstCharInSignature + name_and_signature[1];
+ const DexFile* dex_file = class_ref.dex_file;
+
+ const DexFile::StringId* name_id = dex_file->FindStringId(name.c_str());
+ if (name_id == nullptr) {
+ LOG(ERROR) << "Could not find name: " << name;
+ return false;
+ }
+ dex::TypeIndex return_type_idx;
+ std::vector<dex::TypeIndex> param_type_idxs;
+ if (!dex_file->CreateTypeList(signature, &return_type_idx, &param_type_idxs)) {
+ LOG(ERROR) << "Could not create type list" << signature;
+ return false;
+ }
+ const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
+ if (proto_id == nullptr) {
+ LOG(ERROR) << "Could not find proto_id: " << name;
+ return false;
+ }
+ const DexFile::MethodId* method_id = dex_file->FindMethodId(
+ dex_file->GetTypeId(class_ref.type_index), *name_id, *proto_id);
+ if (method_id == nullptr) {
+ LOG(ERROR) << "Could not find method_id: " << name;
+ return false;
+ }
+
+ *method_index = dex_file->GetIndexForMethodId(*method_id);
+
+ uint32_t offset = dex_file->FindCodeItemOffset(
+ *dex_file->FindClassDef(class_ref.type_index),
+ *method_index);
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
+
+ bool found_invoke = false;
+ for (CodeItemIterator it(*code_item); !it.Done(); it.Advance()) {
+ if (it.CurrentInstruction().Opcode() == Instruction::INVOKE_VIRTUAL) {
+ if (found_invoke) {
+ LOG(ERROR) << "Multiple invoke INVOKE_VIRTUAL found: " << name;
+ return false;
+ }
+ found_invoke = true;
+ *dex_pc = it.CurrentDexPc();
+ }
+ }
+ if (!found_invoke) {
+ LOG(ERROR) << "Could not find any INVOKE_VIRTUAL: " << name;
+ }
+ return found_invoke;
+ }
+
+ // Process a line defining a class or a method and its inline caches.
+ // Upon success return true and add the class or the method info to profile.
+ // The possible line formats are:
+ // "LJustTheCass;".
+ // "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
+ // "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types".
+ // "LTestInline;->inlineNoInlineCaches(LSuper;)I".
+ // The method and classes are searched only in the given dex files.
+ bool ProcessLine(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ const std::string& line,
+ /*out*/ProfileCompilationInfo* profile) {
+ std::string klass;
+ std::string method_str;
+ size_t method_sep_index = line.find(kMethodSep);
+ if (method_sep_index == std::string::npos) {
+ klass = line;
+ } else {
+ klass = line.substr(0, method_sep_index);
+ method_str = line.substr(method_sep_index + kMethodSep.size());
+ }
+
+ ProfileMethodInfo::ProfileClassReference class_ref;
+ if (!FindClass(dex_files, klass, &class_ref)) {
+ LOG(WARNING) << "Could not find class: " << klass;
+ return false;
+ }
+
+ if (method_str.empty()) {
+ // No method to add. Just add the class.
+ std::set<DexCacheResolvedClasses> resolved_class_set;
+ const DexFile* dex_file = class_ref.dex_file;
+ const auto& dex_resolved_classes = resolved_class_set.emplace(
+ dex_file->GetLocation(),
+ dex_file->GetBaseLocation(),
+ dex_file->GetLocationChecksum());
+ dex_resolved_classes.first->AddClass(class_ref.type_index);
+ profile->AddMethodsAndClasses(std::vector<ProfileMethodInfo>(), resolved_class_set);
+ return true;
+ }
+
+ // Process the method.
+ std::string method_spec;
+ std::vector<std::string> inline_cache_elems;
+
+ std::vector<std::string> method_elems;
+ bool is_missing_types = false;
+ Split(method_str, kProfileParsingInlineCacheSep, &method_elems);
+ if (method_elems.size() == 2) {
+ method_spec = method_elems[0];
+ is_missing_types = method_elems[1] == kMissingTypesMarker;
+ if (!is_missing_types) {
+ Split(method_elems[1], kProfileParsingTypeSep, &inline_cache_elems);
+ }
+ } else if (method_elems.size() == 1) {
+ method_spec = method_elems[0];
+ } else {
+ LOG(ERROR) << "Invalid method line: " << line;
+ return false;
+ }
+
+ uint16_t method_index;
+ uint32_t dex_pc;
+ if (!FindMethodWithSingleInvoke(class_ref, method_spec, &method_index, &dex_pc)) {
+ return false;
+ }
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes(inline_cache_elems.size());
+ size_t class_it = 0;
+ for (const std::string& ic_class : inline_cache_elems) {
+ if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
+ LOG(ERROR) << "Could not find class: " << ic_class;
+ return false;
+ }
+ }
+ std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
+ inline_caches.emplace_back(dex_pc, is_missing_types, classes);
+ std::vector<ProfileMethodInfo> pmi;
+ pmi.emplace_back(class_ref.dex_file, method_index, inline_caches);
+
+ profile->AddMethodsAndClasses(pmi, std::set<DexCacheResolvedClasses>());
+ return true;
+ }
+
+ // Creates a profile from a human-friendly textual representation.
+ // The expected input format is:
+ // # Classes
+ // Ljava/lang/Comparable;
+ // Ljava/lang/Math;
+ // # Methods with inline caches
+ // LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;
+ // LTestInline;->noInlineCache(LSuper;)I
int CreateProfile() {
// Validate parameters for this command.
if (apk_files_.empty() && apks_fd_.empty()) {
@@ -550,51 +741,22 @@ class ProfMan FINAL {
return -1;
}
}
- // Read the user-specified list of classes (dot notation rather than descriptors).
+ // Read the user-specified list of classes and methods.
std::unique_ptr<std::unordered_set<std::string>>
- user_class_list(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ user_lines(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
create_profile_from_file_.c_str(), nullptr)); // No post-processing.
- std::unordered_set<std::string> matched_user_classes;
- // Open the dex files to look up class names.
+
+ // Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
- // Iterate over the dex files looking for class names in the input stream.
- std::set<DexCacheResolvedClasses> resolved_class_set;
- for (auto& dex_file : dex_files) {
- // Compute the set of classes to be added for this dex file first. This
- // avoids creating an entry in the profile information for dex files that
- // contribute no classes.
- std::unordered_set<dex::TypeIndex> classes_to_be_added;
- for (const auto& klass : *user_class_list) {
- std::string descriptor = DotToDescriptor(klass.c_str());
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor.c_str());
- if (type_id == nullptr) {
- continue;
- }
- classes_to_be_added.insert(dex_file->GetIndexForTypeId(*type_id));
- matched_user_classes.insert(klass);
- }
- if (classes_to_be_added.empty()) {
- continue;
- }
- // Insert the DexCacheResolved Classes into the set expected for
- // AddMethodsAndClasses.
- std::set<DexCacheResolvedClasses>::iterator dex_resolved_classes =
- resolved_class_set.emplace(dex_file->GetLocation(),
- dex_file->GetBaseLocation(),
- dex_file->GetLocationChecksum()).first;
- dex_resolved_classes->AddClasses(classes_to_be_added.begin(), classes_to_be_added.end());
- }
- // Warn the user if we didn't find matches for every class.
- for (const auto& klass : *user_class_list) {
- if (matched_user_classes.find(klass) == matched_user_classes.end()) {
- LOG(WARNING) << "requested class '" << klass << "' was not matched in any dex file";
- }
- }
- // Generate the profile data structure.
+
+ // Process the lines one by one and add the successful ones to the profile.
ProfileCompilationInfo info;
- std::vector<ProfileMethodInfo> methods; // No methods for now.
- info.AddMethodsAndClasses(methods, resolved_class_set);
+
+ for (const auto& line : *user_lines) {
+ ProcessLine(dex_files, line, &info);
+ }
+
// Write the profile file.
CHECK(info.Save(fd));
if (close(fd) < 0) {
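
As a minimal, illustrative input file for the new CreateProfile flow, covering the four line formats documented in ProcessLine (the class and method names are the placeholders from the comments, not real classes):

    # Classes
    Ljava/lang/Comparable;
    # Methods with inline caches
    LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;
    LTestInline;->inlineMissingTypes(LSuper;)I+missing_types
    LTestInline;->inlineNoInlineCaches(LSuper;)I

Each method line is split on '+' into a method spec and an inline-cache part; the inline-cache part is either the literal missing_types marker or a ','-separated list of receiver type descriptors, and the referenced method must contain exactly one INVOKE_VIRTUAL so that the dex pc of the inline cache can be derived.
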
diff --git a/runtime/Android.bp b/runtime/Android.bp
index d136aa15b3..9958814f58 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -156,6 +156,7 @@ cc_defaults {
"native/java_lang_Thread.cc",
"native/java_lang_Throwable.cc",
"native/java_lang_VMClassLoader.cc",
+ "native/java_lang_Void.cc",
"native/java_lang_invoke_MethodHandleImpl.cc",
"native/java_lang_ref_FinalizerReference.cc",
"native/java_lang_ref_Reference.cc",
@@ -171,6 +172,7 @@ cc_defaults {
"native/org_apache_harmony_dalvik_ddmc_DdmServer.cc",
"native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc",
"native/sun_misc_Unsafe.cc",
+ "non_debuggable_classes.cc",
"oat.cc",
"oat_file.cc",
"oat_file_assistant.cc",
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 01bd177221..e5f6f11326 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -53,6 +53,7 @@ Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
static const char* arm64_known_variants[] = {
"cortex-a35",
"exynos-m1",
+ "exynos-m2",
"denver64",
"kryo"
};
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ec8ae85722..4f7b4957b6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2048,11 +2048,12 @@ ENTRY_NO_GP art_quick_indexof
lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
#endif
slt $t1, $a2, $zero # if fromIndex < 0
-#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+#if defined(_MIPS_ARCH_MIPS32R6)
seleqz $a2, $a2, $t1 # fromIndex = 0;
#else
movn $a2, $zero, $t1 # fromIndex = 0;
#endif
+
#if (STRING_COMPRESSION_FEATURE)
srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
#endif
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 35f20fbf44..ef82bd239d 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -70,14 +70,16 @@
// Macros to poison (negate) the reference for heap poisoning.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
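
Heap poisoning stores references negated. The old subu sign-extends its 32-bit result on MIPS64; the replacement dsubu + dext sequence leaves the negated reference zero-extended in the 64-bit register. A stand-alone C++ sketch of that negate-and-mask round trip (illustrative only, not the runtime's helper):

    #include <cstdint>
    #include <cassert>

    // Negate the 32-bit reference value, then keep only the low 32 bits so
    // the result stays zero-extended in a 64-bit register (dsubu + dext).
    uint64_t PoisonRef(uint32_t ref) {
      uint64_t poisoned = 0 - static_cast<uint64_t>(ref);
      return poisoned & 0xffffffffu;
    }

    int main() {
      uint32_t ref = 0x12345678u;
      // Unpoisoning is the same negate-and-extract sequence.
      assert(static_cast<uint32_t>(PoisonRef(
                 static_cast<uint32_t>(PoisonRef(ref)))) == ref);
      return 0;
    }
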
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 473d9cf74e..685e26c78d 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -55,8 +55,10 @@ inline mirror::Class* ArtMethod::GetDeclaringClass() {
if (kIsDebugBuild) {
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
- CHECK(result->IsIdxLoaded() || result->IsErroneous())
- << result->GetStatus() << " " << result->PrettyClass();
+ if (kCheckDeclaringClassState) {
+ CHECK(result->IsIdxLoaded() || result->IsErroneous())
+ << result->GetStatus() << " " << result->PrettyClass();
+ }
} else {
CHECK(result == nullptr) << this;
}
@@ -89,7 +91,7 @@ ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
template <ReadBarrierOption kReadBarrierOption>
inline uint32_t ArtMethod::GetAccessFlags() {
- if (kIsDebugBuild) {
+ if (kCheckDeclaringClassState) {
Thread* self = Thread::Current();
if (!Locks::mutator_lock_->IsSharedHeld(self)) {
if (self->IsThreadSuspensionAllowable()) {
@@ -118,8 +120,10 @@ inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
}
inline uint32_t ArtMethod::GetDexMethodIndex() {
- DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
- GetDeclaringClass()->IsErroneous());
+ if (kCheckDeclaringClassState) {
+ CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+ GetDeclaringClass()->IsErroneous());
+ }
return GetDexMethodIndexUnchecked();
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 99d7a49dc9..cd1950c0e2 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -53,6 +53,8 @@ class PointerArray;
class ArtMethod FINAL {
public:
+ static constexpr bool kCheckDeclaringClassState = kIsDebugBuild;
+
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0), hotness_count_(0) { }
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index c7a94a90dc..4a2e34f243 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -246,7 +246,7 @@ ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
// String compression feature.
-#define STRING_COMPRESSION_FEATURE 0
+#define STRING_COMPRESSION_FEATURE 1
ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
#if defined(__cplusplus)
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 245ab3b24f..f92fbea15d 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -89,6 +89,7 @@ enum ArenaAllocKind {
kArenaAllocRegisterAllocator,
kArenaAllocRegisterAllocatorValidate,
kArenaAllocStackMapStream,
+ kArenaAllocVectorNode,
kArenaAllocCodeGenerator,
kArenaAllocAssembler,
kArenaAllocParallelMoveResolver,
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index f0811b020b..4041f5e1ed 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -152,6 +152,11 @@ inline bool IsAlignedParam(T x, int n) {
return (x & (n - 1)) == 0;
}
+template<typename T>
+inline bool IsAlignedParam(T* x, int n) {
+ return IsAlignedParam(reinterpret_cast<const uintptr_t>(x), n);
+}
+
#define CHECK_ALIGNED(value, alignment) \
CHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
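
The new pointer overload simply forwards to the integral version, so call sites such as the RegionSpace map-alignment check later in this change can pass mem_map->Begin() without a cast. A self-contained usage sketch:

    #include <cstdint>

    template <typename T>
    inline bool IsAlignedParam(T x, int n) {   // existing integral overload
      return (x & (n - 1)) == 0;
    }

    template <typename T>
    inline bool IsAlignedParam(T* x, int n) {  // new pointer overload
      return IsAlignedParam(reinterpret_cast<uintptr_t>(x), n);
    }

    int main() {
      alignas(4096) static char buffer[4096];
      // No reinterpret_cast needed at the call site any more.
      return IsAlignedParam(buffer, 4096) ? 0 : 1;
    }
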
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index ff2dd1b399..03fc959f6b 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -73,7 +73,7 @@ void FdFile::Destroy() {
}
if (auto_close_ && fd_ != -1) {
if (Close() != 0) {
- PLOG(WARNING) << "Failed to close file " << file_path_;
+ PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
}
}
}
diff --git a/compiler/optimizing/bytecode_utils.h b/runtime/bytecode_utils.h
index 133afa47fe..fa87b1d6da 100644
--- a/compiler/optimizing/bytecode_utils.h
+++ b/runtime/bytecode_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
-#define ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
+#ifndef ART_RUNTIME_BYTECODE_UTILS_H_
+#define ART_RUNTIME_BYTECODE_UTILS_H_
#include "base/arena_object.h"
#include "dex_file.h"
@@ -177,4 +177,4 @@ inline bool IsThrowingDexInstruction(const Instruction& instruction) {
} // namespace art
-#endif // ART_COMPILER_OPTIMIZING_BYTECODE_UTILS_H_
+#endif // ART_RUNTIME_BYTECODE_UTILS_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9b0ffaf031..d232714338 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -752,22 +752,6 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
FindSystemClass(self, "[Ljava/lang/StackTraceElement;"));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
- // Ensure void type is resolved in the core's dex cache so java.lang.Void is correctly
- // initialized.
- {
- const DexFile& dex_file = java_lang_Object->GetDexFile();
- const DexFile::TypeId* void_type_id = dex_file.FindTypeId("V");
- CHECK(void_type_id != nullptr);
- dex::TypeIndex void_type_idx = dex_file.GetIndexForTypeId(*void_type_id);
- // Now we resolve void type so the dex cache contains it. We use java.lang.Object class
- // as referrer so the used dex cache is core's one.
- ObjPtr<mirror::Class> resolved_type = ResolveType(dex_file,
- void_type_idx,
- java_lang_Object.Get());
- CHECK_EQ(resolved_type, GetClassRoot(kPrimitiveVoid));
- self->AssertNoPendingException();
- }
-
// Create conflict tables that depend on the class linker.
runtime->FixupConflictTables();
@@ -1041,7 +1025,8 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
class_loader->GetClass();
}
-static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
+static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
+ ObjPtr<mirror::String>* out_name)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
@@ -1053,17 +1038,20 @@ static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf();
ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file == nullptr) {
- return nullptr;
+ // Null dex file means it was probably a jar with no dex files; return a null string.
+ *out_name = nullptr;
+ return true;
}
ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
if (name_object != nullptr) {
- return name_object->AsString();
+ *out_name = name_object->AsString();
+ return true;
}
- return nullptr;
+ return false;
}
static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
- std::list<mirror::String*>* out_dex_file_names,
+ std::list<ObjPtr<mirror::String>>* out_dex_file_names,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
@@ -1099,12 +1087,14 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
- if (name == nullptr) {
- *error_msg = StringPrintf("Null name for dex element at index %d", i);
+ ObjPtr<mirror::String> name;
+ if (!GetDexPathListElementName(element, &name)) {
+ *error_msg = StringPrintf("Invalid dex path list element at index %d", i);
return false;
}
- out_dex_file_names->push_front(name.Ptr());
+ if (name != nullptr) {
+ out_dex_file_names->push_front(name.Ptr());
+ }
}
}
}
@@ -1785,14 +1775,14 @@ bool ClassLinker::AddImageSpace(
*error_msg = "Unexpected BootClassLoader in app image";
return false;
}
- std::list<mirror::String*> image_dex_file_names;
+ std::list<ObjPtr<mirror::String>> image_dex_file_names;
std::string temp_error_msg;
if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
temp_error_msg.c_str());
return false;
}
- std::list<mirror::String*> loader_dex_file_names;
+ std::list<ObjPtr<mirror::String>> loader_dex_file_names;
if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
temp_error_msg.c_str());
@@ -1804,7 +1794,10 @@ bool ClassLinker::AddImageSpace(
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
// If we are somewhere in the middle of the array, there may be nulls at the end.
- loader_dex_file_names.push_back(GetDexPathListElementName(element));
+ ObjPtr<mirror::String> name;
+ if (GetDexPathListElementName(element, &name) && name != nullptr) {
+ loader_dex_file_names.push_back(name);
+ }
}
}
// Ignore the number of image dex files since we are adding those to the class loader anyways.
@@ -4162,19 +4155,6 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file,
return false;
}
- // We may be running with a preopted oat file but without image. In this case,
- // we don't skip verification of skip_access_checks classes to ensure we initialize
- // dex caches with all types resolved during verification.
- // We need to trust image classes, as these might be coming out of a pre-opted, quickened boot
- // image (that we just failed loading), and the verifier can't be run on quickened opcodes when
- // the runtime isn't started. On the other hand, app classes can be re-verified even if they are
- // already pre-opted, as then the runtime is started.
- if (!Runtime::Current()->IsAotCompiler() &&
- !Runtime::Current()->GetHeap()->HasBootImageSpace() &&
- klass->GetClassLoader() != nullptr) {
- return false;
- }
-
uint16_t class_def_index = klass->GetDexClassDefIndex();
oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus();
if (oat_file_class_status == mirror::Class::kStatusVerified ||
@@ -4428,9 +4408,15 @@ void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod*
// Create constructor for Proxy that must initialize the method.
CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 23u);
- ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked(
- 8, image_pointer_size_);
- DCHECK_EQ(std::string(proxy_constructor->GetName()), "<init>");
+ // Find the <init>(InvocationHandler)V method. The exact method offset varies depending
+ // on which front-end compiler was used to build the libcore DEX files.
+ ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->
+ FindDeclaredDirectMethod("<init>",
+ "(Ljava/lang/reflect/InvocationHandler;)V",
+ image_pointer_size_);
+ DCHECK(proxy_constructor != nullptr)
+ << "Could not find <init> method in java.lang.reflect.Proxy";
+
// Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
// constructor method.
GetClassRoot(kJavaLangReflectProxy)->GetDexCache()->SetResolvedMethod(
@@ -8947,7 +8933,7 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo
return ret;
}
-std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
+std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes) {
ScopedTrace trace(__PRETTY_FUNCTION__);
std::unordered_set<std::string> ret;
@@ -8962,14 +8948,13 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
if (dex_cache != nullptr) {
const DexFile* dex_file = dex_cache->GetDexFile();
// There could be duplicates if two dex files with the same location are mapped.
- location_to_dex_file.emplace(
- ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+ location_to_dex_file.emplace(dex_file->GetLocation(), dex_file);
}
}
}
for (const DexCacheResolvedClasses& info : classes) {
- const std::string& profile_key = info.GetDexLocation();
- auto found = location_to_dex_file.find(profile_key);
+ const std::string& location = info.GetDexLocation();
+ auto found = location_to_dex_file.find(location);
if (found != location_to_dex_file.end()) {
const DexFile* dex_file = found->second;
VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
@@ -8981,7 +8966,7 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
ret.insert(descriptor);
}
} else {
- VLOG(class_linker) << "Failed to find opened dex file for profile key " << profile_key;
+ VLOG(class_linker) << "Failed to find opened dex file for location " << location;
}
}
return ret;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 33eed3c8e3..a5d26c7a88 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -617,7 +617,8 @@ class ClassLinker {
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
REQUIRES(!Locks::dex_lock_);
- std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
+ // Returns the class descriptors for loaded dex files.
+ std::unordered_set<std::string> GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes)
REQUIRES(!Locks::dex_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 21cdede06b..e5722a13a7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -139,7 +139,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_FALSE(JavaLangObject->IsFinal());
EXPECT_FALSE(JavaLangObject->IsPrimitive());
EXPECT_FALSE(JavaLangObject->IsSynthetic());
- EXPECT_EQ(2U, JavaLangObject->NumDirectMethods());
+ EXPECT_EQ(4U, JavaLangObject->NumDirectMethods());
EXPECT_EQ(11U, JavaLangObject->NumVirtualMethods());
if (!kUseBrooksReadBarrier) {
EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b6a2e09719..35e9d5db29 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -179,6 +179,14 @@ std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
map->Size(),
location,
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2500..e77a5b8e39 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@ class ReadBarrierTable {
}
// This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
private:
static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f9c187e1d..aea9708ddc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1644,10 +1644,10 @@ void ConcurrentCopying::ReclaimPhase() {
// Record freed objects.
TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
// Don't include thread-locals that are in the to-space.
- uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
- uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
- uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
- uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
+ const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
+ const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
+ const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
+ const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
@@ -1658,8 +1658,18 @@ void ConcurrentCopying::ReclaimPhase() {
}
CHECK_LE(to_objects, from_objects);
CHECK_LE(to_bytes, from_bytes);
- int64_t freed_bytes = from_bytes - to_bytes;
- int64_t freed_objects = from_objects - to_objects;
+ // cleared_bytes and cleared_objects may be greater than the from space equivalents since
+ // ClearFromSpace may clear empty unevac regions.
+ uint64_t cleared_bytes;
+ uint64_t cleared_objects;
+ {
+ TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ CHECK_GE(cleared_bytes, from_bytes);
+ CHECK_GE(cleared_objects, from_objects);
+ }
+ int64_t freed_bytes = cleared_bytes - to_bytes;
+ int64_t freed_objects = cleared_objects - to_objects;
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
@@ -1678,11 +1688,6 @@ void ConcurrentCopying::ReclaimPhase() {
}
{
- TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace();
- }
-
- {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Sweep(false);
SwapBitmaps();
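
The freed-byte accounting in ReclaimPhase changes meaning here: ClearFromSpace now also reclaims unevacuated regions whose live bytes dropped to zero, so the cleared totals can exceed the from-space totals and become the basis for RecordFree. A worked example with made-up numbers:

    // Illustrative numbers only (not from a real GC cycle).
    const int64_t from_bytes    = 8 << 20;  // bytes that were in the from-space
    const int64_t to_bytes      = 3 << 20;  // bytes copied into the to-space
    const int64_t cleared_bytes = 9 << 20;  // from-space plus 1 MB of dead unevac regions
    // The code asserts cleared_bytes >= from_bytes, then records:
    const int64_t freed_bytes   = cleared_bytes - to_bytes;  // 6 MB, not the old 5 MB
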
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 4c0f317aca..67e7383a4f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -724,7 +724,9 @@ class SemiSpace::MarkObjectVisitor {
void SemiSpace::ScanObject(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
MarkObjectVisitor visitor(this);
- obj->VisitReferences(visitor, visitor);
+ // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build).
+ obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
}
// Scan anything that's on the mark stack.
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index eef4fba20d..f0e1029f85 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -59,6 +59,8 @@ enum CollectorType {
kCollectorTypeHprof,
// Fake collector for installing/removing a system-weak holder.
kCollectorTypeAddRemoveSystemWeakHolder,
+ // Fake collector type for GetObjectsAllocated.
+ kCollectorTypeGetObjectsAllocated,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 9e34346686..c1c1cad861 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -40,6 +40,7 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseJitCodeCache: return "JitCodeCache";
case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
case kGcCauseHprof: return "Hprof";
+ case kGcCauseGetObjectsAllocated: return "ObjectsAllocated";
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 9b285b12a4..eb27547768 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -53,8 +53,10 @@ enum GcCause {
kGcCauseJitCodeCache,
// Not a real GC cause, used to add or remove system-weak holders.
kGcCauseAddRemoveSystemWeakHolder,
- // Not a real GC cause, used to hprof running in the middle of GC.
+ // Not a real GC cause, used to prevent hprof running in the middle of GC.
kGcCauseHprof,
+ // Not a real GC cause, used to prevent GetObjectsAllocated running in the middle of GC.
+ kGcCauseGetObjectsAllocated,
};
const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 53be30eafc..a7697484e2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -133,6 +133,17 @@ static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
+static const char* kRegionSpaceName = "main space (region space)";
+
+#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
+// 300 MB (0x12c00000) - (default non-moving space capacity).
+static uint8_t* const kPreferredAllocSpaceBegin =
+ reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
+#else
+// For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+#endif
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
@@ -286,15 +297,9 @@ Heap::Heap(size_t initial_size,
// Requested begin for the alloc space, to follow the mapped image and oat files
uint8_t* requested_alloc_space_begin = nullptr;
if (foreground_collector_type_ == kCollectorTypeCC) {
- // Need to use a low address so that we can allocate a contiguous
- // 2 * Xmx space when there's no image (dex2oat for target).
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
- CHECK_GE(300 * MB, non_moving_space_capacity);
- requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
-#else
- // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
- requested_alloc_space_begin = reinterpret_cast<uint8_t*>(0x20000000);
-#endif
+ // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
+ // image (dex2oat for target).
+ requested_alloc_space_begin = kPreferredAllocSpaceBegin;
}
// Load image space(s).
@@ -369,12 +374,7 @@ Heap::Heap(size_t initial_size,
&error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
- request_begin = reinterpret_cast<uint8_t*>(300 * MB);
-#else
- // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
- request_begin = reinterpret_cast<uint8_t*>(0x20000000) + non_moving_space_capacity;
-#endif
+ request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
}
// Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
@@ -419,7 +419,12 @@ Heap::Heap(size_t initial_size,
}
// Create other spaces based on whether or not we have a moving GC.
if (foreground_collector_type_ == kCollectorTypeCC) {
- region_space_ = space::RegionSpace::Create("main space (region space)", capacity_ * 2, request_begin);
+ CHECK(separate_non_moving_space);
+ MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
+ capacity_ * 2,
+ request_begin);
+ CHECK(region_space_mem_map != nullptr) << "No region space mem map";
+ region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
AddSpace(region_space_);
} else if (IsMovingGc(foreground_collector_type_) &&
foreground_collector_type_ != kCollectorTypeGSS) {
@@ -1830,6 +1835,11 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
+ // us to suspend while we are doing SuspendAll. b/35232978
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseGetObjectsAllocated,
+ gc::kCollectorTypeGetObjectsAllocated);
// Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -2327,7 +2337,9 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
size_t bin_size = object_addr - context->prev_;
// Add the bin consisting of the end of the previous object to the start of the current object.
collector->AddBin(bin_size, context->prev_);
- context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
+ // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build).
+ context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>(),
+ kObjectAlignment);
}
void AddBin(size_t size, uintptr_t position) {
@@ -2347,7 +2359,8 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- size_t obj_size = obj->SizeOf();
+ // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build).
+ size_t obj_size = obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
mirror::Object* forward_address;
// Find the smallest bin which we can move obj in.
@@ -3551,11 +3564,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
@@ -3961,7 +3971,14 @@ void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count))
+ << "ClassFlags=" << c->GetClassFlags()
+ << " IsClassClass=" << c->IsClassClass()
+ << " byte_count=" << byte_count
+ << " IsVariableSize=" << c->IsVariableSize()
+ << " ObjectSize=" << c->GetObjectSize()
+ << " sizeof(Class)=" << sizeof(mirror::Class)
+ << " klass=" << c.Ptr();
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 3e79223498..5809027235 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
@@ -233,10 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
continue;
}
if (r->IsLarge()) {
+ // Avoid visiting dead large objects since they may contain dangling pointers to the
+ // from-space.
+ DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
- if (obj->GetClass() != nullptr) {
- callback(obj, arg);
- }
+ DCHECK(obj->GetClass() != nullptr);
+ callback(obj, arg);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -310,13 +312,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_EQ(left + num_regs, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
- first_reg->UnfreeLarge(time_);
+ first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
first_reg->SetTop(first_reg->Begin() + num_bytes);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
- regions_[p].UnfreeLargeTail(time_);
+ regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
*bytes_allocated = num_bytes;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8077319ec7..1ad48438ba 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -28,20 +28,52 @@ namespace space {
// value of the region size, evacuate the region.
static constexpr uint kEvaculateLivePercentThreshold = 75U;
-RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
- capacity = RoundUp(capacity, kRegionSize);
+MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
+ uint8_t* requested_begin) {
+ CHECK_ALIGNED(capacity, kRegionSize);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, true, false,
- &error_msg));
+ // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
+ // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
+ std::unique_ptr<MemMap> mem_map;
+ while (true) {
+ mem_map.reset(MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ capacity + kRegionSize,
+ PROT_READ | PROT_WRITE,
+ true,
+ false,
+ &error_msg));
+ if (mem_map.get() != nullptr || requested_begin == nullptr) {
+ break;
+ }
+ // Retry with no specified request begin.
+ requested_begin = nullptr;
+ }
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
MemMap::DumpMaps(LOG_STREAM(ERROR));
return nullptr;
}
- return new RegionSpace(name, mem_map.release());
+ CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
+ CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
+ CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
+ if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+ // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
+ // kRegionSize at the end.
+ mem_map->SetSize(capacity);
+ } else {
+ // Got an unaligned map. Align both ends.
+ mem_map->AlignBy(kRegionSize);
+ }
+ CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+ CHECK_ALIGNED(mem_map->End(), kRegionSize);
+ CHECK_EQ(mem_map->Size(), capacity);
+ return mem_map.release();
+}
+
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
+ return new RegionSpace(name, mem_map);
}
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
@@ -54,6 +86,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
num_regions_ = mem_map_size / kRegionSize;
num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
+ non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -160,7 +193,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
MutexLock mu(Thread::Current(), region_lock_);
size_t num_expected_large_tails = 0;
bool prev_large_evacuated = false;
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
Region* r = &regions_[i];
RegionState state = r->State();
RegionType type = r->Type();
@@ -204,18 +241,50 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
}
}
}
+ DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
-void RegionSpace::ClearFromSpace() {
+void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+ DCHECK(cleared_bytes != nullptr);
+ DCHECK(cleared_objects != nullptr);
+ *cleared_bytes = 0;
+ *cleared_objects = 0;
MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ size_t new_non_free_region_index_limit = 0;
+ for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
- r->Clear();
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
+ r->Clear();
} else if (r->IsInUnevacFromSpace()) {
+ if (r->LiveBytes() == 0) {
+ // Special case for 0 live bytes: this means all of the objects in the region are dead and
+ // we can clear it. This is important for large objects since we must not visit dead ones in
+ // RegionSpace::Walk because they may contain dangling references to invalid objects.
+ // It is also better to clear these regions now instead of at the end of the next GC to
+ // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
+ // live percent evacuation logic.
+ size_t free_regions = 1;
+ // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
+ DCHECK(r->IsLarge());
+ regions_[i + free_regions].Clear();
+ ++free_regions;
+ }
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ num_non_free_regions_ -= free_regions;
+ r->Clear();
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ continue;
+ }
size_t full_count = 0;
while (r->IsInUnevacFromSpace()) {
Region* const cur = &regions_[i + full_count];
@@ -223,6 +292,7 @@ void RegionSpace::ClearFromSpace() {
cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
break;
}
+ DCHECK(cur->IsInUnevacFromSpace());
if (full_count != 0) {
cur->SetUnevacFromSpaceAsToSpace();
}
@@ -239,7 +309,15 @@ void RegionSpace::ClearFromSpace() {
i += full_count - 1;
}
}
+ // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+ Region* last_checked_region = &regions_[i];
+ if (!last_checked_region->IsFree()) {
+ new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+ last_checked_region->Idx() + 1);
+ }
}
+ // Update non_free_region_index_limit_.
+ SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
}
@@ -292,6 +370,7 @@ void RegionSpace::Clear() {
}
r->Clear();
}
+ SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
@@ -358,7 +437,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
r->SetNewlyAllocated();
r->SetTop(r->End());
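
CreateMemMap above uses the classic over-allocate-then-trim pattern: reserve capacity + kRegionSize, keep the map as-is (minus the tail) if the base happens to be aligned, and otherwise align both ends. A reduced sketch of the same arithmetic using raw mmap instead of MemMap (assumes size and alignment are page multiples; MemMap::AlignBy does the equivalent internally):

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstddef>

    // Sketch only: reserve size + alignment bytes, then unmap the slack so
    // the returned window starts at an alignment-aligned address.
    void* MapAligned(size_t size, size_t alignment) {
      size_t mapped = size + alignment;
      void* raw = mmap(nullptr, mapped, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) {
        return nullptr;
      }
      uintptr_t addr = reinterpret_cast<uintptr_t>(raw);
      uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);
      size_t head = aligned - addr;
      if (head != 0) {
        munmap(raw, head);                  // drop the unaligned head
      }
      munmap(reinterpret_cast<void*>(aligned + size),
             alignment - head);             // drop the tail slack
      return reinterpret_cast<void*>(aligned);
    }
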
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index feab9b0fe9..253792993b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -35,10 +35,11 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return kSpaceTypeRegionSpace;
}
- // Create a region space with the requested sizes. The requested base address is not
+ // Create a region space mem map with the requested sizes. The requested base address is not
// guaranteed to be granted, if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static RegionSpace* Create(const std::string& name, MemMap* mem_map);
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -166,7 +167,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
// The region size.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
bool IsInFromSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
@@ -214,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace() REQUIRES(!region_lock_);
+ void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
@@ -307,25 +308,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Given a free region, declare it non-free (allocated).
- void Unfree(uint32_t alloc_time) {
+ void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateAllocated;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLarge(uint32_t alloc_time) {
+ void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLarge;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLargeTail(uint32_t alloc_time) {
+ void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLargeTail;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
void SetNewlyAllocated() {
@@ -341,7 +348,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
bool IsLarge() const {
bool is_large = state_ == RegionState::kRegionStateLarge;
if (is_large) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
}
return is_large;
}
@@ -428,7 +435,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t ObjectsAllocated() const {
if (IsLarge()) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
return 1;
} else if (IsLargeTail()) {
@@ -519,6 +526,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
mirror::Object* GetNextObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+ DCHECK_LT(new_non_free_region_index, num_regions_);
+ non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+ new_non_free_region_index + 1);
+ VerifyNonFreeRegionLimit();
+ }
+
+ void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+ DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+ non_free_region_index_limit_ = new_non_free_region_index_limit;
+ VerifyNonFreeRegionLimit();
+ }
+
+ void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+ if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+ for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ }
+ }
+ }
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
@@ -526,6 +554,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t num_non_free_regions_; // The number of non-free regions in this space.
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+ // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+ // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+ // true.
+ size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
Region* current_region_; // The region that's being allocated currently.
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
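
non_free_region_index_limit_ is maintained as a high-water mark: the Unfree* methods raise it through AdjustNonFreeRegionLimit, ClearFromSpace recomputes it downward, and the invariant lets SetFromSpace stop scanning at the limit when the table-lookup read barrier is off. A toy model of the invariant (simplified types, not the real Region bookkeeping):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Toy model: non_free[i] is true when region i is allocated. The
    // invariant is that every index >= limit_ is free, so scans may stop
    // at limit_ instead of walking all regions.
    struct ToyRegionSpace {
      std::vector<bool> non_free;
      size_t limit_ = 0;

      void Unfree(size_t idx) {
        non_free[idx] = true;
        limit_ = std::max(limit_, idx + 1);  // AdjustNonFreeRegionLimit
      }
      void Verify() const {                  // VerifyNonFreeRegionLimit
        for (size_t i = limit_; i < non_free.size(); ++i) {
          assert(!non_free[i]);
        }
      }
    };
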
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e59c4bb28e..495fec7a48 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1111,7 +1111,9 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
if (space != nullptr) {
if (space->IsZygoteSpace()) {
heap_type = HPROF_HEAP_ZYGOTE;
- } else if (space->IsImageSpace()) {
+ } else if (space->IsImageSpace() && heap->ObjectIsInBootImageSpace(obj)) {
+ // Only count objects in the boot image as HPROF_HEAP_IMAGE; this leaves app image objects as
+ // HPROF_HEAP_APP. b/35762934
heap_type = HPROF_HEAP_IMAGE;
}
} else {
diff --git a/runtime/image.cc b/runtime/image.cc
index 4e6da79205..88f28f3ea1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '8', '\0' }; // hash-based DexCache types
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '0', '\0' }; // Integer.valueOf intrinsic
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
index 2b81f5069f..3c813efb31 100644
--- a/runtime/interpreter/mterp/arm/op_sget.S
+++ b/runtime/interpreter/mterp/arm/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
index ebfb44cb20..eb06aa881c 100644
--- a/runtime/interpreter/mterp/arm/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
index d76862e600..9f4c9046a2 100644
--- a/runtime/interpreter/mterp/arm/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sget_byte.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
index b7fcfc23d4..dd8c991264 100644
--- a/runtime/interpreter/mterp/arm/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm/op_sget_char.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
index 8e7d075b72..e1d9eaee29 100644
--- a/runtime/interpreter/mterp/arm/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm/op_sget_object.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
index 3e80f0da87..c0d61c4d33 100644
--- a/runtime/interpreter/mterp/arm/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm/op_sget_short.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "arm/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index 4f2f89d6c3..aeee016294 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA]
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
index 7e0c1a64ef..494df8aa5d 100644
--- a/runtime/interpreter/mterp/arm/op_sput.S
+++ b/runtime/interpreter/mterp/arm/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
index e3bbf2b8ba..47bed0a2ce 100644
--- a/runtime/interpreter/mterp/arm/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
index e3bbf2b8ba..b4d22b4fd8 100644
--- a/runtime/interpreter/mterp/arm/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sput_byte.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
index d8d65cb5ef..58a957d1f6 100644
--- a/runtime/interpreter/mterp/arm/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm/op_sput_char.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
index d8d65cb5ef..88c321127e 100644
--- a/runtime/interpreter/mterp/arm/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm/op_sput_short.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
index 8d8ed8c4a2..1e8fcc9b75 100644
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rINST, lsr #8 @ r3<- AA
- VREG_INDEX_TO_ADDR r2, r2
+ mov r1, rINST, lsr #8 @ r1<- AA
+ VREG_INDEX_TO_ADDR r1, r1
+ ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
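The operand shuffle in this hunk follows from the replacement helper's signature: MterpSet64Static takes the wide-value pointer as its second argument and the referrer as its third, the reverse of artSet64IndirectStaticFromMterp. Under the 32-bit ARM AAPCS the first four arguments are passed in r0-r3, so &fp[AA] must now be built in r1 and the method loaded into r2. A minimal sketch of the mapping, with the prototype as declared in the mterp.cc hunk later in this change (the register comments describe this stub, they are not part of the declaration):

    // Arguments as marshalled by the arm sput-wide stub (AAPCS: r0-r3).
    extern "C" int MterpSet64Static(uint32_t field_idx,   // r0: field ref BBBB
                                    int64_t* new_value,   // r1: &fp[AA]
                                    ArtMethod* referrer,  // r2: OFF_FP_METHOD(rFP)
                                    Thread* self);        // r3: rSELF

The arm64, mips, and mips64 sput-wide hunks below apply the same reordering for their respective calling conventions.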
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
index 6352ce0597..84e71ac15e 100644
--- a/runtime/interpreter/mterp/arm64/op_sget.S
+++ b/runtime/interpreter/mterp/arm64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
index c40dbdd7d7..868f41cb7f 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"uxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"uxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
index 6cf69a382a..e135aa737a 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_byte.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"sxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
index 8924a349ab..05d57ac20b 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_char.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"uxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
index 620b0bab00..1faaf6eb8e 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_object.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
index 19dbba6f74..5900231b06 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_short.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"sxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
index 287f66daeb..92f3f7dd66 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+    .extern MterpGet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
index 75f27abdcc..e322af0e76 100644
--- a/runtime/interpreter/mterp/arm64/op_sput.S
+++ b/runtime/interpreter/mterp/arm64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
index 11c55e529b..9928f31c98 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
index 11c55e529b..16d6ba96e0 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_byte.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
index b4dd5aa76c..ab5e8152b9 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_char.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
index b4dd5aa76c..b54f88ad48 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_short.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "arm64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
index a79b1a6172..4aeb8ff316 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- lsr w2, wINST, #8 // w3<- AA
- VREG_INDEX_TO_ADDR x2, w2
+ lsr w1, wINST, #8 // w1<- AA
+ VREG_INDEX_TO_ADDR x1, w1
+ ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
index 6d9774c223..b19426bfbb 100644
--- a/runtime/interpreter/mterp/config_arm
+++ b/runtime/interpreter/mterp/config_arm
@@ -288,8 +288,8 @@ op-start arm
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 9f32695664..0987964090 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -286,8 +286,8 @@ op-start arm64
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index 708a22b6a4..fe07385b5a 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -288,8 +288,8 @@ op-start mips
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index 7643a4829e..d24cf4d8d0 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -288,8 +288,8 @@ op-start mips64
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index f454786682..076baf2907 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -292,8 +292,8 @@ op-start x86
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index dbfd3d18fc..44b671a36f 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -292,8 +292,8 @@ op-start x86_64
# op op_unused_f9 FALLBACK
op op_invoke_polymorphic FALLBACK
op op_invoke_polymorphic_range FALLBACK
- # op op_unused_fc FALLBACK
- # op op_unused_fd FALLBACK
+ op op_invoke_custom FALLBACK
+ op op_invoke_custom_range FALLBACK
# op op_unused_fe FALLBACK
# op op_unused_ff FALLBACK
op-end
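Marking an opcode FALLBACK in these config files tells the mterp generator to emit a stub that simply branches to MterpFallback, handing the instruction to the reference C++ interpreter. With 0xfc and 0xfd now assigned to invoke-custom and invoke-custom-range, the former op_unused handlers are replaced by such stubs, as the regenerated out/mterp_*.S hunks below show.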
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
index 64ece1e1c8..635df8aa1f 100644
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
index 45a5a70228..7829970d84 100644
--- a/runtime/interpreter/mterp/mips/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
index 319122cac0..ee0834201b 100644
--- a/runtime/interpreter/mterp/mips/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sget_byte.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
index 71038478e0..d8b477a7bc 100644
--- a/runtime/interpreter/mterp/mips/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips/op_sget_char.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
index b205f513aa..2dc00c386c 100644
--- a/runtime/interpreter/mterp/mips/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips/op_sget_object.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
index 3301823d86..ab55d93060 100644
--- a/runtime/interpreter/mterp/mips/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips/op_sget_short.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "mips/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
index c729250003..ec4295ad03 100644
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -2,12 +2,12 @@
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet64StaticFromCode)
+ JAL(MterpGet64Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
index 7034a0e740..37f8687aaa 100644
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
index 7909ef5622..6426cd40eb 100644
--- a/runtime/interpreter/mterp/mips/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
index 7909ef5622..c68d18f2f7 100644
--- a/runtime/interpreter/mterp/mips/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sput_byte.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
index 188195cc3a..9b8983e4c6 100644
--- a/runtime/interpreter/mterp/mips/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips/op_sput_char.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
index 188195cc3a..5a57ed9922 100644
--- a/runtime/interpreter/mterp/mips/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips/op_sput_short.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
index 3b347fc008..c090007968 100644
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -2,15 +2,15 @@
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet64IndirectStaticFromMterp)
+ JAL(MterpSet64Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
index bd2cfe3778..71046dba1a 100644
--- a/runtime/interpreter/mterp/mips64/op_sget.S
+++ b/runtime/interpreter/mterp/mips64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
index e7b1844d86..ec1ce9eb14 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"}
+%include "mips64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
index 52a2e4a5d5..6a802f63ea 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
index 873d82a0d6..483d085719 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_char.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"}
+%include "mips64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
index 3108417e00..2250696a97 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_object.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
index fed4e76baa..b257bbbba1 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_short.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"seh v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
index 77124d1d8d..ace64f8e80 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S
@@ -3,12 +3,12 @@
*
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet64StaticFromCode
+ jal MterpGet64Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
index 142f18f3ba..466f3339c2 100644
--- a/runtime/interpreter/mterp/mips64/op_sput.S
+++ b/runtime/interpreter/mterp/mips64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode" }
+%default { "helper":"MterpSet32Static" }
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
index f5b8dbf433..eba58f7fa1 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
index f5b8dbf433..80a26c0161 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
index c4d195c82f..c0d5bf3bba 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_char.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
index c4d195c82f..b001832bc4 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_short.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "mips64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
index 828ddc15e7..aa3d5b4157 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S
@@ -3,15 +3,15 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- srl a2, rINST, 8 # a2 <- AA
- dlsa a2, a2, rFP, 2
+    srl     a1, rINST, 8                # a1 <- AA
+ dlsa a1, a1, rFP, 2
+ ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet64IndirectStaticFromMterp
+ jal MterpSet64Static
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 75ab91acba..8bf094e1b8 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -587,27 +587,6 @@ extern "C" size_t MterpSuspendCheck(Thread* self)
return MterpShouldSwitchInterpreters();
}
-extern "C" ssize_t artSet64IndirectStaticFromMterp(uint32_t field_idx,
- ArtMethod* referrer,
- uint64_t* new_value,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
- ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), *new_value);
- return 0; // success
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != nullptr)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), *new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
mirror::Object* obj,
uint8_t new_value,
@@ -689,7 +668,187 @@ extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
return -1; // failure
}
-extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
+template <typename return_type, Primitive::Type primitive_type>
+ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self,
+ return_type (ArtField::*func)(ObjPtr<mirror::Object>))
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return_type res = 0; // On exception, the result will be ignored.
+ ArtField* f =
+ FindFieldFromCode<StaticPrimitiveRead, false>(field_idx,
+ referrer,
+ self,
+ primitive_type);
+ if (LIKELY(f != nullptr)) {
+ ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
+ res = (f->*func)(obj);
+ }
+ return res;
+}
+
+extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetBoolean);
+}
+
+extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetByte);
+}
+
+extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetChar);
+}
+
+extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetShort);
+}
+
+extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetObject).Ptr();
+}
+
+extern "C" int32_t MterpGet32Static(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetInt);
+}
+
+extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
+ referrer,
+ self,
+ &ArtField::GetLong);
+}
+
+
+template <typename field_type, Primitive::Type primitive_type>
+int MterpSetStatic(uint32_t field_idx,
+ field_type new_value,
+ ArtMethod* referrer,
+ Thread* self,
+ void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val))
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ int res = 0; // Assume success (following quick_field_entrypoints conventions)
+ ArtField* f =
+ FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type);
+ if (LIKELY(f != nullptr)) {
+ ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
+ (f->*func)(obj, new_value);
+ } else {
+ res = -1; // Failure
+ }
+ return res;
+}
+
+extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
+ uint8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetBoolean<false>);
+}
+
+extern "C" int MterpSetByteStatic(uint32_t field_idx,
+ int8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetByte<false>);
+}
+
+extern "C" int MterpSetCharStatic(uint32_t field_idx,
+ uint16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetChar<false>);
+}
+
+extern "C" int MterpSetShortStatic(uint32_t field_idx,
+ int16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetShort<false>);
+}
+
+extern "C" int MterpSet32Static(uint32_t field_idx,
+ int32_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
+ new_value,
+ referrer,
+ self,
+ &ArtField::SetInt<false>);
+}
+
+extern "C" int MterpSet64Static(uint32_t field_idx,
+ int64_t* new_value,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
+ *new_value,
+ referrer,
+ self,
+ &ArtField::SetLong<false>);
+}
+
+extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
+ int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(arr == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
@@ -703,7 +862,8 @@ extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t i
}
}
-extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
+extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
+ uint32_t field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
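The helpers added above share one template per access direction, selecting the concrete ArtField accessor through a pointer-to-member-function argument, so each extern "C" wrapper is a thin, inlinable shim around a common slow-path lookup. The following self-contained sketch shows the same dispatch pattern with stand-in types; Field, GetStatic, and the wrappers are illustrative names only, not ART code:

    #include <cstdint>
    #include <iostream>

    // Stand-in for ArtField: one accessor per field width.
    struct Field {
      int64_t storage;
      int32_t GetInt() const { return static_cast<int32_t>(storage); }
      int64_t GetLong() const { return storage; }
    };

    // Shared template: per-type entry points differ only in return type
    // and in which member function they forward to.
    template <typename ReturnType>
    ReturnType GetStatic(Field* f, ReturnType (Field::*getter)() const) {
      ReturnType res = 0;     // On failed lookup the result is ignored.
      if (f != nullptr) {     // Mirrors the LIKELY(f != nullptr) check.
        res = (f->*getter)();
      }
      return res;
    }

    // Thin wrappers, one per width, as with MterpGet32Static/MterpGet64Static.
    int32_t Get32Static(Field* f) { return GetStatic<int32_t>(f, &Field::GetInt); }
    int64_t Get64Static(Field* f) { return GetStatic<int64_t>(f, &Field::GetLong); }

    int main() {
      Field f{0x1122334455667788};
      std::cout << Get32Static(&f) << "\n";  // prints the low 32 bits
      std::cout << Get64Static(&f) << "\n";  // prints the full 64-bit value
    }

Keeping the type selection in template parameters rather than a runtime switch lets the compiler resolve the member-function call inside each wrapper, which matters on the interpreter's field-access fast path.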
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 891624160b..e2b693f269 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -2631,12 +2631,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet32StaticFromCode
+ bl MterpGet32Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2661,12 +2661,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA]
@@ -2690,12 +2690,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetObjStaticFromCode
+ bl MterpGetObjStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2723,12 +2723,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetBooleanStaticFromCode
+ bl MterpGetBooleanStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2756,12 +2756,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetByteStaticFromCode
+ bl MterpGetByteStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2789,12 +2789,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetCharStaticFromCode
+ bl MterpGetCharStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2822,12 +2822,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl artGetShortStaticFromCode
+ bl MterpGetShortStatic
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2860,7 +2860,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet32StaticFromCode
+ bl MterpSet32Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2876,15 +2876,15 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rINST, lsr #8 @ r3<- AA
- VREG_INDEX_TO_ADDR r2, r2
+ mov r1, rINST, lsr #8 @ r1<- AA
+ VREG_INDEX_TO_ADDR r1, r1
+ ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2925,7 +2925,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetBooleanStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2951,7 +2951,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetByteStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2977,7 +2977,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetCharStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -3003,7 +3003,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetShortStatic
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -7347,24 +7347,16 @@ constvalop_long_to_double:
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: arm/op_unused_fc.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: arm/op_unused_fd.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
@@ -11763,7 +11755,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11780,7 +11772,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 7d442c0b4b..ef5a4daa51 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -2543,12 +2543,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet32StaticFromCode
+ bl MterpGet32Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2573,12 +2573,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+    .extern MterpGet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGet64StaticFromCode
+ bl MterpGet64Static
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
@@ -2599,12 +2599,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetObjStaticFromCode
+ bl MterpGetObjStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2632,12 +2632,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetBooleanStaticFromCode
+ bl MterpGetBooleanStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxtb w0, w0
@@ -2665,12 +2665,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetByteStaticFromCode
+ bl MterpGetByteStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxtb w0, w0
@@ -2698,12 +2698,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetCharStaticFromCode
+ bl MterpGetCharStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxth w0, w0
@@ -2731,12 +2731,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl artGetShortStaticFromCode
+ bl MterpGetShortStatic
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxth w0, w0
@@ -2769,7 +2769,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet32StaticFromCode
+ bl MterpSet32Static
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2784,15 +2784,15 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- lsr w2, wINST, #8 // w3<- AA
- VREG_INDEX_TO_ADDR x2, w2
+ lsr w1, wINST, #8 // w1<- AA
+ VREG_INDEX_TO_ADDR x1, w1
+ ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet64IndirectStaticFromMterp
+ bl MterpSet64Static
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
@@ -2831,7 +2831,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetBooleanStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2856,7 +2856,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet8StaticFromCode
+ bl MterpSetByteStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2881,7 +2881,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetCharStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2906,7 +2906,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl artSet16StaticFromCode
+ bl MterpSetShortStatic
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -6914,24 +6914,16 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: arm64/op_unused_fc.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: arm64/op_unused_fd.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
@@ -11580,7 +11572,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11597,7 +11589,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index e154e6c6b2..579afc2387 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -3038,12 +3038,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet32StaticFromCode)
+ JAL(MterpGet32Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3064,12 +3064,12 @@ artMterpAsmInstructionStart = .L_op_nop
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGet64StaticFromCode)
+ JAL(MterpGet64Static)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
@@ -3088,12 +3088,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetObjStaticFromCode)
+ JAL(MterpGetObjStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3118,12 +3118,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetBooleanStaticFromCode)
+ JAL(MterpGetBooleanStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3148,12 +3148,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetByteStaticFromCode)
+ JAL(MterpGetByteStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3178,12 +3178,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetCharStaticFromCode)
+ JAL(MterpGetCharStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3208,12 +3208,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(artGetShortStaticFromCode)
+ JAL(MterpGetShortStatic)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3244,7 +3244,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet32StaticFromCode)
+ JAL(MterpSet32Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3258,15 +3258,15 @@ artMterpAsmInstructionStart = .L_op_nop
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet64IndirectStaticFromMterp)
+ JAL(MterpSet64Static)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3311,7 +3311,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet8StaticFromCode)
+ JAL(MterpSetBooleanStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3336,7 +3336,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet8StaticFromCode)
+ JAL(MterpSetByteStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3361,7 +3361,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet16StaticFromCode)
+ JAL(MterpSetCharStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3386,7 +3386,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(artSet16StaticFromCode)
+ JAL(MterpSetShortStatic)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -7761,25 +7761,15 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: mips/op_unused_fc.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: mips/op_unused_fd.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
/* ------------------------------ */
.balign 128
@@ -12423,7 +12413,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12441,7 +12431,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 013bb32e8f..3656df9a8e 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -2585,12 +2585,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet32StaticFromCode
+ jal MterpGet32Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2614,12 +2614,12 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sget-wide vAA, field//BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGet64StaticFromCode
+ jal MterpGet64Static
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
@@ -2639,12 +2639,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetObjStaticFromCode
+ jal MterpGetObjStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2671,12 +2671,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetBooleanStaticFromCode
+ jal MterpGetBooleanStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xff
@@ -2703,12 +2703,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetByteStaticFromCode
+ jal MterpGetByteStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seb v0, v0
@@ -2735,12 +2735,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetCharStaticFromCode
+ jal MterpGetCharStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xffff
@@ -2767,12 +2767,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal artGetShortStaticFromCode
+ jal MterpGetShortStatic
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seh v0, v0
@@ -2798,7 +2798,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2806,7 +2806,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet32StaticFromCode
+ jal MterpSet32Static
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2821,15 +2821,15 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field//BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- srl a2, rINST, 8 # a2 <- AA
- dlsa a2, a2, rFP, 2
+    srl     a1, rINST, 8                # a1 <- AA
+ dlsa a1, a1, rFP, 2
+ ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet64IndirectStaticFromMterp
+ jal MterpSet64Static
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2862,7 +2862,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2870,7 +2870,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet8StaticFromCode
+ jal MterpSetBooleanStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2888,7 +2888,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2896,7 +2896,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet8StaticFromCode
+ jal MterpSetByteStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2914,7 +2914,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2922,7 +2922,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet16StaticFromCode
+ jal MterpSetCharStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2940,7 +2940,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2948,7 +2948,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal artSet16StaticFromCode
+ jal MterpSetShortStatic
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -7084,26 +7084,16 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: mips64/op_unused_fc.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
b MterpFallback
-
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: mips64/op_unused_fd.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
b MterpFallback
-
/* ------------------------------ */
.balign 128
.L_op_unused_fe: /* 0xfe */
@@ -11982,7 +11972,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12001,7 +11991,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 695d1e4973..21d9671f8b 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -2535,7 +2535,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2543,7 +2543,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet32StaticFromCode)
+ call SYMBOL(MterpGet32Static)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2564,7 +2564,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2572,7 +2572,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movl rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
@@ -2592,7 +2592,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2600,7 +2600,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetObjStaticFromCode)
+ call SYMBOL(MterpGetObjStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2624,7 +2624,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2632,7 +2632,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetBooleanStaticFromCode)
+ call SYMBOL(MterpGetBooleanStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2656,7 +2656,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2664,7 +2664,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetByteStaticFromCode)
+ call SYMBOL(MterpGetByteStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2688,7 +2688,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2696,7 +2696,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetCharStaticFromCode)
+ call SYMBOL(MterpGetCharStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2720,7 +2720,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2728,7 +2728,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGetShortStaticFromCode)
+ call SYMBOL(MterpGetShortStatic)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2751,7 +2751,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2761,7 +2761,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet32StaticFromCode)
+ call SYMBOL(MterpSet32Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2776,17 +2776,17 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[AA]
+ movl %eax, OUT_ARG1(%esp) # &fp[AA]
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2821,7 +2821,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2831,7 +2831,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetBooleanStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2849,7 +2849,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2859,7 +2859,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetByteStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2877,7 +2877,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2887,7 +2887,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetCharStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2905,7 +2905,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2915,7 +2915,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetShortStatic)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -6292,23 +6292,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: x86/op_unused_fc.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: x86/op_unused_fd.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
@@ -12410,7 +12402,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12434,7 +12426,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 2eab58c053..b5a5ae5963 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -2445,12 +2445,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGet32StaticFromCode
+ .extern MterpGet32Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGet32StaticFromCode)
+ call SYMBOL(MterpGet32Static)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2476,12 +2476,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2508,12 +2508,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetObjStaticFromCode
+ .extern MterpGetObjStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetObjStaticFromCode)
+ call SYMBOL(MterpGetObjStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2540,12 +2540,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetBooleanStaticFromCode
+ .extern MterpGetBooleanStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetBooleanStaticFromCode)
+ call SYMBOL(MterpGetBooleanStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2572,12 +2572,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetByteStaticFromCode
+ .extern MterpGetByteStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetByteStaticFromCode)
+ call SYMBOL(MterpGetByteStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2604,12 +2604,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetCharStaticFromCode
+ .extern MterpGetCharStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetCharStaticFromCode)
+ call SYMBOL(MterpGetCharStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2636,12 +2636,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern artGetShortStaticFromCode
+ .extern MterpGetShortStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(artGetShortStaticFromCode)
+ call SYMBOL(MterpGetShortStatic)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2667,13 +2667,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet32StaticFromCode
+ .extern MterpSet32Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet32StaticFromCode)
+ call SYMBOL(MterpSet32Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2687,13 +2687,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2724,13 +2724,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetBooleanStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetBooleanStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2747,13 +2747,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet8StaticFromCode
+ .extern MterpSetByteStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet8StaticFromCode)
+ call SYMBOL(MterpSetByteStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2770,13 +2770,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetCharStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetCharStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2793,13 +2793,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern artSet16StaticFromCode
+ .extern MterpSetShortStatic
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet16StaticFromCode)
+ call SYMBOL(MterpSetShortStatic)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6057,23 +6057,15 @@ movswl %ax, %eax
/* ------------------------------ */
.balign 128
-.L_op_unused_fc: /* 0xfc */
-/* File: x86_64/op_unused_fc.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom: /* 0xfc */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
/* ------------------------------ */
.balign 128
-.L_op_unused_fd: /* 0xfd */
-/* File: x86_64/op_unused_fd.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
+.L_op_invoke_custom_range: /* 0xfd */
+/* Transfer stub to alternate interpreter */
jmp MterpFallback
@@ -11671,7 +11663,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fc: /* 0xfc */
+.L_ALT_op_invoke_custom: /* 0xfc */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11693,7 +11685,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fd: /* 0xfd */
+.L_ALT_op_invoke_custom_range: /* 0xfd */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
index 0e9a3d82da..6e42d323e6 100644
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ b/runtime/interpreter/mterp/x86/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+%default { "is_object":"0", "helper":"MterpGet32Static" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S
index f058dd8f7c..5fa2bf0cfc 100644
--- a/runtime/interpreter/mterp/x86/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S
index c952f40772..ef812f118e 100644
--- a/runtime/interpreter/mterp/x86/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sget_byte.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S
index d7bd410c7d..3bc34ef338 100644
--- a/runtime/interpreter/mterp/x86/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86/op_sget_char.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S
index 1c95f9a00e..b829e75f30 100644
--- a/runtime/interpreter/mterp/x86/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86/op_sget_object.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S
index 6475306b26..449cf6f918 100644
--- a/runtime/interpreter/mterp/x86/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86/op_sget_short.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "x86/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
index 2b603034c6..a605bcf2e5 100644
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sget_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sget-wide vAA, field@BBBB */
- .extern artGet64StaticFromCode
+ .extern MterpGet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -11,7 +11,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(artGet64StaticFromCode)
+ call SYMBOL(MterpGet64Static)
movl rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
index 0b5de0953d..99f6088982 100644
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ b/runtime/interpreter/mterp/x86/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S
index 63601bd2bb..a7fffda1db 100644
--- a/runtime/interpreter/mterp/x86/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S
index 63601bd2bb..3a5ff9267d 100644
--- a/runtime/interpreter/mterp/x86/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sput_byte.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S
index 1749f7c9f8..565cc2aa0f 100644
--- a/runtime/interpreter/mterp/x86/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86/op_sput_char.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S
index 1749f7c9f8..85c344165e 100644
--- a/runtime/interpreter/mterp/x86/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86/op_sput_short.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
index 19cff0db5a..8cc7e28554 100644
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sput_wide.S
@@ -3,17 +3,17 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[AA]
+ movl %eax, OUT_ARG1(%esp) # &fp[AA]
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
index d39e6c4396..e996c77801 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32StaticFromCode", "wide":"0" }
+%default { "is_object":"0", "helper":"MterpGet32Static", "wide":"0" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
index 7d358daec2..ee772ad4e1 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
index 79d9ff448b..f65ea4951e 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetByteStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
index 448861052f..3972551bec 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetCharStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
index 09b627e124..a0bbfd8d35 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
+%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
index 47ac23803c..df212dc5c9 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGetShortStaticFromCode"}
+%include "x86_64/op_sget.S" {"helper":"MterpGetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
index aa223434cf..1e98e28a92 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"artGet64StaticFromCode", "wide":"1"}
+%include "x86_64/op_sget.S" {"helper":"MterpGet64Static", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
index e92b03273b..9705619900 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32StaticFromCode"}
+%default { "helper":"MterpSet32Static"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
index 8718915cb2..8bf4a62328 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
index 8718915cb2..5bb26ebed5 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetByteStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
index 2fe9d14816..42b244e2bb 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetCharStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
index 2fe9d14816..9670092aaf 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"}
+%include "x86_64/op_sput.S" {"helper":"MterpSetShortStatic"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
index c4bc269eb6..a21bcb5dd5 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
@@ -3,13 +3,13 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern artSet64IndirectStaticFromMterp
+ .extern MterpSet64Static
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA]
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
+ movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(artSet64IndirectStaticFromMterp)
+ call SYMBOL(MterpSet64Static)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
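Taken together, the hunks above replace the quick-entrypoint helpers (artSet*StaticFromCode and artSet64IndirectStaticFromMterp) with mterp-specific wrappers, and for sput-wide they also swap the second and third arguments so the value pointer now precedes the referrer. A minimal sketch of the signature the new wrapper would need, inferred only from the calling convention in these hunks (the parameter names and exact return type are assumptions):

    // OUT_ARG0 = field ref BBBB, OUT_ARG1 = &fp[AA],
    // OUT_ARG2 = referrer method, OUT_ARG3 = self;
    // returns zero on success (callers test %al / v0 for non-zero).
    extern "C" bool MterpSet64Static(uint32_t field_index,
                                     uint64_t* new_value,   // &fp[AA]
                                     ArtMethod* referrer,
                                     Thread* self);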
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1ec4749146..3631a9d467 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -325,16 +325,12 @@ void Jit::DeleteThreadPool() {
}
void Jit::StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::vector<std::string>& code_paths) {
if (profile_saver_options_.IsEnabled()) {
ProfileSaver::Start(profile_saver_options_,
filename,
code_cache_.get(),
- code_paths,
- foreign_dex_profile_path,
- app_dir);
+ code_paths);
}
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index d566799340..5da1ea1196 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -136,14 +136,8 @@ class Jit {
// Starts the profile saver if the config options allow profile recording.
// The profile will be stored in the specified `filename` and will contain
// information collected from the given `code_paths` (a set of dex locations).
- // The `foreign_dex_profile_path` is the path where the saver will put the
- // profile markers for loaded dex files which are not owned by the application.
- // The `app_dir` is the application directory and is used to decide which
- // dex files belong to the application.
void StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
+ const std::vector<std::string>& code_paths);
void StopProfileSaver();
void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
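With the foreign-dex-profile and app-dir parameters dropped, callers of StartProfileSaver now pass only the output file and the dex locations to profile. A hypothetical call site under the new signature (the paths are made up):

    std::vector<std::string> code_paths = {"/data/app/example/base.apk"};
    jit::Jit* jit = Runtime::Current()->GetJit();
    if (jit != nullptr) {
      jit->StartProfileSaver("/data/misc/profiles/cur/0/example.prof", code_paths);
    }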
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a2ea96d953..8b2a2b4d13 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -987,8 +987,11 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
if (ContainsPc(entry_point)) {
info->SetSavedEntryPoint(entry_point);
- Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
- info->GetMethod(), GetQuickToInterpreterBridge());
+ // Don't call Instrumentation::UpdateMethodsCode, as it can check the declaring
+ // class of the method. We may be concurrently running a GC which makes accessing
+ // the class unsafe. We know it is OK to bypass the instrumentation as we've just
+ // checked that the current entry point is JIT compiled code.
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
}
}
@@ -1259,6 +1262,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
std::vector<ProfileMethodInfo::ProfileClassReference> profile_classes;
const InlineCache& cache = info->cache_[i];
+ bool is_missing_types = false;
for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
mirror::Class* cls = cache.classes_[k].Read();
if (cls == nullptr) {
@@ -1281,17 +1285,20 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
if (!type_index.IsValid()) {
// Could be a proxy class or an array for which we couldn't find the type index.
+ is_missing_types = true;
continue;
}
if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) {
// Only consider classes from the same apk (including multidex).
profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
class_dex_file, type_index);
+ } else {
+ is_missing_types = true;
}
}
if (!profile_classes.empty()) {
inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
- cache.dex_pc_, profile_classes);
+ cache.dex_pc_, is_missing_types, profile_classes);
}
}
methods.emplace_back(/*ProfileMethodInfo*/
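In the hunk above, is_missing_types becomes true whenever a cached receiver class cannot be attributed to a tracked dex file: either no type index could be found (proxy or array classes) or the class lives outside the profiled apk. The flag then travels in ProfileInlineCache; a minimal construction sketch mirroring the updated constructor later in this patch (the dex pc is illustrative):

    std::vector<ProfileMethodInfo::ProfileClassReference> classes;  // left empty
    ProfileMethodInfo::ProfileInlineCache cache(/*dex_pc=*/ 12,
                                                /*is_missing_types=*/ true,
                                                classes);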
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 33a792fe75..c970979eaa 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -229,6 +229,12 @@ class JitCodeCache {
void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+ // Dynamically change whether we want to garbage collect code. Should only be used
+ // by tests.
+ void SetGarbageCollectCode(bool value) {
+ garbage_collect_code_ = value;
+ }
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
@@ -359,8 +365,8 @@ class JitCodeCache {
// It is atomic to avoid locking when reading it.
Atomic<uint64_t> last_update_time_ns_;
- // Whether we can do garbage collection.
- const bool garbage_collect_code_;
+ // Whether we can do garbage collection. Not 'const' as tests may override this.
+ bool garbage_collect_code_;
// The size in bytes of used memory for the data portion of the code cache.
size_t used_memory_for_data_ GUARDED_BY(lock_);
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 627cc93f38..b23a86313f 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -37,7 +37,8 @@
namespace art {
const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '3', '\0' }; // inline caches
+// Last profile version: fix the order of dex files in the profile.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '4', '\0' };
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -46,16 +47,19 @@ static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
// using the same test profile.
static constexpr bool kDebugIgnoreChecksum = false;
-static constexpr uint8_t kMegamorphicEncoding = 7;
+static constexpr uint8_t kIsMissingTypesEncoding = 6;
+static constexpr uint8_t kIsMegamorphicEncoding = 7;
static_assert(sizeof(InlineCache::kIndividualCacheSize) == sizeof(uint8_t),
"InlineCache::kIndividualCacheSize does not have the expect type size");
-static_assert(InlineCache::kIndividualCacheSize < kMegamorphicEncoding,
+static_assert(InlineCache::kIndividualCacheSize < kIsMegamorphicEncoding,
+ "InlineCache::kIndividualCacheSize is larger than expected");
+static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
const dex::TypeIndex& type_idx) {
- if (is_megamorphic) {
+ if (is_megamorphic || is_missing_types) {
return;
}
classes.emplace(dex_profile_idx, type_idx);
@@ -206,7 +210,8 @@ static constexpr size_t kLineHeaderSize =
* Classes are grouped per their dex files and the line
* `dex_profile_index,class_id1,class_id2...,dex_profile_index2,...` encodes the
* mapping from `dex_profile_index` to the set of classes `class_id1,class_id2...`
- * M stands for megamorphic and it's encoded as the byte kMegamorphicEncoding.
+ * M stands for megamorphic or missing types and it's encoded as either
+ * the byte kIsMegamorphicEncoding or kIsMissingTypesEncoding.
* When present, there will be no class ids following.
**/
bool ProfileCompilationInfo::Save(int fd) {
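Concretely, each inline-cache entry begins with a 16-bit dex pc and a size byte, where the reserved values 6 (kIsMissingTypesEncoding) and 7 (kIsMegamorphicEncoding) act as markers after which no class ids follow. A simplified decoding sketch of the format described above, mirroring the branch in ReadInlineCache later in this patch (ReadUint16/ReadUint8 are hypothetical reader helpers):

    uint16_t dex_pc = ReadUint16(buffer);
    uint8_t dex_to_classes_map_size = ReadUint8(buffer);
    if (dex_to_classes_map_size == kIsMissingTypesEncoding) {        // 6
      // Missing types: no class ids follow.
    } else if (dex_to_classes_map_size == kIsMegamorphicEncoding) {  // 7
      // Megamorphic: no class ids follow.
    } else {
      // dex_to_classes_map_size groups of (dex_profile_index, class ids) follow.
    }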
@@ -222,15 +227,23 @@ bool ProfileCompilationInfo::Save(int fd) {
DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
AddUintToBuffer(&buffer, static_cast<uint8_t>(info_.size()));
+ // Make sure we write the dex files in order of their profile index. This
+ // avoids writing the index in the output file and simplifies the parsing logic.
+ std::vector<const std::string*> ordered_info_location(info_.size());
+ std::vector<const DexFileData*> ordered_info_data(info_.size());
for (const auto& it : info_) {
+ ordered_info_location[it.second.profile_index] = &(it.first);
+ ordered_info_data[it.second.profile_index] = &(it.second);
+ }
+ for (size_t i = 0; i < info_.size(); i++) {
if (buffer.size() > kMaxSizeToKeepBeforeWriting) {
if (!WriteBuffer(fd, buffer.data(), buffer.size())) {
return false;
}
buffer.clear();
}
- const std::string& dex_location = it.first;
- const DexFileData& dex_data = it.second;
+ const std::string& dex_location = *ordered_info_location[i];
+ const DexFileData& dex_data = *ordered_info_data[i];
// Note that we allow dex files without any methods or classes, so that
// inline caches can refer to valid dex files.
@@ -289,10 +302,19 @@ void ProfileCompilationInfo::AddInlineCacheToBuffer(std::vector<uint8_t>* buffer
// Add the dex pc.
AddUintToBuffer(buffer, dex_pc);
- if (dex_pc_data.is_megamorphic) {
- // Add the megamorphic encoding if needed and continue.
- // If megamorphic, we don't add the rest of the classes.
- AddUintToBuffer(buffer, kMegamorphicEncoding);
+ // Add the megamorphic/missing_types encoding if needed and continue.
+ // In either case we don't add any classes to the profile, so there's
+ // no point in continuing.
+ // TODO(calin): in case we miss types there is still value to add the
+ // rest of the classes. They can be added without bumping the profile version.
+ if (dex_pc_data.is_missing_types) {
+ DCHECK(!dex_pc_data.is_megamorphic); // at this point the megamorphic flag should not be set.
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMissingTypesEncoding);
+ continue;
+ } else if (dex_pc_data.is_megamorphic) {
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMegamorphicEncoding);
continue;
}
@@ -403,11 +425,21 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
for (const auto& pmi_inline_cache_it : pmi.inline_caches) {
uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
- auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc);
+ DexPcData& dex_pc_data = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc)->second;
+ if (dex_pc_data.is_missing_types || dex_pc_data.is_megamorphic) {
+ // We are already megamorphic or we are missing types; no point in going forward.
+ continue;
+ }
+
+ if (pmi_ic_dex_pc_data.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ continue;
+ }
if (pmi_ic_dex_pc_data.is_megamorphic) {
- dex_pc_data_it->second.SetMegamorphic();
+ dex_pc_data.SetIsMegamorphic();
continue;
}
+
for (const ClassReference& class_ref : pmi_ic_dex_pc_data.classes) {
const DexReference& dex_ref = pmi.dex_references[class_ref.dex_profile_index];
DexFileData* class_dex_data = GetOrAddDexFileData(
@@ -416,7 +448,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
- dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
return true;
@@ -432,6 +464,11 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
auto inline_cache_it = data->method_map.FindOrAdd(pmi.dex_method_index);
for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
+ if (cache.is_missing_types) {
+ auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
for (const ProfileMethodInfo::ProfileClassReference& class_ref : cache.classes) {
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
@@ -440,6 +477,10 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
return false;
}
auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ if (dex_pc_data_it->second.is_missing_types) {
+ // Don't bother adding classes if we are missing types.
+ break;
+ }
dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
@@ -478,8 +519,12 @@ bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
READ_UINT(uint16_t, buffer, dex_pc, error);
READ_UINT(uint8_t, buffer, dex_to_classes_map_size, error);
auto dex_pc_data_it = inline_cache->FindOrAdd(dex_pc);
- if (dex_to_classes_map_size == kMegamorphicEncoding) {
- dex_pc_data_it->second.SetMegamorphic();
+ if (dex_to_classes_map_size == kIsMissingTypesEncoding) {
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
+ if (dex_to_classes_map_size == kIsMegamorphicEncoding) {
+ dex_pc_data_it->second.SetIsMegamorphic();
continue;
}
for (; dex_to_classes_map_size > 0; dex_to_classes_map_size--) {
@@ -797,10 +842,13 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
for (const auto& other_it : other.info_) {
const std::string& other_dex_location = other_it.first;
+ uint32_t other_checksum = other_it.second.checksum;
const DexFileData& other_dex_data = other_it.second;
- auto info_it = info_.FindOrAdd(other_dex_location, DexFileData(other_dex_data.checksum, 0));
- const DexFileData& dex_data = info_it->second;
- dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data.profile_index);
+ const DexFileData* dex_data = GetOrAddDexFileData(other_dex_location, other_checksum);
+ if (dex_data == nullptr) {
+ return false; // Could happen if we exceed the number of allowed dex files.
+ }
+ dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data->profile_index);
}
// Merge the actual profile data.
@@ -823,8 +871,10 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
uint16_t other_dex_pc = other_ic_it.first;
const ClassSet& other_class_set = other_ic_it.second.classes;
auto class_set = method_it->second.FindOrAdd(other_dex_pc);
- if (other_ic_it.second.is_megamorphic) {
- class_set->second.SetMegamorphic();
+ if (other_ic_it.second.is_missing_types) {
+ class_set->second.SetIsMissingTypes();
+ } else if (other_ic_it.second.is_megamorphic) {
+ class_set->second.SetIsMegamorphic();
} else {
for (const auto& class_it : other_class_set) {
class_set->second.AddClass(dex_profile_index_remap.Get(
@@ -949,10 +999,17 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "ProfileInfo:";
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
+ // Write the entries in profile index order.
+ std::vector<const std::string*> ordered_info_location(info_.size());
+ std::vector<const DexFileData*> ordered_info_data(info_.size());
for (const auto& it : info_) {
+ ordered_info_location[it.second.profile_index] = &(it.first);
+ ordered_info_data[it.second.profile_index] = &(it.second);
+ }
+ for (size_t profile_index = 0; profile_index < info_.size(); profile_index++) {
os << "\n";
- const std::string& location = it.first;
- const DexFileData& dex_data = it.second;
+ const std::string& location = *ordered_info_location[profile_index];
+ const DexFileData& dex_data = *ordered_info_data[profile_index];
if (print_full_dex_location) {
os << location;
} else {
@@ -960,6 +1017,7 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
+ os << " [index=" << static_cast<uint32_t>(dex_data.profile_index) << "]";
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
@@ -979,8 +1037,10 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "[";
for (const auto& inline_cache_it : method_it.second) {
os << "{" << std::hex << inline_cache_it.first << std::dec << ":";
- if (inline_cache_it.second.is_megamorphic) {
- os << "M";
+ if (inline_cache_it.second.is_missing_types) {
+ os << "MT";
+ } else if (inline_cache_it.second.is_megamorphic) {
+ os << "MM";
} else {
for (const ClassReference& class_ref : inline_cache_it.second.classes) {
os << "(" << static_cast<uint32_t>(class_ref.dex_profile_index)
@@ -1022,7 +1082,8 @@ void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* de
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == (*dex_files)[i]->GetLocation()) {
+ if (location == GetProfileDexFileKey((*dex_files)[i]->GetLocation()) &&
+ dex_data.checksum == (*dex_files)[i]->GetLocationChecksum()) {
dex_file = (*dex_files)[i];
}
}
@@ -1039,15 +1100,22 @@ bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
return info_.Equals(other.info_);
}
-std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() const {
+std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const {
+ std::unordered_map<std::string, std::string> key_to_location_map;
+ for (const std::string& location : dex_files_locations) {
+ key_to_location_map.emplace(GetProfileDexFileKey(location), location);
+ }
std::set<DexCacheResolvedClasses> ret;
for (auto&& pair : info_) {
const std::string& profile_key = pair.first;
- const DexFileData& data = pair.second;
- // TODO: Is it OK to use the same location for both base and dex location here?
- DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum);
- classes.AddClasses(data.class_set.begin(), data.class_set.end());
- ret.insert(classes);
+ auto it = key_to_location_map.find(profile_key);
+ if (it != key_to_location_map.end()) {
+ const DexFileData& data = pair.second;
+ DexCacheResolvedClasses classes(it->second, it->second, data.checksum);
+ classes.AddClasses(data.class_set.begin(), data.class_set.end());
+ ret.insert(classes);
+ }
}
return ret;
}
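GetResolvedClasses now maps profile keys back to the caller-supplied locations rather than reusing the key for both base and dex location, silently skipping entries whose key matches none of the requested locations. A hypothetical invocation (the apk paths are made up):

    std::unordered_set<std::string> locations = {
        "/data/app/example/base.apk",
        "/data/app/example/base.apk!classes2.dex",
    };
    std::set<DexCacheResolvedClasses> resolved = info.GetResolvedClasses(locations);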
@@ -1108,7 +1176,7 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
}
// We can't use a simple equality test because we need to match the dex files
- // of the inline caches which might have different profile indices.
+ // of the inline caches which might have different profile indexes.
for (const auto& inline_cache_it : inline_caches) {
uint16_t dex_pc = inline_cache_it.first;
const DexPcData dex_pc_data = inline_cache_it.second;
@@ -1117,7 +1185,8 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
return false;
}
const DexPcData& other_dex_pc_data = other_it->second;
- if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic) {
+ if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic ||
+ dex_pc_data.is_missing_types != other_dex_pc_data.is_missing_types) {
return false;
}
for (const ClassReference& class_ref : dex_pc_data.classes) {
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 4bfbfcd287..6ad528c805 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -36,18 +36,22 @@ namespace art {
*/
struct ProfileMethodInfo {
struct ProfileClassReference {
+ ProfileClassReference() : dex_file(nullptr) {}
ProfileClassReference(const DexFile* dex, const dex::TypeIndex& index)
: dex_file(dex), type_index(index) {}
const DexFile* dex_file;
- const dex::TypeIndex type_index;
+ dex::TypeIndex type_index;
};
struct ProfileInlineCache {
- ProfileInlineCache(uint32_t pc, const std::vector<ProfileClassReference>& profile_classes)
- : dex_pc(pc), classes(profile_classes) {}
+ ProfileInlineCache(uint32_t pc,
+ bool missing_types,
+ const std::vector<ProfileClassReference>& profile_classes)
+ : dex_pc(pc), is_missing_types(missing_types), classes(profile_classes) {}
const uint32_t dex_pc;
+ const bool is_missing_types;
const std::vector<ProfileClassReference> classes;
};
@@ -91,6 +95,11 @@ class ProfileCompilationInfo {
return dex_checksum == other.dex_checksum && dex_location == other.dex_location;
}
+ bool MatchesDex(const DexFile* dex_file) const {
+ return dex_checksum == dex_file->GetLocationChecksum() &&
+ dex_location == GetProfileDexFileKey(dex_file->GetLocation());
+ }
+
std::string dex_location;
uint32_t dex_checksum;
};
@@ -128,18 +137,30 @@ class ProfileCompilationInfo {
// Encodes the actual inline cache for a given dex pc (whether or not the receiver is
// megamorphic and its possible types).
- // If the receiver is megamorphic the set of classes will be empty.
+ // If the receiver is megamorphic or is missing types the set of classes will be empty.
struct DexPcData {
- DexPcData() : is_megamorphic(false) {}
+ DexPcData() : is_missing_types(false), is_megamorphic(false) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
- void SetMegamorphic() {
+ void SetIsMegamorphic() {
+ if (is_missing_types) return;
is_megamorphic = true;
classes.clear();
}
+ void SetIsMissingTypes() {
+ is_megamorphic = false;
+ is_missing_types = true;
+ classes.clear();
+ }
bool operator==(const DexPcData& other) const {
- return is_megamorphic == other.is_megamorphic && classes == other.classes;
+ return is_megamorphic == other.is_megamorphic &&
+ is_missing_types == other.is_missing_types &&
+ classes == other.classes;
}
+ // Not all runtime types can be encoded in the profile. For example, if the
+ // receiver type is in a dex file which is not tracked for profiling, its type
+ // cannot be encoded. When types are missing, this field will be set to true.
+ bool is_missing_types;
bool is_megamorphic;
ClassSet classes;
};
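The two setters give missing types priority over megamorphic: SetIsMissingTypes clears the megamorphic flag, SetIsMegamorphic returns early once types are missing, and AddClass is a no-op in either terminal state. A short sketch of the resulting transitions:

    ProfileCompilationInfo::DexPcData data;
    data.AddClass(/*dex_profile_idx=*/ 0, dex::TypeIndex(1));  // recorded
    data.SetIsMegamorphic();   // classes cleared, is_megamorphic = true
    data.SetIsMissingTypes();  // overrides: is_megamorphic reset, classes cleared
    data.SetIsMegamorphic();   // no-op while is_missing_types is set
    data.AddClass(0, dex::TypeIndex(2));  // ignored: AddClass returns early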
@@ -218,9 +239,8 @@ class ProfileCompilationInfo {
bool Equals(const ProfileCompilationInfo& other);
// Return the class descriptors for all of the classes in the profiles' class sets.
- // Note the dex location is actually the profile key, the caller needs to call back in to the
- // profile info stuff to generate a map back to the dex location.
- std::set<DexCacheResolvedClasses> GetResolvedClasses() const;
+ std::set<DexCacheResolvedClasses> GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const;
// Clear the resolved classes from the current object.
void ClearResolvedClasses();
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 332280a0e3..5cd8e8fef5 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -108,26 +108,31 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
for (ArtMethod* method : methods) {
std::vector<ProfileMethodInfo::ProfileInlineCache> caches;
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 1; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0));
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Polymorphic
- for (uint16_t dex_pc = 1; dex_pc < 2; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Megamorphic
- for (uint16_t dex_pc = 2; dex_pc < 3; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
profile_methods.push_back(pmi);
@@ -148,12 +153,15 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi;
SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index
for (const auto& inline_cache : pmi.inline_caches) {
+ ProfileCompilationInfo::DexPcData& dex_pc_data =
+ offline_pmi.inline_caches.FindOrAdd(inline_cache.dex_pc)->second;
+ if (inline_cache.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ }
for (const auto& class_ref : inline_cache.classes) {
uint8_t dex_profile_index = dex_map.FindOrAdd(const_cast<DexFile*>(class_ref.dex_file),
static_cast<uint8_t>(dex_map.size()))->second;
- offline_pmi.inline_caches
- .FindOrAdd(inline_cache.dex_pc)->second
- .AddClass(dex_profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(dex_profile_index, class_ref.type_index);
if (dex_profile_index >= offline_pmi.dex_references.size()) {
// This is a new dex.
const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey(
@@ -170,18 +178,18 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */ 3);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 10; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Polymorphic
- for (uint16_t dex_pc = 10; dex_pc < 20; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
@@ -190,9 +198,15 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Megamorphic
- for (uint16_t dex_pc = 20; dex_pc < 30; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMegamorphic();
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMissingTypes();
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
@@ -207,7 +221,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
}
- // Cannot sizeof the actual arrays so hardcode the values here.
+ void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
+ for (auto it : pmi->inline_caches) {
+ it.second.SetIsMissingTypes();
+ }
+ }
+
+ // We cannot use sizeof on the actual arrays, so hard-code the values here.
// They should not change anyway.
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
@@ -530,6 +550,58 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ASSERT_TRUE(loaded_pmi1 == pmi_extra);
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Make some inline caches megamorphic and add them to the profile again.
+ ProfileCompilationInfo saved_info_extra;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
+ MakeMegamorphic(&pmi_extra);
+ for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ }
+
+ // Mark all inline caches with missing types and add them to the profile again.
+ // This verifies that all inline caches (megamorphic or not) end up marked as missing types.
+ ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
+ SetIsMissingTypes(&missing_types);
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ }
+
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(saved_info_extra.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Merge the profiles so that we have the same view as the file.
+ ASSERT_TRUE(saved_info.MergeWith(saved_info_extra));
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+
+ ASSERT_TRUE(loaded_info.Equals(saved_info));
+
+ ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
+ ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
+ /* checksum */ 1,
+ /* method_idx */ 3,
+ &loaded_pmi1));
+ ASSERT_TRUE(loaded_pmi1 == pmi_extra);
+}
+
TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
ScratchFile profile;
@@ -570,7 +642,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
}
}
-TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCahce) {
+TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) {
ScratchFile profile;
ProfileCompilationInfo info;
@@ -662,7 +734,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMegamorphic();
pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
@@ -686,4 +758,33 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
+ // Create an inline cache with missing types
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMissingTypes();
+ pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+
+ ProfileCompilationInfo info_missing_types;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ pmi,
+ &info_missing_types));
+
+ // Create a profile with no inline caches (for the same method).
+ ProfileCompilationInfo info_no_inline_cache;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ &info_no_inline_cache));
+
+ // Merge the missing-types cache into the empty one.
+ // Everything should be saved without errors.
+ ASSERT_TRUE(info_no_inline_cache.MergeWith(info_missing_types));
+ ScratchFile profile;
+ ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
+}
+
} // namespace art
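The tests above exercise a three-state model for each inline-cache entry: a concrete set of receiver classes, a megamorphic marker once too many types have been seen, and the new missing-types marker for caches whose types could not be resolved. A minimal sketch of the saturation semantics, with member names and the precedence rule inferred from the tests rather than copied from profile_compilation_info.h (which this diff does not show):

    #include <cstdint>
    #include <set>
    #include <utility>

    // Sketch only: the real DexPcData lives in profile_compilation_info.h.
    // Precedence (missing-types wins over megamorphic) follows the comment
    // in MissingTypesInlineCaches above.
    struct DexPcDataSketch {
      void SetIsMissingTypes() {
        classes.clear();
        is_megamorphic = false;
        is_missing_types = true;  // Strongest state; survives later merges.
      }
      void SetIsMegamorphic() {
        if (is_missing_types) return;  // Never downgrade from missing-types.
        classes.clear();
        is_megamorphic = true;
      }
      void AddClass(uint8_t dex_profile_index, uint16_t type_index) {
        if (is_missing_types || is_megamorphic) return;  // Saturated: drop it.
        classes.emplace(dex_profile_index, type_index);
      }
      bool is_missing_types = false;
      bool is_megamorphic = false;
      std::set<std::pair<uint8_t, uint16_t>> classes;
    };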
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 61e6c4126a..00487c6728 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -39,11 +39,8 @@ pthread_t ProfileSaver::profiler_pthread_ = 0U;
ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
: jit_code_cache_(jit_code_cache),
- foreign_dex_profile_path_(foreign_dex_profile_path),
shutting_down_(false),
last_save_number_of_methods_(0),
last_save_number_of_classes_(0),
@@ -58,13 +55,12 @@ ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
total_number_of_failed_writes_(0),
total_ms_of_sleep_(0),
total_ns_of_work_(0),
- total_number_of_foreign_dex_marks_(0),
max_number_of_profile_entries_cached_(0),
total_number_of_hot_spikes_(0),
total_number_of_wake_ups_(0),
options_(options) {
DCHECK(options_.IsEnabled());
- AddTrackedLocations(output_filename, app_data_dir, code_paths);
+ AddTrackedLocations(output_filename, code_paths);
}
void ProfileSaver::Run() {
@@ -382,9 +378,7 @@ static bool ShouldProfileLocation(const std::string& location) {
void ProfileSaver::Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir) {
+ const std::vector<std::string>& code_paths) {
DCHECK(options.IsEnabled());
DCHECK(Runtime::Current()->GetJit() != nullptr);
DCHECK(!output_filename.empty());
@@ -409,7 +403,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
// apps which share the same runtime).
DCHECK_EQ(instance_->jit_code_cache_, jit_code_cache);
// Add the code_paths to the tracked locations.
- instance_->AddTrackedLocations(output_filename, app_data_dir, code_paths_to_profile);
+ instance_->AddTrackedLocations(output_filename, code_paths_to_profile);
return;
}
@@ -419,9 +413,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
instance_ = new ProfileSaver(options,
output_filename,
jit_code_cache,
- code_paths_to_profile,
- foreign_dex_profile_path,
- app_data_dir);
+ code_paths_to_profile);
// Create a new thread which does the saving.
CHECK_PTHREAD_CALL(
@@ -481,154 +473,16 @@ bool ProfileSaver::IsStarted() {
}
void ProfileSaver::AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths) {
auto it = tracked_dex_base_locations_.find(output_filename);
if (it == tracked_dex_base_locations_.end()) {
tracked_dex_base_locations_.Put(output_filename,
std::set<std::string>(code_paths.begin(), code_paths.end()));
- if (!app_data_dir.empty()) {
- app_data_dirs_.insert(app_data_dir);
- }
} else {
it->second.insert(code_paths.begin(), code_paths.end());
}
}
-// TODO(calin): This may lead to several calls to realpath.
-// Consider moving the logic to the saver thread (i.e. when notified,
-// only cache the location, and then wake up the saver thread to do the
-// comparisons with the real file paths and to create the markers).
-void ProfileSaver::NotifyDexUse(const std::string& dex_location) {
- if (!ShouldProfileLocation(dex_location)) {
- return;
- }
- std::set<std::string> app_code_paths;
- std::string foreign_dex_profile_path;
- std::set<std::string> app_data_dirs;
- {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ == nullptr) {
- return;
- }
- // Make a copy so that we don't hold the lock while doing I/O.
- for (const auto& it : instance_->tracked_dex_base_locations_) {
- app_code_paths.insert(it.second.begin(), it.second.end());
- }
- foreign_dex_profile_path = instance_->foreign_dex_profile_path_;
- app_data_dirs.insert(instance_->app_data_dirs_.begin(), instance_->app_data_dirs_.end());
- }
-
- bool mark_created = MaybeRecordDexUseInternal(dex_location,
- app_code_paths,
- foreign_dex_profile_path,
- app_data_dirs);
- if (mark_created) {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ != nullptr) {
- instance_->total_number_of_foreign_dex_marks_++;
- }
- }
-}
-
-static bool CheckContainsWithRealPath(const std::set<std::string>& paths_set,
- const std::string& path_to_check) {
- for (const auto& path : paths_set) {
- UniqueCPtr<const char[]> real_path(realpath(path.c_str(), nullptr));
- if (real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << path;
- continue;
- }
- std::string real_path_str(real_path.get());
- if (real_path_str == path_to_check) {
- return true;
- }
- }
- return false;
-}
-
-// After the call, dex_location_real_path will contain the marker's name.
-static bool CreateForeignDexMarker(const std::string& foreign_dex_profile_path,
- /*in-out*/ std::string* dex_location_real_path) {
- // For foreign dex files we record a flag on disk. PackageManager will (potentially) take this
- // into account when deciding how to optimize the loaded dex file.
- // The expected flag name is the canonical path of the apk where '/' is substituted to '@'.
- // (it needs to be kept in sync with
- // frameworks/base/services/core/java/com/android/server/pm/PackageDexOptimizer.java)
- std::replace(dex_location_real_path->begin(), dex_location_real_path->end(), '/', '@');
- std::string flag_path = foreign_dex_profile_path + "/" + *dex_location_real_path;
- // We use O_RDONLY as the access mode because we must supply some access
- // mode, and there is no access mode that means 'create but do not read' the
- // file. We will not not actually read from the file.
- int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(),
- O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0));
- if (fd != -1) {
- if (close(fd) != 0) {
- PLOG(WARNING) << "Could not close file after flagging foreign dex use " << flag_path;
- }
- return true;
- } else {
- if (errno != EEXIST && errno != EACCES) {
- // Another app could have already created the file, and selinux may not
- // allow the read access to the file implied by the call to open.
- PLOG(WARNING) << "Could not create foreign dex use mark " << flag_path;
- return false;
- }
- return true;
- }
-}
-
-bool ProfileSaver::MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& app_code_paths,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs) {
- if (dex_location.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use with an empty dex location.";
- return false;
- }
- if (foreign_dex_profile_path.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use without a valid profile path ";
- return false;
- }
-
- if (app_code_paths.find(dex_location) != app_code_paths.end()) {
- // The dex location belongs to the application code paths. Nothing to record.
- return false;
- }
-
- if (app_data_dirs.find(dex_location) != app_data_dirs.end()) {
- // The dex location is under the application folder. Nothing to record.
- return false;
- }
-
- // Do another round of checks with the real paths.
- // Application directory could be a symlink (e.g. /data/data instead of /data/user/0), and we
- // don't have control over how the dex files are actually loaded (symlink or canonical path),
-
- // Note that we could cache all the real locations in the saver (since it's an expensive
- // operation). However we expect that app_code_paths is small (usually 1 element), and
- // NotifyDexUse is called just a few times in the app lifetime. So we make the compromise
- // to save some bytes of memory usage.
-
- UniqueCPtr<const char[]> dex_location_real_path(realpath(dex_location.c_str(), nullptr));
- if (dex_location_real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << dex_location;
- return false;
- }
- std::string dex_location_real_path_str(dex_location_real_path.get());
-
- if (CheckContainsWithRealPath(app_code_paths, dex_location_real_path_str)) {
- return false;
- }
-
- if (CheckContainsWithRealPath(app_data_dirs, dex_location_real_path_str)) {
- return false;
- }
-
- return CreateForeignDexMarker(foreign_dex_profile_path, &dex_location_real_path_str);
-}
-
void ProfileSaver::DumpInstanceInfo(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
@@ -645,8 +499,6 @@ void ProfileSaver::DumpInfo(std::ostream& os) {
<< "ProfileSaver total_number_of_failed_writes=" << total_number_of_failed_writes_ << '\n'
<< "ProfileSaver total_ms_of_sleep=" << total_ms_of_sleep_ << '\n'
<< "ProfileSaver total_ms_of_work=" << NsToMs(total_ns_of_work_) << '\n'
- << "ProfileSaver total_number_of_foreign_dex_marks="
- << total_number_of_foreign_dex_marks_ << '\n'
<< "ProfileSaver max_number_profile_entries_cached="
<< max_number_of_profile_entries_cached_ << '\n'
<< "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n'
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 9c5e41fd13..ec8342ad9e 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -32,9 +32,7 @@ class ProfileSaver {
static void Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
REQUIRES(!Locks::profiler_lock_, !wait_lock_);
// Stops the profile saver thread.
@@ -46,8 +44,6 @@ class ProfileSaver {
// Returns true if the profile saver is started.
static bool IsStarted() REQUIRES(!Locks::profiler_lock_);
- static void NotifyDexUse(const std::string& dex_location);
-
// If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing.
static void DumpInstanceInfo(std::ostream& os);
@@ -66,9 +62,7 @@ class ProfileSaver {
ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir);
+ const std::vector<std::string>& code_paths);
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* RunProfileSaverThread(void* arg)
@@ -90,7 +84,6 @@ class ProfileSaver {
bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
void AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths)
REQUIRES(Locks::profiler_lock_);
@@ -102,12 +95,6 @@ class ProfileSaver {
// profile_cache_ for later save.
void FetchAndCacheResolvedClassesAndMethods();
- static bool MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& tracked_locations,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs);
-
void DumpInfo(std::ostream& os);
// The only instance of the saver.
@@ -121,13 +108,6 @@ class ProfileSaver {
// It maps profile locations to code paths (dex base locations).
SafeMap<std::string, std::set<std::string>> tracked_dex_base_locations_
GUARDED_BY(Locks::profiler_lock_);
- // The directory were the we should store the code paths.
- std::string foreign_dex_profile_path_;
-
- // A list of application directories, used to infer if a loaded dex belongs
- // to the application or not. Multiple application data directories are possible when
- // different apps share the same runtime.
- std::set<std::string> app_data_dirs_ GUARDED_BY(Locks::profiler_lock_);
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
uint32_t last_save_number_of_methods_;
@@ -152,7 +132,6 @@ class ProfileSaver {
uint64_t total_number_of_failed_writes_;
uint64_t total_ms_of_sleep_;
uint64_t total_ns_of_work_;
- uint64_t total_number_of_foreign_dex_marks_;
// TODO(calin): replace with an actual size.
uint64_t max_number_of_profile_entries_cached_;
uint64_t total_number_of_hot_spikes_;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 93c212bafb..40309b9bae 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -962,4 +962,52 @@ void ZeroAndReleasePages(void* address, size_t length) {
}
}
+void MemMap::AlignBy(size_t size) {
+ CHECK_EQ(begin_, base_begin_) << "Unsupported";
+ CHECK_EQ(size_, base_size_) << "Unsupported";
+ CHECK_GT(size, static_cast<size_t>(kPageSize));
+ CHECK_ALIGNED(size, kPageSize);
+ if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
+ IsAlignedParam(base_size_, size)) {
+ // Already aligned.
+ return;
+ }
+ uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
+ uint8_t* base_end = base_begin + base_size_;
+ uint8_t* aligned_base_begin = AlignUp(base_begin, size);
+ uint8_t* aligned_base_end = AlignDown(base_end, size);
+ CHECK_LE(base_begin, aligned_base_begin);
+ CHECK_LE(aligned_base_end, base_end);
+ size_t aligned_base_size = aligned_base_end - aligned_base_begin;
+ CHECK_LT(aligned_base_begin, aligned_base_end)
+ << "base_begin = " << reinterpret_cast<void*>(base_begin)
+ << " base_end = " << reinterpret_cast<void*>(base_end);
+ CHECK_GE(aligned_base_size, size);
+ // Unmap the unaligned parts.
+ if (base_begin < aligned_base_begin) {
+ MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
+ CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
+ << "base_begin=" << reinterpret_cast<void*>(base_begin)
+ << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
+ }
+ if (aligned_base_end < base_end) {
+ MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
+ CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
+ << "base_end=" << reinterpret_cast<void*>(base_end)
+ << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
+ }
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ base_begin_ = aligned_base_begin;
+ base_size_ = aligned_base_size;
+ begin_ = aligned_base_begin;
+ size_ = aligned_base_size;
+ DCHECK(maps_ != nullptr);
+ if (base_begin < aligned_base_begin) {
+ auto it = maps_->find(base_begin);
+ CHECK(it != maps_->end()) << "MemMap not found";
+ maps_->erase(it);
+ maps_->insert(std::make_pair(base_begin_, this));
+ }
+}
+
} // namespace art
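To make the trimming concrete: AlignBy can only shrink the mapping, unmapping less than `size` bytes at each end. A worked sketch with stand-in AlignUp/AlignDown helpers (ART's own live in base/bit_utils.h; power-of-two alignment is assumed here, matching the 2 * kPageSize the test later in this diff uses):

    #include <cassert>
    #include <cstdint>

    // Stand-ins for ART's AlignDown/AlignUp; power-of-two alignment assumed.
    constexpr uintptr_t AlignDown(uintptr_t x, uintptr_t alignment) {
      return x & ~(alignment - 1);
    }
    constexpr uintptr_t AlignUp(uintptr_t x, uintptr_t alignment) {
      return AlignDown(x + alignment - 1, alignment);
    }

    int main() {
      constexpr uintptr_t kPage = 0x1000;              // 4 KiB pages for the example.
      uintptr_t begin = 0x7000;                        // Page-aligned, not 2-page-aligned.
      uintptr_t end = begin + 6 * kPage;               // 0xD000.
      uintptr_t new_begin = AlignUp(begin, 2 * kPage);   // 0x8000: one page unmapped in front.
      uintptr_t new_end = AlignDown(end, 2 * kPage);     // 0xC000: one page unmapped at the back.
      assert(new_end - new_begin == 4 * kPage);        // Mapping shrank by exactly two pages.
      return 0;
    }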
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 71db3f7014..ceb4c33ee3 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -193,6 +193,9 @@ class MemMap {
// intermittently.
void TryReadable();
+ // Align the map by unmapping the unaligned parts at the lower and the higher ends.
+ void AlignBy(size_t size);
+
private:
MemMap(const std::string& name,
uint8_t* begin,
@@ -222,10 +225,10 @@ class MemMap {
bool low_4gb);
const std::string name_;
- uint8_t* const begin_; // Start of data.
+ uint8_t* begin_; // Start of data. May be changed by AlignBy.
size_t size_; // Length of data.
- void* const base_begin_; // Page-aligned base address.
+ void* base_begin_; // Page-aligned base address. May be changed by AlignBy.
size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
int prot_; // Protection of the map.
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index e703b78cfa..aa306ac337 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -431,4 +431,108 @@ TEST_F(MemMapTest, CheckNoGaps) {
ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
+TEST_F(MemMapTest, AlignBy) {
+ CommonInit();
+ std::string error_msg;
+ // Cast the page size to size_t.
+ const size_t page_size = static_cast<size_t>(kPageSize);
+ // Map a region.
+ std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+ nullptr,
+ 14 * page_size,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ uint8_t* base0 = m0->Begin();
+ ASSERT_TRUE(base0 != nullptr) << error_msg;
+ ASSERT_EQ(m0->Size(), 14 * page_size);
+ ASSERT_EQ(BaseBegin(m0.get()), base0);
+ ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+
+ // Break it into several regions by using RemapAtEnd.
+ std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg));
+ uint8_t* base1 = m1->Begin();
+ ASSERT_TRUE(base1 != nullptr) << error_msg;
+ ASSERT_EQ(base1, base0 + 3 * page_size);
+ ASSERT_EQ(m0->Size(), 3 * page_size);
+
+ std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
+ "MemMapTest_AlignByTest_map2",
+ PROT_READ | PROT_WRITE,
+ &error_msg));
+ uint8_t* base2 = m2->Begin();
+ ASSERT_TRUE(base2 != nullptr) << error_msg;
+ ASSERT_EQ(base2, base1 + 4 * page_size);
+ ASSERT_EQ(m1->Size(), 4 * page_size);
+
+ std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg));
+ uint8_t* base3 = m3->Begin();
+ ASSERT_TRUE(base3 != nullptr) << error_msg;
+ ASSERT_EQ(base3, base2 + 3 * page_size);
+ ASSERT_EQ(m2->Size(), 3 * page_size);
+ ASSERT_EQ(m3->Size(), 4 * page_size);
+
+ uint8_t* end0 = base0 + m0->Size();
+ uint8_t* end1 = base1 + m1->Size();
+ uint8_t* end2 = base2 + m2->Size();
+ uint8_t* end3 = base3 + m3->Size();
+
+ ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
+
+ if (IsAlignedParam(base0, 2 * page_size)) {
+ ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
+ ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
+ ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
+ ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
+ } else {
+ ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
+ ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
+ ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
+ ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
+ }
+
+ // Align by 2 * page_size.
+ m0->AlignBy(2 * page_size);
+ m1->AlignBy(2 * page_size);
+ m2->AlignBy(2 * page_size);
+ m3->AlignBy(2 * page_size);
+
+ EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+
+ EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+
+ if (IsAlignedParam(base0, 2 * page_size)) {
+ EXPECT_EQ(m0->Begin(), base0);
+ EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
+ EXPECT_EQ(m1->Begin(), base1 + page_size);
+ EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
+ EXPECT_EQ(m2->Begin(), base2 + page_size);
+ EXPECT_EQ(m2->Begin() + m2->Size(), end2);
+ EXPECT_EQ(m3->Begin(), base3);
+ EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+ } else {
+ EXPECT_EQ(m0->Begin(), base0 + page_size);
+ EXPECT_EQ(m0->Begin() + m0->Size(), end0);
+ EXPECT_EQ(m1->Begin(), base1);
+ EXPECT_EQ(m1->Begin() + m1->Size(), end1);
+ EXPECT_EQ(m2->Begin(), base2);
+ EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
+ EXPECT_EQ(m3->Begin(), base3 + page_size);
+ EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+ }
+}
+
} // namespace art
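For context, a minimal standalone use of the new call, mirroring the test above (the two boolean arguments are passed exactly as the test passes them, and the surrounding file is assumed to include mem_map.h and base/bit_utils.h as the test file does):

    std::string error_msg;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("AlignBy_example",
                                                     /* addr */ nullptr,
                                                     4 * kPageSize,  // Must exceed the alignment.
                                                     PROT_READ | PROT_WRITE,
                                                     false,          // Same bools the test passes.
                                                     false,
                                                     &error_msg));
    CHECK(map != nullptr) << error_msg;
    map->AlignBy(2 * kPageSize);  // Unmaps at most one page at each end.
    CHECK(IsAlignedParam(map->Begin(), 2 * kPageSize));
    CHECK(IsAlignedParam(map->Begin() + map->Size(), 2 * kPageSize));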
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 35ce98efad..dbb5a4c387 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -32,7 +32,7 @@ class StubTest_ReadBarrierForRoot_Test;
namespace mirror {
// String Compression
-static constexpr bool kUseStringCompression = false;
+static constexpr bool kUseStringCompression = true;
enum class StringCompressionFlag : uint32_t {
kCompressed = 0u,
kUncompressed = 1u
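Flipping kUseStringCompression on makes the flag above meaningful: the string's count field carries both the length and the compression state. A sketch of the packing, assuming the scheme used elsewhere in string.h (length in the upper 31 bits, flag in the low bit; that part of the header is outside this hunk):

    #include <cstdint>

    enum class StringCompressionFlag : uint32_t { kCompressed = 0u, kUncompressed = 1u };

    // Assumed packing; not copied from the header, which this hunk does not show.
    inline int32_t PackCount(int32_t length, StringCompressionFlag flag) {
      return (length << 1) | static_cast<int32_t>(flag);
    }
    inline int32_t LengthFromCount(int32_t count) { return count >> 1; }
    inline bool IsCompressed(int32_t count) { return (count & 1) == 0; }  // kCompressed == 0.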
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index efc42fdac7..9b707f8e1c 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -580,9 +580,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
jstring profile_file,
- jstring app_dir,
- jobjectArray code_paths,
- jstring foreign_dex_profile_path) {
+ jobjectArray code_paths) {
std::vector<std::string> code_paths_vec;
int code_paths_length = env->GetArrayLength(code_paths);
for (int i = 0; i < code_paths_length; i++) {
@@ -596,22 +594,7 @@ static void VMRuntime_registerAppInfo(JNIEnv* env,
std::string profile_file_str(raw_profile_file);
env->ReleaseStringUTFChars(profile_file, raw_profile_file);
- std::string foreign_dex_profile_path_str = "";
- if (foreign_dex_profile_path != nullptr) {
- const char* raw_foreign_dex_profile_path =
- env->GetStringUTFChars(foreign_dex_profile_path, nullptr);
- foreign_dex_profile_path_str.assign(raw_foreign_dex_profile_path);
- env->ReleaseStringUTFChars(foreign_dex_profile_path, raw_foreign_dex_profile_path);
- }
-
- const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
- std::string app_dir_str(raw_app_dir);
- env->ReleaseStringUTFChars(app_dir, raw_app_dir);
-
- Runtime::Current()->RegisterAppInfo(code_paths_vec,
- profile_file_str,
- foreign_dex_profile_path_str,
- app_dir_str);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
@@ -674,8 +657,7 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, is64Bit, "()Z"),
FAST_NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "()Z"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
- NATIVE_METHOD(VMRuntime, registerAppInfo,
- "(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)V"),
+ NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;[Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index fd22d9e646..836ba81d8e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -26,9 +26,11 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "JNIHelp.h"
+#include "non_debuggable_classes.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
#include "thread-inl.h"
+#include "thread_list.h"
#include "trace.h"
#if defined(__linux__)
@@ -39,6 +41,10 @@
namespace art {
+// Set to true to always determine the non-debuggable classes even if we would not allow a debugger
+// to actually attach.
+static constexpr bool kAlwaysCollectNonDebuggableClasses = kIsDebugBuild;
+
using android::base::StringPrintf;
static void EnableDebugger() {
@@ -68,6 +74,82 @@ static void EnableDebugger() {
}
}
+class ClassSet {
+ public:
+ // The number of classes we reasonably expect to have to look at. Realistically the number is
+ // more like ~10, but there is little harm in having some extra.
+ static constexpr int kClassSetCapacity = 100;
+
+ explicit ClassSet(Thread* const self) : self_(self) {
+ self_->GetJniEnv()->PushFrame(kClassSetCapacity);
+ }
+
+ ~ClassSet() {
+ self_->GetJniEnv()->PopFrame();
+ }
+
+ void AddClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_) {
+ class_set_.insert(self_->GetJniEnv()->AddLocalReference<jclass>(klass.Ptr()));
+ }
+
+ const std::unordered_set<jclass>& GetClasses() const {
+ return class_set_;
+ }
+
+ private:
+ Thread* const self_;
+ std::unordered_set<jclass> class_set_;
+};
+
+static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
+ REQUIRES(Locks::mutator_lock_) {
+ class NonDebuggableStacksVisitor : public StackVisitor {
+ public:
+ NonDebuggableStacksVisitor(Thread* t, ClassSet* class_set)
+ : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ class_set_(class_set) {}
+
+ ~NonDebuggableStacksVisitor() OVERRIDE {}
+
+ bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ if (GetMethod()->IsRuntimeMethod()) {
+ return true;
+ }
+ class_set_->AddClass(GetMethod()->GetDeclaringClass());
+ if (kIsDebugBuild) {
+ LOG(INFO) << GetMethod()->GetDeclaringClass()->PrettyClass()
+ << " might not be fully debuggable/deoptimizable due to "
+ << GetMethod()->PrettyMethod() << " appearing on the stack during zygote fork.";
+ }
+ return true;
+ }
+
+ private:
+ ClassSet* class_set_;
+ };
+ NonDebuggableStacksVisitor visitor(thread, reinterpret_cast<ClassSet*>(data));
+ visitor.WalkStack();
+}
+
+static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) {
+ Runtime* const runtime = Runtime::Current();
+ Thread* const self = Thread::Current();
+ // Get the mutator lock.
+ ScopedObjectAccess soa(self);
+ ClassSet classes(self);
+ {
+ // Drop the shared mutator lock.
+ ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ // Get exclusive mutator lock with suspend all.
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
+ }
+ for (jclass klass : classes.GetClasses()) {
+ NonDebuggableClasses::AddNonDebuggableClass(klass);
+ }
+}
+
static void EnableDebugFeatures(uint32_t debug_flags) {
// Must match values in com.android.internal.os.Zygote.
enum {
@@ -131,12 +213,17 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ bool needs_non_debuggable_classes = false;
if ((debug_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
runtime->SetJavaDebuggable(true);
// Deoptimize the boot image as it may be non-debuggable.
runtime->DeoptimizeBootImage();
debug_flags &= ~DEBUG_JAVA_DEBUGGABLE;
+ needs_non_debuggable_classes = true;
+ }
+ if (needs_non_debuggable_classes || kAlwaysCollectNonDebuggableClasses) {
+ CollectNonDebuggableClasses();
}
if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
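One easy-to-miss detail in ClassSet above: its constructor and destructor bracket a JNI local-reference frame, so the up-to-kClassSetCapacity jclass locals created during the stack walk are released in one go. The PushFrame/PopFrame pair used there is ART-internal JNIEnvExt API; in portable JNI terms the same RAII pattern would look like this sketch:

    #include <jni.h>

    // Portable-JNI sketch of the pattern; real code should also check the
    // PushLocalFrame return value for out-of-memory.
    class ScopedLocalFrame {
     public:
      ScopedLocalFrame(JNIEnv* env, jint capacity) : env_(env) {
        env_->PushLocalFrame(capacity);  // Reserve room for `capacity` local refs.
      }
      ~ScopedLocalFrame() {
        env_->PopLocalFrame(nullptr);    // Frees every local created in the frame.
      }
     private:
      JNIEnv* const env_;
    };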
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index 6989244280..fb4f99a126 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -48,12 +48,19 @@ static void Object_waitJI(JNIEnv* env, jobject java_this, jlong ms, jint ns) {
soa.Decode<mirror::Object>(java_this)->Wait(soa.Self(), ms, ns);
}
+static jint Object_identityHashCodeNative(JNIEnv* env, jclass, jobject javaObject) {
+ ScopedFastNativeObjectAccess soa(env);
+ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(javaObject);
+ return static_cast<jint>(o->IdentityHashCode());
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Object, internalClone, "()Ljava/lang/Object;"),
FAST_NATIVE_METHOD(Object, notify, "()V"),
FAST_NATIVE_METHOD(Object, notifyAll, "()V"),
OVERLOADED_FAST_NATIVE_METHOD(Object, wait, "()V", wait),
OVERLOADED_FAST_NATIVE_METHOD(Object, wait, "(JI)V", waitJI),
+ FAST_NATIVE_METHOD(Object, identityHashCodeNative, "(Ljava/lang/Object;)I"),
};
void register_java_lang_Object(JNIEnv* env) {
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index d7c9cd07b5..2cabce8868 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -227,15 +227,6 @@ static void System_arraycopyBooleanUnchecked(JNIEnv* env, jclass, jobject javaSr
javaDst, dstPos, count);
}
-static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
- if (UNLIKELY(javaObject == nullptr)) {
- return 0;
- }
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(javaObject);
- return static_cast<jint>(o->IdentityHashCode());
-}
-
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"),
FAST_NATIVE_METHOD(System, arraycopyCharUnchecked, "([CI[CII)V"),
@@ -246,7 +237,6 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(System, arraycopyFloatUnchecked, "([FI[FII)V"),
FAST_NATIVE_METHOD(System, arraycopyDoubleUnchecked, "([DI[DII)V"),
FAST_NATIVE_METHOD(System, arraycopyBooleanUnchecked, "([ZI[ZII)V"),
- FAST_NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"),
};
void register_java_lang_System(JNIEnv* env) {
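One behavioral wrinkle in this move: the System_identityHashCode just removed above null-checked its argument and returned 0, while the new Object_identityHashCodeNative does not, so the null case is presumably now handled on the Java side before delegating (that libcore change is not part of this diff). A defensive native variant, if one were ever needed again, would mirror the deleted code:

    // Hypothetical defensive variant; the registered function above relies on
    // the (assumed) Java-side null check instead.
    static jint Object_identityHashCodeNative(JNIEnv* env, jclass, jobject javaObject) {
      if (UNLIKELY(javaObject == nullptr)) {
        return 0;  // Contract of System.identityHashCode(null).
      }
      ScopedFastNativeObjectAccess soa(env);
      ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(javaObject);
      return static_cast<jint>(o->IdentityHashCode());
    }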
diff --git a/runtime/native/java_lang_Void.cc b/runtime/native/java_lang_Void.cc
new file mode 100644
index 0000000000..96bfd1b4fb
--- /dev/null
+++ b/runtime/native/java_lang_Void.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_Void.h"
+
+#include "class_linker.h"
+#include "jni_internal.h"
+#include "runtime.h"
+#include "scoped_fast_native_object_access-inl.h"
+
+namespace art {
+
+static jclass Void_lookupType(JNIEnv* env, jclass) {
+ ScopedFastNativeObjectAccess soa(env);
+ return soa.AddLocalReference<jclass>(
+ Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kPrimitiveVoid));
+}
+
+static JNINativeMethod gMethods[] = {
+ FAST_NATIVE_METHOD(Void, lookupType, "()Ljava/lang/Class;"),
+};
+
+void register_java_lang_Void(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/Void");
+}
+
+} // namespace art
diff --git a/runtime/native/java_lang_Void.h b/runtime/native/java_lang_Void.h
new file mode 100644
index 0000000000..8777d8068c
--- /dev/null
+++ b/runtime/native/java_lang_Void.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Void(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_
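The new files define only the registration hook; some caller in the runtime's JNI bootstrap must invoke it, and that call site is not part of this diff. Presumably it lands next to the existing register_java_lang_* calls, roughly like this hypothetical sketch:

    // Hypothetical call site; the real registration list (in the runtime's
    // native-method bootstrap) is not shown in this diff.
    void RegisterRuntimeNativeMethods(JNIEnv* env) {
      // ...
      register_java_lang_Object(env);
      register_java_lang_System(env);
      register_java_lang_Void(env);  // New with this change.
      // ...
    }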
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
new file mode 100644
index 0000000000..829ea65876
--- /dev/null
+++ b/runtime/non_debuggable_classes.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "non_debuggable_classes.h"
+
+#include "base/logging.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "obj_ptr-inl.h"
+#include "ScopedLocalRef.h"
+#include "thread-inl.h"
+
+namespace art {
+
+std::vector<jclass> NonDebuggableClasses::non_debuggable_classes;
+
+void NonDebuggableClasses::AddNonDebuggableClass(jclass klass) {
+ Thread* self = Thread::Current();
+ JNIEnvExt* env = self->GetJniEnv();
+ ObjPtr<mirror::Class> mirror_klass(self->DecodeJObject(klass)->AsClass());
+ for (jclass c : non_debuggable_classes) {
+ if (self->DecodeJObject(c)->AsClass() == mirror_klass.Ptr()) {
+ return;
+ }
+ }
+ non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(klass)));
+}
+
+} // namespace art
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
new file mode 100644
index 0000000000..e1b563339d
--- /dev/null
+++ b/runtime/non_debuggable_classes.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
+#define ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
+
+#include <vector>
+
+#include "base/mutex.h"
+#include "jni.h"
+
+namespace art {
+
+struct NonDebuggableClasses {
+ public:
+ static const std::vector<jclass>& GetNonDebuggableClasses() {
+ return non_debuggable_classes;
+ }
+
+ static void AddNonDebuggableClass(jclass klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static std::vector<jclass> non_debuggable_classes;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_NON_DEBUGGABLE_CLASSES_H_
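Two properties of this registry follow from the .cc half above and are worth stating: AddNonDebuggableClass deduplicates by the decoded class pointer with a linear scan (fine for the ~10 classes expected at zygote fork), and each stored jclass is a JNI global reference that is never released, consistent with a process-lifetime set seeded once. A sketch of the intended call pattern, matching the ZygoteHooks usage earlier in this diff:

    // AddNonDebuggableClass decodes the jobject, so the mutator lock must be
    // shared-held; ScopedObjectAccess provides that.
    void RecordClass(jclass klass) {
      art::ScopedObjectAccess soa(art::Thread::Current());
      art::NonDebuggableClasses::AddNonDebuggableClass(klass);  // Deduped global ref.
    }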
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 5ae2fc51b7..48bf1e72a4 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -430,8 +430,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetImageFileLocationOatChecksum()
- != GetCombinedImageChecksum()) {
+ } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -726,68 +725,81 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
-const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
- if (!image_info_load_attempted_) {
- image_info_load_attempted_ = true;
-
- Runtime* runtime = Runtime::Current();
- std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- if (!image_spaces.empty()) {
- cached_image_info_.location = image_spaces[0]->GetImageLocation();
-
- if (isa_ == kRuntimeISA) {
- const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
- cached_image_info_.oat_checksum = image_header.GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header.GetOatDataBegin());
- cached_image_info_.patch_delta = image_header.GetPatchDelta();
- } else {
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(cached_image_info_.location.c_str(),
- isa_,
- &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- cached_image_info_.oat_checksum = image_header->GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header->GetOatDataBegin());
- cached_image_info_.patch_delta = image_header->GetPatchDelta();
- }
- }
- image_info_load_succeeded_ = (!image_spaces.empty());
+// TODO: Use something better than xor for the combined image checksum.
+std::unique_ptr<OatFileAssistant::ImageInfo>
+OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
+ CHECK(error_msg != nullptr);
- combined_image_checksum_ = CalculateCombinedImageChecksum(isa_);
+ // Use the currently loaded image to determine the image locations for all
+ // the image spaces, regardless of the isa requested. Otherwise we would
+ // need to read from the boot image's oat file to determine the rest of the
+ // image locations in the case of multi-image.
+ Runtime* runtime = Runtime::Current();
+ std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
+ *error_msg = "There are no boot image spaces";
+ return nullptr;
}
- return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
-}
-// TODO: Use something better than xor.
-uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
- uint32_t checksum = 0;
- std::vector<gc::space::ImageSpace*> image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ std::unique_ptr<ImageInfo> info(new ImageInfo());
+ info->location = image_spaces[0]->GetImageLocation();
+
+ // TODO: Special casing on isa == kRuntimeISA is presumably motivated by
+ // performance: 'it's faster to use an already loaded image header than read
+ // the image header from disk'. But the loaded image is not necessarily the
+ // same as kRuntimeISA, so this behavior is suspect (b/35659889).
if (isa == kRuntimeISA) {
+ const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ info->patch_delta = image_header.GetPatchDelta();
+
+ info->oat_checksum = 0;
for (gc::space::ImageSpace* image_space : image_spaces) {
- checksum ^= image_space->GetImageHeader().GetOatChecksum();
+ info->oat_checksum ^= image_space->GetImageHeader().GetOatChecksum();
}
} else {
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ info->patch_delta = image_header->GetPatchDelta();
+
+ info->oat_checksum = 0;
for (gc::space::ImageSpace* image_space : image_spaces) {
std::string location = image_space->GetImageLocation();
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- checksum ^= image_header->GetOatChecksum();
+ image_header.reset(
+ gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
+ }
+ info->oat_checksum ^= image_header->GetOatChecksum();
}
}
- return checksum;
+ return info;
}
-uint32_t OatFileAssistant::GetCombinedImageChecksum() {
+const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
if (!image_info_load_attempted_) {
- GetImageInfo();
+ image_info_load_attempted_ = true;
+ std::string error_msg;
+ cached_image_info_ = ImageInfo::GetRuntimeImageInfo(isa_, &error_msg);
+ if (cached_image_info_ == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info: " << error_msg;
+ }
+ }
+ return cached_image_info_.get();
+}
+
+uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
+ std::string error_msg;
+ std::unique_ptr<ImageInfo> info = ImageInfo::GetRuntimeImageInfo(isa, &error_msg);
+ if (info == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info for checksum: " << error_msg;
+ return 0;
}
- return combined_image_checksum_;
+ return info->oat_checksum;
}
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
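The TODO kept above ("use something better than xor") is well founded: xor is order-insensitive and self-cancelling, so reordering the boot-image components or having two components with equal checksums is invisible to the combined value. Concretely:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0xDEADBEEFu;
      uint32_t b = 0x12345678u;
      assert((a ^ b) == (b ^ a));  // Reordered image spaces go undetected.
      assert((a ^ a) == 0u);       // Equal checksums cancel to zero, the same
                                   // value the failure path above returns.
      return 0;
    }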
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3ede29f5e0..eec87f0768 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -284,6 +284,9 @@ class OatFileAssistant {
uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
+
+ static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
+ std::string* error_msg);
};
class OatFileInfo {
@@ -414,8 +417,6 @@ class OatFileAssistant {
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
- uint32_t GetCombinedImageChecksum();
-
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
@@ -445,9 +446,7 @@ class OatFileAssistant {
// TODO: The image info should probably be moved out of the oat file
// assistant to an image file manager.
bool image_info_load_attempted_ = false;
- bool image_info_load_succeeded_ = false;
- ImageInfo cached_image_info_;
- uint32_t combined_image_checksum_ = 0;
+ std::unique_ptr<ImageInfo> cached_image_info_;
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 70796148a4..6e30f14fbb 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -729,9 +729,6 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
}
}
- // TODO(calin): Consider optimizing this knowing that is useless to record the
- // use of fully compiled apks.
- Runtime::Current()->NotifyDexLoaded(dex_location);
return dex_files;
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 77ca9ce2e5..c016728fa5 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -66,14 +66,9 @@
#include "ti_timers.h"
#include "transform.h"
-// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
-// easier to create.
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-
namespace openjdkjvmti {
EventHandler gEventHandler;
-ObjectTagTable gObjectTagTable(&gEventHandler);
#define ENSURE_NON_NULL(n) \
do { \
@@ -137,38 +132,40 @@ class JvmtiFunctions {
return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
- static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread) {
+ static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SuspendThreadList(jvmtiEnv* env,
- jint request_count,
- const jthread* request_list,
- jvmtiError* results) {
+ jint request_count ATTRIBUTE_UNUSED,
+ const jthread* request_list ATTRIBUTE_UNUSED,
+ jvmtiError* results ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread) {
+ static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThreadList(jvmtiEnv* env,
- jint request_count,
- const jthread* request_list,
- jvmtiError* results) {
+ jint request_count ATTRIBUTE_UNUSED,
+ const jthread* request_list ATTRIBUTE_UNUSED,
+ jvmtiError* results ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError StopThread(jvmtiEnv* env, jthread thread, jobject exception) {
+ static jvmtiError StopThread(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jobject exception ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread) {
+ static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
@@ -178,24 +175,25 @@ class JvmtiFunctions {
}
static jvmtiError GetOwnedMonitorInfo(jvmtiEnv* env,
- jthread thread,
- jint* owned_monitor_count_ptr,
- jobject** owned_monitors_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint* owned_monitor_count_ptr ATTRIBUTE_UNUSED,
+ jobject** owned_monitors_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
- jthread thread,
- jint* monitor_info_count_ptr,
- jvmtiMonitorStackDepthInfo** monitor_info_ptr) {
+ static jvmtiError GetOwnedMonitorStackDepthInfo(
+ jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jint* monitor_info_count_ptr ATTRIBUTE_UNUSED,
+ jvmtiMonitorStackDepthInfo** monitor_info_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
- jthread thread,
- jobject* monitor_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jobject* monitor_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -279,7 +277,7 @@ class JvmtiFunctions {
return StackUtil::GetFrameCount(env, thread, count_ptr);
}
- static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
+ static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -292,37 +290,49 @@ class JvmtiFunctions {
return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
- static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
+ static jvmtiError NotifyFramePop(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env, jthread thread, jobject value) {
+ static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jobject value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env, jthread thread, jint value) {
+ static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jint value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env, jthread thread, jlong value) {
+ static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jlong value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env, jthread thread, jfloat value) {
+ static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jfloat value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env, jthread thread, jdouble value) {
+ static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jdouble value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread) {
+ static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -334,7 +344,7 @@ class JvmtiFunctions {
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
ENSURE_HAS_CAP(env, can_tag_objects);
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.FollowReferences(env,
heap_filter,
klass,
@@ -349,7 +359,7 @@ class JvmtiFunctions {
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
ENSURE_HAS_CAP(env, can_tag_objects);
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
@@ -363,7 +373,7 @@ class JvmtiFunctions {
art::ScopedObjectAccess soa(jni_env);
art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(object);
- if (!gObjectTagTable.GetTag(obj.Ptr(), tag_ptr)) {
+ if (!ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->GetTag(obj.Ptr(), tag_ptr)) {
*tag_ptr = 0;
}
@@ -384,7 +394,7 @@ class JvmtiFunctions {
art::ScopedObjectAccess soa(jni_env);
art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(object);
- gObjectTagTable.Set(obj.Ptr(), tag);
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->Set(obj.Ptr(), tag);
return ERR(NONE);
}
@@ -403,12 +413,12 @@ class JvmtiFunctions {
}
art::ScopedObjectAccess soa(jni_env);
- return gObjectTagTable.GetTaggedObjects(env,
- tag_count,
- tags,
- count_ptr,
- object_result_ptr,
- tag_result_ptr);
+ return ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table->GetTaggedObjects(env,
+ tag_count,
+ tags,
+ count_ptr,
+ object_result_ptr,
+ tag_result_ptr);
}
static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
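The mechanical change running through this file: the process-wide gObjectTagTable is gone, and each call now uses the tag table hanging off the specific jvmtiEnv, matching the JVM TI rule that object tags are per-environment. The repeated expression can be read as this helper (the ArtJvmTiEnv member layout is implied by the ->get() calls above, not shown in this diff):

    // Shape of the per-environment lookup now used throughout this file.
    static ObjectTagTable* GetTagTable(jvmtiEnv* env) {
      return ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get();
    }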
@@ -417,169 +427,183 @@ class JvmtiFunctions {
static jvmtiError IterateOverObjectsReachableFromObject(
jvmtiEnv* env,
- jobject object,
- jvmtiObjectReferenceCallback object_reference_callback,
- const void* user_data) {
+ jobject object ATTRIBUTE_UNUSED,
+ jvmtiObjectReferenceCallback object_reference_callback ATTRIBUTE_UNUSED,
+ const void* user_data ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError IterateOverReachableObjects(jvmtiEnv* env,
- jvmtiHeapRootCallback heap_root_callback,
- jvmtiStackReferenceCallback stack_ref_callback,
- jvmtiObjectReferenceCallback object_ref_callback,
- const void* user_data) {
+ static jvmtiError IterateOverReachableObjects(
+ jvmtiEnv* env,
+ jvmtiHeapRootCallback heap_root_callback ATTRIBUTE_UNUSED,
+ jvmtiStackReferenceCallback stack_ref_callback ATTRIBUTE_UNUSED,
+ jvmtiObjectReferenceCallback object_ref_callback ATTRIBUTE_UNUSED,
+ const void* user_data ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IterateOverHeap(jvmtiEnv* env,
- jvmtiHeapObjectFilter object_filter,
- jvmtiHeapObjectCallback heap_object_callback,
- const void* user_data) {
+ jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
+ jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
+ const void* user_data ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError IterateOverInstancesOfClass(jvmtiEnv* env,
- jclass klass,
- jvmtiHeapObjectFilter object_filter,
- jvmtiHeapObjectCallback heap_object_callback,
- const void* user_data) {
+ static jvmtiError IterateOverInstancesOfClass(
+ jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
+ jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
+ const void* user_data ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalObject(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jobject* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jobject* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalInstance(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jobject* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jobject* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalInt(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jint* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jint* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalLong(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jlong* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jlong* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalFloat(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jfloat* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jfloat* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLocalDouble(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jdouble* value_ptr) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jdouble* value_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetLocalObject(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jobject value) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jobject value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetLocalInt(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jint value) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jint value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetLocalLong(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jlong value) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jlong value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetLocalFloat(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jfloat value) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jfloat value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError SetLocalDouble(jvmtiEnv* env,
- jthread thread,
- jint depth,
- jint slot,
- jdouble value) {
+ jthread thread ATTRIBUTE_UNUSED,
+ jint depth ATTRIBUTE_UNUSED,
+ jint slot ATTRIBUTE_UNUSED,
+ jdouble value ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError SetBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ static jvmtiError SetBreakpoint(jvmtiEnv* env,
+ jmethodID method ATTRIBUTE_UNUSED,
+ jlocation location ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ClearBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location) {
+ static jvmtiError ClearBreakpoint(jvmtiEnv* env,
+ jmethodID method ATTRIBUTE_UNUSED,
+ jlocation location ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ static jvmtiError SetFieldAccessWatch(jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jfieldID field ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jfieldID field ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ static jvmtiError SetFieldModificationWatch(jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jfieldID field ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field) {
+ static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jfieldID field ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
- HeapUtil heap_util(&gObjectTagTable);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
@@ -601,7 +625,9 @@ class JvmtiFunctions {
return ClassUtil::GetClassStatus(env, klass, status_ptr);
}
- static jvmtiError GetSourceFileName(jvmtiEnv* env, jclass klass, char** source_name_ptr) {
+ static jvmtiError GetSourceFileName(jvmtiEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ char** source_name_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
@@ -639,10 +665,10 @@ class JvmtiFunctions {
}
static jvmtiError GetConstantPool(jvmtiEnv* env,
- jclass klass,
- jint* constant_pool_count_ptr,
- jint* constant_pool_byte_count_ptr,
- unsigned char** constant_pool_bytes_ptr) {
+ jclass klass ATTRIBUTE_UNUSED,
+ jint* constant_pool_count_ptr ATTRIBUTE_UNUSED,
+ jint* constant_pool_byte_count_ptr ATTRIBUTE_UNUSED,
+ unsigned char** constant_pool_bytes_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
@@ -668,8 +694,8 @@ class JvmtiFunctions {
}
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
- jclass klass,
- char** source_debug_extension_ptr) {
+ jclass klass ATTRIBUTE_UNUSED,
+ char** source_debug_extension_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
@@ -678,6 +704,7 @@ class JvmtiFunctions {
ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ &gEventHandler,
art::Runtime::Current(),
art::Thread::Current(),
class_count,
@@ -695,6 +722,7 @@ class JvmtiFunctions {
ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ &gEventHandler,
art::Runtime::Current(),
art::Thread::Current(),
class_count,
@@ -715,8 +743,8 @@ class JvmtiFunctions {
}
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
- jobject object,
- jvmtiMonitorUsage* info_ptr) {
+ jobject object ATTRIBUTE_UNUSED,
+ jvmtiMonitorUsage* info_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -800,17 +828,17 @@ class JvmtiFunctions {
}
static jvmtiError GetLocalVariableTable(jvmtiEnv* env,
- jmethodID method,
- jint* entry_count_ptr,
- jvmtiLocalVariableEntry** table_ptr) {
+ jmethodID method ATTRIBUTE_UNUSED,
+ jint* entry_count_ptr ATTRIBUTE_UNUSED,
+ jvmtiLocalVariableEntry** table_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetBytecodes(jvmtiEnv* env,
- jmethodID method,
- jint* bytecode_count_ptr,
- unsigned char** bytecodes_ptr) {
+ jmethodID method ATTRIBUTE_UNUSED,
+ jint* bytecode_count_ptr ATTRIBUTE_UNUSED,
+ unsigned char** bytecodes_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
@@ -828,12 +856,14 @@ class JvmtiFunctions {
return MethodUtil::IsMethodObsolete(env, method, is_obsolete_ptr);
}
- static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix) {
+ static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env, jint prefix_count, char** prefixes) {
+ static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env,
+ jint prefix_count ATTRIBUTE_UNUSED,
+ char** prefixes ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -925,11 +955,12 @@ class JvmtiFunctions {
return gEventHandler.SetEvent(art_env, art_thread, GetArtJvmtiEvent(art_env, event_type), mode);
}
- static jvmtiError GenerateEvents(jvmtiEnv* env, jvmtiEvent event_type) {
- return ERR(NOT_IMPLEMENTED);
+ static jvmtiError GenerateEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jvmtiEvent event_type ATTRIBUTE_UNUSED) {
+ return OK;
}
- static jvmtiError GetExtensionFunctions(jvmtiEnv* env,
+ static jvmtiError GetExtensionFunctions(jvmtiEnv* env ATTRIBUTE_UNUSED,
jint* extension_count_ptr,
jvmtiExtensionFunctionInfo** extensions) {
// We do not have any extension functions.
@@ -939,7 +970,7 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError GetExtensionEvents(jvmtiEnv* env,
+ static jvmtiError GetExtensionEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
jint* extension_count_ptr,
jvmtiExtensionEventInfo** extensions) {
// We do not have any extension events.
@@ -949,9 +980,9 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError SetExtensionEventCallback(jvmtiEnv* env,
- jint extension_event_index,
- jvmtiExtensionEvent callback) {
+ static jvmtiError SetExtensionEventCallback(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jint extension_event_index ATTRIBUTE_UNUSED,
+ jvmtiExtensionEvent callback ATTRIBUTE_UNUSED) {
// We do not have any extension events, so any call is illegal.
return ERR(ILLEGAL_ARGUMENT);
}
@@ -969,10 +1000,15 @@ class JvmtiFunctions {
ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
jvmtiError ret = OK;
jvmtiCapabilities changed;
+ jvmtiCapabilities potential_capabilities;
+ ret = env->GetPotentialCapabilities(&potential_capabilities);
+ if (ret != OK) {
+ return ret;
+ }
#define ADD_CAPABILITY(e) \
do { \
if (capabilities_ptr->e == 1) { \
- if (kPotentialCapabilities.e == 1) { \
+ if (potential_capabilities.e == 1) { \
if (art_env->capabilities.e != 1) { \
art_env->capabilities.e = 1; \
changed.e = 1; \
@@ -1103,22 +1139,26 @@ class JvmtiFunctions {
return OK;
}
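AddCapabilities (above) now validates each request against GetPotentialCapabilities() on the same environment rather than against the static kPotentialCapabilities table, so the grantable set can differ per env. A minimal agent-side sketch of the path this exercises (only the JVMTI calls are real; the rest is illustrative):

    jvmtiCapabilities potential;
    if (jvmti->GetPotentialCapabilities(&potential) == JVMTI_ERROR_NONE &&
        potential.can_tag_objects) {
      jvmtiCapabilities wanted = {};  // zero-init, then request a single capability
      wanted.can_tag_objects = 1;
      jvmtiError err = jvmti->AddCapabilities(&wanted);
      // JVMTI_ERROR_NOT_AVAILABLE here means the capability cannot be granted.
    }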
- static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env,
+ jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env,
+ jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
- static jvmtiError GetThreadCpuTime(jvmtiEnv* env, jthread thread, jlong* nanos_ptr) {
+ static jvmtiError GetThreadCpuTime(jvmtiEnv* env,
+ jthread thread ATTRIBUTE_UNUSED,
+ jlong* nanos_ptr ATTRIBUTE_UNUSED) {
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
@@ -1162,6 +1202,8 @@ class JvmtiFunctions {
static jvmtiError DisposeEnvironment(jvmtiEnv* env) {
ENSURE_VALID_ENV(env);
gEventHandler.RemoveArtJvmTiEnv(ArtJvmTiEnv::AsArtJvmTiEnv(env));
+ art::Runtime::Current()->RemoveSystemWeakHolder(
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
delete env;
return OK;
}
@@ -1261,7 +1303,9 @@ class JvmtiFunctions {
}
}
- static jvmtiError SetVerboseFlag(jvmtiEnv* env, jvmtiVerboseFlag flag, jboolean value) {
+ static jvmtiError SetVerboseFlag(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jvmtiVerboseFlag flag,
+ jboolean value) {
if (flag == jvmtiVerboseFlag::JVMTI_VERBOSE_OTHER) {
// OTHER is special, as it's 0, so can't do a bit check.
bool val = (value == JNI_TRUE) ? true : false;
@@ -1315,7 +1359,8 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
+ static jvmtiError GetJLocationFormat(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jvmtiJlocationFormat* format_ptr) {
// Report BCI as jlocation format. We report dex bytecode indices.
if (format_ptr == nullptr) {
return ERR(NULL_POINTER);
@@ -1333,13 +1378,25 @@ static bool IsJvmtiVersion(jint version) {
version == JVMTI_VERSION;
}
+extern const jvmtiInterface_1 gJvmtiInterface;
+ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
+ : art_vm(runtime),
+ local_data(nullptr),
+ capabilities(),
+ object_tag_table(new ObjectTagTable(event_handler)) {
+ functions = &gJvmtiInterface;
+}
+
// Creates a jvmtiEnv and returns it with the art::ti::Env that is associated with it. new_art_ti
// is a pointer to the uninitialized memory for an art::ti::Env.
static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
- struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm);
+ struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler);
*new_jvmtiEnv = env;
gEventHandler.RegisterArtJvmTiEnv(env);
+
+ art::Runtime::Current()->AddSystemWeakHolder(
+ ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
}
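Each ArtJvmTiEnv now owns its ObjectTagTable and registers it as a system-weak holder, so the GC can clear entries for dead objects and tags become per-environment, as the JVMTI spec requires. A hedged illustration of the observable behavior, assuming two environments that both hold can_tag_objects:

    jvmtiEnv* env1;
    jvmtiEnv* env2;
    vm->GetEnv(reinterpret_cast<void**>(&env1), JVMTI_VERSION_1_2);  // each call creates a new env
    vm->GetEnv(reinterpret_cast<void**>(&env2), JVMTI_VERSION_1_2);
    env1->SetTag(obj, 1);  // stored in env1's table
    env2->SetTag(obj, 2);  // independent table; env1 still reads back 1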
// A hook that the runtime uses to allow plugins to handle GetEnv calls. It returns true and
@@ -1371,7 +1428,6 @@ extern "C" bool ArtPlugin_Initialize() {
SearchUtil::Register();
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
- runtime->AddSystemWeakHolder(&gObjectTagTable);
return true;
}
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 99139a1f37..2ff3a478c4 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -48,8 +48,7 @@
namespace openjdkjvmti {
-extern const jvmtiInterface_1 gJvmtiInterface;
-extern EventHandler gEventHandler;
+class ObjectTagTable;
// A structure that is a jvmtiEnv with additional information for the runtime.
struct ArtJvmTiEnv : public jvmtiEnv {
@@ -60,10 +59,10 @@ struct ArtJvmTiEnv : public jvmtiEnv {
EventMasks event_masks;
std::unique_ptr<jvmtiEventCallbacks> event_callbacks;
- explicit ArtJvmTiEnv(art::JavaVMExt* runtime)
- : art_vm(runtime), local_data(nullptr), capabilities() {
- functions = &gJvmtiInterface;
- }
+ // Tagging is specific to the jvmtiEnv.
+ std::unique_ptr<ObjectTagTable> object_tag_table;
+
+ ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
return art::down_cast<ArtJvmTiEnv*>(env);
diff --git a/runtime/openjdkjvmti/ti_field.cc b/runtime/openjdkjvmti/ti_field.cc
index 8c3f2fffbd..1e5fbda35b 100644
--- a/runtime/openjdkjvmti/ti_field.cc
+++ b/runtime/openjdkjvmti/ti_field.cc
@@ -88,7 +88,6 @@ jvmtiError FieldUtil::GetFieldName(jvmtiEnv* env,
*signature_ptr = signature_copy.get();
}
- // TODO: Support generic signature.
if (generic_ptr != nullptr) {
*generic_ptr = nullptr;
if (!art_field->GetDeclaringClass()->IsProxyClass()) {
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 2fbc12bb38..d52f0ea290 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -50,25 +50,28 @@ jint ReportString(art::ObjPtr<art::mirror::Object> obj,
if (UNLIKELY(cb->string_primitive_value_callback != nullptr) && obj->IsString()) {
art::ObjPtr<art::mirror::String> str = obj->AsString();
int32_t string_length = str->GetLength();
- jvmtiError alloc_error;
- JvmtiUniquePtr<uint16_t[]> data = AllocJvmtiUniquePtr<uint16_t[]>(env,
- string_length,
- &alloc_error);
- if (data == nullptr) {
- // TODO: Not really sure what to do here. Should we abort the iteration and go all the way
- // back? For now just warn.
- LOG(WARNING) << "Unable to allocate buffer for string reporting! Silently dropping value.";
- return 0;
- }
+ JvmtiUniquePtr<uint16_t[]> data;
- if (str->IsCompressed()) {
- uint8_t* compressed_data = str->GetValueCompressed();
- for (int32_t i = 0; i != string_length; ++i) {
- data[i] = compressed_data[i];
+ if (string_length > 0) {
+ jvmtiError alloc_error;
+ data = AllocJvmtiUniquePtr<uint16_t[]>(env, string_length, &alloc_error);
+ if (data == nullptr) {
+ // TODO: Not really sure what to do here. Should we abort the iteration and go all the way
+ // back? For now just warn.
+ LOG(WARNING) << "Unable to allocate buffer for string reporting! Silently dropping value."
+ << " >" << str->ToModifiedUtf8() << "<";
+ return 0;
+ }
+
+ if (str->IsCompressed()) {
+ uint8_t* compressed_data = str->GetValueCompressed();
+ for (int32_t i = 0; i != string_length; ++i) {
+ data[i] = compressed_data[i];
+ }
+ } else {
+ // Can copy directly.
+ memcpy(data.get(), str->GetValue(), string_length * sizeof(uint16_t));
}
- } else {
- // Can copy directly.
- memcpy(data.get(), str->GetValue(), string_length * sizeof(uint16_t));
}
const jlong class_tag = tag_table->GetTagOrZero(obj->GetClass());
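The restructuring also fixes empty strings: Allocate(0) may legitimately return a null pointer, which the old code could not distinguish from an allocation failure, so empty strings were silently dropped. What a string callback can now expect (the callback name is hypothetical; the signature is the spec's jvmtiStringPrimitiveValueCallback):

    static jint JNICALL OnStringValue(jlong class_tag, jlong size, jlong* tag_ptr,
                                      const jchar* value, jint value_length,
                                      void* user_data) {
      // An empty java.lang.String is now reported with value_length == 0
      // (value may be null) instead of being skipped with a warning.
      return 0;
    }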
@@ -159,32 +162,437 @@ jint ReportPrimitiveArray(art::ObjPtr<art::mirror::Object> obj,
return 0;
}
-} // namespace
+template <typename UserData>
+bool VisitorFalse(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field ATTRIBUTE_UNUSED,
+ size_t field_index ATTRIBUTE_UNUSED,
+ UserData* user_data ATTRIBUTE_UNUSED) {
+ return false;
+}
-struct IterateThroughHeapData {
- IterateThroughHeapData(HeapUtil* _heap_util,
- jvmtiEnv* _env,
- jint heap_filter,
- art::ObjPtr<art::mirror::Class> klass,
- const jvmtiHeapCallbacks* _callbacks,
- const void* _user_data)
- : heap_util(_heap_util),
- filter_klass(klass),
- env(_env),
- callbacks(_callbacks),
- user_data(_user_data),
- filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
+template <typename UserData, bool kCallVisitorOnRecursion>
+class FieldVisitor {
+ public:
+ // Report the contents of the primitive fields of the given object, if a callback is set.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ static bool ReportFields(art::ObjPtr<art::mirror::Object> obj,
+ UserData* user_data,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ FieldVisitor fv(user_data);
+
+ if (obj->IsClass()) {
+ // When visiting a class, we only visit the static fields of the given class. No field of
+ // superclasses is visited.
+ art::ObjPtr<art::mirror::Class> klass = obj->AsClass();
+ // Only report fields on resolved classes. We need valid field data.
+ if (!klass->IsResolved()) {
+ return false;
+ }
+ return fv.ReportFieldsImpl(nullptr,
+ obj->AsClass(),
+ obj->AsClass()->IsInterface(),
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ } else {
+ // See comment above. Just double-checking here, but an instance *should* mean the class was
+ // resolved.
+ DCHECK(obj->GetClass()->IsResolved() || obj->GetClass()->IsErroneousResolved());
+ return fv.ReportFieldsImpl(obj,
+ obj->GetClass(),
+ false,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ }
+ }
+
+ private:
+ explicit FieldVisitor(UserData* user_data) : user_data_(user_data) {}
+
+ // Report the contents of fields of the given object. If obj is null, report the static fields,
+ // otherwise the instance fields.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsImpl(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Compute the starting field index: interface fields occupy the first indices.
+ size_t interface_field_count = CountInterfaceFields(klass);
+
+ size_t tmp;
+ bool aborted = ReportFieldsRecursive(obj,
+ klass,
+ interface_field_count,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &tmp);
+ return aborted;
+ }
+
+ // Report the fields of the given class and, recursively, its superclasses first. Return true
+ // if the visit was aborted.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsRecursive(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ size_t interface_fields,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor,
+ size_t* field_index_out)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(klass != nullptr);
+ size_t field_index;
+ if (klass->GetSuperClass() == nullptr) {
+ // j.l.Object. Start with the fields from interfaces.
+ field_index = interface_fields;
+ if (skip_java_lang_object) {
+ *field_index_out = field_index;
+ return false;
+ }
+ } else {
+ // Report superclass fields.
+ if (kCallVisitorOnRecursion) {
+ if (ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &field_index)) {
+ return true;
+ }
+ } else {
+ // Still call, but with empty visitor. This is required for correct counting.
+ ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ &field_index);
+ }
+ }
+
+ // Now visit fields for the current klass.
+
+ for (auto& static_field : klass->GetSFields()) {
+ if (static_field.IsPrimitiveType()) {
+ if (static_prim_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (static_ref_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ for (auto& instance_field : klass->GetIFields()) {
+ if (instance_field.IsPrimitiveType()) {
+ if (instance_prim_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (instance_ref_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ *field_index_out = field_index;
+ return false;
+ }
+
+ // Implements a visit of the implemented interfaces of a given class.
+ template <typename T>
+ struct RecursiveInterfaceVisit {
+ static void VisitStatic(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ RecursiveInterfaceVisit rv;
+ rv.Visit(self, klass, visitor);
+ }
+
+ void Visit(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // First visit the parent, to get the order right.
+ // (We do this in preparation for actual visiting of interface fields.)
+ if (klass->GetSuperClass() != nullptr) {
+ Visit(self, klass->GetSuperClass(), visitor);
+ }
+ for (uint32_t i = 0; i != klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> inf_klass =
+ art::mirror::Class::GetDirectInterface(self, klass, i);
+ DCHECK(inf_klass != nullptr);
+ VisitInterface(self, inf_klass, visitor);
+ }
+ }
+
+ void VisitInterface(art::Thread* self, art::ObjPtr<art::mirror::Class> inf_klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ auto it = visited_interfaces.find(inf_klass.Ptr());
+ if (it != visited_interfaces.end()) {
+ return;
+ }
+ visited_interfaces.insert(inf_klass.Ptr());
+
+ // Let the visitor know about this one. Note that this order is acceptable, as the ordering
+ // of these fields never matters for known visitors.
+ visitor(inf_klass);
+
+ // Now visit the superinterfaces.
+ for (uint32_t i = 0; i != inf_klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> super_inf_klass =
+ art::mirror::Class::GetDirectInterface(self, inf_klass, i);
+ DCHECK(super_inf_klass != nullptr);
+ VisitInterface(self, super_inf_klass, visitor);
+ }
+ }
+
+ std::unordered_set<art::mirror::Class*> visited_interfaces;
+ };
+
+ // Count the interface fields. Note that we cannot use the interface table, as that only contains
+ // "non-marker" interfaces (= interfaces with methods).
+ static size_t CountInterfaceFields(art::ObjPtr<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ size_t count = 0;
+ auto visitor = [&count](art::ObjPtr<art::mirror::Class> inf_klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(inf_klass->IsInterface());
+ DCHECK_EQ(0u, inf_klass->NumInstanceFields());
+ count += inf_klass->NumStaticFields();
+ };
+ RecursiveInterfaceVisit<decltype(visitor)>::VisitStatic(art::Thread::Current(), klass, visitor);
+ return count;
+
+ // TODO: Implement caching.
+ }
+
+ UserData* user_data_;
+};
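FieldVisitor establishes the field numbering that the JVMTI callbacks report: interface statics occupy the lowest indices, then each class from the top of the hierarchy down, statics before instance fields, with the most-derived class last. A worked example of the intended index space (toy hierarchy; any internal fields of j.l.Object are ignored here):

    // interface I { int IF = 0; }
    // class A implements I { static int sa; int ia; }
    // class B extends A    { int ib; }
    //
    // Index space when visiting an instance of B:
    //   0: I.IF  (interface statics first; reported when I itself is visited)
    //   1: A.sa  (superclass statics)
    //   2: A.ia  (superclass instance fields)
    //   3: B.ib  (most-derived class last)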
+
+// Debug helper. Prints the structure of an object.
+template <bool kStatic, bool kRef>
+struct DumpVisitor {
+ static bool Callback(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(ERROR) << (kStatic ? "static " : "instance ")
+ << (kRef ? "ref " : "primitive ")
+ << field.PrettyField()
+ << " @ "
+ << field_index;
+ return false;
+ }
+};
+ATTRIBUTE_UNUSED
+void DumpObjectFields(art::ObjPtr<art::mirror::Object> obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (obj->IsClass()) {
+ FieldVisitor<void, false>::ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ } else {
+ FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ }
+}
+
+class ReportPrimitiveField {
+ public:
+ static bool Report(art::ObjPtr<art::mirror::Object> obj,
+ ObjectTagTable* tag_table,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (UNLIKELY(cb->primitive_field_callback != nullptr)) {
+ jlong class_tag = tag_table->GetTagOrZero(obj->GetClass());
+ ReportPrimitiveField rpf(tag_table, class_tag, cb, user_data);
+ if (obj->IsClass()) {
+ return FieldVisitor<ReportPrimitiveField, false>::ReportFields(
+ obj,
+ &rpf,
+ ReportPrimitiveFieldCallback<true>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>);
+ } else {
+ return FieldVisitor<ReportPrimitiveField, true>::ReportFields(
+ obj,
+ &rpf,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ ReportPrimitiveFieldCallback<false>,
+ VisitorFalse<ReportPrimitiveField>);
+ }
+ }
+ return false;
+ }
+
+ private:
+ ReportPrimitiveField(ObjectTagTable* tag_table,
+ jlong class_tag,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ : tag_table_(tag_table), class_tag_(class_tag), cb_(cb), user_data_(user_data) {}
+
+ template <bool kReportStatic>
+ static bool ReportPrimitiveFieldCallback(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ art::ArtField& field,
+ size_t field_index,
+ ReportPrimitiveField* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Primitive::Type art_prim_type = field.GetTypeAsPrimitiveType();
+ jvmtiPrimitiveType prim_type =
+ static_cast<jvmtiPrimitiveType>(art::Primitive::Descriptor(art_prim_type)[0]);
+ DCHECK(prim_type == JVMTI_PRIMITIVE_TYPE_BOOLEAN ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_BYTE ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_CHAR ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_SHORT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_INT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_LONG ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_FLOAT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_DOUBLE);
+ jvmtiHeapReferenceInfo info;
+ info.field.index = field_index;
+
+ jvalue value;
+ memset(&value, 0, sizeof(jvalue));
+ art::ObjPtr<art::mirror::Object> src = kReportStatic ? klass : obj;
+ switch (art_prim_type) {
+ case art::Primitive::Type::kPrimBoolean:
+ value.z = field.GetBoolean(src) == 0 ? JNI_FALSE : JNI_TRUE;
+ break;
+ case art::Primitive::Type::kPrimByte:
+ value.b = field.GetByte(src);
+ break;
+ case art::Primitive::Type::kPrimChar:
+ value.c = field.GetChar(src);
+ break;
+ case art::Primitive::Type::kPrimShort:
+ value.s = field.GetShort(src);
+ break;
+ case art::Primitive::Type::kPrimInt:
+ value.i = field.GetInt(src);
+ break;
+ case art::Primitive::Type::kPrimLong:
+ value.j = field.GetLong(src);
+ break;
+ case art::Primitive::Type::kPrimFloat:
+ value.f = field.GetFloat(src);
+ break;
+ case art::Primitive::Type::kPrimDouble:
+ value.d = field.GetDouble(src);
+ break;
+ case art::Primitive::Type::kPrimVoid:
+ case art::Primitive::Type::kPrimNot: {
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ }
+ }
+
+ jlong obj_tag = user_data->tag_table_->GetTagOrZero(src.Ptr());
+ const jlong saved_obj_tag = obj_tag;
+
+ jint ret = user_data->cb_->primitive_field_callback(kReportStatic
+ ? JVMTI_HEAP_REFERENCE_STATIC_FIELD
+ : JVMTI_HEAP_REFERENCE_FIELD,
+ &info,
+ user_data->class_tag_,
+ &obj_tag,
+ value,
+ prim_type,
+ const_cast<void*>(user_data->user_data_));
+
+ if (saved_obj_tag != obj_tag) {
+ user_data->tag_table_->Set(src.Ptr(), obj_tag);
+ }
+
+ if ((ret & JVMTI_VISIT_ABORT) != 0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ ObjectTagTable* tag_table_;
+ jlong class_tag_;
+ const jvmtiHeapCallbacks* cb_;
+ const void* user_data_;
+};
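ReportPrimitiveField is what wires up JVMTI's primitive_field_callback for both IterateThroughHeap and FollowReferences (used further below). The callback it invokes has the spec shape:

    static jint JNICALL OnPrimitiveField(jvmtiHeapReferenceKind kind,         // STATIC_FIELD or FIELD
                                         const jvmtiHeapReferenceInfo* info,  // info->field.index as computed above
                                         jlong object_class_tag,
                                         jlong* object_tag_ptr,               // changes are written back via Set()
                                         jvalue value,
                                         jvmtiPrimitiveType value_type,
                                         void* user_data) {
      return 0;  // or JVMTI_VISIT_ABORT to stop the iteration
    }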
+
+struct HeapFilter {
+ explicit HeapFilter(jint heap_filter)
+ : filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
filter_out_untagged((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0),
filter_out_class_tagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0),
filter_out_class_untagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0),
any_filter(filter_out_tagged ||
filter_out_untagged ||
filter_out_class_tagged ||
- filter_out_class_untagged),
- stop_reports(false) {
+ filter_out_class_untagged) {
}
- bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) {
+ bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) const {
if (!any_filter) {
return true;
}
@@ -201,16 +609,37 @@ struct IterateThroughHeapData {
return true;
}
- HeapUtil* heap_util;
- art::ObjPtr<art::mirror::Class> filter_klass;
- jvmtiEnv* env;
- const jvmtiHeapCallbacks* callbacks;
- const void* user_data;
const bool filter_out_tagged;
const bool filter_out_untagged;
const bool filter_out_class_tagged;
const bool filter_out_class_untagged;
const bool any_filter;
+};
+
+} // namespace
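HeapFilter factors the filter-flag logic out of IterateThroughHeapData so that FollowReferences can share it. A standalone model of the semantics, assuming the checks elided from this hunk follow the spec (constants mirror jvmti.h):

    constexpr jint kHeapFilterTagged        = 0x4;   // JVMTI_HEAP_FILTER_TAGGED
    constexpr jint kHeapFilterUntagged      = 0x8;   // JVMTI_HEAP_FILTER_UNTAGGED
    constexpr jint kHeapFilterClassTagged   = 0x10;  // JVMTI_HEAP_FILTER_CLASS_TAGGED
    constexpr jint kHeapFilterClassUntagged = 0x20;  // JVMTI_HEAP_FILTER_CLASS_UNTAGGED

    bool ShouldReport(jint filter, jlong tag, jlong class_tag) {
      if ((filter & kHeapFilterTagged) != 0 && tag != 0) return false;
      if ((filter & kHeapFilterUntagged) != 0 && tag == 0) return false;
      if ((filter & kHeapFilterClassTagged) != 0 && class_tag != 0) return false;
      if ((filter & kHeapFilterClassUntagged) != 0 && class_tag == 0) return false;
      return true;
    }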
+
+struct IterateThroughHeapData {
+ IterateThroughHeapData(HeapUtil* _heap_util,
+ jvmtiEnv* _env,
+ art::ObjPtr<art::mirror::Class> klass,
+ jint _heap_filter,
+ const jvmtiHeapCallbacks* _callbacks,
+ const void* _user_data)
+ : heap_util(_heap_util),
+ heap_filter(_heap_filter),
+ filter_klass(klass),
+ env(_env),
+ callbacks(_callbacks),
+ user_data(_user_data),
+ stop_reports(false) {
+ }
+
+ HeapUtil* heap_util;
+ const HeapFilter heap_filter;
+ art::ObjPtr<art::mirror::Class> filter_klass;
+ jvmtiEnv* env;
+ const jvmtiHeapCallbacks* callbacks;
+ const void* user_data;
bool stop_reports;
};
@@ -233,7 +662,7 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg
ithd->heap_util->GetTags()->GetTag(klass.Ptr(), &class_tag);
// For simplicity, even if we find a tag = 0, assume 0 = not tagged.
- if (!ithd->ShouldReportByHeapFilter(tag, class_tag)) {
+ if (!ithd->heap_filter.ShouldReportByHeapFilter(tag, class_tag)) {
return;
}
@@ -281,7 +710,12 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg
ithd->stop_reports = (array_ret & JVMTI_VISIT_ABORT) != 0;
}
- // TODO Implement primitive field callback.
+ if (!ithd->stop_reports) {
+ ithd->stop_reports = ReportPrimitiveField::Report(obj,
+ ithd->heap_util->GetTags(),
+ ithd->callbacks,
+ ithd->user_data);
+ }
}
jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
@@ -298,8 +732,8 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
IterateThroughHeapData ithd(this,
env,
- heap_filter,
soa.Decode<art::mirror::Class>(klass),
+ heap_filter,
callbacks,
user_data);
@@ -314,11 +748,15 @@ class FollowReferencesHelper FINAL {
jvmtiEnv* jvmti_env,
art::ObjPtr<art::mirror::Object> initial_object,
const jvmtiHeapCallbacks* callbacks,
+ art::ObjPtr<art::mirror::Class> class_filter,
+ jint heap_filter,
const void* user_data)
: env(jvmti_env),
tag_table_(h->GetTags()),
initial_object_(initial_object),
callbacks_(callbacks),
+ class_filter_(class_filter),
+ heap_filter_(heap_filter),
user_data_(user_data),
start_(0),
stop_reports_(false) {
@@ -553,64 +991,50 @@ class FollowReferencesHelper FINAL {
return;
}
- // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we
- // want to have a chance of getting the field indices computed halfway efficiently. For
- // now, ignore them altogether.
-
- struct InstanceReferenceVisitor {
- explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_)
- : helper(helper_), stop_reports(false) {}
-
- void operator()(art::mirror::Object* src,
- art::MemberOffset field_offset,
- bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) {
- if (stop_reports) {
- return;
- }
-
- art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr();
+ // All instance fields.
+ auto report_instance_field = [&](art::ObjPtr<art::mirror::Object> src,
+ art::ObjPtr<art::mirror::Class> obj_klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(src);
+ if (field_value != nullptr) {
jvmtiHeapReferenceInfo reference_info;
memset(&reference_info, 0, sizeof(reference_info));
// TODO: Implement spec-compliant numbering.
- reference_info.field.index = field_offset.Int32Value();
+ reference_info.field.index = field_index;
jvmtiHeapReferenceKind kind =
- field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
+ field.GetOffset().Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
? JVMTI_HEAP_REFERENCE_CLASS
: JVMTI_HEAP_REFERENCE_FIELD;
const jvmtiHeapReferenceInfo* reference_info_ptr =
kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info;
- stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg);
- }
-
- void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
- const {
- LOG(FATAL) << "Unreachable";
- }
- void VisitRootIfNonNull(
- art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
- LOG(FATAL) << "Unreachable";
+ return !ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src.Ptr(), field_value.Ptr());
}
-
- // "mutable" required by the visitor API.
- mutable FollowReferencesHelper* helper;
- mutable bool stop_reports;
+ return false;
};
+ stop_reports_ = FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ report_instance_field);
+ if (stop_reports_) {
+ return;
+ }
- InstanceReferenceVisitor visitor(this);
- // Visit references, not native roots.
- obj->VisitReferences<false>(visitor, art::VoidFunctor());
-
- stop_reports_ = visitor.stop_reports;
-
- if (!stop_reports_) {
- jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
- stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
+ stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ if (stop_reports_) {
+ return;
}
+
+ stop_reports_ = ReportPrimitiveField::Report(obj, tag_table_, callbacks_, user_data_);
}
void VisitArray(art::mirror::Object* array)
@@ -704,26 +1128,38 @@ class FollowReferencesHelper FINAL {
DCHECK_EQ(h_klass.Get(), klass);
// Declared static fields.
- for (auto& field : klass->GetSFields()) {
- if (!field.IsPrimitiveType()) {
- art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass);
- if (field_value != nullptr) {
- jvmtiHeapReferenceInfo reference_info;
- memset(&reference_info, 0, sizeof(reference_info));
+ auto report_static_field = [&](art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> obj_klass,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(obj_klass);
+ if (field_value != nullptr) {
+ jvmtiHeapReferenceInfo reference_info;
+ memset(&reference_info, 0, sizeof(reference_info));
- // TODO: Implement spec-compliant numbering.
- reference_info.field.index = field.GetOffset().Int32Value();
+ reference_info.field.index = static_cast<jint>(field_index);
- stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
- &reference_info,
- klass,
- field_value.Ptr());
- if (stop_reports_) {
- return;
- }
- }
+ return !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
+ &reference_info,
+ obj_klass.Ptr(),
+ field_value.Ptr());
}
+ return false;
+ };
+ stop_reports_ = FieldVisitor<void, false>::ReportFields(klass,
+ nullptr,
+ VisitorFalse<void>,
+ report_static_field,
+ VisitorFalse<void>,
+ VisitorFalse<void>);
+ if (stop_reports_) {
+ return;
}
+
+ stop_reports_ = ReportPrimitiveField::Report(klass, tag_table_, callbacks_, user_data_);
}
void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -760,11 +1196,20 @@ class FollowReferencesHelper FINAL {
return 0;
}
+ if (UNLIKELY(class_filter_ != nullptr) && class_filter_ != referree->GetClass()) {
+ return JVMTI_VISIT_OBJECTS;
+ }
+
const jlong class_tag = tag_table_->GetTagOrZero(referree->GetClass());
+ jlong tag = tag_table_->GetTagOrZero(referree);
+
+ if (!heap_filter_.ShouldReportByHeapFilter(tag, class_tag)) {
+ return JVMTI_VISIT_OBJECTS;
+ }
+
const jlong referrer_class_tag =
referrer == nullptr ? 0 : tag_table_->GetTagOrZero(referrer->GetClass());
const jlong size = static_cast<jlong>(referree->SizeOf());
- jlong tag = tag_table_->GetTagOrZero(referree);
jlong saved_tag = tag;
jlong referrer_tag = 0;
jlong saved_referrer_tag = 0;
@@ -779,6 +1224,7 @@ class FollowReferencesHelper FINAL {
referrer_tag_ptr = &referrer_tag;
}
}
+
jint length = -1;
if (referree->IsArrayInstance()) {
length = referree->AsArray()->GetLength();
@@ -808,6 +1254,8 @@ class FollowReferencesHelper FINAL {
ObjectTagTable* tag_table_;
art::ObjPtr<art::mirror::Object> initial_object_;
const jvmtiHeapCallbacks* callbacks_;
+ art::ObjPtr<art::mirror::Class> class_filter_;
+ const HeapFilter heap_filter_;
const void* user_data_;
std::vector<art::mirror::Object*> worklist_;
@@ -822,8 +1270,8 @@ class FollowReferencesHelper FINAL {
};
jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env,
- jint heap_filter ATTRIBUTE_UNUSED,
- jclass klass ATTRIBUTE_UNUSED,
+ jint heap_filter,
+ jclass klass,
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
@@ -844,10 +1292,15 @@ jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env,
art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects);
art::ScopedSuspendAll ssa("FollowReferences");
+ art::ObjPtr<art::mirror::Class> class_filter = klass == nullptr
+ ? nullptr
+ : art::ObjPtr<art::mirror::Class>::DownCast(self->DecodeJObject(klass));
FollowReferencesHelper frh(this,
env,
self->DecodeJObject(initial_object),
callbacks,
+ class_filter,
+ heap_filter,
user_data);
frh.Init();
frh.Work();
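With heap_filter and klass no longer ATTRIBUTE_UNUSED, FollowReferences now honors both spec parameters. Typical agent usage (the callback name is hypothetical):

    jvmtiHeapCallbacks callbacks;
    memset(&callbacks, 0, sizeof(callbacks));
    callbacks.heap_reference_callback = OnHeapReference;
    // Follow from the roots, reporting only untagged instances of filter_klass.
    jvmti->FollowReferences(JVMTI_HEAP_FILTER_TAGGED, filter_klass,
                            /*initial_object*/ nullptr, &callbacks, nullptr);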
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 60371cfafe..e494cb6530 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,6 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
}
void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
- // TODO: Events.
switch (phase) {
case RuntimePhase::kInitialAgents:
PhaseUtil::current_phase_ = JVMTI_PHASE_PRIMORDIAL;
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index eefd012d9a..a173a4abdd 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -56,6 +56,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
+#include "non_debuggable_classes.h"
#include "object_lock.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
@@ -248,8 +249,13 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
return ERR(UNMODIFIABLE_CLASS);
}
- // TODO We should check if the class has non-obsoletable methods on the stack
- LOG(WARNING) << "presence of non-obsoletable methods on stacks is not currently checked";
+ for (jclass c : art::NonDebuggableClasses::GetNonDebuggableClasses()) {
+ if (klass.Get() == art::Thread::Current()->DecodeJObject(c)->AsClass()) {
+ *error_msg = "Class might have stack frames that cannot be made obsolete";
+ return ERR(UNMODIFIABLE_CLASS);
+ }
+ }
+
return OK;
}
@@ -297,6 +303,7 @@ Redefiner::ClassRedefinition::~ClassRedefinition() {
}
jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
@@ -318,12 +325,19 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> def_vector;
def_vector.reserve(class_count);
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ jvmtiError res = env->IsModifiableClass(definitions[i].klass, &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
// We make a copy of the class_bytes to pass into the retransformation.
// This makes cleanup easier (since we unambiguously own the bytes) and also is useful since we
// will need to keep the original bytes around unaltered for subsequent RetransformClasses calls
// to get the passed in bytes.
unsigned char* class_bytes_copy = nullptr;
- jvmtiError res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
+ res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
if (res != OK) {
return res;
}
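RedefineClasses now rejects unmodifiable classes up front through the env's own IsModifiableClass instead of failing deeper in the pipeline. The equivalent agent-side pre-check:

    jboolean is_modifiable = JNI_FALSE;
    if (jvmti->IsModifiableClass(klass, &is_modifiable) == JVMTI_ERROR_NONE &&
        is_modifiable == JNI_TRUE) {
      jvmti->RedefineClasses(1, &definition);  // a jvmtiClassDefinition prepared elsewhere
    }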
@@ -344,6 +358,7 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
}
// Call all the transformation events.
jvmtiError res = Transformer::RetransformClassesDirect(env,
+ event_handler,
self,
&def_vector);
if (res != OK) {
@@ -771,6 +786,8 @@ bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
CheckSameMethods();
}
+class RedefinitionDataIter;
+
// A wrapper that lets us hold onto the arbitrary sized data needed for redefinitions in a
// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
// having to deal with the fact that we need to hold an arbitrary number of references live.
@@ -794,13 +811,15 @@ class RedefinitionDataHolder {
RedefinitionDataHolder(art::StackHandleScope<1>* hs,
art::Runtime* runtime,
art::Thread* self,
- int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ std::vector<Redefiner::ClassRedefinition>* redefinitions)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) :
arr_(
hs->NewHandle(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(
self,
runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
- num_redefinitions * kNumSlots))) {}
+ redefinitions->size() * kNumSlots))),
+ redefinitions_(redefinitions) {}
bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_.IsNull();
@@ -862,8 +881,27 @@ class RedefinitionDataHolder {
return arr_->GetLength() / kNumSlots;
}
+ std::vector<Redefiner::ClassRedefinition>* GetRedefinitions()
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return redefinitions_;
+ }
+
+ bool operator==(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.Get() == other.arr_.Get();
+ }
+
+ bool operator!=(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter begin() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ RedefinitionDataIter end() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+ std::vector<Redefiner::ClassRedefinition>* redefinitions_;
art::mirror::Object* GetSlot(jint klass_index,
DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -882,8 +920,115 @@ class RedefinitionDataHolder {
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
-bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder) {
+class RedefinitionDataIter {
+ public:
+ RedefinitionDataIter(int32_t idx, RedefinitionDataHolder& holder) : idx_(idx), holder_(holder) {}
+
+ RedefinitionDataIter(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter(RedefinitionDataIter&&) = default;
+ RedefinitionDataIter& operator=(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter& operator=(RedefinitionDataIter&&) = default;
+
+ bool operator==(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return idx_ == other.idx_ && holder_ == other.holder_;
+ }
+
+ bool operator!=(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter operator++() { // Value after modification.
+ idx_++;
+ return *this;
+ }
+
+ RedefinitionDataIter operator++(int) {
+ RedefinitionDataIter temp = *this;
+ idx_++;
+ return temp;
+ }
+
+ RedefinitionDataIter operator+(ssize_t delta) const {
+ RedefinitionDataIter temp = *this;
+ temp += delta;
+ return temp;
+ }
+
+ RedefinitionDataIter& operator+=(ssize_t delta) {
+ idx_ += delta;
+ return *this;
+ }
+
+ Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return (*holder_.GetRedefinitions())[idx_];
+ }
+
+ RedefinitionDataHolder& GetHolder() {
+ return holder_;
+ }
+
+ art::mirror::ClassLoader* GetSourceClassLoader() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetSourceClassLoader(idx_);
+ }
+ art::mirror::Object* GetJavaDexFile() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetJavaDexFile(idx_);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexFileCookie(idx_);
+ }
+ art::mirror::DexCache* GetNewDexCache() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexCache(idx_);
+ }
+ art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetMirrorClass(idx_);
+ }
+ art::mirror::ByteArray* GetOriginalDexFileBytes() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOriginalDexFileBytes(idx_);
+ }
+ int32_t GetIndex() const {
+ return idx_;
+ }
+
+ void SetSourceClassLoader(art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetSourceClassLoader(idx_, loader);
+ }
+ void SetJavaDexFile(art::mirror::Object* dexfile) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetJavaDexFile(idx_, dexfile);
+ }
+ void SetNewDexFileCookie(art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexFileCookie(idx_, cookie);
+ }
+ void SetNewDexCache(art::mirror::DexCache* cache) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexCache(idx_, cache);
+ }
+ void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetMirrorClass(idx_, klass);
+ }
+ void SetOriginalDexFileBytes(art::mirror::ByteArray* bytes)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOriginalDexFileBytes(idx_, bytes);
+ }
+
+ private:
+ int32_t idx_;
+ RedefinitionDataHolder& holder_;
+};
+
+RedefinitionDataIter RedefinitionDataHolder::begin() {
+ return RedefinitionDataIter(0, *this);
+}
+
+RedefinitionDataIter RedefinitionDataHolder::end() {
+ return RedefinitionDataIter(Length(), *this);
+}
+
+bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter& iter) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
art::StackHandleScope<2> hs(driver_->self_);
std::string error;
@@ -891,7 +1036,7 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
art::verifier::MethodVerifier::FailureKind failure =
art::verifier::MethodVerifier::VerifyClass(driver_->self_,
dex_file_.get(),
- hs.NewHandle(holder.GetNewDexCache(klass_index)),
+ hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
@@ -910,21 +1055,20 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
// dexfile. This is so that even if multiple classes with the same classloader are redefined at
// once they are all added to the classloader.
bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::MutableHandle<art::mirror::LongArray> old_cookie(
hs.NewHandle<art::mirror::LongArray>(nullptr));
bool has_older_cookie = false;
// See if we already have a cookie that a previous redefinition got from the same classloader.
- for (int32_t i = 0; i < klass_index; i++) {
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
// Since every instance of this classloader should have the same cookie associated with it we
// can stop looking here.
has_older_cookie = true;
- old_cookie.Assign(holder->GetNewDexFileCookie(i));
+ old_cookie.Assign(old_data.GetNewDexFileCookie());
break;
}
}
@@ -945,14 +1089,14 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
// Save the cookie.
- holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+ cur_data->SetNewDexFileCookie(new_cookie.Get());
// If there are other copies of this same classloader we need to make sure that we all have the
// same cookie.
if (has_older_cookie) {
- for (int32_t i = 0; i < klass_index; i++) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
// We will let the GC take care of the cookie we allocated for this one.
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
- holder->SetNewDexFileCookie(i, new_cookie.Get());
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
+ old_data.SetNewDexFileCookie(new_cookie.Get());
}
}
}
@@ -961,32 +1105,32 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
- int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
- holder->SetMirrorClass(klass_index, GetMirrorClass());
+ cur_data->SetMirrorClass(GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
// The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
- holder->SetSourceClassLoader(klass_index, loader.Get());
+ cur_data->SetSourceClassLoader(loader.Get());
art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ cur_data->SetJavaDexFile(dex_file_obj.Get());
if (dex_file_obj == nullptr) {
RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
return false;
}
// Allocate the new dex file cookie.
- if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
+ if (!AllocateAndRememberNewDexFileCookie(loader, dex_file_obj, cur_data)) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
}
- holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
- if (holder->GetNewDexCache(klass_index) == nullptr) {
+ cur_data->SetNewDexCache(CreateNewDexCache(loader));
+ if (cur_data->GetNewDexCache() == nullptr) {
driver_->self_->AssertPendingException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
@@ -994,8 +1138,8 @@ bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
}
// We won't always need to set this field.
- holder->SetOriginalDexFileBytes(klass_index, AllocateOrGetOriginalDexFileBytes());
- if (holder->GetOriginalDexFileBytes(klass_index) == nullptr) {
+ cur_data->SetOriginalDexFileBytes(AllocateOrGetOriginalDexFileBytes());
+ if (cur_data->GetOriginalDexFileBytes() == nullptr) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate array for original dex file");
@@ -1040,13 +1184,11 @@ bool Redefiner::EnsureAllClassAllocationsFinished() {
}
bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
// Allocate the data this redefinition requires.
- if (!redef.FinishRemainingAllocations(cnt, &holder)) {
+ if (!data.GetRedefinition().FinishRemainingAllocations(&data)) {
return false;
}
- cnt++;
}
return true;
}
@@ -1061,22 +1203,39 @@ void Redefiner::ReleaseAllDexFiles() {
}
}
-bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.CheckVerification(cnt, holder)) {
+bool Redefiner::CheckAllClassesAreVerified(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().CheckVerification(data)) {
return false;
}
- cnt++;
}
return true;
}
+class ScopedDisableConcurrentAndMovingGc {
+ public:
+ ScopedDisableConcurrentAndMovingGc(art::gc::Heap* heap, art::Thread* self)
+ : heap_(heap), self_(self) {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->IncrementDisableMovingGC(self_);
+ }
+ }
+
+ ~ScopedDisableConcurrentAndMovingGc() {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->DecrementDisableMovingGC(self_);
+ }
+ }
+ private:
+ art::gc::Heap* heap_;
+ art::Thread* self_;
+};
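ScopedDisableConcurrentAndMovingGc replaces the manual IncrementDisableMovingGC/DecrementDisableMovingGC pairing with an RAII guard, so every exit path of Run() taken after the guard is constructed re-enables moving GC. Usage shape (illustration only; the named helpers are hypothetical):

    jvmtiError Run() {
      ScopedDisableConcurrentAndMovingGc gc_guard(runtime_->GetHeap(), self_);
      if (SomethingFailed()) {
        return ERR(INTERNAL);     // destructor re-enables moving GC on this path too
      }
      InstallRedefinedClasses();  // stand-in for the installation loop below
      return OK;                  // destructor also runs here
    }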
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
// We will let this be collected after the end of this function.
- RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ RedefinitionDataHolder holder(&hs, runtime_, self_, &redefinitions_);
if (holder.IsNull()) {
self_->AssertPendingOOMException();
self_->ClearException();
@@ -1099,57 +1258,43 @@ jvmtiError Redefiner::Run() {
// cleaned up by the GC eventually.
return result_;
}
+
// At this point we can no longer fail without corrupting the runtime state.
- int32_t counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (holder.GetSourceClassLoader(counter) == nullptr) {
- runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (data.GetSourceClassLoader() == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
}
- counter++;
}
UnregisterAllBreakpoints();
+
  // Disable the GC and wait for it to finish if it is a concurrent moving GC. This is fine since
  // we are done allocating, so there is no deadlock risk.
- art::gc::Heap* heap = runtime_->GetHeap();
- if (heap->IsGcConcurrentAndMoving()) {
- // GC moving objects can cause deadlocks as we are deoptimizing the stack.
- heap->IncrementDisableMovingGC(self_);
- }
+ ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
+
  // Do the transition to final suspension.
  // TODO We might want to give this its own suspended state!
  // TODO This isn't right. Ideally we need to change state without any chance of suspension!
- self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
- runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Classes!", /*long_suspend*/true);
- counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- if (holder.GetSourceClassLoader(counter) != nullptr) {
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
- holder.GetNewDexFileCookie(counter));
+ ClassRedefinition& redef = data.GetRedefinition();
+ if (data.GetSourceClassLoader() != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
}
- art::mirror::Class* klass = holder.GetMirrorClass(counter);
+ art::mirror::Class* klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(counter),
- holder.GetOriginalDexFileBytes(counter));
- counter++;
+ redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFileBytes());
}
  // TODO We should check whether any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
// TODO Shrink the obsolete method maps if possible?
- // TODO Put this into a scoped thing.
- runtime_->GetThreadList()->ResumeAll();
- // Get back shared mutator lock as expected for return.
- self_->TransitionFromSuspendedToRunnable();
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
- if (heap->IsGcConcurrentAndMoving()) {
- heap->DecrementDisableMovingGC(self_);
- }
return OK;
}
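
The three scoped objects above (the new GC guard plus ScopedThreadSuspension and ScopedSuspendAll) replace matched Increment/Decrement and SuspendAll/ResumeAll call pairs, so every exit path releases what it acquired. The Java analogue of this C++ RAII idiom is try-with-resources; a small sketch under hypothetical hooks:

    final class GcGuard implements AutoCloseable {
        private final boolean disabled;

        GcGuard(boolean gcIsConcurrentAndMoving) {
            disabled = gcIsConcurrentAndMoving;
            if (disabled) {
                disableMovingGc();  // hypothetical hook into the heap
            }
        }

        @Override
        public void close() {
            if (disabled) {
                enableMovingGc();   // always re-enabled, even on early exit
            }
        }

        private static void disableMovingGc() {}
        private static void enableMovingGc() {}
    }

    // Usage: try (GcGuard g = new GcGuard(true)) { /* install redefined classes */ }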
@@ -1251,8 +1396,6 @@ bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
- // TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
- // this case.
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 65ee2912e2..4313a9476e 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -66,6 +66,7 @@
namespace openjdkjvmti {
class RedefinitionDataHolder;
+class RedefinitionDataIter;
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
@@ -88,6 +89,7 @@ class Redefiner {
// The caller is responsible for freeing it. The runtime makes its own copy of the data.
// TODO This function should call the transformation events.
static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
@@ -142,14 +144,13 @@ class Redefiner {
driver_->RecordFailure(e, class_sig_, err);
}
- bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
+ bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder)
+ /*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
@@ -160,8 +161,7 @@ class Redefiner {
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Checks that the contained class can be successfully verified.
- bool CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder)
+  bool CheckVerification(const RedefinitionDataIter& iter)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// Preallocates all needed allocations in klass so that we can pause execution safely.
@@ -240,7 +240,7 @@ class Redefiner {
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+ bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
@@ -254,6 +254,8 @@ class Redefiner {
}
friend struct CallbackCtx;
+ friend class RedefinitionDataHolder;
+ friend class RedefinitionDataIter;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_search.cc b/runtime/openjdkjvmti/ti_search.cc
index df80f85ed8..f51a98f976 100644
--- a/runtime/openjdkjvmti/ti_search.cc
+++ b/runtime/openjdkjvmti/ti_search.cc
@@ -212,7 +212,6 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
return ERR(WRONG_PHASE);
}
if (current->GetClassLinker() == nullptr) {
- // TODO: Support boot classpath change in OnLoad.
return ERR(WRONG_PHASE);
}
if (segment == nullptr) {
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 2fec631c00..bd52cbb7f9 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -63,12 +63,13 @@ namespace openjdkjvmti {
jvmtiError Transformer::RetransformClassesDirect(
ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Thread* self,
/*in-out*/std::vector<ArtClassDefinition>* definitions) {
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
- gEventHandler.DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
def.klass,
@@ -85,6 +86,7 @@ jvmtiError Transformer::RetransformClassesDirect(
}
jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
@@ -107,6 +109,13 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> definitions;
jvmtiError res = OK;
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ res = env->IsModifiableClass(classes[i], &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
ArtClassDefinition def;
res = FillInTransformationData(env, classes[i], &def);
if (res != OK) {
@@ -114,7 +123,7 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
}
definitions.push_back(std::move(def));
}
- res = RetransformClassesDirect(env, self, &definitions);
+ res = RetransformClassesDirect(env, event_handler, self, &definitions);
if (res != OK) {
return res;
}
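
The new loop rejects the whole request up front if any class is not modifiable, instead of failing partway through the batch. For agents written in Java, the JDK-side java.lang.instrument API (not ART's internal one) exposes the same fail-fast pre-check; a sketch:

    import java.lang.instrument.Instrumentation;
    import java.lang.instrument.UnmodifiableClassException;

    final class RetransformHelper {
        // Mirrors the check above: verify every class first, then
        // retransform the whole batch in one call.
        static void retransformAll(Instrumentation inst, Class<?>... classes)
                throws UnmodifiableClassException {
            for (Class<?> c : classes) {
                if (!inst.isModifiableClass(c)) {
                    throw new UnmodifiableClassException(c.getName());
                }
            }
            inst.retransformClasses(classes);
        }
    }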
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index 65f2ae1353..c6a36e8e20 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -42,14 +42,20 @@
namespace openjdkjvmti {
+class EventHandler;
+
jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string* location);
class Transformer {
public:
static jvmtiError RetransformClassesDirect(
- ArtJvmTiEnv* env, art::Thread* self, /*in-out*/std::vector<ArtClassDefinition>* definitions);
+ ArtJvmTiEnv* env,
+ EventHandler* event_handler,
+ art::Thread* self,
+ /*in-out*/std::vector<ArtClassDefinition>* definitions);
static jvmtiError RetransformClasses(ArtJvmTiEnv* env,
+ EventHandler* event_handler,
art::Runtime* runtime,
art::Thread* self,
jint class_count,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 69dcfebcb1..9fd2c88c3c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -114,6 +114,7 @@
#include "native/java_lang_Thread.h"
#include "native/java_lang_Throwable.h"
#include "native/java_lang_VMClassLoader.h"
+#include "native/java_lang_Void.h"
#include "native/java_lang_invoke_MethodHandleImpl.h"
#include "native/java_lang_ref_FinalizerReference.h"
#include "native/java_lang_ref_Reference.h"
@@ -1556,6 +1557,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_java_lang_Thread(env);
register_java_lang_Throwable(env);
register_java_lang_VMClassLoader(env);
+ register_java_lang_Void(env);
register_java_util_concurrent_atomic_AtomicLong(env);
register_libcore_util_CharsetUtils(env);
register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
@@ -1961,9 +1963,7 @@ void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
}
void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::string& profile_output_filename) {
if (jit_.get() == nullptr) {
// We are not JITing. Nothing to do.
return;
@@ -1985,18 +1985,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
return;
}
- jit_->StartProfileSaver(profile_output_filename,
- code_paths,
- foreign_dex_profile_path,
- app_dir);
-}
-
-void Runtime::NotifyDexLoaded(const std::string& dex_location) {
- VLOG(profiler) << "Notify dex loaded: " << dex_location;
- // We know that if the ProfileSaver is started then we can record profile information.
- if (ProfileSaver::IsStarted()) {
- ProfileSaver::NotifyDexUse(dex_location);
- }
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
}
// Transaction support.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4a0169db68..d244a9b618 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -454,10 +454,7 @@ class Runtime {
}
void RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
- void NotifyDexLoaded(const std::string& dex_location);
+ const std::string& profile_output_filename);
// Transaction support.
bool IsActiveTransaction() const {
diff --git a/test/577-profile-foreign-dex/expected.txt b/test/152-dead-large-object/expected.txt
index e69de29bb2..e69de29bb2 100644
--- a/test/577-profile-foreign-dex/expected.txt
+++ b/test/152-dead-large-object/expected.txt
diff --git a/test/152-dead-large-object/info.txt b/test/152-dead-large-object/info.txt
new file mode 100644
index 0000000000..45023cd0b7
--- /dev/null
+++ b/test/152-dead-large-object/info.txt
@@ -0,0 +1 @@
+Test that large objects are freed properly after a GC.
diff --git a/test/577-profile-foreign-dex/src-ex/OtherDex.java b/test/152-dead-large-object/src/Main.java
index cba73b3094..72fd25c2c0 100644
--- a/test/577-profile-foreign-dex/src-ex/OtherDex.java
+++ b/test/152-dead-large-object/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,5 +13,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-public class OtherDex {
+
+public class Main {
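+  // Each iteration drops the only reference to the previous array, making it
+  // garbage; keeping the reference in a volatile static field stops the
+  // compiler from eliminating the allocations as dead code.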
+ static volatile Object a[] = null;
+
+ public static void main(String[] args) {
+ for (int i = 0; i < 10; ++i) {
+ a = new Object[i * 300000];
+ Runtime.getRuntime().gc();
+ }
+ }
}
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
index 3a256c109e..69015b65aa 100644
--- a/test/154-gc-loop/src/Main.java
+++ b/test/154-gc-loop/src/Main.java
@@ -38,7 +38,7 @@ public class Main {
}
} catch (Exception e) {}
System.out.println("Finalize count too large: " +
- ((finalizeCounter >= 10) ? Integer.toString(finalizeCounter) : "false"));
+ ((finalizeCounter >= 15) ? Integer.toString(finalizeCounter) : "false"));
}
private static native void backgroundProcessState();
diff --git a/test/157-void-class/expected.txt b/test/157-void-class/expected.txt
new file mode 100644
index 0000000000..3f61c0b5b0
--- /dev/null
+++ b/test/157-void-class/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+void.class = void
diff --git a/test/157-void-class/info.txt b/test/157-void-class/info.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/157-void-class/info.txt
diff --git a/test/577-profile-foreign-dex/run b/test/157-void-class/run
index ad57d14c60..59e852c8cd 100644..100755
--- a/test/577-profile-foreign-dex/run
+++ b/test/157-void-class/run
@@ -1,12 +1,12 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-exec ${RUN} \
- --runtime-option -Xjitsaveprofilinginfo \
- --runtime-option -Xusejit:true \
- "${@}"
+# Let the test build its own core image with --no-image and use verify-profile,
+# so that the compiler does not try to initialize classes. This leaves the
+# java.lang.Void compile-time verified but uninitialized.
+./default-run "$@" --no-image \
+ --runtime-option -Ximage-compiler-option \
+ --runtime-option --compiler-filter=verify-profile
diff --git a/test/157-void-class/src/Main.java b/test/157-void-class/src/Main.java
new file mode 100644
index 0000000000..322b705f1d
--- /dev/null
+++ b/test/157-void-class/src/Main.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import libcore.util.EmptyArray;
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ // Check if we're running dalvik or RI.
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ System.loadLibrary(args[0]);
+ } catch (ClassNotFoundException e) {
+ usingRI = true;
+ // Add expected JNI_OnLoad log line to match expected.txt.
+ System.out.println("JNI_OnLoad called");
+ }
+ try {
+ // Initialize all classes needed for old java.lang.Void.TYPE initialization.
+ Runnable.class.getMethod("run", EmptyArray.CLASS).getReturnType();
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ // Clear the resolved types of the ojluni dex file to make sure there is no entry
+ // for "V", i.e. void.
+ clearResolvedTypes(Integer.class);
+ // With java.lang.Void being compile-time verified but uninitialized, initialize
+ // it now. Previously, this would indirectly initialize TYPE with the current,
+ // i.e. zero-initialized, value of TYPE. The only thing that could prevent the
+ // series of calls leading to this was a cache hit in Class.getDexCacheType()
+ // which we have prevented by clearing the cache above.
+ Class<?> voidClass = void.class;
+ System.out.println("void.class = " + voidClass);
+ }
+
+ public static void clearResolvedTypes(Class<?> c) {
+ if (!usingRI) {
+ nativeClearResolvedTypes(c);
+ }
+ }
+
+ public static native void nativeClearResolvedTypes(Class<?> c);
+
+ static boolean usingRI = false;
+}
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 9f4be6c227..14a40ef317 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -785,6 +785,86 @@ public class Main {
return new Circle(Math.PI).getArea();
}
+ /// CHECK-START: int Main.testAllocationEliminationOfArray1() load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray1() load_store_elimination (after)
+ /// CHECK-NOT: NewArray
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray1() {
+ int[] array = new int[4];
+ array[2] = 4;
+ array[3] = 7;
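+    // array[0] and array[1] are never written, so after elimination their
+    // reads fold to the default value 0 and this returns 0 + 0 + 4 + 7 = 11.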
+ return array[0] + array[1] + array[2] + array[3];
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray2() load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray2() load_store_elimination (after)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ private static int testAllocationEliminationOfArray2() {
+ // Cannot eliminate array allocation since array is accessed with non-constant
+ // index.
+ int[] array = new int[4];
+ array[2] = 4;
+ array[3] = 7;
+ int sum = 0;
+ for (int e : array) {
+ sum += e;
+ }
+ return sum;
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray3(int) load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray3(int) load_store_elimination (after)
+ /// CHECK-NOT: NewArray
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray3(int i) {
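+    // Even with a non-constant index, the single store to array[i] forwards
+    // directly to the load of array[i], so the allocation is eliminated.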
+ int[] array = new int[4];
+ array[i] = 4;
+ return array[i];
+ }
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray4(int) load_store_elimination (before)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testAllocationEliminationOfArray4(int) load_store_elimination (after)
+ /// CHECK: NewArray
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK-NOT: ArrayGet
+ private static int testAllocationEliminationOfArray4(int i) {
+ // Cannot eliminate array allocation due to index aliasing between 1 and i.
+ int[] array = new int[4];
+ array[1] = 2;
+ array[i] = 4;
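+    // In the call below i == 2, so this returns 2 + 4 == 6. When i == 1 the
+    // second store would overwrite the first, which is why both stores and
+    // the load of array[1] must be kept.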
+ return array[1] + array[i];
+ }
+
static void assertIntEquals(int result, int expected) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -865,6 +945,11 @@ public class Main {
assertDoubleEquals(darray[0], Math.PI);
assertDoubleEquals(darray[1], Math.PI);
assertDoubleEquals(darray[2], Math.PI);
+
+ assertIntEquals(testAllocationEliminationOfArray1(), 11);
+ assertIntEquals(testAllocationEliminationOfArray2(), 11);
+ assertIntEquals(testAllocationEliminationOfArray3(2), 4);
+ assertIntEquals(testAllocationEliminationOfArray4(2), 6);
}
static boolean sFlag;
diff --git a/test/532-checker-nonnull-arrayset/src/Main.java b/test/532-checker-nonnull-arrayset/src/Main.java
index 2c701bbb94..61c9e88e9e 100644
--- a/test/532-checker-nonnull-arrayset/src/Main.java
+++ b/test/532-checker-nonnull-arrayset/src/Main.java
@@ -30,10 +30,14 @@ public class Main {
/// CHECK: ReturnVoid
public static void test() {
Object[] array = new Object[2];
+    // Store the array to a static field so load-store elimination does not
+    // remove the array accesses below.
+ sArray = array;
Object nonNull = array[0];
nonNull.getClass(); // Ensure nonNull has an implicit null check.
array[1] = nonNull;
}
public static void main(String[] args) {}
+
+ static Object[] sArray;
}
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
index 52f3f84406..e395e283e0 100644
--- a/test/536-checker-intrinsic-optimization/src/Main.java
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -330,6 +330,21 @@ public class Main {
// Terminate the scope for the CHECK-NOT search at the reference or length comparison,
// whichever comes first.
/// CHECK: cmp {{w.*,}} {{w.*|#.*}}
+
+ /// CHECK-START-MIPS: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
+ /// CHECK: InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals
+ /// CHECK-NOT: beq r0,
+ /// CHECK-NOT: beqz
+ /// CHECK-NOT: beqzc
+ // Terminate the scope for the CHECK-NOT search at the class field or length comparison,
+ // whichever comes first.
+ /// CHECK: lw
+
+ /// CHECK-START-MIPS64: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
+ /// CHECK: InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals
+ /// CHECK-NOT: beqzc
+ // Terminate the scope for the CHECK-NOT search at the reference comparison.
+ /// CHECK: beqc
public static boolean stringArgumentNotNull(Object obj) {
obj.getClass();
return "foo".equals(obj);
@@ -384,6 +399,22 @@ public class Main {
/// CHECK-NOT: ldr {{w\d+}}, [{{x\d+}}]
/// CHECK-NOT: ldr {{w\d+}}, [{{x\d+}}, #0]
/// CHECK: cmp {{w\d+}}, {{w\d+|#.*}}
+
+ // Test is brittle as it depends on the class offset being 0.
+ /// CHECK-START-MIPS: boolean Main.stringArgumentIsString() disassembly (after)
+ /// CHECK: InvokeVirtual intrinsic:StringEquals
+ /// CHECK: beq{{(zc)?}}
+ // Check that we don't try to compare the classes.
+ /// CHECK-NOT: lw {{r\d+}}, +0({{r\d+}})
+ /// CHECK: bne{{c?}}
+
+ // Test is brittle as it depends on the class offset being 0.
+ /// CHECK-START-MIPS64: boolean Main.stringArgumentIsString() disassembly (after)
+ /// CHECK: InvokeVirtual intrinsic:StringEquals
+ /// CHECK: beqzc
+ // Check that we don't try to compare the classes.
+ /// CHECK-NOT: lw {{r\d+}}, +0({{r\d+}})
+ /// CHECK: bnec
public static boolean stringArgumentIsString() {
return "foo".equals(myString);
}
diff --git a/test/577-profile-foreign-dex/info.txt b/test/577-profile-foreign-dex/info.txt
deleted file mode 100644
index 090db3fdc6..0000000000
--- a/test/577-profile-foreign-dex/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Check that we record the use of foreign dex files when profiles are enabled.
diff --git a/test/577-profile-foreign-dex/src/Main.java b/test/577-profile-foreign-dex/src/Main.java
deleted file mode 100644
index ed7a625e75..0000000000
--- a/test/577-profile-foreign-dex/src/Main.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Constructor;
-import java.util.HashMap;
-
-public class Main {
-
- private static final String PROFILE_NAME = "primary.prof";
- private static final String APP_DIR_PREFIX = "app_dir_";
- private static final String FOREIGN_DEX_PROFILE_DIR = "foreign-dex";
- private static final String TEMP_FILE_NAME_PREFIX = "dummy";
- private static final String TEMP_FILE_NAME_SUFFIX = "-file";
-
- public static void main(String[] args) throws Exception {
- File tmpFile = null;
- File appDir = null;
- File profileFile = null;
- File foreignDexProfileDir = null;
-
- try {
- // Create the necessary files layout.
- tmpFile = createTempFile();
- appDir = new File(tmpFile.getParent(), APP_DIR_PREFIX + tmpFile.getName());
- appDir.mkdir();
- foreignDexProfileDir = new File(tmpFile.getParent(), FOREIGN_DEX_PROFILE_DIR);
- foreignDexProfileDir.mkdir();
- profileFile = createTempFile();
-
- String codePath = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex.jar";
-
- // Register the app with the runtime
- VMRuntime.registerAppInfo(profileFile.getPath(), appDir.getPath(),
- new String[] { codePath }, foreignDexProfileDir.getPath());
-
- testMarkerForForeignDex(foreignDexProfileDir);
- testMarkerForCodePath(foreignDexProfileDir);
- testMarkerForApplicationDexFile(foreignDexProfileDir, appDir);
- } finally {
- if (tmpFile != null) {
- tmpFile.delete();
- }
- if (profileFile != null) {
- profileFile.delete();
- }
- if (foreignDexProfileDir != null) {
- foreignDexProfileDir.delete();
- }
- if (appDir != null) {
- appDir.delete();
- }
- }
- }
-
- // Verify we actually create a marker on disk for foreign dex files.
- private static void testMarkerForForeignDex(File foreignDexProfileDir) throws Exception {
- String foreignDex = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex-ex.jar";
- loadDexFile(foreignDex);
- checkMarker(foreignDexProfileDir, foreignDex, /* exists */ true);
- }
-
- // Verify we do not create a marker on disk for dex files path of the code path.
- private static void testMarkerForCodePath(File foreignDexProfileDir) throws Exception {
- String codePath = System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex.jar";
- loadDexFile(codePath);
- checkMarker(foreignDexProfileDir, codePath, /* exists */ false);
- }
-
- private static void testMarkerForApplicationDexFile(File foreignDexProfileDir, File appDir)
- throws Exception {
- // Copy the -ex jar to the application directory and load it from there.
- // This will record duplicate class conflicts but we don't care for this use case.
- File foreignDex = new File(System.getenv("DEX_LOCATION") + "/577-profile-foreign-dex-ex.jar");
- File appDex = new File(appDir, "appDex.jar");
- try {
- copyFile(foreignDex, appDex);
-
- loadDexFile(appDex.getAbsolutePath());
- checkMarker(foreignDexProfileDir, appDex.getAbsolutePath(), /* exists */ false);
- } finally {
- if (appDex != null) {
- appDex.delete();
- }
- }
- }
-
- private static void checkMarker(File foreignDexProfileDir, String dexFile, boolean exists) {
- File marker = new File(foreignDexProfileDir, dexFile.replace('/', '@'));
- boolean result_ok = exists ? marker.exists() : !marker.exists();
- if (!result_ok) {
- throw new RuntimeException("Marker test failed for:" + marker.getPath());
- }
- }
-
- private static void loadDexFile(String dexFile) throws Exception {
- Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
- if (pathClassLoader == null) {
- throw new RuntimeException("Couldn't find path class loader class");
- }
- Constructor<?> constructor =
- pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
- constructor.newInstance(
- dexFile, ClassLoader.getSystemClassLoader());
- }
-
- private static class VMRuntime {
- private static final Method registerAppInfoMethod;
- static {
- try {
- Class<?> c = Class.forName("dalvik.system.VMRuntime");
- registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
- String.class, String.class, String[].class, String.class);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- public static void registerAppInfo(String pkgName, String appDir,
- String[] codePath, String foreignDexProfileDir) throws Exception {
- registerAppInfoMethod.invoke(null, pkgName, appDir, codePath, foreignDexProfileDir);
- }
- }
-
- private static void copyFile(File fromFile, File toFile) throws Exception {
- FileInputStream in = new FileInputStream(fromFile);
- FileOutputStream out = new FileOutputStream(toFile);
- try {
- byte[] buffer = new byte[4096];
- int bytesRead;
- while ((bytesRead = in.read(buffer)) >= 0) {
- out.write(buffer, 0, bytesRead);
- }
- } finally {
- out.flush();
- try {
- out.getFD().sync();
- } catch (IOException e) {
- }
- out.close();
- in.close();
- }
- }
-
- private static File createTempFile() throws Exception {
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e) {
- System.setProperty("java.io.tmpdir", "/data/local/tmp");
- try {
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- } catch (IOException e2) {
- System.setProperty("java.io.tmpdir", "/sdcard");
- return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
- }
- }
- }
-}
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index 039503f7a4..faf94c4fcc 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -29,9 +29,7 @@ public class Main {
// String codePath = getDexBaseLocation();
String codePath = System.getenv("DEX_LOCATION") + "/595-profile-saving.jar";
VMRuntime.registerAppInfo(file.getPath(),
- System.getenv("DEX_LOCATION"),
- new String[] {codePath},
- /* foreignProfileDir */ null);
+ new String[] {codePath});
int methodIdx = $opt$noinline$testProfile();
ensureProfileProcessing();
@@ -85,15 +83,15 @@ public class Main {
try {
Class<? extends Object> c = Class.forName("dalvik.system.VMRuntime");
registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
- String.class, String.class, String[].class, String.class);
+ String.class, String[].class);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
- public static void registerAppInfo(String profile, String appDir,
- String[] codePaths, String foreignDir) throws Exception {
- registerAppInfoMethod.invoke(null, profile, appDir, codePaths, foreignDir);
+ public static void registerAppInfo(String profile, String[] codePaths)
+ throws Exception {
+ registerAppInfoMethod.invoke(null, profile, codePaths);
}
}
}
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 7509d9b4f3..2a7be2fbc1 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -213,6 +213,8 @@ public class Main {
/// CHECK-START: long Main.geoLongDivLastValue(long) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: Return [<<Long>>] loop:none
+ //
+ // Tests overflow in the divisor (while updating intermediate result).
static long geoLongDivLastValue(long x) {
for (int i = 0; i < 10; i++) {
x /= 1081788608;
@@ -220,6 +222,26 @@ public class Main {
return x;
}
+ /// CHECK-START: long Main.geoLongDivLastValue() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ //
+ /// CHECK-START: long Main.geoLongDivLastValue() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Long:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: Return [<<Long>>] loop:none
+ //
+ // Tests overflow in the divisor (while updating base).
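+  // With x == -1 the loop computes -1 / MAX_VALUE == 0 and then
+  // 0 / MAX_VALUE == 0, so the correct last value is 0 even though the
+  // closed-form divisor MAX_VALUE * MAX_VALUE overflows 64 bits.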
+ static long geoLongDivLastValue() {
+ long x = -1;
+ for (int i2 = 0; i2 < 2; i2++) {
+ x /= (Long.MAX_VALUE);
+ }
+ return x;
+ }
+
/// CHECK-START: long Main.geoLongMulLastValue(long) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
@@ -286,6 +308,8 @@ public class Main {
expectEquals(0L, geoLongDivLastValue(9223372036854775807L));
expectEquals(0L, geoLongDivLastValue(-9223372036854775808L));
+ expectEquals(0L, geoLongDivLastValue());
+
expectEquals( 0L, geoLongMulLastValue(0L));
expectEquals(-8070450532247928832L, geoLongMulLastValue(1L));
expectEquals( 2305843009213693952L, geoLongMulLastValue(2L));
diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt
new file mode 100644
index 0000000000..52e756c231
--- /dev/null
+++ b/test/639-checker-code-sinking/expected.txt
@@ -0,0 +1,3 @@
+0
+class java.lang.Object
+43
diff --git a/test/639-checker-code-sinking/info.txt b/test/639-checker-code-sinking/info.txt
new file mode 100644
index 0000000000..9722bdff2e
--- /dev/null
+++ b/test/639-checker-code-sinking/info.txt
@@ -0,0 +1 @@
+Checker tests for the code sinking optimization pass.
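
For context on the checks below: code sinking moves instructions whose results are used only on an uncommon path (typically a throwing one) into that path, so the common path does not pay for them. A hand-written illustration of the transformation, not compiler output:

    public class SinkingSketch {
        static boolean doThrow = false;

        static void before() {
            Object o = new Object();        // allocated even when we do not throw
            if (doThrow) {
                throw new Error(o.toString());
            }
        }

        static void after() {
            if (doThrow) {
                Object o = new Object();    // allocation sunk into the rare path
                throw new Error(o.toString());
            }
        }
    }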
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
new file mode 100644
index 0000000000..1da19b687c
--- /dev/null
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ testSimpleUse();
+ testTwoUses();
+ testFieldStores(doThrow);
+ testFieldStoreCycle();
+ testArrayStores();
+ testOnlyStoreUses();
+ testNoUse();
+ testPhiInput();
+ testVolatileStore();
+ doThrow = true;
+ try {
+ testInstanceSideEffects();
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+ try {
+ testStaticSideEffects();
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+
+ try {
+ testStoreStore(doThrow);
+ } catch (Error e) {
+ // expected
+ System.out.println(e.getMessage());
+ }
+ }
+
+ /// CHECK-START: void Main.testSimpleUse() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testSimpleUse() code_sinking (after)
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testSimpleUse() {
+ Object o = new Object();
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testTwoUses() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testTwoUses() code_sinking (after)
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testTwoUses() {
+ Object o = new Object();
+ if (doThrow) {
+ throw new Error(o.toString() + o.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testFieldStores(boolean doThrow) {
+ Main m = new Main();
+ m.intField = 42;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (before)
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: <<NewInstance2:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ // TODO(ngeoffray): Handle allocation/store cycles.
+ /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: <<NewInstance2:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+ public static void testFieldStoreCycle() {
+ Main m1 = new Main();
+ Main m2 = new Main();
+ m1.objectField = m2;
+ m2.objectField = m1;
+ if (doThrow) {
+ throw new Error(m1.toString() + m2.toString());
+ }
+ }
+
+ /// CHECK-START: void Main.testArrayStores() code_sinking (before)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testArrayStores() code_sinking (after)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK-NOT: NewArray
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testArrayStores() {
+ Object[] o = new Object[1];
+ o[0] = o;
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
+ // Make sure code sinking does not crash on dead allocations.
+ public static void testOnlyStoreUses() {
+ Main m = new Main();
+ Object[] o = new Object[1]; // dead allocation, should eventually be removed b/35634932.
+ o[0] = m;
+ o = null; // Avoid environment uses for the array allocation.
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ // Make sure code sinking does not crash on dead code.
+ public static void testNoUse() {
+ Main m = new Main();
+ boolean load = Main.doLoop; // dead code, not removed because of environment use.
+    // Ensure one environment use for the static field.
+ $opt$noinline$foo();
+ load = false;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ // Make sure we can move code only used by a phi.
+ /// CHECK-START: void Main.testPhiInput() code_sinking (before)
+ /// CHECK: <<Null:l\d+>> NullConstant
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Phi [<<Null>>,<<NewInstance>>]
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testPhiInput() code_sinking (after)
+ /// CHECK: <<Null:l\d+>> NullConstant
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: begin_block
+ /// CHECK: Phi [<<Null>>,<<NewInstance>>]
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testPhiInput() {
+ Object f = new Object();
+ if (doThrow) {
+ Object o = null;
+ int i = 2;
+ if (doLoop) {
+ o = f;
+ i = 42;
+ }
+ throw new Error(o.toString() + i);
+ }
+ }
+
+ static void $opt$noinline$foo() {}
+
+ // Check that we do not move volatile stores.
+ /// CHECK-START: void Main.testVolatileStore() code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testVolatileStore() code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+ public static void testVolatileStore() {
+ Main m = new Main();
+ m.volatileField = 42;
+ if (doThrow) {
+ throw new Error(m.toString());
+ }
+ }
+
+ public static void testInstanceSideEffects() {
+ int a = mainField.intField;
+ $noinline$changeIntField();
+ if (doThrow) {
+ throw new Error("" + a);
+ }
+ }
+
+ static void $noinline$changeIntField() {
+ mainField.intField = 42;
+ }
+
+ public static void testStaticSideEffects() {
+ Object o = obj;
+ $noinline$changeStaticObjectField();
+ if (doThrow) {
+ throw new Error(o.getClass().toString());
+ }
+ }
+
+ static void $noinline$changeStaticObjectField() {
+ obj = new Main();
+ }
+
+ // Test that we preserve the order of stores.
+ /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (before)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<Int43:i\d+>> IntConstant 43
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (after)
+ /// CHECK: <<Int42:i\d+>> IntConstant 42
+ /// CHECK: <<Int43:i\d+>> IntConstant 43
+ /// CHECK-NOT: NewInstance
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main
+ /// CHECK-NOT: begin_block
+ /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewInstance [<<Error>>]
+ /// CHECK: Throw
+ public static void testStoreStore(boolean doThrow) {
+ Main m = new Main();
+ m.intField = 42;
+ m.intField = 43;
+ if (doThrow) {
+ throw new Error(m.$opt$noinline$toString());
+ }
+ }
+
+ public String $opt$noinline$toString() {
+ return "" + intField;
+ }
+
+ volatile int volatileField;
+ int intField;
+ Object objectField;
+ static boolean doThrow;
+ static boolean doLoop;
+ static Main mainField = new Main();
+ static Object obj = new Object();
+}
diff --git a/test/640-checker-integer-valueof/expected.txt b/test/640-checker-integer-valueof/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/640-checker-integer-valueof/expected.txt
diff --git a/test/640-checker-integer-valueof/info.txt b/test/640-checker-integer-valueof/info.txt
new file mode 100644
index 0000000000..51021a4eda
--- /dev/null
+++ b/test/640-checker-integer-valueof/info.txt
@@ -0,0 +1 @@
+Test for Integer.valueOf.
diff --git a/test/640-checker-integer-valueof/src/Main.java b/test/640-checker-integer-valueof/src/Main.java
new file mode 100644
index 0000000000..0837fd18ee
--- /dev/null
+++ b/test/640-checker-integer-valueof/src/Main.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START: java.lang.Integer Main.foo(int) disassembly (after)
+ /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
+ /// CHECK: pAllocObjectInitialized
+ /// CHECK: Return [<<Integer>>]
+ public static Integer foo(int a) {
+ return Integer.valueOf(a);
+ }
+
+ /// CHECK-START: java.lang.Integer Main.foo2() disassembly (after)
+ /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
+ /// CHECK-NOT: pAllocObjectInitialized
+ /// CHECK: Return [<<Integer>>]
+ public static Integer foo2() {
+ return Integer.valueOf(-42);
+ }
+
+ /// CHECK-START: java.lang.Integer Main.foo3() disassembly (after)
+ /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
+ /// CHECK-NOT: pAllocObjectInitialized
+ /// CHECK: Return [<<Integer>>]
+ public static Integer foo3() {
+ return Integer.valueOf(42);
+ }
+
+ /// CHECK-START: java.lang.Integer Main.foo4() disassembly (after)
+ /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf
+ /// CHECK: pAllocObjectInitialized
+ /// CHECK: Return [<<Integer>>]
+ public static Integer foo4() {
+ return Integer.valueOf(55555);
+ }
+
+ public static void main(String[] args) {
+ assertEqual("42", foo(intField));
+ assertEqual(foo(intField), foo(intField2));
+ assertEqual("-42", foo2());
+ assertEqual("42", foo3());
+ assertEqual("55555", foo4());
+ assertEqual("55555", foo(intField3));
+ assertEqual("-129", foo(intFieldMinus129));
+ assertEqual("-128", foo(intFieldMinus128));
+ assertEqual(foo(intFieldMinus128), foo(intFieldMinus128));
+ assertEqual("-127", foo(intFieldMinus127));
+ assertEqual(foo(intFieldMinus127), foo(intFieldMinus127));
+ assertEqual("126", foo(intField126));
+ assertEqual(foo(intField126), foo(intField126));
+ assertEqual("127", foo(intField127));
+ assertEqual(foo(intField127), foo(intField127));
+ assertEqual("128", foo(intField128));
+ }
+
+ static void assertEqual(String a, Integer b) {
+ if (!a.equals(b.toString())) {
+ throw new Error("Expected " + a + ", got " + b);
+ }
+ }
+
+ static void assertEqual(Integer a, Integer b) {
+ if (a != b) {
+ throw new Error("Expected " + a + ", got " + b);
+ }
+ }
+
+ static int intField = 42;
+ static int intField2 = 42;
+ static int intField3 = 55555;
+
+ // Edge cases.
+ static int intFieldMinus129 = -129;
+ static int intFieldMinus128 = -128;
+ static int intFieldMinus127 = -127;
+ static int intField126 = 126;
+ static int intField127 = 127;
+ static int intField128 = 128;
+}
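
The reference-equality assertions above rely on the Integer cache that the IntegerValueOf intrinsic must preserve: Integer.valueOf returns a canonical object for values in [-128, 127] (hence no pAllocObjectInitialized for -42 and 42) and a fresh box outside that range (hence the allocation for 55555). A standalone check:

    public class ValueOfCacheDemo {
        public static void main(String[] args) {
            System.out.println(Integer.valueOf(127) == Integer.valueOf(127));   // true: cached
            System.out.println(Integer.valueOf(128) == Integer.valueOf(128));   // false: fresh boxes
            System.out.println(Integer.valueOf(-128) == Integer.valueOf(-128)); // true: cache lower bound
            System.out.println(Integer.valueOf(-129) == Integer.valueOf(-129)); // false: below the cache
        }
    }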
diff --git a/test/903-hello-tagging/expected.txt b/test/903-hello-tagging/expected.txt
index 872b79b518..acfdbd810b 100644
--- a/test/903-hello-tagging/expected.txt
+++ b/test/903-hello-tagging/expected.txt
@@ -8,3 +8,4 @@
[<null;1>, <null;1>, <null;2>, <null;2>, <null;3>, <null;3>, <null;4>, <null;4>, <null;5>, <null;5>, <null;6>, <null;6>, <null;7>, <null;7>, <null;8>, <null;8>, <null;9>, <null;9>]
18
[<1;0>, <2;0>, <3;0>, <4;0>, <5;0>, <6;0>, <7;0>, <8;0>, <9;0>, <11;0>, <12;0>, <13;0>, <14;0>, <15;0>, <16;0>, <17;0>, <18;0>, <19;0>]
+[100, 101, 102, 103, 104, 105, 106, 107, 108, 109]
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index 2f0365a921..48896b236a 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -22,6 +22,7 @@ public class Main {
public static void main(String[] args) {
doTest();
testGetTaggedObjects();
+ testTags();
}
public static void doTest() {
@@ -35,6 +36,12 @@ public class Main {
}
}
+ public static void testTags() {
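+    // Tags are per-jvmtiEnv: the native helper tags the same object in ten
+    // different environments (tags 100..109) and reads each tag back from
+    // its own environment.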
+ Object o = new Object();
+ long[] res = testTagsInDifferentEnvs(o, 100, 10);
+ System.out.println(Arrays.toString(res));
+ }
+
private static WeakReference<Object> test() {
Object o1 = new Object();
setTag(o1, 1);
@@ -166,4 +173,5 @@ public class Main {
private static native long getTag(Object o);
private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
boolean returnTags);
+ private static native long[] testTagsInDifferentEnvs(Object o, long baseTag, int n);
}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index f74c1fc2ea..6177263cd2 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -139,6 +139,62 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env
return resultArray;
}
+static jvmtiEnv* CreateJvmtiEnv(JNIEnv* env) {
+ JavaVM* jvm;
+ CHECK_EQ(0, env->GetJavaVM(&jvm));
+
+ jvmtiEnv* new_jvmti_env;
+ CHECK_EQ(0, jvm->GetEnv(reinterpret_cast<void**>(&new_jvmti_env), JVMTI_VERSION_1_0));
+
+ jvmtiCapabilities capa;
+ memset(&capa, 0, sizeof(jvmtiCapabilities));
+ capa.can_tag_objects = 1;
+ jvmtiError error = new_jvmti_env->AddCapabilities(&capa);
+ CHECK_EQ(JVMTI_ERROR_NONE, error);
+
+ return new_jvmti_env;
+}
+
+static void SetTag(jvmtiEnv* env, jobject obj, jlong tag) {
+ jvmtiError ret = env->SetTag(obj, tag);
+ CHECK_EQ(JVMTI_ERROR_NONE, ret);
+}
+
+static jlong GetTag(jvmtiEnv* env, jobject obj) {
+ jlong tag;
+ jvmtiError ret = env->GetTag(obj, &tag);
+ CHECK_EQ(JVMTI_ERROR_NONE, ret);
+ return tag;
+}
+
+extern "C" JNIEXPORT jlongArray JNICALL Java_Main_testTagsInDifferentEnvs(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jlong base_tag, jint count) {
+ std::unique_ptr<jvmtiEnv*[]> envs = std::unique_ptr<jvmtiEnv*[]>(new jvmtiEnv*[count]);
+ envs[0] = jvmti_env;
+ for (int32_t i = 1; i != count; ++i) {
+ envs[i] = CreateJvmtiEnv(env);
+ }
+
+ for (int32_t i = 0; i != count; ++i) {
+ SetTag(envs[i], obj, base_tag + i);
+ }
+ std::unique_ptr<jlong[]> vals = std::unique_ptr<jlong[]>(new jlong[count]);
+ for (int32_t i = 0; i != count; ++i) {
+ vals[i] = GetTag(envs[i], obj);
+ }
+
+ for (int32_t i = 1; i != count; ++i) {
+ CHECK_EQ(JVMTI_ERROR_NONE, envs[i]->DisposeEnvironment());
+ }
+
+ jlongArray res = env->NewLongArray(count);
+ if (res == nullptr) {
+ return nullptr;
+ }
+ env->SetLongArrayRegion(res, 0, count, vals.get());
+ return res;
+}
+
} // namespace Test903HelloTagging
} // namespace art
diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt
index c8228d677e..b6af8435de 100644
--- a/test/906-iterate-heap/expected.txt
+++ b/test/906-iterate-heap/expected.txt
@@ -1,6 +1,6 @@
-[{tag=1, class-tag=0, size=8, length=-1}, {tag=2, class-tag=100, size=8, length=-1}, {tag=3, class-tag=100, size=8, length=-1}, {tag=4, class-tag=0, size=32, length=5}, {tag=5, class-tag=0, size=40, length=-1}, {tag=100, class-tag=0, size=<class>, length=-1}]
-[{tag=11, class-tag=0, size=8, length=-1}, {tag=12, class-tag=110, size=8, length=-1}, {tag=13, class-tag=110, size=8, length=-1}, {tag=14, class-tag=0, size=32, length=5}, {tag=15, class-tag=0, size=40, length=-1}, {tag=110, class-tag=0, size=<class>, length=-1}]
-15@0 (40, 'Hello World')
+[{tag=1, class-tag=0, size=8, length=-1}, {tag=2, class-tag=100, size=8, length=-1}, {tag=3, class-tag=100, size=8, length=-1}, {tag=4, class-tag=0, size=32, length=5}, {tag=5, class-tag=0, size=32, length=-1}, {tag=100, class-tag=0, size=<class>, length=-1}]
+[{tag=11, class-tag=0, size=8, length=-1}, {tag=12, class-tag=110, size=8, length=-1}, {tag=13, class-tag=110, size=8, length=-1}, {tag=14, class-tag=0, size=32, length=5}, {tag=15, class-tag=0, size=32, length=-1}, {tag=110, class-tag=0, size=<class>, length=-1}]
+15@0 (32, 'Hello World')
16
1@0 (14, 2xZ '0001')
2
@@ -18,3 +18,27 @@
2
1@0 (32, 2xD '0000000000000000000000000000f03f')
2
+10000@0 (static, int, index=3) 0000000000000000
+10001
+10000@0 (static, int, index=11) 0000000000000000
+10001
+10000@0 (static, int, index=0) 0000000000000000
+10001
+10000@0 (static, int, index=1) 0000000000000000
+10001
+10000@0 (instance, int, index=2) 0000000000000000
+10001@0 (instance, byte, index=4) 0000000000000001
+10002@0 (instance, char, index=5) 0000000000000061
+10003@0 (instance, int, index=6) 0000000000000003
+10004@0 (instance, long, index=7) 0000000000000004
+10005@0 (instance, short, index=9) 0000000000000002
+10006
+10000@0 (instance, int, index=3) 0000000000000000
+10001@0 (instance, byte, index=5) 0000000000000001
+10002@0 (instance, char, index=6) 0000000000000061
+10003@0 (instance, int, index=7) 0000000000000003
+10004@0 (instance, long, index=8) 0000000000000004
+10005@0 (instance, short, index=10) 0000000000000002
+10006@0 (instance, double, index=12) 3ff3ae147ae147ae
+10007@0 (instance, float, index=13) 000000003f9d70a4
+10008
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 890220ee8d..13c3562b60 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -322,5 +322,92 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveArray(
return env->NewStringUTF(fac.data.c_str());
}
+static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) {
+ switch (type) {
+ case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
+ return "boolean";
+ case JVMTI_PRIMITIVE_TYPE_BYTE:
+ return "byte";
+ case JVMTI_PRIMITIVE_TYPE_CHAR:
+ return "char";
+ case JVMTI_PRIMITIVE_TYPE_SHORT:
+ return "short";
+ case JVMTI_PRIMITIVE_TYPE_INT:
+ return "int";
+ case JVMTI_PRIMITIVE_TYPE_FLOAT:
+ return "float";
+ case JVMTI_PRIMITIVE_TYPE_LONG:
+ return "long";
+ case JVMTI_PRIMITIVE_TYPE_DOUBLE:
+ return "double";
+ }
+ LOG(FATAL) << "Unknown type " << static_cast<size_t>(type);
+ UNREACHABLE();
+}
+
+extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveFields(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
+ struct FindFieldCallbacks {
+ explicit FindFieldCallbacks(jlong t) : tag_to_find(t) {}
+
+ static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
+ return 0;
+ }
+
+ static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind,
+ const jvmtiHeapReferenceInfo* info,
+ jlong class_tag,
+ jlong* tag_ptr,
+ jvalue value,
+ jvmtiPrimitiveType value_type,
+ void* user_data) {
+ FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data);
+ if (*tag_ptr >= p->tag_to_find) {
+ std::ostringstream oss;
+ oss << *tag_ptr
+ << '@'
+ << class_tag
+ << " ("
+ << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ")
+ << GetPrimitiveTypeName(value_type)
+ << ", index="
+ << info->field.index
+ << ") ";
+ // Be lazy: always print eight bytes.
+ static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size");
+ uint64_t val;
+ memcpy(&val, &value, sizeof(uint64_t)); // To avoid undefined behavior.
+ oss << android::base::StringPrintf("%016" PRIx64, val);
+
+ if (!p->data.empty()) {
+ p->data += "\n";
+ }
+ p->data += oss.str();
+ *tag_ptr = *tag_ptr + 1;
+ }
+ return 0;
+ }
+
+ std::string data;
+ const jlong tag_to_find;
+ };
+
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_iteration_callback = FindFieldCallbacks::HeapIterationCallback;
+ callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback;
+
+ FindFieldCallbacks ffc(tag);
+ jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &ffc);
+ if (JvmtiErrorToException(env, ret)) {
+ return nullptr;
+ }
+ return env->NewStringUTF(ffc.data.c_str());
+}
+
} // namespace Test906IterateHeap
} // namespace art
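Note that PrimitiveFieldValueCallback above bumps the matched object's tag on every report (*tag_ptr = *tag_ptr + 1), so one pass over an object with six reported primitive fields moves its tag from 10000 to 10006. A Java-side sketch of the effect the expected output checks, assuming the native helpers from this test:

    IntObject intObject = new IntObject();
    setTag(intObject, 10000);
    // Reports six fields, printing tags 10000..10005 and bumping once each.
    System.out.println(iterateThroughHeapPrimitiveFields(10000));
    System.out.println(getTag(intObject));  // prints 10006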
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
index d4998865b5..365ce0f214 100644
--- a/test/906-iterate-heap/src/Main.java
+++ b/test/906-iterate-heap/src/Main.java
@@ -119,6 +119,60 @@ public class Main {
setTag(dArray, 1);
System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray)));
System.out.println(getTag(dArray));
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private static void doTestPrimitiveFieldsClasses() {
+ setTag(IntObject.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(IntObject.class));
+ setTag(IntObject.class, 0);
+
+ setTag(FloatObject.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(FloatObject.class));
+ setTag(FloatObject.class, 0);
+
+ setTag(Inf1.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(Inf1.class));
+ setTag(Inf1.class, 0);
+
+ setTag(Inf2.class, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(Inf2.class));
+ setTag(Inf2.class, 0);
+ }
+
+ private static void doTestPrimitiveFieldsIntegral() {
+ IntObject intObject = new IntObject();
+ setTag(intObject, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(intObject));
+ }
+
+ private static void doTestPrimitiveFieldsFloat() {
+ FloatObject floatObject = new FloatObject();
+ setTag(floatObject, 10000);
+ System.out.println(iterateThroughHeapPrimitiveFields(10000));
+ System.out.println(getTag(floatObject));
}
static class A {
@@ -172,6 +226,31 @@ public class Main {
return ret;
}
+ private static interface Inf1 {
+ public final static int A = 1;
+ }
+
+ private static interface Inf2 extends Inf1 {
+ public final static int B = 1;
+ }
+
+ private static class IntObject implements Inf1 {
+ byte b = (byte)1;
+ char c = 'a';
+ short s = (short)2;
+ int i = 3;
+ long l = 4;
+ Object o = new Object();
+ static int sI = 5;
+ }
+
+ private static class FloatObject extends IntObject implements Inf2 {
+ float f = 1.23f;
+ double d = 1.23;
+ Object p = new Object();
+ static int sI = 6;
+ }
+
private static native void setTag(Object o, long tag);
private static native long getTag(Object o);
@@ -188,4 +267,5 @@ public class Main {
Class<?> klassFilter);
private static native String iterateThroughHeapString(long tag);
private static native String iterateThroughHeapPrimitiveArray(long tag);
+ private static native String iterateThroughHeapPrimitiveFields(long tag);
}
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 6432172e25..fc2761e800 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -8,34 +8,34 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
@@ -49,38 +49,39 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
1002@0 --(interface)--> 2001@0 [size=124, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
1@1000 --(class)--> 1000@0 [size=123, length=-1]
-1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
-1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
2001@0 --(interface)--> 2000@0 [size=124, length=-1]
2@1000 --(class)--> 1000@0 [size=123, length=-1]
3@1001 --(class)--> 1001@0 [size=123, length=-1]
-3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
-3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
4@1000 --(class)--> 1000@0 [size=123, length=-1]
5@1002 --(class)--> 1002@0 [size=123, length=-1]
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
-5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123, length=-1]
---
-[1@0 (40, 'HelloWorld')]
+[1@0 (32, 'HelloWorld'), 2@0 (16, '')]
2
+3
2@0 (15, 3xB '010203')
3@0 (16, 2xC '41005a00')
8@0 (32, 2xD '0000000000000000000000000000f03f')
@@ -90,3 +91,227 @@ root@root --(thread)--> 3000@0 [size=132, length=-1]
4@0 (18, 3xS '010002000300')
1@0 (14, 2xZ '0001')
23456789
+10000@0 (static, int, index=3) 0000000000000000
+10001
+10000@0 (static, int, index=11) 0000000000000000
+10001
+10000@0 (static, int, index=0) 0000000000000000
+10001
+10000@0 (static, int, index=1) 0000000000000000
+10001
+10000@0 (instance, int, index=2) 0000000000000000
+10001@0 (instance, byte, index=4) 0000000000000001
+10002@0 (instance, char, index=5) 0000000000000061
+10003@0 (instance, int, index=6) 0000000000000003
+10004@0 (instance, long, index=7) 0000000000000004
+10005@0 (instance, short, index=9) 0000000000000002
+10006
+10000@0 (instance, int, index=3) 0000000000000000
+10001@0 (instance, byte, index=5) 0000000000000001
+10002@0 (instance, char, index=6) 0000000000000061
+10003@0 (instance, int, index=7) 0000000000000003
+10004@0 (instance, long, index=8) 0000000000000004
+10005@0 (instance, short, index=10) 0000000000000002
+10006@0 (instance, double, index=12) 3ff3ae147ae147ae
+10007@0 (instance, float, index=13) 000000003f9d70a4
+10008
+--- klass ---
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+--- heap_filter ---
+---- tagged objects
+---
+---
+---
+---
+---- untagged objects
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+---- tagged classes
+root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=124, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+2001@0 --(interface)--> 2000@0 [size=124, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+---- untagged classes
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
+1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
+1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
+3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@5)--> 5@1002 [size=32, length=-1]
+5@1002 --(field@8)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@9)--> 1@1000 [size=16, length=-1]
+---
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 1de1a695c2..39fa000195 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <string.h>
+#include <iostream>
#include <vector>
#include "android-base/stringprintf.h"
@@ -29,6 +30,7 @@
#include "native_stack_dump.h"
#include "openjdkjvmti/jvmti.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -279,8 +281,14 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env
jlong size,
jint length,
const jvmtiHeapReferenceInfo* reference_info)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Elem(referrer, referree, size, length) {
memcpy(&info_, reference_info, sizeof(jvmtiHeapReferenceInfo));
+ // Debug stack trace for failure condition. Remove when done.
+ if (info_.stack_local.depth == 3 && info_.stack_local.slot == 13) {
+ DumpNativeStack(std::cerr, GetTid());
+ Thread::Current()->DumpJavaStack(std::cerr, false, false);
+ }
}
protected:
@@ -646,5 +654,95 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveArray(
return env->NewStringUTF(fac.data.c_str());
}
+static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) {
+ switch (type) {
+ case JVMTI_PRIMITIVE_TYPE_BOOLEAN:
+ return "boolean";
+ case JVMTI_PRIMITIVE_TYPE_BYTE:
+ return "byte";
+ case JVMTI_PRIMITIVE_TYPE_CHAR:
+ return "char";
+ case JVMTI_PRIMITIVE_TYPE_SHORT:
+ return "short";
+ case JVMTI_PRIMITIVE_TYPE_INT:
+ return "int";
+ case JVMTI_PRIMITIVE_TYPE_FLOAT:
+ return "float";
+ case JVMTI_PRIMITIVE_TYPE_LONG:
+ return "long";
+ case JVMTI_PRIMITIVE_TYPE_DOUBLE:
+ return "double";
+ }
+ LOG(FATAL) << "Unknown type " << static_cast<size_t>(type);
+ UNREACHABLE();
+}
+
+extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveFields(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) {
+ struct FindFieldCallbacks {
+ static jint JNICALL FollowReferencesCallback(
+ jvmtiHeapReferenceKind reference_kind ATTRIBUTE_UNUSED,
+ const jvmtiHeapReferenceInfo* reference_info ATTRIBUTE_UNUSED,
+ jlong class_tag ATTRIBUTE_UNUSED,
+ jlong referrer_class_tag ATTRIBUTE_UNUSED,
+ jlong size ATTRIBUTE_UNUSED,
+ jlong* tag_ptr ATTRIBUTE_UNUSED,
+ jlong* referrer_tag_ptr ATTRIBUTE_UNUSED,
+ jint length ATTRIBUTE_UNUSED,
+ void* user_data ATTRIBUTE_UNUSED) {
+ return JVMTI_VISIT_OBJECTS; // Continue visiting.
+ }
+
+ static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind,
+ const jvmtiHeapReferenceInfo* info,
+ jlong class_tag,
+ jlong* tag_ptr,
+ jvalue value,
+ jvmtiPrimitiveType value_type,
+ void* user_data) {
+ FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data);
+ if (*tag_ptr != 0) {
+ std::ostringstream oss;
+ oss << *tag_ptr
+ << '@'
+ << class_tag
+ << " ("
+ << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ")
+ << GetPrimitiveTypeName(value_type)
+ << ", index="
+ << info->field.index
+ << ") ";
+ // Be lazy: always print eight bytes.
+ static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size");
+ uint64_t val;
+ memcpy(&val, &value, sizeof(uint64_t)); // To avoid undefined behavior.
+ oss << android::base::StringPrintf("%016" PRIx64, val);
+
+ if (!p->data.empty()) {
+ p->data += "\n";
+ }
+ p->data += oss.str();
+ // Update the tag to test whether that works.
+ *tag_ptr = *tag_ptr + 1;
+ }
+ return 0;
+ }
+
+ std::string data;
+ };
+
+ jvmtiHeapCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+ callbacks.heap_reference_callback = FindFieldCallbacks::FollowReferencesCallback;
+ callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback;
+
+ FindFieldCallbacks ffc;
+ jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &ffc);
+ if (JvmtiErrorToException(env, ret)) {
+ return nullptr;
+ }
+ return env->NewStringUTF(ffc.data.c_str());
+}
+
} // namespace Test913Heaps
} // namespace art
diff --git a/test/913-heaps/run b/test/913-heaps/run
index c6e62ae6cd..dd35526d25 100755
--- a/test/913-heaps/run
+++ b/test/913-heaps/run
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-./default-run "$@" --jvmti
+./default-run "$@" --jvmti -Xcompiler-option -g
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
index 2767d89b98..66f68834a1 100644
--- a/test/913-heaps/src/Main.java
+++ b/test/913-heaps/src/Main.java
@@ -25,8 +25,34 @@ public class Main {
doTest();
new TestConfig().doFollowReferencesTest();
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
doStringTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
doPrimitiveArrayTest();
+ doPrimitiveFieldTest();
+
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ // Test klass filter.
+ System.out.println("--- klass ---");
+ new TestConfig(A.class, 0).doFollowReferencesTest();
+
+ // Test heap filter.
+ System.out.println("--- heap_filter ---");
+ System.out.println("---- tagged objects");
+ new TestConfig(null, 0x4).doFollowReferencesTest();
+ System.out.println("---- untagged objects");
+ new TestConfig(null, 0x8).doFollowReferencesTest();
+ System.out.println("---- tagged classes");
+ new TestConfig(null, 0x10).doFollowReferencesTest();
+ System.out.println("---- untagged classes");
+ new TestConfig(null, 0x20).doFollowReferencesTest();
}
public static void doTest() throws Exception {
@@ -38,14 +64,18 @@ public class Main {
}
public static void doStringTest() throws Exception {
- final String str = "HelloWorld";
+ final String str = new String("HelloWorld");
+ final String str2 = new String("");
Object o = new Object() {
String s = str;
+ String s2 = str2;
};
setTag(str, 1);
+ setTag(str2, 2);
System.out.println(Arrays.toString(followReferencesString(o)));
System.out.println(getTag(str));
+ System.out.println(getTag(str2));
}
public static void doPrimitiveArrayTest() throws Exception {
@@ -95,6 +125,62 @@ public class Main {
System.out.println(getTag(dArray));
}
+ public static void doPrimitiveFieldTest() throws Exception {
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsClasses();
+
+ doTestPrimitiveFieldsIntegral();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+
+ doTestPrimitiveFieldsFloat();
+
+ // Force GCs to clean up dirt.
+ Runtime.getRuntime().gc();
+ Runtime.getRuntime().gc();
+ }
+
+ private static void doTestPrimitiveFieldsClasses() {
+ setTag(IntObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(IntObject.class));
+ System.out.println(getTag(IntObject.class));
+ setTag(IntObject.class, 0);
+
+ setTag(FloatObject.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(FloatObject.class));
+ System.out.println(getTag(FloatObject.class));
+ setTag(FloatObject.class, 0);
+
+ setTag(Inf1.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf1.class));
+ System.out.println(getTag(Inf1.class));
+ setTag(Inf1.class, 0);
+
+ setTag(Inf2.class, 10000);
+ System.out.println(followReferencesPrimitiveFields(Inf2.class));
+ System.out.println(getTag(Inf2.class));
+ setTag(Inf2.class, 0);
+ }
+
+ private static void doTestPrimitiveFieldsIntegral() {
+ IntObject intObject = new IntObject();
+ setTag(intObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(intObject));
+ System.out.println(getTag(intObject));
+ }
+
+ private static void doTestPrimitiveFieldsFloat() {
+ FloatObject floatObject = new FloatObject();
+ setTag(floatObject, 10000);
+ System.out.println(followReferencesPrimitiveFields(floatObject));
+ System.out.println(getTag(floatObject));
+ }
+
private static void run() {
clearStats();
forceGarbageCollection();
@@ -286,7 +372,35 @@ public class Main {
}
}
+ private static interface Inf1 {
+ public final static int A = 1;
+ }
+
+ private static interface Inf2 extends Inf1 {
+ public final static int B = 1;
+ }
+
+ private static class IntObject implements Inf1 {
+ byte b = (byte)1;
+ char c = 'a';
+ short s = (short)2;
+ int i = 3;
+ long l = 4;
+ Object o = new Object();
+ static int sI = 5;
+ }
+
+ private static class FloatObject extends IntObject implements Inf2 {
+ float f = 1.23f;
+ double d = 1.23;
+ Object p = new Object();
+ static int sI = 6;
+ }
+
public static class Verifier {
+ // Should roots with vreg=-1 be printed?
+ public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
+
public static class Node {
public String referrer;
@@ -371,6 +485,9 @@ public class Main {
continue;
}
lastRoot = l;
+ if (!PRINT_ROOTS_WITH_UNKNOWN_VREG && l.indexOf("vreg=-1") > 0) {
+ continue;
+ }
System.out.println(l);
}
}
@@ -473,4 +590,5 @@ public class Main {
Object initialObject, int stopAfter, int followSet, Object jniRef);
public static native String[] followReferencesString(Object initialObject);
public static native String followReferencesPrimitiveArray(Object initialObject);
+ public static native String followReferencesPrimitiveFields(Object initialObject);
}
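The raw heap_filter values passed to TestConfig above are the standard JVMTI heap filter bits; each set bit excludes the matching objects from the callbacks, which is why the "---- tagged objects" section of expected.txt is nearly empty. Spelled out with their spec names (constants not defined in this test):

    static final int JVMTI_HEAP_FILTER_TAGGED         = 0x4;   // skip tagged objects
    static final int JVMTI_HEAP_FILTER_UNTAGGED       = 0x8;   // skip untagged objects
    static final int JVMTI_HEAP_FILTER_CLASS_TAGGED   = 0x10;  // skip objects of tagged classes
    static final int JVMTI_HEAP_FILTER_CLASS_UNTAGGED = 0x20;  // skip objects of untagged classes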
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index a5dc10d59c..fdbfbe2191 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -50,3 +50,6 @@ hello there - MissingField
hello there again - FieldChange
Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
hello there again - FieldChange
+hello - Unmodifiable
+Transformation error : java.lang.Exception(Failed to redefine class <[LTransform;> due to JVMTI_ERROR_UNMODIFIABLE_CLASS)
+hello - Unmodifiable
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 5bbe2b5479..6779ed862a 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -32,6 +32,7 @@ public class Main {
NewField.doTest(new Transform());
MissingField.doTest(new Transform4("there"));
FieldChange.doTest(new Transform4("there again"));
+ Unmodifiable.doTest(new Transform[] { new Transform(), });
}
// Transforms the class. This throws an exception if something goes wrong.
diff --git a/test/921-hello-failure/src/Unmodifiable.java b/test/921-hello-failure/src/Unmodifiable.java
new file mode 100644
index 0000000000..ad05f51f9f
--- /dev/null
+++ b/test/921-hello-failure/src/Unmodifiable.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class Unmodifiable {
+ // The following is a base64 encoding of a valid class file.
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" +
+ "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUABgAAAAAA" +
+ "AgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAIAAAALAAwAAQAJAAAA" +
+ "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAQAAQANAAAAAgAO");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCrV81cy4Q+YKMMMqc0bZEO5Y1X5u7irPeQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" +
+ "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" +
+ "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" +
+ "EmVtaXR0ZXI6IGphY2stNC4yNAAFc2F5SGkAAgAHDgAEAQAHDgAAAAEBAICABIgCAQCgAgwAAAAA" +
+ "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" +
+ "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" +
+ "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA=");
+
+ public static void doTest(Transform[] ts) {
+ ts[0].sayHi("Unmodifiable");
+ try {
+ Main.doCommonClassRedefinition(Transform[].class, CLASS_BYTES, DEX_BYTES);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ ts[0].sayHi("Unmodifiable");
+ }
+}
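The redefinition target here is deliberately the array class, which JVMTI treats as unmodifiable; the <[LTransform;> in the expected error is its JNI-style name. A minimal sketch of that relationship:

    // Sketch: the array class, not Transform itself, is what doTest() redefines.
    Class<?> k = Transform[].class;
    System.out.println(k.isArray());   // true
    System.out.println(k.getName());   // "[LTransform;"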
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 95967b527c..bb0d51ec51 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -222,6 +222,7 @@ define name-to-var
$(shell echo $(1) | tr '[:lower:]' '[:upper:]' | tr '-' '_')
endef # name-to-var
+# Disable 115-native-bridge, it fails when run through make. b/35984597.
# Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
# Disable 080-oom-fragmentation due to flakes. b/33795328
# Disable 497-inlining-and-class-loader and 542-unresolved-access-check until
@@ -229,6 +230,7 @@ endef # name-to-var
# register a dex file that's already registered with a different loader.
# b/34193123
ART_TEST_RUN_TEST_SKIP += \
+ 115-native-bridge \
153-reference-stress \
080-oom-fragmentation \
497-inlining-and-class-loader \
@@ -240,10 +242,8 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
-# Disable 577-profile-foreign-dex (b/27454772).
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
- 577-profile-foreign-dex \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -368,6 +368,7 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# Tests that are broken with GC stress.
# * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
# hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 152-dead-large-object requires a heap larger than what gcstress uses.
# * 908-gc-start-finish expects GCs only to be run at clear points. The reduced heap size makes
# this non-deterministic. Same for 913.
# * 961-default-iface-resolution-gen and 964-default-iface-init-gen are very long tests that often
@@ -375,6 +376,7 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
137-cfi \
+ 152-dead-large-object \
154-gc-loop \
908-gc-start-finish \
913-heaps \
@@ -525,12 +527,14 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
+# 154-gc-loop requires more deterministic GC behavior than the JIT provides.
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
# Test 629 requires compilation.
# 912: b/34655682
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
+ 154-gc-loop \
629-vdex-speed \
904-object-allocation \
906-iterate-heap \
@@ -810,6 +814,12 @@ endif
TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti
TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmtid
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
+
# All tests require the host executables. The tests also depend on the core images, but on
# specific version depending on the compiler.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
diff --git a/test/ProfileTestMultiDex/Main.java b/test/ProfileTestMultiDex/Main.java
index 41532ea8f7..a8ced544c9 100644
--- a/test/ProfileTestMultiDex/Main.java
+++ b/test/ProfileTestMultiDex/Main.java
@@ -25,3 +25,45 @@ class Main {
return "C";
}
}
+
+class TestInline {
+ public int inlineMonomorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlinePolymorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlineMegamorphic(Super s) {
+ return s.getValue();
+ }
+
+ public int inlineMissingTypes(Super s) {
+ return s.getValue();
+ }
+
+ public int noInlineCache(Super s) {
+ return s.getValue();
+ }
+}
+
+abstract class Super {
+ abstract int getValue();
+}
+
+class SubA extends Super {
+ int getValue() { return 42; }
+}
+
+class SubB extends Super {
+ int getValue() { return 38; }
+}
+
+class SubD extends Super {
+ int getValue() { return 20; }
+}
+
+class SubE extends Super {
+ int getValue() { return 16; }
+}
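TestInline's methods are identical on purpose: each is a separate call site whose profile inline cache should record a different receiver-type mix (one type = monomorphic, a few = polymorphic, many = megamorphic). A hypothetical driver, not part of this change, would exercise them along these lines:

    TestInline t = new TestInline();
    t.inlineMonomorphic(new SubA());            // one receiver type seen
    t.inlinePolymorphic(new SubA());            // a small set of receiver types
    t.inlinePolymorphic(new SubB());
    for (Super s : new Super[] { new SubA(), new SubB(), new SubC(),
                                 new SubD(), new SubE() }) {
      t.inlineMegamorphic(s);                   // many receiver types
    }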
diff --git a/test/ProfileTestMultiDex/Second.java b/test/ProfileTestMultiDex/Second.java
index 4ac5abc300..4b3c7a479b 100644
--- a/test/ProfileTestMultiDex/Second.java
+++ b/test/ProfileTestMultiDex/Second.java
@@ -25,3 +25,8 @@ class Second {
return "Z";
}
}
+
+class SubC extends Super {
+ int getValue() { return 24; }
+}
+
diff --git a/test/ProfileTestMultiDex/main.jpp b/test/ProfileTestMultiDex/main.jpp
index f2e3b4e14c..5e55e96874 100644
--- a/test/ProfileTestMultiDex/main.jpp
+++ b/test/ProfileTestMultiDex/main.jpp
@@ -1,3 +1,21 @@
-main:
+Main:
@@com.android.jack.annotations.ForceInMainDex
- class Second
+ class Main
+TestInline:
+ @@com.android.jack.annotations.ForceInMainDex
+ class TestInline
+Super:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Super
+SubA:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubA
+SubB:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubB
+SubD:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubD
+SubE:
+ @@com.android.jack.annotations.ForceInMainDex
+ class SubE
diff --git a/test/ProfileTestMultiDex/main.list b/test/ProfileTestMultiDex/main.list
index 44ba78ead5..ec131f0f71 100644
--- a/test/ProfileTestMultiDex/main.list
+++ b/test/ProfileTestMultiDex/main.list
@@ -1 +1,7 @@
Main.class
+TestInline.class
+Super.class
+SubA.class
+SubB.class
+SubD.class
+SubE.class
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index a841f9e6a2..c7a57cefb6 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -180,6 +180,9 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
}
jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ // Update the code cache to make sure the JIT code does not get deleted.
+ // Note: this will apply to all JIT compilations.
+ code_cache->SetGarbageCollectCode(false);
while (true) {
const void* pc = method->GetEntryPointFromQuickCompiledCode();
if (code_cache->ContainsPc(pc)) {
diff --git a/test/etc/default-build b/test/etc/default-build
index 431896631d..d74b24d985 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -97,7 +97,7 @@ JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
while true; do
if [ "x$1" = "x--dx-option" ]; then
shift
- on="$1"
+ option="$1"
DX_FLAGS="${DX_FLAGS} $option"
shift
elif [ "x$1" = "x--jvm" ]; then
@@ -209,9 +209,9 @@ if [ ${HAS_SRC_DEX2OAT_UNRESOLVED} = "true" ]; then
${JACK} --import classes.jill.jar --output-dex .
else
if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 ${DX_FLAGS} classes-ex
zip ${TEST_NAME}-ex.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 ${DX_FLAGS} classes
fi
fi
else
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index f3d4332009..fb005984d0 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -371,6 +371,20 @@ fi
if [ "$HAVE_IMAGE" = "n" ]; then
+ if [ "${HOST}" = "y" ]; then
+ framework="${ANDROID_HOST_OUT}/framework"
+ bpath_suffix="-hostdex"
+ else
+ framework="${ANDROID_ROOT}/framework"
+ bpath_suffix="-testdex"
+ fi
+ bpath="${framework}/core-libart${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
+ # Pass down the bootclasspath
+ FLAGS="${FLAGS} -Xbootclasspath:${bpath}"
# Add 5 minutes to give some time to generate the boot image.
TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300))
DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art"
@@ -442,8 +456,9 @@ else
FLAGS="$FLAGS -Xnorelocate"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate"
if [ "$HOST" = "y" ]; then
- # Increase ulimit to 64MB in case we are running hprof test.
- ulimit -S 64000 || exit 1
+ # Increase ulimit to 128MB in case we are running hprof test,
+ # or string append test with art-debug-gc.
+ ulimit -S 128000 || exit 1
fi
fi
@@ -490,7 +505,8 @@ fi
DEX_LOCATION_STRIPPED="${DEX_LOCATION#/}"
VDEX_NAME="${DEX_LOCATION_STRIPPED//\//@}@$TEST_NAME.jar@classes.vdex"
if [ ${#VDEX_NAME} -gt $max_filename_size ]; then
- echo "Dex location path too long."
+ echo "Dex location path too long:"
+ echo "$VDEX_NAME is ${#VDEX_NAME} characters long, and the limit is $max_filename_size."
exit 1
fi
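The two expansions above first strip the leading '/' from DEX_LOCATION and then replace every remaining '/' with '@' before appending "@$TEST_NAME.jar@classes.vdex". The same transformation, sketched in Java with hypothetical values for illustration:

    String dexLocation = "/data/local/tmp/913-heaps";   // hypothetical
    String testName = "913-heaps";                      // hypothetical
    String vdexName = dexLocation.substring(1).replace('/', '@')
        + "@" + testName + ".jar@classes.vdex";
    // -> "data@local@tmp@913-heaps@913-heaps.jar@classes.vdex"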
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 784f49c4b9..f2f90353ac 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -26,11 +26,6 @@
"bug": "http://b/28988206"
},
{
- "test": "577-profile-foreign-dex",
- "description": "Disable 577-profile-foreign-dex",
- "bug": "http://b/27454772"
- },
- {
"tests": ["002-sleep",
"053-wait-some",
"055-enum-performance",
@@ -106,6 +101,11 @@
"slowness of gcstress makes this bad."]
},
{
+ "test": "152-dead-large-object",
+ "variant": "gcstress",
+ "description": ["152-dead-large-object requires a heap larger than what gcstress uses."]
+ },
+ {
"tests": ["908-gc-start-finish",
"913-heaps"],
"variant": "gcstress",
@@ -124,11 +124,15 @@
"lot."]
},
{
- "tests": ["964-default-iface-init-gen",
- "154-gc-loop"],
+ "test": "964-default-iface-init-gen",
"variant": "gcstress"
},
{
+ "tests": "154-gc-loop",
+ "variant": "gcstress | jit",
+ "description": ["154-gc-loop depends GC not happening too often"]
+ },
+ {
"test": "115-native-bridge",
"variant": "target",
"description": ["115-native-bridge setup is complicated. Need to",
diff --git a/test/run-test b/test/run-test
index e808deef52..1ac285769d 100755
--- a/test/run-test
+++ b/test/run-test
@@ -80,7 +80,7 @@ fi
# ANDROID_HOST_OUT is not set in a build environment.
if [ -z "$ANDROID_HOST_OUT" ]; then
- export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out/}host/linux-x86
+ export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
fi
# If JACK_CLASSPATH is not set, assume it only contains core-libart.
@@ -247,6 +247,11 @@ while true; do
option="$1"
run_args="${run_args} -Xcompiler-option $option"
shift
+ elif [ "x$1" = "x--build-option" ]; then
+ shift
+ option="$1"
+ build_args="${build_args} $option"
+ shift
elif [ "x$1" = "x--runtime-option" ]; then
shift
option="$1"
@@ -525,22 +530,6 @@ if [ "$have_image" = "no" ]; then
err_echo "--no-image is only supported on the art runtime"
exit 1
fi
- if [ "$target_mode" = "no" ]; then
- framework="${ANDROID_HOST_OUT}/framework"
- bpath_suffix="-hostdex"
- else
- framework="${android_root}/framework"
- bpath_suffix=""
- fi
- # TODO If the target was compiled WITH_DEXPREOPT=true then these tests will
- # fail since these jar files will be stripped.
- bpath="${framework}/core-libart${bpath_suffix}.jar"
- bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
- bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
- bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
- bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
- # Pass down the bootclasspath
- run_args="${run_args} --runtime-option -Xbootclasspath:${bpath}"
run_args="${run_args} --no-image"
fi
@@ -611,6 +600,7 @@ if [ "$usage" = "yes" ]; then
echo " Runtime Options:"
echo " -O Run non-debug rather than debug build (off by default)."
echo " -Xcompiler-option Pass an option to the compiler."
+ echo " --build-option Pass an option to the build script."
echo " --runtime-option Pass an option to the runtime."
echo " --debug Wait for a debugger to attach."
echo " --debuggable Whether to compile Java code for a debugger."
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 4336d7772f..ed4b4a9f3e 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -178,6 +178,8 @@ ART_TEST_WITH_STRACE = getEnvBoolean('ART_TEST_DEBUG_GC', False)
EXTRA_DISABLED_TESTS = set(env.get("ART_TEST_RUN_TEST_SKIP", "").split())
+ART_TEST_RUN_TEST_BUILD = getEnvBoolean('ART_TEST_RUN_TEST_BUILD', False)
+
TARGET_2ND_ARCH = get_build_var('TARGET_2ND_ARCH')
TARGET_ARCH = get_build_var('TARGET_ARCH')
if TARGET_2ND_ARCH:
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 8c0b9283b6..9c8d3b870c 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -527,10 +527,10 @@ def print_test_info(test_name, result, failed_test_info=""):
test_name = ('%s...%s') % (
test_name[:(allowed_test_length - 3)/2],
test_name[-(allowed_test_length - 3)/2:])
- info += ('%s %s %s') % (
- progress_info,
- test_name,
- result_text)
+ info += ('%s %s %s') % (
+ progress_info,
+ test_name,
+ result_text)
print_text(info)
except Exception, e:
print_text(('%s\n%s\n') % (test_name, str(e)))
@@ -704,7 +704,6 @@ def parse_test_name(test_name):
return {match.group(12)}
raise ValueError(test_name + " is not a valid test")
-
def parse_option():
global verbose
global dry_run
@@ -726,7 +725,15 @@ def parse_option():
parser.add_argument('--dry-run', action='store_true', dest='dry_run')
parser.add_argument("--skip", action="append", dest="skips", default=[],
help="Skip the given test in all circumstances.")
- parser.add_argument('-b', '--build-dependencies', action='store_true', dest='build')
+ parser.add_argument('--no-build-dependencies',
+ action='store_false', dest='build',
+ help="Don't build dependencies under any circumstances. This is the " +
+ "behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.")
+ parser.add_argument('-b', '--build-dependencies',
+ action='store_true', dest='build',
+ help="Build dependencies under all circumstances. By default we will " +
+ "not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.")
+ parser.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
parser.add_argument('--gdb', action='store_true', dest='gdb')
parser.add_argument('--gdb-arg', dest='gdb_arg')
@@ -804,8 +811,7 @@ def parse_option():
if options.dry_run:
dry_run = True
verbose = True
- if options.build:
- build = True
+ build = options.build
if options.gdb:
n_thread = 1
gdb = True
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 65296406a1..e0aae46447 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -216,5 +216,55 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+ description: "Linker issues with libjavacoretests",
+ result: EXEC_FAILED,
+ bug: 35417197,
+ modes: [device],
+ names: [
+ "dalvik.system.JniTest#testGetSuperclass",
+ "dalvik.system.JniTest#testPassingBooleans",
+ "dalvik.system.JniTest#testPassingBytes",
+ "dalvik.system.JniTest#testPassingChars",
+ "dalvik.system.JniTest#testPassingClass",
+ "dalvik.system.JniTest#testPassingDoubles",
+ "dalvik.system.JniTest#testPassingFloats",
+ "dalvik.system.JniTest#testPassingInts",
+ "dalvik.system.JniTest#testPassingLongs",
+ "dalvik.system.JniTest#testPassingObjectReferences",
+ "dalvik.system.JniTest#testPassingShorts",
+ "dalvik.system.JniTest#testPassingThis",
+ "libcore.java.lang.OldSystemTest#test_load",
+ "libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
+ "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
+ "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
+ "libcore.java.lang.ThreadTest#testGetStackTrace",
+ "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
+ "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
+ "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
+ "libcore.java.lang.ThreadTest#testNativeThreadNames",
+ "libcore.java.lang.ThreadTest#testParkUntilWithUnderflowValue",
+ "libcore.java.lang.ThreadTest#testThreadDoubleStart",
+ "libcore.java.lang.ThreadTest#testThreadInterrupted",
+ "libcore.java.lang.ThreadTest#testThreadRestart",
+ "libcore.java.lang.ThreadTest#testThreadSleep",
+ "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
+ "libcore.java.lang.ThreadTest#testThreadWakeup",
+ "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
+ "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
+ "libcore.java.util.TimeZoneTest#testDisplayNamesWithScript",
+ "libcore.java.util.zip.ZipEntryTest#testCommentAndExtraInSameOrder",
+ "libcore.java.util.zip.ZipEntryTest#testMaxLengthExtra",
+ "libcore.util.NativeAllocationRegistryTest#testBadSize",
+ "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
+ "libcore.util.NativeAllocationRegistryTest#testNullArguments",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_y",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_yy"
+ ]
}
]
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 1e9c763534..7eaaaf9cbd 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -17,9 +17,33 @@
green='\033[0;32m'
nc='\033[0m'
+# Set up as root, as the next buildbot step (device cleanup) requires it.
+# This is also required to set the date, if needed.
+adb root
+adb wait-for-device
+
+echo -e "${green}Date on host${nc}"
+date
+
echo -e "${green}Date on device${nc}"
adb shell date
+host_seconds_since_epoch=$(date -u +%s)
+device_seconds_since_epoch=$(adb shell date -u +%s)
+
+abs_time_difference_in_seconds=$(expr $host_seconds_since_epoch - $device_seconds_since_epoch)
+if [ $abs_time_difference_in_seconds -lt 0 ]; then
+ abs_time_difference_in_seconds=$(expr 0 - $abs_time_difference_in_seconds)
+fi
+
+seconds_per_hour=3600
+
+# Update date on device if the difference with host is more than one hour.
+if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
+ echo -e "${green}Update date on device${nc}"
+ adb shell date -u @$host_seconds_since_epoch
+fi
+
echo -e "${green}Turn off selinux${nc}"
adb shell setenforce 0
adb shell getenforce